obj-m := $(O_TARGET)
--- linux-2.4.20-hp4_pnnl1/fs/ext3/super.c~exports_hp Tue Apr 1 20:36:07 2003
+++ linux-2.4.20-hp4_pnnl1-braam/fs/ext3/super.c Tue Apr 1 20:36:07 2003
-@@ -1769,7 +1769,7 @@ static void __exit exit_ext3_fs(void)
+@@ -1769,7 +1769,7 @@
unregister_filesystem(&ext3_fs_type);
}
MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
--- linux-2.4.20-hp4_pnnl1/include/linux/fs.h~exports_hp Tue Apr 1 20:36:07 2003
+++ linux-2.4.20-hp4_pnnl1-braam/include/linux/fs.h Tue Apr 1 20:36:52 2003
-@@ -1020,6 +1020,7 @@ extern int unregister_filesystem(struct
+@@ -1020,6 +1020,7 @@
extern struct vfsmount *kern_mount(struct file_system_type *);
extern int may_umount(struct vfsmount *);
extern long do_mount(char *, char *, char *, unsigned long, void *);
--- linux-2.4.20-hp4_pnnl1/kernel/ksyms.c~exports_hp Tue Apr 1 20:36:07 2003
+++ linux-2.4.20-hp4_pnnl1-braam/kernel/ksyms.c Tue Apr 1 20:36:07 2003
-@@ -308,6 +308,10 @@ EXPORT_SYMBOL(dcache_dir_fsync);
+@@ -308,6 +308,11 @@
EXPORT_SYMBOL(dcache_readdir);
EXPORT_SYMBOL(dcache_dir_ops);
+/* lustre */
+EXPORT_SYMBOL(pagecache_lock_cacheline);
++EXPORT_SYMBOL(panic_notifier_list);
+EXPORT_SYMBOL(do_kern_mount);
+
/* for stackable file systems (lofs, wrapfs, cryptfs, etc.) */
--- /dev/null
+ fs/ext3/Makefile | 2
+ fs/ext3/dir.c | 299 +++++++++
+ fs/ext3/file.c | 3
+ fs/ext3/hash.c | 215 ++++++
+ fs/ext3/namei.c | 1387 ++++++++++++++++++++++++++++++++++++++++-----
+ fs/ext3/super.c | 7
+ include/linux/ext3_fs.h | 85 ++
+ include/linux/ext3_fs_sb.h | 2
+ include/linux/ext3_jbd.h | 2
+ include/linux/rbtree.h | 2
+ lib/rbtree.c | 42 +
+ 11 files changed, 1886 insertions(+), 160 deletions(-)
+
+--- linux-2.4.20/fs/ext3/Makefile~ext-2.4-patch-1 Sat Apr 5 03:56:31 2003
++++ linux-2.4.20-braam/fs/ext3/Makefile Sat Apr 5 03:57:05 2003
+@@ -12,7 +12,7 @@ O_TARGET := ext3.o
+ export-objs := super.o inode.o
+
+ obj-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+- ioctl.o namei.o super.o symlink.o
++ ioctl.o namei.o super.o symlink.o hash.o
+ obj-m := $(O_TARGET)
+
+ include $(TOPDIR)/Rules.make
+--- linux-2.4.20/fs/ext3/dir.c~ext-2.4-patch-1 Sat Apr 5 03:56:31 2003
++++ linux-2.4.20-braam/fs/ext3/dir.c Sat Apr 5 03:56:31 2003
+@@ -21,12 +21,16 @@
+ #include <linux/fs.h>
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
++#include <linux/slab.h>
++#include <linux/rbtree.h>
+
+ static unsigned char ext3_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+ };
+
+ static int ext3_readdir(struct file *, void *, filldir_t);
++static int ext3_dx_readdir(struct file * filp,
++ void * dirent, filldir_t filldir);
+
+ struct file_operations ext3_dir_operations = {
+ read: generic_read_dir,
+@@ -35,6 +39,17 @@ struct file_operations ext3_dir_operatio
+ fsync: ext3_sync_file, /* BKL held */
+ };
+
++
++static unsigned char get_dtype(struct super_block *sb, int filetype)
++{
++ if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE) ||
++ (filetype >= EXT3_FT_MAX))
++ return DT_UNKNOWN;
++
++ return (ext3_filetype_table[filetype]);
++}
++
++
+ int ext3_check_dir_entry (const char * function, struct inode * dir,
+ struct ext3_dir_entry_2 * de,
+ struct buffer_head * bh,
+@@ -79,6 +94,16 @@ static int ext3_readdir(struct file * fi
+
+ sb = inode->i_sb;
+
++ if (is_dx(inode)) {
++ err = ext3_dx_readdir(filp, dirent, filldir);
++ if (err != ERR_BAD_DX_DIR)
++ return err;
++ /*
++ * We don't set the inode dirty flag since it's not
++ * critical that it get flushed back to the disk.
++ */
++ EXT3_I(filp->f_dentry->d_inode)->i_flags &= ~EXT3_INDEX_FL;
++ }
+ stored = 0;
+ bh = NULL;
+ offset = filp->f_pos & (sb->s_blocksize - 1);
+@@ -162,18 +187,12 @@ revalidate:
+ * during the copy operation.
+ */
+ unsigned long version = filp->f_version;
+- unsigned char d_type = DT_UNKNOWN;
+
+- if (EXT3_HAS_INCOMPAT_FEATURE(sb,
+- EXT3_FEATURE_INCOMPAT_FILETYPE)
+- && de->file_type < EXT3_FT_MAX)
+- d_type =
+- ext3_filetype_table[de->file_type];
+ error = filldir(dirent, de->name,
+ de->name_len,
+ filp->f_pos,
+ le32_to_cpu(de->inode),
+- d_type);
++ get_dtype(sb, de->file_type));
+ if (error)
+ break;
+ if (version != filp->f_version)
+@@ -188,3 +207,269 @@ revalidate:
+ UPDATE_ATIME(inode);
+ return 0;
+ }
++
++#ifdef CONFIG_EXT3_INDEX
++/*
++ * These functions convert from the major/minor hash to an f_pos
++ * value.
++ *
++ * Currently we only use the major hash number. This is unfortunate, but
++ * on 32-bit machines, the same VFS interface is used for lseek and
++ * llseek, so if we use the 64 bit offset, then the 32-bit versions of
++ * lseek/telldir/seekdir will blow out spectacularly, and from within
++ * the ext2 low-level routine, we don't know if we're being called by
++ * a 64-bit version of the system call or the 32-bit version of the
++ * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
++ * cookie. Sigh.
++ */
++#define hash2pos(major, minor) (major >> 1)
++#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
++#define pos2min_hash(pos) (0)
++
++/*
++ * This structure holds the nodes of the red-black tree used to store
++ * the directory entries in hash order.
++ */
++struct fname {
++ __u32 hash;
++ __u32 minor_hash;
++ rb_node_t rb_hash;
++ struct fname *next;
++ __u32 inode;
++ __u8 name_len;
++ __u8 file_type;
++ char name[0];
++};
++
++/*
++ * This function implements a non-recursive way of freeing all of the
++ * nodes in the red-black tree.
++ */
++static void free_rb_tree_fname(rb_root_t *root)
++{
++ rb_node_t *n = root->rb_node;
++ rb_node_t *parent;
++ struct fname *fname;
++
++ while (n) {
++ /* Do the node's children first */
++ if ((n)->rb_left) {
++ n = n->rb_left;
++ continue;
++ }
++ if (n->rb_right) {
++ n = n->rb_right;
++ continue;
++ }
++ /*
++ * The node has no children; free it, and then zero
++ * out parent's link to it. Finally go to the
++ * beginning of the loop and try to free the parent
++ * node.
++ */
++ parent = n->rb_parent;
++ fname = rb_entry(n, struct fname, rb_hash);
++ kfree(fname);
++ if (!parent)
++ root->rb_node = 0;
++ else if (parent->rb_left == n)
++ parent->rb_left = 0;
++ else if (parent->rb_right == n)
++ parent->rb_right = 0;
++ n = parent;
++ }
++ root->rb_node = 0;
++}
++
++
++struct dir_private_info *create_dir_info(loff_t pos)
++{
++ struct dir_private_info *p;
++
++ p = kmalloc(sizeof(struct dir_private_info), GFP_KERNEL);
++ if (!p)
++ return NULL;
++ p->root.rb_node = 0;
++ p->curr_node = 0;
++ p->extra_fname = 0;
++ p->last_pos = 0;
++ p->curr_hash = pos2maj_hash(pos);
++ p->curr_minor_hash = pos2min_hash(pos);
++ p->next_hash = 0;
++ return p;
++}
++
++void ext3_htree_free_dir_info(struct dir_private_info *p)
++{
++ free_rb_tree_fname(&p->root);
++ kfree(p);
++}
++
++/*
++ * Given a directory entry, enter it into the fname rb tree.
++ */
++void ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
++ __u32 minor_hash,
++ struct ext3_dir_entry_2 *dirent)
++{
++ rb_node_t **p, *parent = NULL;
++ struct fname * fname, *new_fn;
++ struct dir_private_info *info;
++ int len;
++
++ info = (struct dir_private_info *) dir_file->private_data;
++ p = &info->root.rb_node;
++
++ /* Create and allocate the fname structure */
++ len = sizeof(struct fname) + dirent->name_len + 1;
++ new_fn = kmalloc(len, GFP_KERNEL);
++ memset(new_fn, 0, len);
++ new_fn->hash = hash;
++ new_fn->minor_hash = minor_hash;
++ new_fn->inode = le32_to_cpu(dirent->inode);
++ new_fn->name_len = dirent->name_len;
++ new_fn->file_type = dirent->file_type;
++ memcpy(new_fn->name, dirent->name, dirent->name_len);
++ new_fn->name[dirent->name_len] = 0;
++
++ while (*p) {
++ parent = *p;
++ fname = rb_entry(parent, struct fname, rb_hash);
++
++ /*
++ * If the hash and minor hash match up, then we put
++ * them on a linked list. This rarely happens...
++ */
++ if ((new_fn->hash == fname->hash) &&
++ (new_fn->minor_hash == fname->minor_hash)) {
++ new_fn->next = fname->next;
++ fname->next = new_fn;
++ return;
++ }
++
++ if (new_fn->hash < fname->hash)
++ p = &(*p)->rb_left;
++ else if (new_fn->hash > fname->hash)
++ p = &(*p)->rb_right;
++ else if (new_fn->minor_hash < fname->minor_hash)
++ p = &(*p)->rb_left;
++ else /* if (new_fn->minor_hash > fname->minor_hash) */
++ p = &(*p)->rb_right;
++ }
++
++ rb_link_node(&new_fn->rb_hash, parent, p);
++ rb_insert_color(&new_fn->rb_hash, &info->root);
++}
++
++
++
++/*
++ * This is a helper function for ext3_dx_readdir. It calls filldir
++ * for all entries on the fname linked list. (Normally there is only
++ * one entry on the linked list, unless there are 62 bit hash collisions.)
++ */
++static int call_filldir(struct file * filp, void * dirent,
++ filldir_t filldir, struct fname *fname)
++{
++ struct dir_private_info *info = filp->private_data;
++ loff_t curr_pos;
++ struct inode *inode = filp->f_dentry->d_inode;
++ struct super_block * sb;
++ int error;
++
++ sb = inode->i_sb;
++
++ if (!fname) {
++ printk("call_filldir: called with null fname?!?\n");
++ return 0;
++ }
++ curr_pos = hash2pos(fname->hash, fname->minor_hash);
++ while (fname) {
++ error = filldir(dirent, fname->name,
++ fname->name_len, curr_pos,
++ fname->inode,
++ get_dtype(sb, fname->file_type));
++ if (error) {
++ filp->f_pos = curr_pos;
++ info->extra_fname = fname->next;
++ return error;
++ }
++ fname = fname->next;
++ }
++ return 0;
++}
++
++static int ext3_dx_readdir(struct file * filp,
++ void * dirent, filldir_t filldir)
++{
++ struct dir_private_info *info = filp->private_data;
++ struct inode *inode = filp->f_dentry->d_inode;
++ struct fname *fname;
++ int ret;
++
++ if (!info) {
++ info = create_dir_info(filp->f_pos);
++ if (!info)
++ return -ENOMEM;
++ filp->private_data = info;
++ }
++
++ /* Some one has messed with f_pos; reset the world */
++ if (info->last_pos != filp->f_pos) {
++ free_rb_tree_fname(&info->root);
++ info->curr_node = 0;
++ info->extra_fname = 0;
++ info->curr_hash = pos2maj_hash(filp->f_pos);
++ info->curr_minor_hash = pos2min_hash(filp->f_pos);
++ }
++
++ /*
++ * If there are any leftover names on the hash collision
++ * chain, return them first.
++ */
++ if (info->extra_fname &&
++ call_filldir(filp, dirent, filldir, info->extra_fname))
++ goto finished;
++
++ if (!info->curr_node)
++ info->curr_node = rb_get_first(&info->root);
++
++ while (1) {
++ /*
++ * Fill the rbtree if we have no more entries,
++ * or the inode has changed since we last read in the
++ * cached entries.
++ */
++ if ((!info->curr_node) ||
++ (filp->f_version != inode->i_version)) {
++ info->curr_node = 0;
++ free_rb_tree_fname(&info->root);
++ filp->f_version = inode->i_version;
++ ret = ext3_htree_fill_tree(filp, info->curr_hash,
++ info->curr_minor_hash,
++ &info->next_hash);
++ if (ret < 0)
++ return ret;
++ if (ret == 0)
++ break;
++ info->curr_node = rb_get_first(&info->root);
++ }
++
++ fname = rb_entry(info->curr_node, struct fname, rb_hash);
++ info->curr_hash = fname->hash;
++ info->curr_minor_hash = fname->minor_hash;
++ if (call_filldir(filp, dirent, filldir, fname))
++ break;
++
++ info->curr_node = rb_get_next(info->curr_node);
++ if (!info->curr_node) {
++ info->curr_hash = info->next_hash;
++ info->curr_minor_hash = 0;
++ }
++ }
++finished:
++ info->last_pos = filp->f_pos;
++ UPDATE_ATIME(inode);
++ return 0;
++}
++#endif
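
For reference, the hash-to-f_pos mapping used by ext3_dx_readdir() above keeps
only the major hash and drops its low bit. The following is a minimal
user-space sketch, not part of the patch, that mirrors the hash2pos() and
pos2maj_hash() macros added to fs/ext3/dir.c; main() and the sample value are
illustrative only.

#include <stdio.h>

/* Mirrors the macros added above: only the major hash survives the
 * trip through f_pos, and its low bit is discarded. */
static unsigned long hash2pos(unsigned int major, unsigned int minor)
{
        (void)minor;            /* the minor hash is not encoded */
        return major >> 1;
}

static unsigned int pos2maj_hash(unsigned long pos)
{
        return (pos << 1) & 0xffffffff;
}

int main(void)
{
        unsigned int major = 0xdeadbeee;        /* low bit already clear */
        unsigned long pos = hash2pos(major, 0x1234);

        /* The minor hash (and the low bit of the major hash) is lost. */
        printf("major %#x -> pos %#lx -> major %#x\n",
               major, pos, pos2maj_hash(pos));
        return 0;
}
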
+--- linux-2.4.20/fs/ext3/file.c~ext-2.4-patch-1 Sat Apr 5 03:56:31 2003
++++ linux-2.4.20-braam/fs/ext3/file.c Sat Apr 5 03:56:31 2003
+@@ -35,6 +35,9 @@ static int ext3_release_file (struct ino
+ {
+ if (filp->f_mode & FMODE_WRITE)
+ ext3_discard_prealloc (inode);
++ if (is_dx(inode) && filp->private_data)
++ ext3_htree_free_dir_info(filp->private_data);
++
+ return 0;
+ }
+
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.20-braam/fs/ext3/hash.c Sat Apr 5 03:56:31 2003
+@@ -0,0 +1,215 @@
++/*
++ * linux/fs/ext3/hash.c
++ *
++ * Copyright (C) 2002 by Theodore Ts'o
++ *
++ * This file is released under the GPL v2.
++ *
++ * This file may be redistributed under the terms of the GNU Public
++ * License.
++ */
++
++#include <linux/fs.h>
++#include <linux/jbd.h>
++#include <linux/sched.h>
++#include <linux/ext3_fs.h>
++
++#define DELTA 0x9E3779B9
++
++static void TEA_transform(__u32 buf[4], __u32 const in[])
++{
++ __u32 sum = 0;
++ __u32 b0 = buf[0], b1 = buf[1];
++ __u32 a = in[0], b = in[1], c = in[2], d = in[3];
++ int n = 16;
++
++ do {
++ sum += DELTA;
++ b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
++ b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
++ } while(--n);
++
++ buf[0] += b0;
++ buf[1] += b1;
++}
++
++/* F, G and H are basic MD4 functions: selection, majority, parity */
++#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
++#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
++#define H(x, y, z) ((x) ^ (y) ^ (z))
++
++/*
++ * The generic round function. The application is so specific that
++ * we don't bother protecting all the arguments with parens, as is generally
++ * good macro practice, in favor of extra legibility.
++ * Rotation is separate from addition to prevent recomputation
++ */
++#define ROUND(f, a, b, c, d, x, s) \
++ (a += f(b, c, d) + x, a = (a << s) | (a >> (32-s)))
++#define K1 0
++#define K2 013240474631UL
++#define K3 015666365641UL
++
++/*
++ * Basic cut-down MD4 transform. Returns only 32 bits of result.
++ */
++static void halfMD4Transform (__u32 buf[4], __u32 const in[])
++{
++ __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
++
++ /* Round 1 */
++ ROUND(F, a, b, c, d, in[0] + K1, 3);
++ ROUND(F, d, a, b, c, in[1] + K1, 7);
++ ROUND(F, c, d, a, b, in[2] + K1, 11);
++ ROUND(F, b, c, d, a, in[3] + K1, 19);
++ ROUND(F, a, b, c, d, in[4] + K1, 3);
++ ROUND(F, d, a, b, c, in[5] + K1, 7);
++ ROUND(F, c, d, a, b, in[6] + K1, 11);
++ ROUND(F, b, c, d, a, in[7] + K1, 19);
++
++ /* Round 2 */
++ ROUND(G, a, b, c, d, in[1] + K2, 3);
++ ROUND(G, d, a, b, c, in[3] + K2, 5);
++ ROUND(G, c, d, a, b, in[5] + K2, 9);
++ ROUND(G, b, c, d, a, in[7] + K2, 13);
++ ROUND(G, a, b, c, d, in[0] + K2, 3);
++ ROUND(G, d, a, b, c, in[2] + K2, 5);
++ ROUND(G, c, d, a, b, in[4] + K2, 9);
++ ROUND(G, b, c, d, a, in[6] + K2, 13);
++
++ /* Round 3 */
++ ROUND(H, a, b, c, d, in[3] + K3, 3);
++ ROUND(H, d, a, b, c, in[7] + K3, 9);
++ ROUND(H, c, d, a, b, in[2] + K3, 11);
++ ROUND(H, b, c, d, a, in[6] + K3, 15);
++ ROUND(H, a, b, c, d, in[1] + K3, 3);
++ ROUND(H, d, a, b, c, in[5] + K3, 9);
++ ROUND(H, c, d, a, b, in[0] + K3, 11);
++ ROUND(H, b, c, d, a, in[4] + K3, 15);
++
++ buf[0] += a;
++ buf[1] += b;
++ buf[2] += c;
++ buf[3] += d;
++}
++
++#undef ROUND
++#undef F
++#undef G
++#undef H
++#undef K1
++#undef K2
++#undef K3
++
++/* The old legacy hash */
++static __u32 dx_hack_hash (const char *name, int len)
++{
++ __u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
++ while (len--) {
++ __u32 hash = hash1 + (hash0 ^ (*name++ * 7152373));
++
++ if (hash & 0x80000000) hash -= 0x7fffffff;
++ hash1 = hash0;
++ hash0 = hash;
++ }
++ return (hash0 << 1);
++}
++
++static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
++{
++ __u32 pad, val;
++ int i;
++
++ pad = (__u32)len | ((__u32)len << 8);
++ pad |= pad << 16;
++
++ val = pad;
++ if (len > num*4)
++ len = num * 4;
++ for (i=0; i < len; i++) {
++ if ((i % 4) == 0)
++ val = pad;
++ val = msg[i] + (val << 8);
++ if ((i % 4) == 3) {
++ *buf++ = val;
++ val = pad;
++ num--;
++ }
++ }
++ if (--num >= 0)
++ *buf++ = val;
++ while (--num >= 0)
++ *buf++ = pad;
++}
++
++/*
++ * Returns the hash of a filename. If len is 0 and name is NULL, then
++ * this function can be used to test whether or not a hash version is
++ * supported.
++ *
++ * The seed is a 4 longword (32 bits) "secret" which can be used to
++ * uniquify a hash. If the seed is all zeros, then some default seed
++ * may be used.
++ *
++ * A particular hash version specifies whether or not the seed is
++ * represented, and whether or not the returned hash is 32 bits or 64
++ * bits. 32 bit hashes will return 0 for the minor hash.
++ */
++int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
++{
++ __u32 hash;
++ __u32 minor_hash = 0;
++ const char *p;
++ int i;
++ __u32 in[8], buf[4];
++
++ /* Initialize the default seed for the hash checksum functions */
++ buf[0] = 0x67452301;
++ buf[1] = 0xefcdab89;
++ buf[2] = 0x98badcfe;
++ buf[3] = 0x10325476;
++
++ /* Check to see if the seed is all zero's */
++ if (hinfo->seed) {
++ for (i=0; i < 4; i++) {
++ if (hinfo->seed[i])
++ break;
++ }
++ if (i < 4)
++ memcpy(buf, hinfo->seed, sizeof(buf));
++ }
++
++ switch (hinfo->hash_version) {
++ case DX_HASH_LEGACY:
++ hash = dx_hack_hash(name, len);
++ break;
++ case DX_HASH_HALF_MD4:
++ p = name;
++ while (len > 0) {
++ str2hashbuf(p, len, in, 8);
++ halfMD4Transform(buf, in);
++ len -= 32;
++ p += 32;
++ }
++ minor_hash = buf[2];
++ hash = buf[1];
++ break;
++ case DX_HASH_TEA:
++ p = name;
++ while (len > 0) {
++ str2hashbuf(p, len, in, 4);
++ TEA_transform(buf, in);
++ len -= 16;
++ p += 16;
++ }
++ hash = buf[0];
++ minor_hash = buf[1];
++ break;
++ default:
++ hinfo->hash = 0;
++ return -1;
++ }
++ hinfo->hash = hash & ~1;
++ hinfo->minor_hash = minor_hash;
++ return 0;
++}
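
As a rough illustration of how ext3fs_dirhash() behaves for DX_HASH_LEGACY,
the sketch below lifts dx_hack_hash() from the new fs/ext3/hash.c into a
standalone user-space program and applies the same final low-bit masking; the
test name in main() is purely illustrative and not part of the patch.

#include <stdio.h>
#include <string.h>

typedef unsigned int u32;

/* Copy of the legacy hash from fs/ext3/hash.c above. */
static u32 dx_hack_hash(const char *name, int len)
{
        u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;

        while (len--) {
                u32 hash = hash1 + (hash0 ^ (*name++ * 7152373));

                if (hash & 0x80000000)
                        hash -= 0x7fffffff;
                hash1 = hash0;
                hash0 = hash;
        }
        return hash0 << 1;
}

int main(void)
{
        const char *name = "lost+found";
        /* ext3fs_dirhash() clears the low bit of the major hash;
         * DX_HASH_LEGACY always reports a zero minor hash. */
        u32 hash = dx_hack_hash(name, strlen(name)) & ~1u;

        printf("%s -> major %#x, minor 0\n", name, hash);
        return 0;
}
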
+--- linux-2.4.20/fs/ext3/namei.c~ext-2.4-patch-1 Sat Apr 5 03:56:31 2003
++++ linux-2.4.20-braam/fs/ext3/namei.c Sat Apr 5 03:56:31 2003
+@@ -16,6 +16,12 @@
+ * David S. Miller (davem@caip.rutgers.edu), 1995
+ * Directory entry file type support and forward compatibility hooks
+ * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
++ * Hash Tree Directory indexing (c)
++ * Daniel Phillips, 2001
++ * Hash Tree Directory indexing porting
++ * Christopher Li, 2002
++ * Hash Tree Directory indexing cleanup
++ * Theodore Ts'o, 2002
+ */
+
+ #include <linux/fs.h>
+@@ -38,6 +44,630 @@
+ #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+ #define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
+
++static struct buffer_head *ext3_append(handle_t *handle,
++ struct inode *inode,
++ u32 *block, int *err)
++{
++ struct buffer_head *bh;
++
++ *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
++
++ if ((bh = ext3_bread(handle, inode, *block, 1, err))) {
++ inode->i_size += inode->i_sb->s_blocksize;
++ EXT3_I(inode)->i_disksize = inode->i_size;
++ ext3_journal_get_write_access(handle,bh);
++ }
++ return bh;
++}
++
++#ifndef assert
++#define assert(test) J_ASSERT(test)
++#endif
++
++#ifndef swap
++#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
++#endif
++
++typedef struct { u32 v; } le_u32;
++typedef struct { u16 v; } le_u16;
++
++#ifdef DX_DEBUG
++#define dxtrace(command) command
++#else
++#define dxtrace(command)
++#endif
++
++struct fake_dirent
++{
++ /*le*/u32 inode;
++ /*le*/u16 rec_len;
++ u8 name_len;
++ u8 file_type;
++};
++
++struct dx_countlimit
++{
++ le_u16 limit;
++ le_u16 count;
++};
++
++struct dx_entry
++{
++ le_u32 hash;
++ le_u32 block;
++};
++
++/*
++ * dx_root_info is laid out so that if it should somehow get overlaid by a
++ * dirent the two low bits of the hash version will be zero. Therefore, the
++ * hash version mod 4 should never be 0. Sincerely, the paranoia department.
++ */
++
++struct dx_root
++{
++ struct fake_dirent dot;
++ char dot_name[4];
++ struct fake_dirent dotdot;
++ char dotdot_name[4];
++ struct dx_root_info
++ {
++ le_u32 reserved_zero;
++ u8 hash_version;
++ u8 info_length; /* 8 */
++ u8 indirect_levels;
++ u8 unused_flags;
++ }
++ info;
++ struct dx_entry entries[0];
++};
++
++struct dx_node
++{
++ struct fake_dirent fake;
++ struct dx_entry entries[0];
++};
++
++
++struct dx_frame
++{
++ struct buffer_head *bh;
++ struct dx_entry *entries;
++ struct dx_entry *at;
++};
++
++struct dx_map_entry
++{
++ u32 hash;
++ u32 offs;
++};
++
++#ifdef CONFIG_EXT3_INDEX
++static inline unsigned dx_get_block (struct dx_entry *entry);
++static void dx_set_block (struct dx_entry *entry, unsigned value);
++static inline unsigned dx_get_hash (struct dx_entry *entry);
++static void dx_set_hash (struct dx_entry *entry, unsigned value);
++static unsigned dx_get_count (struct dx_entry *entries);
++static unsigned dx_get_limit (struct dx_entry *entries);
++static void dx_set_count (struct dx_entry *entries, unsigned value);
++static void dx_set_limit (struct dx_entry *entries, unsigned value);
++static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
++static unsigned dx_node_limit (struct inode *dir);
++static struct dx_frame *dx_probe(struct dentry *dentry,
++ struct inode *dir,
++ struct dx_hash_info *hinfo,
++ struct dx_frame *frame,
++ int *err);
++static void dx_release (struct dx_frame *frames);
++static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
++ struct dx_hash_info *hinfo, struct dx_map_entry map[]);
++static void dx_sort_map(struct dx_map_entry *map, unsigned count);
++static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to,
++ struct dx_map_entry *offsets, int count);
++static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size);
++static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
++static int ext3_htree_next_block(struct inode *dir, __u32 hash,
++ struct dx_frame *frame,
++ struct dx_frame *frames, int *err,
++ __u32 *start_hash);
++static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
++ struct ext3_dir_entry_2 **res_dir, int *err);
++static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
++ struct inode *inode);
++
++/*
++ * Future: use high four bits of block for coalesce-on-delete flags
++ * Mask them off for now.
++ */
++
++static inline unsigned dx_get_block (struct dx_entry *entry)
++{
++ return le32_to_cpu(entry->block.v) & 0x00ffffff;
++}
++
++static inline void dx_set_block (struct dx_entry *entry, unsigned value)
++{
++ entry->block.v = cpu_to_le32(value);
++}
++
++static inline unsigned dx_get_hash (struct dx_entry *entry)
++{
++ return le32_to_cpu(entry->hash.v);
++}
++
++static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
++{
++ entry->hash.v = cpu_to_le32(value);
++}
++
++static inline unsigned dx_get_count (struct dx_entry *entries)
++{
++ return le16_to_cpu(((struct dx_countlimit *) entries)->count.v);
++}
++
++static inline unsigned dx_get_limit (struct dx_entry *entries)
++{
++ return le16_to_cpu(((struct dx_countlimit *) entries)->limit.v);
++}
++
++static inline void dx_set_count (struct dx_entry *entries, unsigned value)
++{
++ ((struct dx_countlimit *) entries)->count.v = cpu_to_le16(value);
++}
++
++static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
++{
++ ((struct dx_countlimit *) entries)->limit.v = cpu_to_le16(value);
++}
++
++static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
++{
++ unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
++ EXT3_DIR_REC_LEN(2) - infosize;
++ return 0? 20: entry_space / sizeof(struct dx_entry);
++}
++
++static inline unsigned dx_node_limit (struct inode *dir)
++{
++ unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
++ return 0? 22: entry_space / sizeof(struct dx_entry);
++}
++
++/*
++ * Debug
++ */
++#ifdef DX_DEBUG
++struct stats
++{
++ unsigned names;
++ unsigned space;
++ unsigned bcount;
++};
++
++static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_entry_2 *de,
++ int size, int show_names)
++{
++ unsigned names = 0, space = 0;
++ char *base = (char *) de;
++ struct dx_hash_info h = *hinfo;
++
++ printk("names: ");
++ while ((char *) de < base + size)
++ {
++ if (de->inode)
++ {
++ if (show_names)
++ {
++ int len = de->name_len;
++ char *name = de->name;
++ while (len--) printk("%c", *name++);
++ ext3fs_dirhash(de->name, de->name_len, &h);
++ printk(":%x.%u ", h.hash,
++ ((char *) de - base));
++ }
++ space += EXT3_DIR_REC_LEN(de->name_len);
++ names++;
++ }
++ de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
++ }
++ printk("(%i)\n", names);
++ return (struct stats) { names, space, 1 };
++}
++
++struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
++ struct dx_entry *entries, int levels)
++{
++ unsigned blocksize = dir->i_sb->s_blocksize;
++ unsigned count = dx_get_count (entries), names = 0, space = 0, i;
++ unsigned bcount = 0;
++ struct buffer_head *bh;
++ int err;
++ printk("%i indexed blocks...\n", count);
++ for (i = 0; i < count; i++, entries++)
++ {
++ u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
++ u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
++ struct stats stats;
++ printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
++ if (!(bh = ext3_bread (NULL,dir, block, 0,&err))) continue;
++ stats = levels?
++ dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
++ dx_show_leaf(hinfo, (struct ext3_dir_entry_2 *) bh->b_data, blocksize, 0);
++ names += stats.names;
++ space += stats.space;
++ bcount += stats.bcount;
++ brelse (bh);
++ }
++ if (bcount)
++ printk("%snames %u, fullness %u (%u%%)\n", levels?"":" ",
++ names, space/bcount,(space/bcount)*100/blocksize);
++ return (struct stats) { names, space, bcount};
++}
++#endif /* DX_DEBUG */
++
++/*
++ * Probe for a directory leaf block to search.
++ *
++ * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
++ * error in the directory index, and the caller should fall back to
++ * searching the directory normally. The callers of dx_probe **MUST**
++ * check for this error code, and make sure it never gets reflected
++ * back to userspace.
++ */
++static struct dx_frame *
++dx_probe(struct dentry *dentry, struct inode *dir,
++ struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
++{
++ unsigned count, indirect;
++ struct dx_entry *at, *entries, *p, *q, *m;
++ struct dx_root *root;
++ struct buffer_head *bh;
++ struct dx_frame *frame = frame_in;
++ u32 hash;
++
++ frame->bh = NULL;
++ if (dentry)
++ dir = dentry->d_parent->d_inode;
++ if (!(bh = ext3_bread (NULL,dir, 0, 0, err)))
++ goto fail;
++ root = (struct dx_root *) bh->b_data;
++ if (root->info.hash_version != DX_HASH_TEA &&
++ root->info.hash_version != DX_HASH_HALF_MD4 &&
++ root->info.hash_version != DX_HASH_LEGACY) {
++ ext3_warning(dir->i_sb, __FUNCTION__,
++ "Unrecognised inode hash code %d",
++ root->info.hash_version);
++ brelse(bh);
++ *err = ERR_BAD_DX_DIR;
++ goto fail;
++ }
++ hinfo->hash_version = root->info.hash_version;
++ hinfo->seed = dir->i_sb->u.ext3_sb.s_hash_seed;
++ if (dentry)
++ ext3fs_dirhash(dentry->d_name.name, dentry->d_name.len, hinfo);
++ hash = hinfo->hash;
++
++ if (root->info.unused_flags & 1) {
++ ext3_warning(dir->i_sb, __FUNCTION__,
++ "Unimplemented inode hash flags: %#06x",
++ root->info.unused_flags);
++ brelse(bh);
++ *err = ERR_BAD_DX_DIR;
++ goto fail;
++ }
++
++ if ((indirect = root->info.indirect_levels) > 1) {
++ ext3_warning(dir->i_sb, __FUNCTION__,
++ "Unimplemented inode hash depth: %#06x",
++ root->info.indirect_levels);
++ brelse(bh);
++ *err = ERR_BAD_DX_DIR;
++ goto fail;
++ }
++
++ entries = (struct dx_entry *) (((char *)&root->info) +
++ root->info.info_length);
++ assert(dx_get_limit(entries) == dx_root_limit(dir,
++ root->info.info_length));
++ dxtrace (printk("Look up %x", hash));
++ while (1)
++ {
++ count = dx_get_count(entries);
++ assert (count && count <= dx_get_limit(entries));
++ p = entries + 1;
++ q = entries + count - 1;
++ while (p <= q)
++ {
++ m = p + (q - p)/2;
++ dxtrace(printk("."));
++ if (dx_get_hash(m) > hash)
++ q = m - 1;
++ else
++ p = m + 1;
++ }
++
++ if (0) // linear search cross check
++ {
++ unsigned n = count - 1;
++ at = entries;
++ while (n--)
++ {
++ dxtrace(printk(","));
++ if (dx_get_hash(++at) > hash)
++ {
++ at--;
++ break;
++ }
++ }
++ assert (at == p - 1);
++ }
++
++ at = p - 1;
++ dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
++ frame->bh = bh;
++ frame->entries = entries;
++ frame->at = at;
++ if (!indirect--) return frame;
++ if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err)))
++ goto fail2;
++ at = entries = ((struct dx_node *) bh->b_data)->entries;
++ assert (dx_get_limit(entries) == dx_node_limit (dir));
++ frame++;
++ }
++fail2:
++ while (frame >= frame_in) {
++ brelse(frame->bh);
++ frame--;
++ }
++fail:
++ return NULL;
++}
++
++static void dx_release (struct dx_frame *frames)
++{
++ if (frames[0].bh == NULL)
++ return;
++
++ if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
++ brelse(frames[1].bh);
++ brelse(frames[0].bh);
++}
++
++/*
++ * This function increments the frame pointer to search the next leaf
++ * block, and reads in the necessary intervening nodes if the search
++ * should be necessary. Whether or not the search is necessary is
++ * controlled by the hash parameter. If the hash value is even, then
++ * the search is only continued if the next block starts with that
++ * hash value. This is used if we are searching for a specific file.
++ *
++ * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
++ *
++ * This function returns 1 if the caller should continue to search,
++ * or 0 if it should not. If there is an error reading one of the
++ * index blocks, it will return -1.
++ *
++ * If start_hash is non-null, it will be filled in with the starting
++ * hash of the next page.
++ */
++static int ext3_htree_next_block(struct inode *dir, __u32 hash,
++ struct dx_frame *frame,
++ struct dx_frame *frames, int *err,
++ __u32 *start_hash)
++{
++ struct dx_frame *p;
++ struct buffer_head *bh;
++ int num_frames = 0;
++ __u32 bhash;
++
++ *err = ENOENT;
++ p = frame;
++ /*
++ * Find the next leaf page by incrementing the frame pointer.
++ * If we run out of entries in the interior node, loop around and
++ * increment pointer in the parent node. When we break out of
++ * this loop, num_frames indicates the number of interior
++ * nodes need to be read.
++ */
++ while (1) {
++ if (++(p->at) < p->entries + dx_get_count(p->entries))
++ break;
++ if (p == frames)
++ return 0;
++ num_frames++;
++ p--;
++ }
++
++ /*
++ * If the hash is 1, then continue only if the next page has a
++ * continuation hash of any value. This is used for readdir
++ * handling. Otherwise, check to see if the hash matches the
++ * desired contiuation hash. If it doesn't, return since
++ * there's no point to read in the successive index pages.
++ */
++ bhash = dx_get_hash(p->at);
++ if (start_hash)
++ *start_hash = bhash;
++ if ((hash & 1) == 0) {
++ if ((bhash & ~1) != hash)
++ return 0;
++ }
++ /*
++ * If the hash is HASH_NB_ALWAYS, we always go to the next
++ * block so no check is necessary
++ */
++ while (num_frames--) {
++ if (!(bh = ext3_bread(NULL, dir, dx_get_block(p->at),
++ 0, err)))
++ return -1; /* Failure */
++ p++;
++ brelse (p->bh);
++ p->bh = bh;
++ p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
++ }
++ return 1;
++}
++
++
++/*
++ * p is at least 6 bytes before the end of page
++ */
++static inline struct ext3_dir_entry_2 *ext3_next_entry(struct ext3_dir_entry_2 *p)
++{
++ return (struct ext3_dir_entry_2 *)((char*)p + le16_to_cpu(p->rec_len));
++}
++
++/*
++ * This function fills a red-black tree with information from a
++ * directory. We start scanning the directory in hash order, starting
++ * at start_hash and start_minor_hash.
++ *
++ * This function returns the number of entries inserted into the tree,
++ * or a negative error code.
++ */
++int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
++ __u32 start_minor_hash, __u32 *next_hash)
++{
++ struct dx_hash_info hinfo;
++ struct buffer_head *bh;
++ struct ext3_dir_entry_2 *de, *top;
++ static struct dx_frame frames[2], *frame;
++ struct inode *dir;
++ int block, err;
++ int count = 0;
++ int ret;
++ __u32 hashval;
++
++ dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
++ start_minor_hash));
++ dir = dir_file->f_dentry->d_inode;
++ hinfo.hash = start_hash;
++ hinfo.minor_hash = 0;
++ frame = dx_probe(0, dir_file->f_dentry->d_inode, &hinfo, frames, &err);
++ if (!frame)
++ return err;
++
++ while (1) {
++ block = dx_get_block(frame->at);
++ dxtrace(printk("Reading block %d\n", block));
++ if (!(bh = ext3_bread (NULL, dir, block, 0, &err)))
++ goto errout;
++
++ de = (struct ext3_dir_entry_2 *) bh->b_data;
++ top = (struct ext3_dir_entry_2 *) ((char *) de + dir->i_sb->s_blocksize -
++ EXT3_DIR_REC_LEN(0));
++ for (; de < top; de = ext3_next_entry(de)) {
++ ext3fs_dirhash(de->name, de->name_len, &hinfo);
++ if ((hinfo.hash < start_hash) ||
++ ((hinfo.hash == start_hash) &&
++ (hinfo.minor_hash < start_minor_hash)))
++ continue;
++ ext3_htree_store_dirent(dir_file, hinfo.hash,
++ hinfo.minor_hash, de);
++ count++;
++ }
++ brelse (bh);
++ hashval = ~1;
++ ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
++ frame, frames, &err, &hashval);
++ if (next_hash)
++ *next_hash = hashval;
++ if (ret == -1)
++ goto errout;
++ /*
++ * Stop if: (a) there are no more entries, or
++ * (b) we have inserted at least one entry and the
++ * next hash value is not a continuation
++ */
++ if ((ret == 0) ||
++ (count && ((hashval & 1) == 0)))
++ break;
++ }
++ dx_release(frames);
++ dxtrace(printk("Fill tree: returned %d entries\n", count));
++ return count;
++errout:
++ dx_release(frames);
++ return (err);
++}
++
++
++/*
++ * Directory block splitting, compacting
++ */
++
++static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
++ struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
++{
++ int count = 0;
++ char *base = (char *) de;
++ struct dx_hash_info h = *hinfo;
++
++ while ((char *) de < base + size)
++ {
++ if (de->name_len && de->inode) {
++ ext3fs_dirhash(de->name, de->name_len, &h);
++ map_tail--;
++ map_tail->hash = h.hash;
++ map_tail->offs = (u32) ((char *) de - base);
++ count++;
++ }
++ /* XXX: do we need to check rec_len == 0 case? -Chris */
++ de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
++ }
++ return count;
++}
++
++static void dx_sort_map (struct dx_map_entry *map, unsigned count)
++{
++ struct dx_map_entry *p, *q, *top = map + count - 1;
++ int more;
++ /* Combsort until bubble sort doesn't suck */
++ while (count > 2)
++ {
++ count = count*10/13;
++ if (count - 9 < 2) /* 9, 10 -> 11 */
++ count = 11;
++ for (p = top, q = p - count; q >= map; p--, q--)
++ if (p->hash < q->hash)
++ swap(*p, *q);
++ }
++ /* Garden variety bubble sort */
++ do {
++ more = 0;
++ q = top;
++ while (q-- > map)
++ {
++ if (q[1].hash >= q[0].hash)
++ continue;
++ swap(*(q+1), *q);
++ more = 1;
++ }
++ } while(more);
++}
++
++static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
++{
++ struct dx_entry *entries = frame->entries;
++ struct dx_entry *old = frame->at, *new = old + 1;
++ int count = dx_get_count(entries);
++
++ assert(count < dx_get_limit(entries));
++ assert(old < entries + count);
++ memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
++ dx_set_hash(new, hash);
++ dx_set_block(new, block);
++ dx_set_count(entries, count + 1);
++}
++#endif
++
++
++static void ext3_update_dx_flag(struct inode *inode)
++{
++ if (!EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
++ EXT3_FEATURE_COMPAT_DIR_INDEX))
++ EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
++}
++
+ /*
+ * NOTE! unlike strncmp, ext3_match returns 1 for success, 0 for failure.
+ *
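
The core of dx_probe() above is a binary search over the sorted dx_entry
array for the last entry whose hash does not exceed the target; entry 0
carries no hash of its own (its hash slot is overlaid by the count/limit
header) and covers everything below entries[1]. Below is a standalone sketch
of that search, using plain in-memory structs rather than the little-endian
on-disk layout; the sample table in main() is made up.

#include <stdio.h>

struct dx_entry {
        unsigned int hash;
        unsigned int block;
};

/* Same search as dx_probe(): scanning starts at entries + 1 and returns
 * the last entry whose hash is <= key. */
static const struct dx_entry *dx_find(const struct dx_entry *entries,
                                      unsigned int count, unsigned int key)
{
        const struct dx_entry *p = entries + 1;
        const struct dx_entry *q = entries + count - 1;

        while (p <= q) {
                const struct dx_entry *m = p + (q - p) / 2;

                if (m->hash > key)
                        q = m - 1;
                else
                        p = m + 1;
        }
        return p - 1;
}

int main(void)
{
        /* entry 0 holds only the leftmost block */
        struct dx_entry entries[] = {
                { 0, 1 }, { 0x2000, 4 }, { 0x8000, 2 }, { 0xc000, 7 },
        };
        const struct dx_entry *at = dx_find(entries, 4, 0x9abc);

        printf("hash 0x9abc -> block %u\n", at->block);  /* block 2 */
        return 0;
}
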
+@@ -94,6 +724,7 @@ static int inline search_dirblock(struct
+ return 0;
+ }
+
++
+ /*
+ * ext3_find_entry()
+ *
+@@ -105,6 +736,8 @@ static int inline search_dirblock(struct
+ * The returned buffer_head has ->b_count elevated. The caller is expected
+ * to brelse() it when appropriate.
+ */
++
++
+ static struct buffer_head * ext3_find_entry (struct dentry *dentry,
+ struct ext3_dir_entry_2 ** res_dir)
+ {
+@@ -119,12 +752,32 @@ static struct buffer_head * ext3_find_en
+ int num = 0;
+ int nblocks, i, err;
+ struct inode *dir = dentry->d_parent->d_inode;
++ int namelen;
++ const u8 *name;
++ unsigned blocksize;
+
+ *res_dir = NULL;
+ sb = dir->i_sb;
+-
++ blocksize = sb->s_blocksize;
++ namelen = dentry->d_name.len;
++ name = dentry->d_name.name;
++ if (namelen > EXT3_NAME_LEN)
++ return NULL;
++#ifdef CONFIG_EXT3_INDEX
++ if (is_dx(dir)) {
++ bh = ext3_dx_find_entry(dentry, res_dir, &err);
++ /*
++ * On success, or if the error was file not found,
++ * return. Otherwise, fall back to doing a search the
++ * old fashioned way.
++ */
++ if (bh || (err != ERR_BAD_DX_DIR))
++ return bh;
++ dxtrace(printk("ext3_find_entry: dx failed, falling back\n"));
++ }
++#endif
+ nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
+- start = dir->u.ext3_i.i_dir_start_lookup;
++ start = EXT3_I(dir)->i_dir_start_lookup;
+ if (start >= nblocks)
+ start = 0;
+ block = start;
+@@ -165,7 +818,7 @@ restart:
+ i = search_dirblock(bh, dir, dentry,
+ block << EXT3_BLOCK_SIZE_BITS(sb), res_dir);
+ if (i == 1) {
+- dir->u.ext3_i.i_dir_start_lookup = block;
++ EXT3_I(dir)->i_dir_start_lookup = block;
+ ret = bh;
+ goto cleanup_and_exit;
+ } else {
+@@ -196,6 +849,66 @@ cleanup_and_exit:
+ return ret;
+ }
+
++#ifdef CONFIG_EXT3_INDEX
++static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
++ struct ext3_dir_entry_2 **res_dir, int *err)
++{
++ struct super_block * sb;
++ struct dx_hash_info hinfo;
++ u32 hash;
++ struct dx_frame frames[2], *frame;
++ struct ext3_dir_entry_2 *de, *top;
++ struct buffer_head *bh;
++ unsigned long block;
++ int retval;
++ int namelen = dentry->d_name.len;
++ const u8 *name = dentry->d_name.name;
++ struct inode *dir = dentry->d_parent->d_inode;
++
++ sb = dir->i_sb;
++ if (!(frame = dx_probe (dentry, 0, &hinfo, frames, err)))
++ return NULL;
++ hash = hinfo.hash;
++ do {
++ block = dx_get_block(frame->at);
++ if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
++ goto errout;
++ de = (struct ext3_dir_entry_2 *) bh->b_data;
++ top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
++ EXT3_DIR_REC_LEN(0));
++ for (; de < top; de = ext3_next_entry(de))
++ if (ext3_match (namelen, name, de)) {
++ if (!ext3_check_dir_entry("ext3_find_entry",
++ dir, de, bh,
++ (block<<EXT3_BLOCK_SIZE_BITS(sb))
++ +((char *)de - bh->b_data))) {
++ brelse (bh);
++ goto errout;
++ }
++ *res_dir = de;
++ dx_release (frames);
++ return bh;
++ }
++ brelse (bh);
++ /* Check to see if we should continue to search */
++ retval = ext3_htree_next_block(dir, hash, frame,
++ frames, err, 0);
++ if (retval == -1) {
++ ext3_warning(sb, __FUNCTION__,
++ "error reading index page in directory #%lu",
++ dir->i_ino);
++ goto errout;
++ }
++ } while (retval == 1);
++
++ *err = -ENOENT;
++errout:
++ dxtrace(printk("%s not found\n", name));
++ dx_release (frames);
++ return NULL;
++}
++#endif
++
+ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry)
+ {
+ struct inode * inode;
+@@ -212,8 +925,9 @@ static struct dentry *ext3_lookup(struct
+ brelse (bh);
+ inode = iget(dir->i_sb, ino);
+
+- if (!inode)
++ if (!inode) {
+ return ERR_PTR(-EACCES);
++ }
+ }
+ d_add(dentry, inode);
+ return NULL;
+@@ -237,6 +951,300 @@ static inline void ext3_set_de_type(stru
+ de->file_type = ext3_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
+ }
+
++#ifdef CONFIG_EXT3_INDEX
++static struct ext3_dir_entry_2 *
++dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
++{
++ unsigned rec_len = 0;
++
++ while (count--) {
++ struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *) (from + map->offs);
++ rec_len = EXT3_DIR_REC_LEN(de->name_len);
++ memcpy (to, de, rec_len);
++ ((struct ext3_dir_entry_2 *) to)->rec_len = rec_len;
++ de->inode = 0;
++ map++;
++ to += rec_len;
++ }
++ return (struct ext3_dir_entry_2 *) (to - rec_len);
++}
++
++static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size)
++{
++ struct ext3_dir_entry_2 *next, *to, *prev, *de = (struct ext3_dir_entry_2 *) base;
++ unsigned rec_len = 0;
++
++ prev = to = de;
++ while ((char*)de < base + size) {
++ next = (struct ext3_dir_entry_2 *) ((char *) de +
++ le16_to_cpu(de->rec_len));
++ if (de->inode && de->name_len) {
++ rec_len = EXT3_DIR_REC_LEN(de->name_len);
++ if (de > to)
++ memmove(to, de, rec_len);
++ to->rec_len = rec_len;
++ prev = to;
++ to = (struct ext3_dir_entry_2 *) (((char *) to) + rec_len);
++ }
++ de = next;
++ }
++ return prev;
++}
++
++static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
++ struct buffer_head **bh,struct dx_frame *frame,
++ struct dx_hash_info *hinfo, int *error)
++{
++ unsigned blocksize = dir->i_sb->s_blocksize;
++ unsigned count, continued;
++ struct buffer_head *bh2;
++ u32 newblock;
++ u32 hash2;
++ struct dx_map_entry *map;
++ char *data1 = (*bh)->b_data, *data2;
++ unsigned split;
++ struct ext3_dir_entry_2 *de = NULL, *de2;
++ int err;
++
++ bh2 = ext3_append (handle, dir, &newblock, error);
++ if (!(bh2)) {
++ brelse(*bh);
++ *bh = NULL;
++ goto errout;
++ }
++
++ BUFFER_TRACE(*bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, *bh);
++ if (err) {
++ journal_error:
++ brelse(*bh);
++ brelse(bh2);
++ *bh = NULL;
++ ext3_std_error(dir->i_sb, err);
++ goto errout;
++ }
++ BUFFER_TRACE(frame->bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, frame->bh);
++ if (err)
++ goto journal_error;
++
++ data2 = bh2->b_data;
++
++ /* create map in the end of data2 block */
++ map = (struct dx_map_entry *) (data2 + blocksize);
++ count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
++ blocksize, hinfo, map);
++ map -= count;
++ split = count/2; // need to adjust to actual middle
++ dx_sort_map (map, count);
++ hash2 = map[split].hash;
++ continued = hash2 == map[split - 1].hash;
++ dxtrace(printk("Split block %i at %x, %i/%i\n",
++ dx_get_block(frame->at), hash2, split, count-split));
++
++ /* Fancy dance to stay within two buffers */
++ de2 = dx_move_dirents(data1, data2, map + split, count - split);
++ de = dx_pack_dirents(data1,blocksize);
++ de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
++ de2->rec_len = cpu_to_le16(data2 + blocksize - (char *) de2);
++ dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data1, blocksize, 1));
++ dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data2, blocksize, 1));
++
++ /* Which block gets the new entry? */
++ if (hinfo->hash >= hash2)
++ {
++ swap(*bh, bh2);
++ de = de2;
++ }
++ dx_insert_block (frame, hash2 + continued, newblock);
++ err = ext3_journal_dirty_metadata (handle, bh2);
++ if (err)
++ goto journal_error;
++ err = ext3_journal_dirty_metadata (handle, frame->bh);
++ if (err)
++ goto journal_error;
++ brelse (bh2);
++ dxtrace(dx_show_index ("frame", frame->entries));
++errout:
++ return de;
++}
++#endif
++
++
++/*
++ * Add a new entry into a directory (leaf) block. If de is non-NULL,
++ * it points to a directory entry which is guaranteed to be large
++ * enough for the new directory entry. If de is NULL, then
++ * add_dirent_to_buf will attempt to search the directory block for
++ * space. It will return -ENOSPC if no space is available, -EIO if the
++ * block holds a corrupted entry, and -EEXIST if the entry already exists.
++ *
++ * NOTE! bh is NOT released in the case where ENOSPC is returned. In
++ * all other cases bh is released.
++ */
++static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
++ struct inode *inode, struct ext3_dir_entry_2 *de,
++ struct buffer_head * bh)
++{
++ struct inode *dir = dentry->d_parent->d_inode;
++ const char *name = dentry->d_name.name;
++ int namelen = dentry->d_name.len;
++ unsigned long offset = 0;
++ unsigned short reclen;
++ int nlen, rlen, err;
++ char *top;
++
++ reclen = EXT3_DIR_REC_LEN(namelen);
++ if (!de) {
++ de = (struct ext3_dir_entry_2 *)bh->b_data;
++ top = bh->b_data + dir->i_sb->s_blocksize - reclen;
++ while ((char *) de <= top) {
++ if (!ext3_check_dir_entry("ext3_add_entry", dir, de,
++ bh, offset)) {
++ brelse (bh);
++ return -EIO;
++ }
++ if (ext3_match (namelen, name, de)) {
++ brelse (bh);
++ return -EEXIST;
++ }
++ nlen = EXT3_DIR_REC_LEN(de->name_len);
++ rlen = le16_to_cpu(de->rec_len);
++ if ((de->inode? rlen - nlen: rlen) >= reclen)
++ break;
++ de = (struct ext3_dir_entry_2 *)((char *)de + rlen);
++ offset += rlen;
++ }
++ if ((char *) de > top)
++ return -ENOSPC;
++ }
++ BUFFER_TRACE(bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, bh);
++ if (err) {
++ ext3_std_error(dir->i_sb, err);
++ brelse(bh);
++ return err;
++ }
++
++ /* By now the buffer is marked for journaling */
++ nlen = EXT3_DIR_REC_LEN(de->name_len);
++ rlen = le16_to_cpu(de->rec_len);
++ if (de->inode) {
++ struct ext3_dir_entry_2 *de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen);
++ de1->rec_len = cpu_to_le16(rlen - nlen);
++ de->rec_len = cpu_to_le16(nlen);
++ de = de1;
++ }
++ de->file_type = EXT3_FT_UNKNOWN;
++ if (inode) {
++ de->inode = cpu_to_le32(inode->i_ino);
++ ext3_set_de_type(dir->i_sb, de, inode->i_mode);
++ } else
++ de->inode = 0;
++ de->name_len = namelen;
++ memcpy (de->name, name, namelen);
++ /*
++ * XXX shouldn't update any times until successful
++ * completion of syscall, but too many callers depend
++ * on this.
++ *
++ * XXX similarly, too many callers depend on
++ * ext3_new_inode() setting the times, but error
++ * recovery deletes the inode, so the worst that can
++ * happen is that the times are slightly out of date
++ * and/or different from the directory change time.
++ */
++ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
++ ext3_update_dx_flag(dir);
++ dir->i_version = ++event;
++ ext3_mark_inode_dirty(handle, dir);
++ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
++ err = ext3_journal_dirty_metadata(handle, bh);
++ if (err)
++ ext3_std_error(dir->i_sb, err);
++ brelse(bh);
++ return 0;
++}
++
++#ifdef CONFIG_EXT3_INDEX
++/*
++ * This converts a one block unindexed directory to a 3 block indexed
++ * directory, and adds the dentry to the indexed directory.
++ */
++static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
++ struct inode *inode, struct buffer_head *bh)
++{
++ struct inode *dir = dentry->d_parent->d_inode;
++ const char *name = dentry->d_name.name;
++ int namelen = dentry->d_name.len;
++ struct buffer_head *bh2;
++ struct dx_root *root;
++ struct dx_frame frames[2], *frame;
++ struct dx_entry *entries;
++ struct ext3_dir_entry_2 *de, *de2;
++ char *data1, *top;
++ unsigned len;
++ int retval;
++ unsigned blocksize;
++ struct dx_hash_info hinfo;
++ u32 block;
++
++ blocksize = dir->i_sb->s_blocksize;
++ dxtrace(printk("Creating index\n"));
++ retval = ext3_journal_get_write_access(handle, bh);
++ if (retval) {
++ ext3_std_error(dir->i_sb, retval);
++ brelse(bh);
++ return retval;
++ }
++ root = (struct dx_root *) bh->b_data;
++
++ EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
++ bh2 = ext3_append (handle, dir, &block, &retval);
++ if (!(bh2)) {
++ brelse(bh);
++ return retval;
++ }
++ data1 = bh2->b_data;
++
++ /* The 0th block becomes the root, move the dirents out */
++ de = (struct ext3_dir_entry_2 *) &root->info;
++ len = ((char *) root) + blocksize - (char *) de;
++ memcpy (data1, de, len);
++ de = (struct ext3_dir_entry_2 *) data1;
++ top = data1 + len;
++ while (((char *) de2=(char*)de+le16_to_cpu(de->rec_len)) < top)
++ de = de2;
++ de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
++ /* Initialize the root; the dot dirents already exist */
++ de = (struct ext3_dir_entry_2 *) (&root->dotdot);
++ de->rec_len = cpu_to_le16(blocksize - EXT3_DIR_REC_LEN(2));
++ memset (&root->info, 0, sizeof(root->info));
++ root->info.info_length = sizeof(root->info);
++ root->info.hash_version = dir->i_sb->u.ext3_sb.s_def_hash_version;
++ entries = root->entries;
++ dx_set_block (entries, 1);
++ dx_set_count (entries, 1);
++ dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
++
++ /* Initialize as for dx_probe */
++ hinfo.hash_version = root->info.hash_version;
++ hinfo.seed = dir->i_sb->u.ext3_sb.s_hash_seed;
++ ext3fs_dirhash(name, namelen, &hinfo);
++ frame = frames;
++ frame->entries = entries;
++ frame->at = entries;
++ frame->bh = bh;
++ bh = bh2;
++ de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
++ dx_release (frames);
++ if (!(de))
++ return retval;
++
++ return add_dirent_to_buf(handle, dentry, inode, de, bh);
++}
++#endif
++
+ /*
+ * ext3_add_entry()
+ *
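
do_split() above orders its hash map with dx_sort_map(): a comb-sort phase
whose gap shrinks by the factor 10/13 (with the "9, 10 -> 11" clamp relying on
count being unsigned), finished by a bubble-sort pass. The following is a
self-contained sketch of the same routine on a plain array; the driver in
main() is illustrative and not part of the patch.

#include <stdio.h>

struct dx_map_entry {
        unsigned int hash;
        unsigned int offs;
};

/* GNU C typeof, as in the patch's own swap() macro */
#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)

static void dx_sort_map(struct dx_map_entry *map, unsigned int count)
{
        struct dx_map_entry *p, *q, *top = map + count - 1;
        int more;

        /* Comb sort until bubble sort is cheap enough to finish the job.
         * count is unsigned, so count - 9 < 2 only matches 9 and 10. */
        while (count > 2) {
                count = count * 10 / 13;
                if (count - 9 < 2)      /* 9, 10 -> 11 */
                        count = 11;
                for (p = top, q = p - count; q >= map; p--, q--)
                        if (p->hash < q->hash)
                                swap(*p, *q);
        }
        /* Garden variety bubble sort */
        do {
                more = 0;
                q = top;
                while (q-- > map) {
                        if (q[1].hash >= q[0].hash)
                                continue;
                        swap(*(q + 1), *q);
                        more = 1;
                }
        } while (more);
}

int main(void)
{
        struct dx_map_entry m[] = {
                { 0x9000, 0 }, { 0x1000, 12 }, { 0x7000, 40 }, { 0x3000, 64 },
        };
        unsigned int i;

        dx_sort_map(m, 4);
        for (i = 0; i < 4; i++)
                printf("%#x @ %u\n", m[i].hash, m[i].offs);
        return 0;
}
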
+@@ -247,127 +1255,198 @@ static inline void ext3_set_de_type(stru
+ * may not sleep between calling this and putting something into
+ * the entry, as someone else might have used it while you slept.
+ */
+-
+-/*
+- * AKPM: the journalling code here looks wrong on the error paths
+- */
+ static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
+ struct inode *inode)
+ {
+ struct inode *dir = dentry->d_parent->d_inode;
+- const char *name = dentry->d_name.name;
+- int namelen = dentry->d_name.len;
+ unsigned long offset;
+- unsigned short rec_len;
+ struct buffer_head * bh;
+- struct ext3_dir_entry_2 * de, * de1;
++ struct ext3_dir_entry_2 *de;
+ struct super_block * sb;
+ int retval;
++#ifdef CONFIG_EXT3_INDEX
++ int dx_fallback=0;
++#endif
++ unsigned blocksize;
++ unsigned nlen, rlen;
++ u32 block, blocks;
+
+ sb = dir->i_sb;
+-
+- if (!namelen)
++ blocksize = sb->s_blocksize;
++ if (!dentry->d_name.len)
+ return -EINVAL;
+- bh = ext3_bread (handle, dir, 0, 0, &retval);
++#ifdef CONFIG_EXT3_INDEX
++ if (is_dx(dir)) {
++ retval = ext3_dx_add_entry(handle, dentry, inode);
++ if (!retval || (retval != ERR_BAD_DX_DIR))
++ return retval;
++ EXT3_I(dir)->i_flags &= ~EXT3_INDEX_FL;
++ dx_fallback++;
++ ext3_mark_inode_dirty(handle, dir);
++ }
++#endif
++ blocks = dir->i_size >> sb->s_blocksize_bits;
++ for (block = 0, offset = 0; block < blocks; block++) {
++ bh = ext3_bread(handle, dir, block, 0, &retval);
++ if(!bh)
++ return retval;
++ retval = add_dirent_to_buf(handle, dentry, inode, 0, bh);
++ if (retval != -ENOSPC)
++ return retval;
++
++#ifdef CONFIG_EXT3_INDEX
++ if (blocks == 1 && !dx_fallback &&
++ EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
++ return make_indexed_dir(handle, dentry, inode, bh);
++#endif
++ brelse(bh);
++ }
++ bh = ext3_append(handle, dir, &block, &retval);
+ if (!bh)
+ return retval;
+- rec_len = EXT3_DIR_REC_LEN(namelen);
+- offset = 0;
+ de = (struct ext3_dir_entry_2 *) bh->b_data;
+- while (1) {
+- if ((char *)de >= sb->s_blocksize + bh->b_data) {
+- brelse (bh);
+- bh = NULL;
+- bh = ext3_bread (handle, dir,
+- offset >> EXT3_BLOCK_SIZE_BITS(sb), 1, &retval);
+- if (!bh)
+- return retval;
+- if (dir->i_size <= offset) {
+- if (dir->i_size == 0) {
+- brelse(bh);
+- return -ENOENT;
+- }
++ de->inode = 0;
++ de->rec_len = cpu_to_le16(rlen = blocksize);
++ nlen = 0;
++ return add_dirent_to_buf(handle, dentry, inode, de, bh);
++}
+
+- ext3_debug ("creating next block\n");
++#ifdef CONFIG_EXT3_INDEX
++/*
++ * Returns 0 for success, or a negative error value
++ */
++static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
++ struct inode *inode)
++{
++ struct dx_frame frames[2], *frame;
++ struct dx_entry *entries, *at;
++ struct dx_hash_info hinfo;
++ struct buffer_head * bh;
++ struct inode *dir = dentry->d_parent->d_inode;
++ struct super_block * sb = dir->i_sb;
++ struct ext3_dir_entry_2 *de;
++ int err;
+
+- BUFFER_TRACE(bh, "get_write_access");
+- ext3_journal_get_write_access(handle, bh);
+- de = (struct ext3_dir_entry_2 *) bh->b_data;
+- de->inode = 0;
+- de->rec_len = le16_to_cpu(sb->s_blocksize);
+- dir->u.ext3_i.i_disksize =
+- dir->i_size = offset + sb->s_blocksize;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
+- ext3_mark_inode_dirty(handle, dir);
+- } else {
++ frame = dx_probe(dentry, 0, &hinfo, frames, &err);
++ if (!frame)
++ return err;
++ entries = frame->entries;
++ at = frame->at;
+
+- ext3_debug ("skipping to next block\n");
++ if (!(bh = ext3_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
++ goto cleanup;
+
+- de = (struct ext3_dir_entry_2 *) bh->b_data;
+- }
+- }
+- if (!ext3_check_dir_entry ("ext3_add_entry", dir, de, bh,
+- offset)) {
+- brelse (bh);
+- return -ENOENT;
+- }
+- if (ext3_match (namelen, name, de)) {
+- brelse (bh);
+- return -EEXIST;
++ BUFFER_TRACE(bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, bh);
++ if (err)
++ goto journal_error;
++
++ err = add_dirent_to_buf(handle, dentry, inode, 0, bh);
++ if (err != -ENOSPC) {
++ bh = 0;
++ goto cleanup;
++ }
++
++ /* Block full, should compress but for now just split */
++ dxtrace(printk("using %u of %u node entries\n",
++ dx_get_count(entries), dx_get_limit(entries)));
++ /* Need to split index? */
++ if (dx_get_count(entries) == dx_get_limit(entries)) {
++ u32 newblock;
++ unsigned icount = dx_get_count(entries);
++ int levels = frame - frames;
++ struct dx_entry *entries2;
++ struct dx_node *node2;
++ struct buffer_head *bh2;
++
++ if (levels && (dx_get_count(frames->entries) ==
++ dx_get_limit(frames->entries))) {
++ ext3_warning(sb, __FUNCTION__,
++ "Directory index full!\n");
++ err = -ENOSPC;
++ goto cleanup;
+ }
+- if ((le32_to_cpu(de->inode) == 0 &&
+- le16_to_cpu(de->rec_len) >= rec_len) ||
+- (le16_to_cpu(de->rec_len) >=
+- EXT3_DIR_REC_LEN(de->name_len) + rec_len)) {
+- BUFFER_TRACE(bh, "get_write_access");
+- ext3_journal_get_write_access(handle, bh);
+- /* By now the buffer is marked for journaling */
+- offset += le16_to_cpu(de->rec_len);
+- if (le32_to_cpu(de->inode)) {
+- de1 = (struct ext3_dir_entry_2 *) ((char *) de +
+- EXT3_DIR_REC_LEN(de->name_len));
+- de1->rec_len =
+- cpu_to_le16(le16_to_cpu(de->rec_len) -
+- EXT3_DIR_REC_LEN(de->name_len));
+- de->rec_len = cpu_to_le16(
+- EXT3_DIR_REC_LEN(de->name_len));
+- de = de1;
++ bh2 = ext3_append (handle, dir, &newblock, &err);
++ if (!(bh2))
++ goto cleanup;
++ node2 = (struct dx_node *)(bh2->b_data);
++ entries2 = node2->entries;
++ node2->fake.rec_len = cpu_to_le16(sb->s_blocksize);
++ node2->fake.inode = 0;
++ BUFFER_TRACE(frame->bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, frame->bh);
++ if (err)
++ goto journal_error;
++ if (levels) {
++ unsigned icount1 = icount/2, icount2 = icount - icount1;
++ unsigned hash2 = dx_get_hash(entries + icount1);
++ dxtrace(printk("Split index %i/%i\n", icount1, icount2));
++
++ BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
++ err = ext3_journal_get_write_access(handle,
++ frames[0].bh);
++ if (err)
++ goto journal_error;
++
++ memcpy ((char *) entries2, (char *) (entries + icount1),
++ icount2 * sizeof(struct dx_entry));
++ dx_set_count (entries, icount1);
++ dx_set_count (entries2, icount2);
++ dx_set_limit (entries2, dx_node_limit(dir));
++
++ /* Which index block gets the new entry? */
++ if (at - entries >= icount1) {
++ frame->at = at = at - entries - icount1 + entries2;
++ frame->entries = entries = entries2;
++ swap(frame->bh, bh2);
+ }
+- de->file_type = EXT3_FT_UNKNOWN;
+- if (inode) {
+- de->inode = cpu_to_le32(inode->i_ino);
+- ext3_set_de_type(dir->i_sb, de, inode->i_mode);
+- } else
+- de->inode = 0;
+- de->name_len = namelen;
+- memcpy (de->name, name, namelen);
+- /*
+- * XXX shouldn't update any times until successful
+- * completion of syscall, but too many callers depend
+- * on this.
+- *
+- * XXX similarly, too many callers depend on
+- * ext3_new_inode() setting the times, but error
+- * recovery deletes the inode, so the worst that can
+- * happen is that the times are slightly out of date
+- * and/or different from the directory change time.
+- */
+- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
+- dir->i_version = ++event;
+- ext3_mark_inode_dirty(handle, dir);
+- BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+- ext3_journal_dirty_metadata(handle, bh);
+- brelse(bh);
+- return 0;
++ dx_insert_block (frames + 0, hash2, newblock);
++ dxtrace(dx_show_index ("node", frames[1].entries));
++ dxtrace(dx_show_index ("node",
++ ((struct dx_node *) bh2->b_data)->entries));
++ err = ext3_journal_dirty_metadata(handle, bh2);
++ if (err)
++ goto journal_error;
++ brelse (bh2);
++ } else {
++ dxtrace(printk("Creating second level index...\n"));
++ memcpy((char *) entries2, (char *) entries,
++ icount * sizeof(struct dx_entry));
++ dx_set_limit(entries2, dx_node_limit(dir));
++
++ /* Set up root */
++ dx_set_count(entries, 1);
++ dx_set_block(entries + 0, newblock);
++ ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
++
++ /* Add new access path frame */
++ frame = frames + 1;
++ frame->at = at = at - entries + entries2;
++ frame->entries = entries = entries2;
++ frame->bh = bh2;
++ err = ext3_journal_get_write_access(handle,
++ frame->bh);
++ if (err)
++ goto journal_error;
+ }
+- offset += le16_to_cpu(de->rec_len);
+- de = (struct ext3_dir_entry_2 *)
+- ((char *) de + le16_to_cpu(de->rec_len));
++ ext3_journal_dirty_metadata(handle, frames[0].bh);
+ }
+- brelse (bh);
+- return -ENOSPC;
++ de = do_split(handle, dir, &bh, frame, &hinfo, &err);
++ if (!de)
++ goto cleanup;
++ err = add_dirent_to_buf(handle, dentry, inode, de, bh);
++ bh = 0;
++ goto cleanup;
++
++journal_error:
++ ext3_std_error(dir->i_sb, err);
++cleanup:
++ if (bh)
++ brelse(bh);
++ dx_release(frames);
++ return err;
+ }
++#endif
+
+ /*
+ * ext3_delete_entry deletes a directory entry by merging it with the
+@@ -451,9 +1530,11 @@ static int ext3_create (struct inode * d
+ struct inode * inode;
+ int err;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 3);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -478,9 +1559,11 @@ static int ext3_mknod (struct inode * di
+ struct inode *inode;
+ int err;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 3);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -507,9 +1590,11 @@ static int ext3_mkdir(struct inode * dir
+ if (dir->i_nlink >= EXT3_LINK_MAX)
+ return -EMLINK;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 3);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -521,7 +1606,7 @@ static int ext3_mkdir(struct inode * dir
+
+ inode->i_op = &ext3_dir_inode_operations;
+ inode->i_fop = &ext3_dir_operations;
+- inode->i_size = inode->u.ext3_i.i_disksize = inode->i_sb->s_blocksize;
++ inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+ inode->i_blocks = 0;
+ dir_block = ext3_bread (handle, inode, 0, 1, &err);
+ if (!dir_block) {
+@@ -554,21 +1639,19 @@ static int ext3_mkdir(struct inode * dir
+ inode->i_mode |= S_ISGID;
+ ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_entry (handle, dentry, inode);
+- if (err)
+- goto out_no_entry;
++ if (err) {
++ inode->i_nlink = 0;
++ ext3_mark_inode_dirty(handle, inode);
++ iput (inode);
++ goto out_stop;
++ }
+ dir->i_nlink++;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(dir);
+ ext3_mark_inode_dirty(handle, dir);
+ d_instantiate(dentry, inode);
+ out_stop:
+ ext3_journal_stop(handle, dir);
+ return err;
+-
+-out_no_entry:
+- inode->i_nlink = 0;
+- ext3_mark_inode_dirty(handle, inode);
+- iput (inode);
+- goto out_stop;
+ }
+
+ /*
+@@ -655,7 +1738,7 @@ int ext3_orphan_add(handle_t *handle, st
+ int err = 0, rc;
+
+ lock_super(sb);
+- if (!list_empty(&inode->u.ext3_i.i_orphan))
++ if (!list_empty(&EXT3_I(inode)->i_orphan))
+ goto out_unlock;
+
+ /* Orphan handling is only valid for files with data blocks
+@@ -696,7 +1779,7 @@ int ext3_orphan_add(handle_t *handle, st
+ * This is safe: on error we're going to ignore the orphan list
+ * anyway on the next recovery. */
+ if (!err)
+- list_add(&inode->u.ext3_i.i_orphan, &EXT3_SB(sb)->s_orphan);
++ list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
+
+ jbd_debug(4, "superblock will point to %ld\n", inode->i_ino);
+ jbd_debug(4, "orphan inode %ld will point to %d\n",
+@@ -714,25 +1797,26 @@ out_unlock:
+ int ext3_orphan_del(handle_t *handle, struct inode *inode)
+ {
+ struct list_head *prev;
++ struct ext3_inode_info *ei = EXT3_I(inode);
+ struct ext3_sb_info *sbi;
+ ino_t ino_next;
+ struct ext3_iloc iloc;
+ int err = 0;
+
+ lock_super(inode->i_sb);
+- if (list_empty(&inode->u.ext3_i.i_orphan)) {
++ if (list_empty(&ei->i_orphan)) {
+ unlock_super(inode->i_sb);
+ return 0;
+ }
+
+ ino_next = NEXT_ORPHAN(inode);
+- prev = inode->u.ext3_i.i_orphan.prev;
++ prev = ei->i_orphan.prev;
+ sbi = EXT3_SB(inode->i_sb);
+
+ jbd_debug(4, "remove inode %ld from orphan list\n", inode->i_ino);
+
+- list_del(&inode->u.ext3_i.i_orphan);
+- INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);
++ list_del(&ei->i_orphan);
++ INIT_LIST_HEAD(&ei->i_orphan);
+
+ /* If we're on an error path, we may not have a valid
+ * transaction handle with which to update the orphan list on
+@@ -793,8 +1877,9 @@ static int ext3_rmdir (struct inode * di
+ handle_t *handle;
+
+ handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS);
+- if (IS_ERR(handle))
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ retval = -ENOENT;
+ bh = ext3_find_entry (dentry, &de);
+@@ -832,7 +1917,7 @@ static int ext3_rmdir (struct inode * di
+ dir->i_nlink--;
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ ext3_mark_inode_dirty(handle, inode);
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(dir);
+ ext3_mark_inode_dirty(handle, dir);
+
+ end_rmdir:
+@@ -850,8 +1935,9 @@ static int ext3_unlink(struct inode * di
+ handle_t *handle;
+
+ handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS);
+- if (IS_ERR(handle))
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -878,7 +1964,7 @@ static int ext3_unlink(struct inode * di
+ if (retval)
+ goto end_unlink;
+ dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(dir);
+ ext3_mark_inode_dirty(handle, dir);
+ inode->i_nlink--;
+ if (!inode->i_nlink)
+@@ -904,9 +1990,11 @@ static int ext3_symlink (struct inode *
+ if (l > dir->i_sb->s_blocksize)
+ return -ENAMETOOLONG;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 5);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -916,7 +2004,7 @@ static int ext3_symlink (struct inode *
+ if (IS_ERR(inode))
+ goto out_stop;
+
+- if (l > sizeof (inode->u.ext3_i.i_data)) {
++ if (l > sizeof (EXT3_I(inode)->i_data)) {
+ inode->i_op = &page_symlink_inode_operations;
+ inode->i_mapping->a_ops = &ext3_aops;
+ /*
+@@ -925,25 +2013,23 @@ static int ext3_symlink (struct inode *
+ * i_size in generic_commit_write().
+ */
+ err = block_symlink(inode, symname, l);
+- if (err)
+- goto out_no_entry;
++ if (err) {
++ ext3_dec_count(handle, inode);
++ ext3_mark_inode_dirty(handle, inode);
++ iput (inode);
++ goto out_stop;
++ }
+ } else {
+ inode->i_op = &ext3_fast_symlink_inode_operations;
+- memcpy((char*)&inode->u.ext3_i.i_data,symname,l);
++ memcpy((char*)&EXT3_I(inode)->i_data,symname,l);
+ inode->i_size = l-1;
+ }
+- inode->u.ext3_i.i_disksize = inode->i_size;
++ EXT3_I(inode)->i_disksize = inode->i_size;
+ err = ext3_add_nondir(handle, dentry, inode);
+ ext3_mark_inode_dirty(handle, inode);
+ out_stop:
+ ext3_journal_stop(handle, dir);
+ return err;
+-
+-out_no_entry:
+- ext3_dec_count(handle, inode);
+- ext3_mark_inode_dirty(handle, inode);
+- iput (inode);
+- goto out_stop;
+ }
+
+ static int ext3_link (struct dentry * old_dentry,
+@@ -956,12 +2042,15 @@ static int ext3_link (struct dentry * ol
+ if (S_ISDIR(inode->i_mode))
+ return -EPERM;
+
+- if (inode->i_nlink >= EXT3_LINK_MAX)
++ if (inode->i_nlink >= EXT3_LINK_MAX) {
+ return -EMLINK;
++ }
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -995,9 +2084,11 @@ static int ext3_rename (struct inode * o
+
+ old_bh = new_bh = dir_bh = NULL;
+
+- handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS + 2);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(old_dir) || IS_SYNC(new_dir))
+ handle->h_sync = 1;
+@@ -1077,7 +2168,7 @@ static int ext3_rename (struct inode * o
+ new_inode->i_ctime = CURRENT_TIME;
+ }
+ old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
+- old_dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(old_dir);
+ if (dir_bh) {
+ BUFFER_TRACE(dir_bh, "get_write_access");
+ ext3_journal_get_write_access(handle, dir_bh);
+@@ -1089,7 +2180,7 @@ static int ext3_rename (struct inode * o
+ new_inode->i_nlink--;
+ } else {
+ new_dir->i_nlink++;
+- new_dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(new_dir);
+ ext3_mark_inode_dirty(handle, new_dir);
+ }
+ }
+--- linux-2.4.20/fs/ext3/super.c~ext-2.4-patch-1 Sat Apr 5 03:56:31 2003
++++ linux-2.4.20-braam/fs/ext3/super.c Sat Apr 5 03:56:31 2003
+@@ -707,6 +707,7 @@ static int ext3_setup_super(struct super
+ es->s_mtime = cpu_to_le32(CURRENT_TIME);
+ ext3_update_dynamic_rev(sb);
+ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
++
+ ext3_commit_super (sb, es, 1);
+ if (test_opt (sb, DEBUG))
+ printk (KERN_INFO
+@@ -717,6 +718,7 @@ static int ext3_setup_super(struct super
+ EXT3_BLOCKS_PER_GROUP(sb),
+ EXT3_INODES_PER_GROUP(sb),
+ sbi->s_mount_opt);
++
+ printk(KERN_INFO "EXT3 FS " EXT3FS_VERSION ", " EXT3FS_DATE " on %s, ",
+ bdevname(sb->s_dev));
+ if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
+@@ -890,6 +892,7 @@ static loff_t ext3_max_size(int bits)
+ return res;
+ }
+
++
+ struct super_block * ext3_read_super (struct super_block * sb, void * data,
+ int silent)
+ {
+@@ -1066,6 +1069,9 @@ struct super_block * ext3_read_super (st
+ sbi->s_mount_state = le16_to_cpu(es->s_state);
+ sbi->s_addr_per_block_bits = log2(EXT3_ADDR_PER_BLOCK(sb));
+ sbi->s_desc_per_block_bits = log2(EXT3_DESC_PER_BLOCK(sb));
++ for (i=0; i < 4; i++)
++ sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
++ sbi->s_def_hash_version = es->s_def_hash_version;
+
+ if (sbi->s_blocks_per_group > blocksize * 8) {
+ printk (KERN_ERR
+@@ -1769,6 +1775,7 @@ static void __exit exit_ext3_fs(void)
+ unregister_filesystem(&ext3_fs_type);
+ }
+
++EXPORT_SYMBOL(ext3_force_commit);
+ EXPORT_SYMBOL(ext3_bread);
+
+ MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
+--- linux-2.4.20/include/linux/ext3_fs.h~ext-2.4-patch-1 Sat Apr 5 03:56:31 2003
++++ linux-2.4.20-braam/include/linux/ext3_fs.h Sat Apr 5 03:56:31 2003
+@@ -40,6 +40,11 @@
+ #define EXT3FS_VERSION "2.4-0.9.19"
+
+ /*
++ * Always enable hashed directories
++ */
++#define CONFIG_EXT3_INDEX
++
++/*
+ * Debug code
+ */
+ #ifdef EXT3FS_DEBUG
+@@ -437,8 +442,11 @@ struct ext3_super_block {
+ /*E0*/ __u32 s_journal_inum; /* inode number of journal file */
+ __u32 s_journal_dev; /* device number of journal file */
+ __u32 s_last_orphan; /* start of list of inodes to delete */
+-
+-/*EC*/ __u32 s_reserved[197]; /* Padding to the end of the block */
++ __u32 s_hash_seed[4]; /* HTREE hash seed */
++ __u8 s_def_hash_version; /* Default hash version to use */
++ __u8 s_reserved_char_pad;
++ __u16 s_reserved_word_pad;
++ __u32 s_reserved[192]; /* Padding to the end of the block */
+ };
+
+ #ifdef __KERNEL__
+@@ -575,9 +583,46 @@ struct ext3_dir_entry_2 {
+ #define EXT3_DIR_ROUND (EXT3_DIR_PAD - 1)
+ #define EXT3_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT3_DIR_ROUND) & \
+ ~EXT3_DIR_ROUND)
++/*
++ * Hash Tree Directory indexing
++ * (c) Daniel Phillips, 2001
++ */
++
++#ifdef CONFIG_EXT3_INDEX
++ #define is_dx(dir) (EXT3_HAS_COMPAT_FEATURE(dir->i_sb, \
++ EXT3_FEATURE_COMPAT_DIR_INDEX) && \
++ (EXT3_I(dir)->i_flags & EXT3_INDEX_FL))
++#define EXT3_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT3_LINK_MAX)
++#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
++#else
++ #define is_dx(dir) 0
++#define EXT3_DIR_LINK_MAX(dir) ((dir)->i_nlink >= EXT3_LINK_MAX)
++#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2)
++#endif
++
++/* Legal values for the dx_root hash_version field: */
++
++#define DX_HASH_LEGACY 0
++#define DX_HASH_HALF_MD4 1
++#define DX_HASH_TEA 2
++
++/* hash info structure used by the directory hash */
++struct dx_hash_info
++{
++ u32 hash;
++ u32 minor_hash;
++ int hash_version;
++ u32 *seed;
++};
+
+ #ifdef __KERNEL__
+ /*
++ * Control parameters used by ext3_htree_next_block
++ */
++#define HASH_NB_ALWAYS 1
++
++
++/*
+ * Describe an inode's exact location on disk and in memory
+ */
+ struct ext3_iloc
+@@ -587,6 +632,27 @@ struct ext3_iloc
+ unsigned long block_group;
+ };
+
++
++/*
++ * This structure is stuffed into the struct file's private_data field
++ * for directories. It is where we put information so that we can do
++ * readdir operations in hash tree order.
++ */
++struct dir_private_info {
++ rb_root_t root;
++ rb_node_t *curr_node;
++ struct fname *extra_fname;
++ loff_t last_pos;
++ __u32 curr_hash;
++ __u32 curr_minor_hash;
++ __u32 next_hash;
++};
++
++/*
++ * Special error return code only used by dx_probe() and its callers.
++ */
++#define ERR_BAD_DX_DIR -75000
++
+ /*
+ * Function prototypes
+ */
+@@ -614,11 +680,20 @@ extern struct ext3_group_desc * ext3_get
+
+ /* dir.c */
+ extern int ext3_check_dir_entry(const char *, struct inode *,
+- struct ext3_dir_entry_2 *, struct buffer_head *,
+- unsigned long);
++ struct ext3_dir_entry_2 *,
++ struct buffer_head *, unsigned long);
++extern void ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
++ __u32 minor_hash,
++ struct ext3_dir_entry_2 *dirent);
++extern void ext3_htree_free_dir_info(struct dir_private_info *p);
++
+ /* fsync.c */
+ extern int ext3_sync_file (struct file *, struct dentry *, int);
+
++/* hash.c */
++extern int ext3fs_dirhash(const char *name, int len, struct
++ dx_hash_info *hinfo);
++
+ /* ialloc.c */
+ extern struct inode * ext3_new_inode (handle_t *, const struct inode *, int);
+ extern void ext3_free_inode (handle_t *, struct inode *);
+@@ -650,6 +725,8 @@ extern int ext3_ioctl (struct inode *, s
+ /* namei.c */
+ extern int ext3_orphan_add(handle_t *, struct inode *);
+ extern int ext3_orphan_del(handle_t *, struct inode *);
++extern int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
++ __u32 start_minor_hash, __u32 *next_hash);
+
+ /* super.c */
+ extern void ext3_error (struct super_block *, const char *, const char *, ...)
+--- linux-2.4.20/include/linux/ext3_fs_sb.h~ext-2.4-patch-1 Sat Apr 5 03:56:31 2003
++++ linux-2.4.20-braam/include/linux/ext3_fs_sb.h Sat Apr 5 03:56:31 2003
+@@ -62,6 +62,8 @@ struct ext3_sb_info {
+ int s_inode_size;
+ int s_first_ino;
+ u32 s_next_generation;
++ u32 s_hash_seed[4];
++ int s_def_hash_version;
+
+ /* Journaling */
+ struct inode * s_journal_inode;
+--- linux-2.4.20/include/linux/ext3_jbd.h~ext-2.4-patch-1 Sat Apr 5 03:56:31 2003
++++ linux-2.4.20-braam/include/linux/ext3_jbd.h Sat Apr 5 03:56:31 2003
+@@ -63,6 +63,8 @@ extern int ext3_writepage_trans_blocks(s
+
+ #define EXT3_RESERVE_TRANS_BLOCKS 12
+
++#define EXT3_INDEX_EXTRA_TRANS_BLOCKS 8
++
+ int
+ ext3_mark_iloc_dirty(handle_t *handle,
+ struct inode *inode,
+--- linux-2.4.20/include/linux/rbtree.h~ext-2.4-patch-1 Sat Apr 5 03:56:31 2003
++++ linux-2.4.20-braam/include/linux/rbtree.h Sat Apr 5 03:56:31 2003
+@@ -120,6 +120,8 @@ rb_root_t;
+
+ extern void rb_insert_color(rb_node_t *, rb_root_t *);
+ extern void rb_erase(rb_node_t *, rb_root_t *);
++extern rb_node_t *rb_get_first(rb_root_t *root);
++extern rb_node_t *rb_get_next(rb_node_t *n);
+
+ static inline void rb_link_node(rb_node_t * node, rb_node_t * parent, rb_node_t ** rb_link)
+ {
+--- linux-2.4.20/lib/rbtree.c~ext-2.4-patch-1 Sat Apr 5 03:56:31 2003
++++ linux-2.4.20-braam/lib/rbtree.c Sat Apr 5 03:56:31 2003
+@@ -17,6 +17,8 @@
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ linux/lib/rbtree.c
++
++ rb_get_first and rb_get_next written by Theodore Ts'o, 9/8/2002
+ */
+
+ #include <linux/rbtree.h>
+@@ -294,3 +296,43 @@ void rb_erase(rb_node_t * node, rb_root_
+ __rb_erase_color(child, parent, root);
+ }
+ EXPORT_SYMBOL(rb_erase);
++
++/*
++ * This function returns the first node (in sort order) of the tree.
++ */
++rb_node_t *rb_get_first(rb_root_t *root)
++{
++ rb_node_t *n;
++
++ n = root->rb_node;
++ if (!n)
++ return 0;
++ while (n->rb_left)
++ n = n->rb_left;
++ return n;
++}
++EXPORT_SYMBOL(rb_get_first);
++
++/*
++ * Given a node, this function will return the next node in the tree.
++ */
++rb_node_t *rb_get_next(rb_node_t *n)
++{
++ rb_node_t *parent;
++
++ if (n->rb_right) {
++ n = n->rb_right;
++ while (n->rb_left)
++ n = n->rb_left;
++ return n;
++ } else {
++ while ((parent = n->rb_parent)) {
++ if (n == parent->rb_left)
++ return parent;
++ n = parent;
++ }
++ return 0;
++ }
++}
++EXPORT_SYMBOL(rb_get_next);
++
+
+_
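
The rb_get_first()/rb_get_next() helpers added to lib/rbtree.c above are what dir.c uses (through dir_private_info's root and curr_node fields) to hand directory entries back to readdir in hash order. A minimal, illustrative walk over such a tree, assuming only the declarations added to rbtree.h (struct item and its fields are hypothetical):

#include <linux/kernel.h>
#include <linux/rbtree.h>

struct item {
	rb_node_t	rb_node;	/* must be the first member for the cast below */
	unsigned int	key;		/* hypothetical payload */
};

static void dump_in_order(rb_root_t *root)
{
	rb_node_t *n;

	/* rb_get_first() yields the leftmost node, rb_get_next() the in-order
	 * successor; both return 0 when the walk is exhausted. */
	for (n = rb_get_first(root); n; n = rb_get_next(n)) {
		struct item *it = (struct item *) n;	/* rb_node is first member */
		printk(KERN_DEBUG "key %u\n", it->key);
	}
}
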
--- /dev/null
+# This is a BitKeeper generated patch for the following project:
+# Project Name: Linux kernel tree
+#
+# namei.c | 9 +++++++++
+# 1 files changed, 9 insertions(+)
+#
+# The following is the BitKeeper ChangeSet Log
+# --------------------------------------------
+# 02/11/07 tytso@snap.thunk.org 1.777
+# Add '.' and '..' entries to be returned by readdir of htree directories
+#
+# This patch from Chris Li adds '.' and '..' to the rbtree so that they
+# are properly returned by readdir.
+# --------------------------------------------
+#
+diff -Nru a/fs/ext3/namei.c b/fs/ext3/namei.c
+--- a/fs/ext3/namei.c Thu Nov 7 10:57:30 2002
++++ b/fs/ext3/namei.c Thu Nov 7 10:57:30 2002
+@@ -546,6 +546,15 @@
+ if (!frame)
+ return err;
+
++ /* Add '.' and '..' from the htree header */
++ if (!start_hash && !start_minor_hash) {
++ de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
++ ext3_htree_store_dirent(dir_file, 0, 0, de);
++ de = ext3_next_entry(de);
++ ext3_htree_store_dirent(dir_file, 0, 0, de);
++ count += 2;
++ }
++
+ while (1) {
+ block = dx_get_block(frame->at);
+ dxtrace(printk("Reading block %d\n", block));
--- /dev/null
+# This is a BitKeeper generated patch for the following project:
+# Project Name: Linux kernel tree
+#
+# fs/ext3/dir.c | 7 +++++--
+# fs/ext3/namei.c | 11 +++++++----
+# include/linux/ext3_fs.h | 2 +-
+# 3 files changed, 13 insertions(+), 7 deletions(-)
+#
+# The following is the BitKeeper ChangeSet Log
+# --------------------------------------------
+# 02/11/07 tytso@snap.thunk.org 1.778
+# Check for failed kmalloc() in ext3_htree_store_dirent()
+#
+# This patch checks for a failed kmalloc() in ext3_htree_store_dirent(),
+# and passes the error up to its caller, ext3_htree_fill_tree().
+# --------------------------------------------
+#
+diff -Nru a/fs/ext3/dir.c b/fs/ext3/dir.c
+--- a/fs/ext3/dir.c Thu Nov 7 10:57:34 2002
++++ b/fs/ext3/dir.c Thu Nov 7 10:57:34 2002
+@@ -308,7 +308,7 @@
+ /*
+ * Given a directory entry, enter it into the fname rb tree.
+ */
+-void ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
++int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
+ __u32 minor_hash,
+ struct ext3_dir_entry_2 *dirent)
+ {
+@@ -323,6 +323,8 @@
+ /* Create and allocate the fname structure */
+ len = sizeof(struct fname) + dirent->name_len + 1;
+ new_fn = kmalloc(len, GFP_KERNEL);
++ if (!new_fn)
++ return -ENOMEM;
+ memset(new_fn, 0, len);
+ new_fn->hash = hash;
+ new_fn->minor_hash = minor_hash;
+@@ -344,7 +346,7 @@
+ (new_fn->minor_hash == fname->minor_hash)) {
+ new_fn->next = fname->next;
+ fname->next = new_fn;
+- return;
++ return 0;
+ }
+
+ if (new_fn->hash < fname->hash)
+@@ -359,6 +361,7 @@
+
+ rb_link_node(&new_fn->rb_hash, parent, p);
+ rb_insert_color(&new_fn->rb_hash, &info->root);
++ return 0;
+ }
+
+
+diff -Nru a/fs/ext3/namei.c b/fs/ext3/namei.c
+--- a/fs/ext3/namei.c Thu Nov 7 10:57:34 2002
++++ b/fs/ext3/namei.c Thu Nov 7 10:57:34 2002
+@@ -549,9 +549,11 @@
+ /* Add '.' and '..' from the htree header */
+ if (!start_hash && !start_minor_hash) {
+ de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
+- ext3_htree_store_dirent(dir_file, 0, 0, de);
++ if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0)
++ goto errout;
+ de = ext3_next_entry(de);
+- ext3_htree_store_dirent(dir_file, 0, 0, de);
++ if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0)
++ goto errout;
+ count += 2;
+ }
+
+@@ -570,8 +572,9 @@
+ ((hinfo.hash == start_hash) &&
+ (hinfo.minor_hash < start_minor_hash)))
+ continue;
+- ext3_htree_store_dirent(dir_file, hinfo.hash,
+- hinfo.minor_hash, de);
++ if ((err = ext3_htree_store_dirent(dir_file,
++ hinfo.hash, hinfo.minor_hash, de)) != 0)
++ goto errout;
+ count++;
+ }
+ brelse (bh);
+diff -Nru a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
+--- a/include/linux/ext3_fs.h Thu Nov 7 10:57:34 2002
++++ b/include/linux/ext3_fs.h Thu Nov 7 10:57:34 2002
+@@ -682,7 +682,7 @@
+ extern int ext3_check_dir_entry(const char *, struct inode *,
+ struct ext3_dir_entry_2 *,
+ struct buffer_head *, unsigned long);
+-extern void ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
++extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
+ __u32 minor_hash,
+ struct ext3_dir_entry_2 *dirent);
+ extern void ext3_htree_free_dir_info(struct dir_private_info *p);
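
The shape of the fix above is the standard kernel idiom: the leaf allocator returns -ENOMEM instead of touching missing memory, and ext3_htree_fill_tree() funnels every failure through a single errout label. A condensed, illustrative version of that pattern (store_one()/fill_two() are hypothetical names, not functions in the tree):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Leaf: allocate one record, report -ENOMEM rather than oopsing later. */
static int store_one(void **slot, size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	memset(p, 0, len);	/* 2.4 has no kzalloc() */
	*slot = p;
	return 0;
}

/* Caller: one unwind point, as in ext3_htree_fill_tree() above. */
static int fill_two(void **a, void **b)
{
	int err;

	*a = *b = NULL;
	if ((err = store_one(a, 64)) != 0)
		goto errout;
	if ((err = store_one(b, 128)) != 0)
		goto errout;
	return 0;
errout:
	if (*a)
		kfree(*a);
	return err;
}
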
--- /dev/null
+# This is a BitKeeper generated patch for the following project:
+# Project Name: Linux kernel tree
+#
+# namei.c | 21 ++++++++++++++++++++-
+# 1 files changed, 20 insertions(+), 1 deletion(-)
+#
+# The following is the BitKeeper ChangeSet Log
+# --------------------------------------------
+# 02/11/07 tytso@snap.thunk.org 1.779
+# Fix ext3 htree rename bug.
+#
+# This fixes an ext3 htree bug pointed out by Christopher Li; if
+# adding the new name to the directory causes a split, this can cause
+# the directory entry containing the old name to move to another
+# block, and then the removal of the old name will fail.
+# --------------------------------------------
+#
+diff -Nru a/fs/ext3/namei.c b/fs/ext3/namei.c
+--- a/fs/ext3/namei.c Thu Nov 7 10:57:49 2002
++++ b/fs/ext3/namei.c Thu Nov 7 10:57:49 2002
+@@ -2173,7 +2173,26 @@
+ /*
+ * ok, that's it
+ */
+- ext3_delete_entry(handle, old_dir, old_de, old_bh);
++ retval = ext3_delete_entry(handle, old_dir, old_de, old_bh);
++ if (retval == -ENOENT) {
++ /*
++ * old_de could have moved out from under us.
++ */
++ struct buffer_head *old_bh2;
++ struct ext3_dir_entry_2 *old_de2;
++
++ old_bh2 = ext3_find_entry(old_dentry, &old_de2);
++ if (old_bh2) {
++ retval = ext3_delete_entry(handle, old_dir,
++ old_de2, old_bh2);
++ brelse(old_bh2);
++ }
++ }
++ if (retval) {
++ ext3_warning(old_dir->i_sb, "ext3_rename",
++ "Deleting old file (%lu), %d, error=%d",
++ old_dir->i_ino, old_dir->i_nlink, retval);
++ }
+
+ if (new_inode) {
+ new_inode->i_nlink--;
--- /dev/null
+--- linux/fs/ext3/ialloc.c.orig Sat Oct 19 11:42:23 2002
++++ linux/fs/ext3/ialloc.c Sat Jan 4 12:14:18 2003
+@@ -64,8 +64,8 @@ static int read_inode_bitmap (struct sup
+ if (!bh) {
+ ext3_error (sb, "read_inode_bitmap",
+ "Cannot read inode bitmap - "
+- "block_group = %lu, inode_bitmap = %lu",
+- block_group, (unsigned long) gdp->bg_inode_bitmap);
++ "block_group = %lu, inode_bitmap = %u",
++ block_group, gdp->bg_inode_bitmap);
+ retval = -EIO;
+ }
+ /*
+@@ -531,19 +532,19 @@ out:
+ }
+
+ /* Verify that we are loading a valid orphan from disk */
+-struct inode *ext3_orphan_get (struct super_block * sb, ino_t ino)
++struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
+ {
+- ino_t max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
++ unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
+ unsigned long block_group;
+ int bit;
+ int bitmap_nr;
+ struct buffer_head *bh;
+ struct inode *inode = NULL;
+-
++
+ /* Error cases - e2fsck has already cleaned up for us */
+ if (ino > max_ino) {
+ ext3_warning(sb, __FUNCTION__,
+- "bad orphan ino %ld! e2fsck was run?\n", ino);
++ "bad orphan ino %lu! e2fsck was run?\n", ino);
+ return NULL;
+ }
+
+@@ -552,7 +553,7 @@ struct inode *ext3_orphan_get (struct su
+ if ((bitmap_nr = load_inode_bitmap(sb, block_group)) < 0 ||
+ !(bh = EXT3_SB(sb)->s_inode_bitmap[bitmap_nr])) {
+ ext3_warning(sb, __FUNCTION__,
+- "inode bitmap error for orphan %ld\n", ino);
++ "inode bitmap error for orphan %lu\n", ino);
+ return NULL;
+ }
+
+@@ -563,7 +564,7 @@ struct inode *ext3_orphan_get (struct su
+ if (!ext3_test_bit(bit, bh->b_data) || !(inode = iget(sb, ino)) ||
+ is_bad_inode(inode) || NEXT_ORPHAN(inode) > max_ino) {
+ ext3_warning(sb, __FUNCTION__,
+- "bad orphan inode %ld! e2fsck was run?\n", ino);
++ "bad orphan inode %lu! e2fsck was run?\n", ino);
+ printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%ld) = %d\n",
+ bit, bh->b_blocknr, ext3_test_bit(bit, bh->b_data));
+ printk(KERN_NOTICE "inode=%p\n", inode);
+@@ -570,9 +571,9 @@ struct inode *ext3_orphan_get (struct su
+ if (inode) {
+ printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
+ is_bad_inode(inode));
+- printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%d\n",
++ printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
+ NEXT_ORPHAN(inode));
+- printk(KERN_NOTICE "max_ino=%ld\n", max_ino);
++ printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
+ }
+ /* Avoid freeing blocks if we got a bad deleted inode */
+ if (inode && inode->i_nlink == 0)
+--- linux/fs/ext3/namei.c.orig Sat Oct 19 11:42:45 2002
++++ linux/fs/ext3/namei.c Sat Jan 4 12:13:27 2003
+@@ -716,10 +716,10 @@ int ext3_orphan_del(handle_t *handle, st
+ {
+ struct list_head *prev;
+ struct ext3_sb_info *sbi;
+- ino_t ino_next;
++ unsigned long ino_next;
+ struct ext3_iloc iloc;
+ int err = 0;
+-
++
+ lock_super(inode->i_sb);
+ if (list_empty(&inode->u.ext3_i.i_orphan)) {
+ unlock_super(inode->i_sb);
+@@ -730,7 +730,7 @@ int ext3_orphan_del(handle_t *handle, st
+ prev = inode->u.ext3_i.i_orphan.prev;
+ sbi = EXT3_SB(inode->i_sb);
+
+- jbd_debug(4, "remove inode %ld from orphan list\n", inode->i_ino);
++ jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
+
+ list_del(&inode->u.ext3_i.i_orphan);
+ INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);
+@@ -741,13 +741,13 @@ int ext3_orphan_del(handle_t *handle, st
+ * list in memory. */
+ if (!handle)
+ goto out;
+-
++
+ err = ext3_reserve_inode_write(handle, inode, &iloc);
+ if (err)
+ goto out_err;
+
+ if (prev == &sbi->s_orphan) {
+- jbd_debug(4, "superblock will point to %ld\n", ino_next);
++ jbd_debug(4, "superblock will point to %lu\n", ino_next);
+ BUFFER_TRACE(sbi->s_sbh, "get_write_access");
+ err = ext3_journal_get_write_access(handle, sbi->s_sbh);
+ if (err)
+@@ -758,8 +758,8 @@ int ext3_orphan_del(handle_t *handle, st
+ struct ext3_iloc iloc2;
+ struct inode *i_prev =
+ list_entry(prev, struct inode, u.ext3_i.i_orphan);
+-
+- jbd_debug(4, "orphan inode %ld will point to %ld\n",
++
++ jbd_debug(4, "orphan inode %lu will point to %lu\n",
+ i_prev->i_ino, ino_next);
+ err = ext3_reserve_inode_write(handle, i_prev, &iloc2);
+ if (err)
+@@ -774,7 +774,7 @@ int ext3_orphan_del(handle_t *handle, st
+ if (err)
+ goto out_brelse;
+
+-out_err:
++out_err:
+ ext3_std_error(inode->i_sb, err);
+ out:
+ unlock_super(inode->i_sb);
+--- linux/include/linux/ext3_fs.h.orig Thu Jan 2 16:10:24 2003
++++ linux/include/linux/ext3_fs.h Sat Jan 4 12:25:41 2003
+@@ -622,7 +622,7 @@ extern int ext3_sync_file (struct file *
+ /* ialloc.c */
+ extern struct inode * ext3_new_inode (handle_t *, const struct inode *, int);
+ extern void ext3_free_inode (handle_t *, struct inode *);
+-extern struct inode * ext3_orphan_get (struct super_block *, ino_t);
++extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
+ extern unsigned long ext3_count_free_inodes (struct super_block *);
+ extern void ext3_check_inodes_bitmap (struct super_block *);
+ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
--- /dev/null
+diff -ru lum-2.4.18-um30/fs/ext3/balloc.c uml-2.4.18-12.5/fs/ext3/balloc.c
+--- lum-2.4.18-um30/fs/ext3/balloc.c Mon Feb 25 12:38:08 2002
++++ uml-2.4.18-12.5/fs/ext3/balloc.c Thu Sep 19 13:40:11 2002
+@@ -276,7 +276,8 @@
+ }
+ lock_super (sb);
+ es = sb->u.ext3_sb.s_es;
+- if (block < le32_to_cpu(es->s_first_data_block) ||
++ if (block < le32_to_cpu(es->s_first_data_block) ||
++ block + count < block ||
+ (block + count) > le32_to_cpu(es->s_blocks_count)) {
+ ext3_error (sb, "ext3_free_blocks",
+ "Freeing blocks not in datazone - "
+@@ -309,17 +310,6 @@
+ if (!gdp)
+ goto error_return;
+
+- if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
+- in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
+- in_range (block, le32_to_cpu(gdp->bg_inode_table),
+- sb->u.ext3_sb.s_itb_per_group) ||
+- in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
+- sb->u.ext3_sb.s_itb_per_group))
+- ext3_error (sb, "ext3_free_blocks",
+- "Freeing blocks in system zones - "
+- "Block = %lu, count = %lu",
+- block, count);
+-
+ /*
+ * We are about to start releasing blocks in the bitmap,
+ * so we need undo access.
+@@ -345,14 +335,24 @@
+ if (err)
+ goto error_return;
+
+- for (i = 0; i < count; i++) {
++ for (i = 0; i < count; i++, block++) {
++ if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
++ block == le32_to_cpu(gdp->bg_inode_bitmap) ||
++ in_range(block, le32_to_cpu(gdp->bg_inode_table),
++ sb->u.ext2_sb.s_itb_per_group)) {
++ ext3_error(sb, __FUNCTION__,
++ "Freeing block in system zone - block = %lu",
++ block);
++ continue;
++ }
++
+ /*
+ * An HJ special. This is expensive...
+ */
+ #ifdef CONFIG_JBD_DEBUG
+ {
+ struct buffer_head *debug_bh;
+- debug_bh = sb_get_hash_table(sb, block + i);
++ debug_bh = sb_get_hash_table(sb, block);
+ if (debug_bh) {
+ BUFFER_TRACE(debug_bh, "Deleted!");
+ if (!bh2jh(bitmap_bh)->b_committed_data)
+@@ -365,9 +365,8 @@
+ #endif
+ BUFFER_TRACE(bitmap_bh, "clear bit");
+ if (!ext3_clear_bit (bit + i, bitmap_bh->b_data)) {
+- ext3_error (sb, __FUNCTION__,
+- "bit already cleared for block %lu",
+- block + i);
++ ext3_error(sb, __FUNCTION__,
++ "bit already cleared for block %lu", block);
+ BUFFER_TRACE(bitmap_bh, "bit already cleared");
+ } else {
+ dquot_freed_blocks++;
+@@ -415,7 +417,6 @@
+ if (!err) err = ret;
+
+ if (overflow && !err) {
+- block += count;
+ count = overflow;
+ goto do_more;
+ }
+@@ -542,6 +543,7 @@
+ int i, j, k, tmp, alloctmp;
+ int bitmap_nr;
+ int fatal = 0, err;
++ int performed_allocation = 0;
+ struct super_block * sb;
+ struct ext3_group_desc * gdp;
+ struct ext3_super_block * es;
+@@ -575,6 +577,7 @@
+
+ ext3_debug ("goal=%lu.\n", goal);
+
++repeat:
+ /*
+ * First, test whether the goal block is free.
+ */
+@@ -644,8 +647,7 @@
+ }
+
+ /* No space left on the device */
+- unlock_super (sb);
+- return 0;
++ goto out;
+
+ search_back:
+ /*
+@@ -684,16 +686,28 @@
+ if (tmp == le32_to_cpu(gdp->bg_block_bitmap) ||
+ tmp == le32_to_cpu(gdp->bg_inode_bitmap) ||
+ in_range (tmp, le32_to_cpu(gdp->bg_inode_table),
+- sb->u.ext3_sb.s_itb_per_group))
+- ext3_error (sb, "ext3_new_block",
+- "Allocating block in system zone - "
+- "block = %u", tmp);
++ EXT3_SB(sb)->s_itb_per_group)) {
++ ext3_error(sb, __FUNCTION__,
++ "Allocating block in system zone - block = %u", tmp);
++
++ /* Note: This will potentially use up one of the handle's
++ * buffer credits. Normally we have way too many credits,
++ * so that is OK. In _very_ rare cases it might not be OK.
++ * We will trigger an assertion if we run out of credits,
++ * and we will have to do a full fsck of the filesystem -
++ * better than randomly corrupting filesystem metadata.
++ */
++ ext3_set_bit(j, bh->b_data);
++ goto repeat;
++ }
++
+
+ /* The superblock lock should guard against anybody else beating
+ * us to this point! */
+ J_ASSERT_BH(bh, !ext3_test_bit(j, bh->b_data));
+ BUFFER_TRACE(bh, "setting bitmap bit");
+ ext3_set_bit(j, bh->b_data);
++ performed_allocation = 1;
+
+ #ifdef CONFIG_JBD_DEBUG
+ {
+@@ -815,6 +829,11 @@
+ ext3_std_error(sb, fatal);
+ }
+ unlock_super (sb);
++ /*
++ * Undo the block allocation
++ */
++ if (!performed_allocation)
++ DQUOT_FREE_BLOCK(inode, 1);
+ return 0;
+
+ }
+diff -ru lum-2.4.18-um30/fs/ext3/file.c uml-2.4.18-12.5/fs/ext3/file.c
+--- lum-2.4.18-um30/fs/ext3/file.c Thu Nov 15 14:37:55 2001
++++ uml-2.4.18-12.5/fs/ext3/file.c Thu Sep 19 13:40:11 2002
+@@ -61,19 +61,52 @@
+ static ssize_t
+ ext3_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
+ {
++ int ret, err;
+ struct inode *inode = file->f_dentry->d_inode;
+
+- /*
+- * Nasty: if the file is subject to synchronous writes then we need
+- * to force generic_osync_inode() to call ext3_write_inode().
+- * We do that by marking the inode dirty. This adds much more
+- * computational expense than we need, but we're going to sync
+- * anyway.
+- */
+- if (IS_SYNC(inode) || (file->f_flags & O_SYNC))
+- mark_inode_dirty(inode);
++ ret = generic_file_write(file, buf, count, ppos);
+
+- return generic_file_write(file, buf, count, ppos);
++ /* Skip file flushing code if there was an error, or if nothing
++ was written. */
++ if (ret <= 0)
++ return ret;
++
++ /* If the inode is IS_SYNC, or is O_SYNC and we are doing
++ data-journaling, then we need to make sure that we force the
++ transaction to disk to keep all metadata uptodate
++ synchronously. */
++
++ if (file->f_flags & O_SYNC) {
++ /* If we are non-data-journaled, then the dirty data has
++ already been flushed to backing store by
++ generic_osync_inode, and the inode has been flushed
++ too if there have been any modifications other than
++ mere timestamp updates.
++
++ Open question --- do we care about flushing
++ timestamps too if the inode is IS_SYNC? */
++ if (!ext3_should_journal_data(inode))
++ return ret;
++
++ goto force_commit;
++ }
++
++ /* So we know that there has been no forced data flush. If the
++ inode is marked IS_SYNC, we need to force one ourselves. */
++ if (!IS_SYNC(inode))
++ return ret;
++
++ /* Open question #2 --- should we force data to disk here too?
++ If we don't, the only impact is that data=writeback
++ filesystems won't flush data to disk automatically on
++ IS_SYNC, only metadata (but historically, that is what ext2
++ has done.) */
++
++force_commit:
++ err = ext3_force_commit(inode->i_sb);
++ if (err)
++ return err;
++ return ret;
+ }
+
+ struct file_operations ext3_file_operations = {
+diff -ru lum-2.4.18-um30/fs/ext3/fsync.c uml-2.4.18-12.5/fs/ext3/fsync.c
+--- lum-2.4.18-um30/fs/ext3/fsync.c Tue Nov 20 22:34:13 2001
++++ uml-2.4.18-12.5/fs/ext3/fsync.c Thu Sep 19 13:40:11 2002
+@@ -62,7 +62,12 @@
+ * we'll end up waiting on them in commit.
+ */
+ ret = fsync_inode_buffers(inode);
+- ret |= fsync_inode_data_buffers(inode);
++
++ /* In writeback mode, we need to force out data buffers too. In
++ * the other modes, ext3_force_commit takes care of forcing out
++ * just the right data blocks. */
++ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
++ ret |= fsync_inode_data_buffers(inode);
+
+ ext3_force_commit(inode->i_sb);
+
+diff -ru lum-2.4.18-um30/fs/ext3/ialloc.c uml-2.4.18-12.5/fs/ext3/ialloc.c
+--- lum-2.4.18-um30/fs/ext3/ialloc.c Mon Feb 25 12:38:08 2002
++++ uml-2.4.18-12.5/fs/ext3/ialloc.c Thu Sep 19 13:40:11 2002
+@@ -392,7 +392,7 @@
+
+ err = -ENOSPC;
+ if (!gdp)
+- goto fail;
++ goto out;
+
+ err = -EIO;
+ bitmap_nr = load_inode_bitmap (sb, i);
+@@ -523,9 +523,10 @@
+ return inode;
+
+ fail:
++ ext3_std_error(sb, err);
++out:
+ unlock_super(sb);
+ iput(inode);
+- ext3_std_error(sb, err);
+ return ERR_PTR(err);
+ }
+
+diff -ru lum-2.4.18-um30/fs/ext3/inode.c uml-2.4.18-12.5/fs/ext3/inode.c
+--- lum-2.4.18-um30/fs/ext3/inode.c Mon Feb 25 12:38:08 2002
++++ uml-2.4.18-12.5/fs/ext3/inode.c Thu Sep 19 13:40:11 2002
+@@ -412,6 +412,7 @@
+ return NULL;
+
+ changed:
++ brelse(bh);
+ *err = -EAGAIN;
+ goto no_block;
+ failure:
+@@ -581,8 +582,6 @@
+
+ parent = nr;
+ }
+- if (IS_SYNC(inode))
+- handle->h_sync = 1;
+ }
+ if (n == num)
+ return 0;
+@@ -1015,8 +1018,8 @@
+ unsigned from, unsigned to)
+ {
+ struct inode *inode = page->mapping->host;
+- handle_t *handle = ext3_journal_current_handle();
+ int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
++ handle_t *handle;
+
+ lock_kernel();
+ handle = ext3_journal_start(inode, needed_blocks);
+diff -ru lum-2.4.18-um30/fs/ext3/namei.c uml-2.4.18-12.5/fs/ext3/namei.c
+--- lum-2.4.18-um30/fs/ext3/namei.c Fri Nov 9 15:25:04 2001
++++ uml-2.4.18-12.5/fs/ext3/namei.c Thu Sep 19 13:40:11 2002
+@@ -354,8 +355,8 @@
+ */
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
+- ext3_mark_inode_dirty(handle, dir);
+ dir->i_version = ++event;
++ ext3_mark_inode_dirty(handle, dir);
+ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+ ext3_journal_dirty_metadata(handle, bh);
+ brelse(bh);
+@@ -464,8 +465,8 @@
+ inode->i_op = &ext3_file_inode_operations;
+ inode->i_fop = &ext3_file_operations;
+ inode->i_mapping->a_ops = &ext3_aops;
+- ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_nondir(handle, dentry, inode);
++ ext3_mark_inode_dirty(handle, inode);
+ }
+ ext3_journal_stop(handle, dir);
+ return err;
+@@ -489,8 +490,8 @@
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ init_special_inode(inode, mode, rdev);
+- ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_nondir(handle, dentry, inode);
++ ext3_mark_inode_dirty(handle, inode);
+ }
+ ext3_journal_stop(handle, dir);
+ return err;
+@@ -933,8 +934,8 @@
+ inode->i_size = l-1;
+ }
+ inode->u.ext3_i.i_disksize = inode->i_size;
+- ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_nondir(handle, dentry, inode);
++ ext3_mark_inode_dirty(handle, inode);
+ out_stop:
+ ext3_journal_stop(handle, dir);
+ return err;
+@@ -970,8 +971,8 @@
+ ext3_inc_count(handle, inode);
+ atomic_inc(&inode->i_count);
+
+- ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_nondir(handle, dentry, inode);
++ ext3_mark_inode_dirty(handle, inode);
+ ext3_journal_stop(handle, dir);
+ return err;
+ }
+diff -ru lum-2.4.18-um30/fs/ext3/super.c uml-2.4.18-12.5/fs/ext3/super.c
+--- lum-2.4.18-um30/fs/ext3/super.c Fri Jul 12 17:59:37 2002
++++ uml-2.4.18-12.5/fs/ext3/super.c Thu Sep 19 13:40:11 2002
+@@ -1589,8 +1589,10 @@
+ journal_t *journal = EXT3_SB(sb)->s_journal;
+
+ /* Now we set up the journal barrier. */
++ unlock_super(sb);
+ journal_lock_updates(journal);
+ journal_flush(journal);
++ lock_super(sb);
+
+ /* Journal blocked and flushed, clear needs_recovery flag. */
+ EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
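
The ext3_file_write() rework in the hunk above hinges on ext3_should_journal_data(): only when file data itself goes through the journal does an O_SYNC write need a full commit, since otherwise generic_osync_inode() has already pushed the dirty data to backing store. As a paraphrase (not the tree's exact code) of what that predicate checks:

#include <linux/fs.h>
#include <linux/ext3_fs.h>

/* Paraphrase of the data-journaling test: true if the filesystem is mounted
 * data=journal, the inode carries the per-file journal-data flag, or the
 * inode is not a regular file (its "data" is really metadata). */
static int inode_journals_data(struct inode *inode)
{
	if (!S_ISREG(inode->i_mode))
		return 1;
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
		return 1;
	if (inode->u.ext3_i.i_flags & EXT3_JOURNAL_DATA_FL)
		return 1;
	return 0;
}
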
--- /dev/null
+--- ./fs/ext3/balloc.c.orig Fri Apr 12 10:27:49 2002
++++ ./fs/ext3/balloc.c Tue May 7 15:35:59 2002
+@@ -46,18 +46,18 @@ struct ext3_group_desc * ext3_get_group_
+ unsigned long desc;
+ struct ext3_group_desc * gdp;
+
+- if (block_group >= sb->u.ext3_sb.s_groups_count) {
++ if (block_group >= EXT3_SB(sb)->s_groups_count) {
+ ext3_error (sb, "ext3_get_group_desc",
+ "block_group >= groups_count - "
+ "block_group = %d, groups_count = %lu",
+- block_group, sb->u.ext3_sb.s_groups_count);
++ block_group, EXT3_SB(sb)->s_groups_count);
+
+ return NULL;
+ }
+
+ group_desc = block_group / EXT3_DESC_PER_BLOCK(sb);
+ desc = block_group % EXT3_DESC_PER_BLOCK(sb);
+- if (!sb->u.ext3_sb.s_group_desc[group_desc]) {
++ if (!EXT3_SB(sb)->s_group_desc[group_desc]) {
+ ext3_error (sb, "ext3_get_group_desc",
+ "Group descriptor not loaded - "
+ "block_group = %d, group_desc = %lu, desc = %lu",
+@@ -66,9 +66,9 @@ struct ext3_group_desc * ext3_get_group_
+ }
+
+ gdp = (struct ext3_group_desc *)
+- sb->u.ext3_sb.s_group_desc[group_desc]->b_data;
++ EXT3_SB(sb)->s_group_desc[group_desc]->b_data;
+ if (bh)
+- *bh = sb->u.ext3_sb.s_group_desc[group_desc];
++ *bh = EXT3_SB(sb)->s_group_desc[group_desc];
+ return gdp + desc;
+ }
+
+@@ -104,8 +104,8 @@ static int read_block_bitmap (struct sup
+ * this group. The IO will be retried next time.
+ */
+ error_out:
+- sb->u.ext3_sb.s_block_bitmap_number[bitmap_nr] = block_group;
+- sb->u.ext3_sb.s_block_bitmap[bitmap_nr] = bh;
++ EXT3_SB(sb)->s_block_bitmap_number[bitmap_nr] = block_group;
++ EXT3_SB(sb)->s_block_bitmap[bitmap_nr] = bh;
+ return retval;
+ }
+
+@@ -128,16 +128,17 @@ static int __load_block_bitmap (struct s
+ int i, j, retval = 0;
+ unsigned long block_bitmap_number;
+ struct buffer_head * block_bitmap;
++ struct ext3_sb_info *sbi = EXT3_SB(sb);
+
+- if (block_group >= sb->u.ext3_sb.s_groups_count)
++ if (block_group >= sbi->s_groups_count)
+ ext3_panic (sb, "load_block_bitmap",
+ "block_group >= groups_count - "
+ "block_group = %d, groups_count = %lu",
+- block_group, sb->u.ext3_sb.s_groups_count);
++ block_group, EXT3_SB(sb)->s_groups_count);
+
+- if (sb->u.ext3_sb.s_groups_count <= EXT3_MAX_GROUP_LOADED) {
+- if (sb->u.ext3_sb.s_block_bitmap[block_group]) {
+- if (sb->u.ext3_sb.s_block_bitmap_number[block_group] ==
++ if (sbi->s_groups_count <= EXT3_MAX_GROUP_LOADED) {
++ if (sbi->s_block_bitmap[block_group]) {
++ if (sbi->s_block_bitmap_number[block_group] ==
+ block_group)
+ return block_group;
+ ext3_error (sb, "__load_block_bitmap",
+@@ -149,21 +150,20 @@ static int __load_block_bitmap (struct s
+ return block_group;
+ }
+
+- for (i = 0; i < sb->u.ext3_sb.s_loaded_block_bitmaps &&
+- sb->u.ext3_sb.s_block_bitmap_number[i] != block_group; i++)
++ for (i = 0; i < sbi->s_loaded_block_bitmaps &&
++ sbi->s_block_bitmap_number[i] != block_group; i++)
+ ;
+- if (i < sb->u.ext3_sb.s_loaded_block_bitmaps &&
+- sb->u.ext3_sb.s_block_bitmap_number[i] == block_group) {
+- block_bitmap_number = sb->u.ext3_sb.s_block_bitmap_number[i];
+- block_bitmap = sb->u.ext3_sb.s_block_bitmap[i];
++ if (i < sbi->s_loaded_block_bitmaps &&
++ sbi->s_block_bitmap_number[i] == block_group) {
++ block_bitmap_number = sbi->s_block_bitmap_number[i];
++ block_bitmap = sbi->s_block_bitmap[i];
+ for (j = i; j > 0; j--) {
+- sb->u.ext3_sb.s_block_bitmap_number[j] =
+- sb->u.ext3_sb.s_block_bitmap_number[j - 1];
+- sb->u.ext3_sb.s_block_bitmap[j] =
+- sb->u.ext3_sb.s_block_bitmap[j - 1];
++ sbi->s_block_bitmap_number[j] =
++ sbi->s_block_bitmap_number[j - 1];
++ sbi->s_block_bitmap[j] = sbi->s_block_bitmap[j - 1];
+ }
+- sb->u.ext3_sb.s_block_bitmap_number[0] = block_bitmap_number;
+- sb->u.ext3_sb.s_block_bitmap[0] = block_bitmap;
++ sbi->s_block_bitmap_number[0] = block_bitmap_number;
++ sbi->s_block_bitmap[0] = block_bitmap;
+
+ /*
+ * There's still one special case here --- if block_bitmap == 0
+@@ -173,17 +173,14 @@ static int __load_block_bitmap (struct s
+ if (!block_bitmap)
+ retval = read_block_bitmap (sb, block_group, 0);
+ } else {
+- if (sb->u.ext3_sb.s_loaded_block_bitmaps<EXT3_MAX_GROUP_LOADED)
+- sb->u.ext3_sb.s_loaded_block_bitmaps++;
++ if (sbi->s_loaded_block_bitmaps<EXT3_MAX_GROUP_LOADED)
++ sbi->s_loaded_block_bitmaps++;
+ else
+- brelse (sb->u.ext3_sb.s_block_bitmap
+- [EXT3_MAX_GROUP_LOADED - 1]);
+- for (j = sb->u.ext3_sb.s_loaded_block_bitmaps - 1;
+- j > 0; j--) {
+- sb->u.ext3_sb.s_block_bitmap_number[j] =
+- sb->u.ext3_sb.s_block_bitmap_number[j - 1];
+- sb->u.ext3_sb.s_block_bitmap[j] =
+- sb->u.ext3_sb.s_block_bitmap[j - 1];
++ brelse(sbi->s_block_bitmap[EXT3_MAX_GROUP_LOADED - 1]);
++ for (j = sbi->s_loaded_block_bitmaps - 1; j > 0; j--) {
++ sbi->s_block_bitmap_number[j] =
++ sbi->s_block_bitmap_number[j - 1];
++ sbi->s_block_bitmap[j] = sbi->s_block_bitmap[j - 1];
+ }
+ retval = read_block_bitmap (sb, block_group, 0);
+ }
+@@ -206,24 +203,25 @@ static int __load_block_bitmap (struct s
+ static inline int load_block_bitmap (struct super_block * sb,
+ unsigned int block_group)
+ {
++ struct ext3_sb_info *sbi = EXT3_SB(sb);
+ int slot;
+-
++
+ /*
+ * Do the lookup for the slot. First of all, check if we're asking
+ * for the same slot as last time, and did we succeed that last time?
+ */
+- if (sb->u.ext3_sb.s_loaded_block_bitmaps > 0 &&
+- sb->u.ext3_sb.s_block_bitmap_number[0] == block_group &&
+- sb->u.ext3_sb.s_block_bitmap[0]) {
++ if (sbi->s_loaded_block_bitmaps > 0 &&
++ sbi->s_block_bitmap_number[0] == block_group &&
++ sbi->s_block_bitmap[0]) {
+ return 0;
+ }
+ /*
+ * Or can we do a fast lookup based on a loaded group on a filesystem
+ * small enough to be mapped directly into the superblock?
+ */
+- else if (sb->u.ext3_sb.s_groups_count <= EXT3_MAX_GROUP_LOADED &&
+- sb->u.ext3_sb.s_block_bitmap_number[block_group]==block_group
+- && sb->u.ext3_sb.s_block_bitmap[block_group]) {
++ else if (sbi->s_groups_count <= EXT3_MAX_GROUP_LOADED &&
++ sbi->s_block_bitmap_number[block_group] == block_group
++ && sbi->s_block_bitmap[block_group]) {
+ slot = block_group;
+ }
+ /*
+@@ -243,7 +241,7 @@ static inline int load_block_bitmap (str
+ * If it's a valid slot, we may still have cached a previous IO error,
+ * in which case the bh in the superblock cache will be zero.
+ */
+- if (!sb->u.ext3_sb.s_block_bitmap[slot])
++ if (!sbi->s_block_bitmap[slot])
+ return -EIO;
+
+ /*
+@@ -275,7 +273,7 @@ void ext3_free_blocks (handle_t *handle,
+ return;
+ }
+ lock_super (sb);
+- es = sb->u.ext3_sb.s_es;
++ es = EXT3_SB(sb)->s_es;
+ if (block < le32_to_cpu(es->s_first_data_block) ||
+ block + count < block ||
+ (block + count) > le32_to_cpu(es->s_blocks_count)) {
+@@ -304,7 +302,7 @@ do_more:
+ if (bitmap_nr < 0)
+ goto error_return;
+
+- bitmap_bh = sb->u.ext3_sb.s_block_bitmap[bitmap_nr];
++ bitmap_bh = EXT3_SB(sb)->s_block_bitmap[bitmap_nr];
+ gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
+ if (!gdp)
+ goto error_return;
+@@ -330,8 +328,8 @@ do_more:
+ if (err)
+ goto error_return;
+
+- BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get_write_access");
+- err = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh);
++ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
+ if (err)
+ goto error_return;
+
+@@ -341,7 +339,7 @@
+ if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
+ block == le32_to_cpu(gdp->bg_inode_bitmap) ||
+ in_range(block, le32_to_cpu(gdp->bg_inode_table),
+- sb->u.ext2_sb.s_itb_per_group)) {
++ EXT3_SB(sb)->s_itb_per_group)) {
+ ext3_error(sb, __FUNCTION__,
+ "Freeing block in system zone - block = %lu",
+ block);
+@@ -410,8 +407,8 @@ do_more:
+ if (!err) err = ret;
+
+ /* And the superblock */
+- BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "dirtied superblock");
+- ret = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh);
++ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "dirtied superblock");
++ ret = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+ if (!err) err = ret;
+
+ if (overflow && !err) {
+@@ -564,12 +560,12 @@ int ext3_new_block (handle_t *handle, st
+ }
+
+ lock_super (sb);
+- es = sb->u.ext3_sb.s_es;
++ es = EXT3_SB(sb)->s_es;
+ if (le32_to_cpu(es->s_free_blocks_count) <=
+ le32_to_cpu(es->s_r_blocks_count) &&
+- ((sb->u.ext3_sb.s_resuid != current->fsuid) &&
+- (sb->u.ext3_sb.s_resgid == 0 ||
+- !in_group_p (sb->u.ext3_sb.s_resgid)) &&
++ ((EXT3_SB(sb)->s_resuid != current->fsuid) &&
++ (EXT3_SB(sb)->s_resgid == 0 ||
++ !in_group_p (EXT3_SB(sb)->s_resgid)) &&
+ !capable(CAP_SYS_RESOURCE)))
+ goto out;
+
+@@ -598,7 +595,7 @@ int ext3_new_block (handle_t *handle, st
+ if (bitmap_nr < 0)
+ goto io_error;
+
+- bh = sb->u.ext3_sb.s_block_bitmap[bitmap_nr];
++ bh = EXT3_SB(sb)->s_block_bitmap[bitmap_nr];
+
+ ext3_debug ("goal is at %d:%d.\n", i, j);
+
+@@ -621,9 +618,9 @@ int ext3_new_block (handle_t *handle, st
+ * Now search the rest of the groups. We assume that
+ * i and gdp correctly point to the last group visited.
+ */
+- for (k = 0; k < sb->u.ext3_sb.s_groups_count; k++) {
++ for (k = 0; k < EXT3_SB(sb)->s_groups_count; k++) {
+ i++;
+- if (i >= sb->u.ext3_sb.s_groups_count)
++ if (i >= EXT3_SB(sb)->s_groups_count)
+ i = 0;
+ gdp = ext3_get_group_desc (sb, i, &bh2);
+ if (!gdp) {
+@@ -635,7 +632,7 @@ int ext3_new_block (handle_t *handle, st
+ if (bitmap_nr < 0)
+ goto io_error;
+
+- bh = sb->u.ext3_sb.s_block_bitmap[bitmap_nr];
++ bh = EXT3_SB(sb)->s_block_bitmap[bitmap_nr];
+ j = find_next_usable_block(-1, bh,
+ EXT3_BLOCKS_PER_GROUP(sb));
+ if (j >= 0)
+@@ -674,8 +671,8 @@ got_block:
+ fatal = ext3_journal_get_write_access(handle, bh2);
+ if (fatal) goto out;
+
+- BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get_write_access");
+- fatal = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh);
++ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
++ fatal = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
+ if (fatal) goto out;
+
+ tmp = j + i * EXT3_BLOCKS_PER_GROUP(sb)
+@@ -796,7 +804,7 @@ got_block:
+ if (!fatal) fatal = err;
+
+ BUFFER_TRACE(bh, "journal_dirty_metadata for superblock");
+- err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh);
++ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+ if (!fatal) fatal = err;
+
+ sb->s_dirt = 1;
+@@ -829,11 +837,11 @@ unsigned long ext3_count_free_blocks (st
+ int i;
+
+ lock_super (sb);
+- es = sb->u.ext3_sb.s_es;
++ es = EXT3_SB(sb)->s_es;
+ desc_count = 0;
+ bitmap_count = 0;
+ gdp = NULL;
+- for (i = 0; i < sb->u.ext3_sb.s_groups_count; i++) {
++ for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
+ gdp = ext3_get_group_desc (sb, i, NULL);
+ if (!gdp)
+ continue;
+@@ -842,7 +850,7 @@ unsigned long ext3_count_free_blocks (st
+ if (bitmap_nr < 0)
+ continue;
+
+- x = ext3_count_free (sb->u.ext3_sb.s_block_bitmap[bitmap_nr],
++ x = ext3_count_free (EXT3_SB(sb)->s_block_bitmap[bitmap_nr],
+ sb->s_blocksize);
+ printk ("group %d: stored = %d, counted = %lu\n",
+ i, le16_to_cpu(gdp->bg_free_blocks_count), x);
+@@ -853,7 +861,7 @@ unsigned long ext3_count_free_blocks (st
+ unlock_super (sb);
+ return bitmap_count;
+ #else
+- return le32_to_cpu(sb->u.ext3_sb.s_es->s_free_blocks_count);
++ return le32_to_cpu(EXT3_SB(sb)->s_es->s_free_blocks_count);
+ #endif
+ }
+
+@@ -862,7 +870,7 @@ static inline int block_in_use (unsigned
+ unsigned char * map)
+ {
+ return ext3_test_bit ((block -
+- le32_to_cpu(sb->u.ext3_sb.s_es->s_first_data_block)) %
++ le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
+ EXT3_BLOCKS_PER_GROUP(sb), map);
+ }
+
+@@ -930,11 +938,11 @@ void ext3_check_blocks_bitmap (struct su
+ struct ext3_group_desc * gdp;
+ int i;
+
+- es = sb->u.ext3_sb.s_es;
++ es = EXT3_SB(sb)->s_es;
+ desc_count = 0;
+ bitmap_count = 0;
+ gdp = NULL;
+- for (i = 0; i < sb->u.ext3_sb.s_groups_count; i++) {
++ for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
+ gdp = ext3_get_group_desc (sb, i, NULL);
+ if (!gdp)
+ continue;
+@@ -968,7 +976,7 @@ void ext3_check_blocks_bitmap (struct su
+ "Inode bitmap for group %d is marked free",
+ i);
+
+- for (j = 0; j < sb->u.ext3_sb.s_itb_per_group; j++)
++ for (j = 0; j < EXT3_SB(sb)->s_itb_per_group; j++)
+ if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j,
+ sb, bh->b_data))
+ ext3_error (sb, "ext3_check_blocks_bitmap",
+--- ./fs/ext3/dir.c.orig Fri Apr 12 10:27:49 2002
++++ ./fs/ext3/dir.c Tue May 7 14:54:13 2002
+@@ -52,7 +52,7 @@ int ext3_check_dir_entry (const char * f
+ else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+ error_msg = "directory entry across blocks";
+ else if (le32_to_cpu(de->inode) >
+- le32_to_cpu(dir->i_sb->u.ext3_sb.s_es->s_inodes_count))
++ le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
+ error_msg = "inode out of bounds";
+
+ if (error_msg != NULL)
+--- ./fs/ext3/ialloc.c.orig Fri Apr 12 10:27:49 2002
++++ ./fs/ext3/ialloc.c Tue May 7 15:39:26 2002
+@@ -73,8 +73,8 @@ static int read_inode_bitmap (struct sup
+ * this group. The IO will be retried next time.
+ */
+ error_out:
+- sb->u.ext3_sb.s_inode_bitmap_number[bitmap_nr] = block_group;
+- sb->u.ext3_sb.s_inode_bitmap[bitmap_nr] = bh;
++ EXT3_SB(sb)->s_inode_bitmap_number[bitmap_nr] = block_group;
++ EXT3_SB(sb)->s_inode_bitmap[bitmap_nr] = bh;
+ return retval;
+ }
+
+@@ -225,7 +225,7 @@ void ext3_free_inode (handle_t *handle,
+ clear_inode (inode);
+
+ lock_super (sb);
+- es = sb->u.ext3_sb.s_es;
++ es = EXT3_SB(sb)->s_es;
+ if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
+ ext3_error (sb, "ext3_free_inode",
+ "reserved or nonexistent inode %lu", ino);
+@@ -237,7 +237,7 @@ void ext3_free_inode (handle_t *handle,
+ if (bitmap_nr < 0)
+ goto error_return;
+
+- bh = sb->u.ext3_sb.s_inode_bitmap[bitmap_nr];
++ bh = EXT3_SB(sb)->s_inode_bitmap[bitmap_nr];
+
+ BUFFER_TRACE(bh, "get_write_access");
+ fatal = ext3_journal_get_write_access(handle, bh);
+@@ -255,8 +255,8 @@ void ext3_free_inode (handle_t *handle,
+ fatal = ext3_journal_get_write_access(handle, bh2);
+ if (fatal) goto error_return;
+
+- BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get write access");
+- fatal = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh);
++ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get write access");
++ fatal = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
+ if (fatal) goto error_return;
+
+ if (gdp) {
+@@ -271,9 +271,9 @@ void ext3_free_inode (handle_t *handle,
+ if (!fatal) fatal = err;
+ es->s_free_inodes_count =
+ cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) + 1);
+- BUFFER_TRACE(sb->u.ext3_sb.s_sbh,
++ BUFFER_TRACE(EXT3_SB(sb)->s_sbh,
+ "call ext3_journal_dirty_metadata");
+- err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh);
++ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+ if (!fatal) fatal = err;
+ }
+ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+@@ -305,6 +305,8 @@ struct inode * ext3_new_inode (handle_t
+ int i, j, avefreei;
+ struct inode * inode;
+ int bitmap_nr;
++ struct ext3_inode_info *ei;
++ struct ext3_sb_info *sbi;
+ struct ext3_group_desc * gdp;
+ struct ext3_group_desc * tmp;
+ struct ext3_super_block * es;
+@@ -318,19 +320,21 @@ struct inode * ext3_new_inode (handle_t
+ inode = new_inode(sb);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+- init_rwsem(&inode->u.ext3_i.truncate_sem);
++ sbi = EXT3_SB(sb);
++ ei = EXT3_I(inode);
++ init_rwsem(&ei->truncate_sem);
+
+ lock_super (sb);
+- es = sb->u.ext3_sb.s_es;
++ es = sbi->s_es;
+ repeat:
+ gdp = NULL;
+ i = 0;
+
+ if (S_ISDIR(mode)) {
+ avefreei = le32_to_cpu(es->s_free_inodes_count) /
+- sb->u.ext3_sb.s_groups_count;
++ sbi->s_groups_count;
+ if (!gdp) {
+- for (j = 0; j < sb->u.ext3_sb.s_groups_count; j++) {
++ for (j = 0; j < sbi->s_groups_count; j++) {
+ struct buffer_head *temp_buffer;
+ tmp = ext3_get_group_desc (sb, j, &temp_buffer);
+ if (tmp &&
+@@ -350,7 +354,7 @@ repeat:
+ /*
+ * Try to place the inode in its parent directory
+ */
+- i = dir->u.ext3_i.i_block_group;
++ i = EXT3_I(dir)->i_block_group;
+ tmp = ext3_get_group_desc (sb, i, &bh2);
+ if (tmp && le16_to_cpu(tmp->bg_free_inodes_count))
+ gdp = tmp;
+@@ -360,10 +364,10 @@ repeat:
+ * Use a quadratic hash to find a group with a
+ * free inode
+ */
+- for (j = 1; j < sb->u.ext3_sb.s_groups_count; j <<= 1) {
++ for (j = 1; j < sbi->s_groups_count; j <<= 1) {
+ i += j;
+- if (i >= sb->u.ext3_sb.s_groups_count)
+- i -= sb->u.ext3_sb.s_groups_count;
++ if (i >= sbi->s_groups_count)
++ i -= sbi->s_groups_count;
+ tmp = ext3_get_group_desc (sb, i, &bh2);
+ if (tmp &&
+ le16_to_cpu(tmp->bg_free_inodes_count)) {
+@@ -376,9 +380,9 @@ repeat:
+ /*
+ * That failed: try linear search for a free inode
+ */
+- i = dir->u.ext3_i.i_block_group + 1;
+- for (j = 2; j < sb->u.ext3_sb.s_groups_count; j++) {
+- if (++i >= sb->u.ext3_sb.s_groups_count)
++ i = EXT3_I(dir)->i_block_group + 1;
++ for (j = 2; j < sbi->s_groups_count; j++) {
++ if (++i >= sbi->s_groups_count)
+ i = 0;
+ tmp = ext3_get_group_desc (sb, i, &bh2);
+ if (tmp &&
+@@ -399,11 +403,11 @@ repeat:
+ if (bitmap_nr < 0)
+ goto fail;
+
+- bh = sb->u.ext3_sb.s_inode_bitmap[bitmap_nr];
++ bh = sbi->s_inode_bitmap[bitmap_nr];
+
+ if ((j = ext3_find_first_zero_bit ((unsigned long *) bh->b_data,
+- EXT3_INODES_PER_GROUP(sb))) <
+- EXT3_INODES_PER_GROUP(sb)) {
++ sbi->s_inodes_per_group)) <
++ sbi->s_inodes_per_group) {
+ BUFFER_TRACE(bh, "get_write_access");
+ err = ext3_journal_get_write_access(handle, bh);
+ if (err) goto fail;
+@@ -436,8 +440,8 @@ repeat:
+ }
+ goto repeat;
+ }
+- j += i * EXT3_INODES_PER_GROUP(sb) + 1;
+- if (j < EXT3_FIRST_INO(sb) || j > le32_to_cpu(es->s_inodes_count)) {
++ j += i * sbi->s_inodes_per_group + 1;
++ if (j < sbi->s_first_ino || j > le32_to_cpu(es->s_inodes_count)) {
+ ext3_error (sb, "ext3_new_inode",
+ "reserved inode or inode > inodes count - "
+ "block_group = %d,inode=%d", i, j);
+@@ -457,13 +461,13 @@ repeat:
+ err = ext3_journal_dirty_metadata(handle, bh2);
+ if (err) goto fail;
+
+- BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get_write_access");
+- err = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh);
++ BUFFER_TRACE(sbi->s_sbh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, sbi->s_sbh);
+ if (err) goto fail;
+ es->s_free_inodes_count =
+ cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
+- BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "call ext3_journal_dirty_metadata");
+- err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh);
++ BUFFER_TRACE(sbi->s_sbh, "call ext3_journal_dirty_metadata");
++ err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
+ sb->s_dirt = 1;
+ if (err) goto fail;
+
+@@ -483,31 +487,31 @@ repeat:
+ inode->i_blksize = PAGE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+- inode->u.ext3_i.i_flags = dir->u.ext3_i.i_flags & ~EXT3_INDEX_FL;
++ ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
+ if (S_ISLNK(mode))
+- inode->u.ext3_i.i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
++ ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
+ #ifdef EXT3_FRAGMENTS
+- inode->u.ext3_i.i_faddr = 0;
+- inode->u.ext3_i.i_frag_no = 0;
+- inode->u.ext3_i.i_frag_size = 0;
++ ei->i_faddr = 0;
++ ei->i_frag_no = 0;
++ ei->i_frag_size = 0;
+ #endif
+- inode->u.ext3_i.i_file_acl = 0;
+- inode->u.ext3_i.i_dir_acl = 0;
+- inode->u.ext3_i.i_dtime = 0;
+- INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);
++ ei->i_file_acl = 0;
++ ei->i_dir_acl = 0;
++ ei->i_dtime = 0;
++ INIT_LIST_HEAD(&ei->i_orphan);
+ #ifdef EXT3_PREALLOCATE
+- inode->u.ext3_i.i_prealloc_count = 0;
++ ei->i_prealloc_count = 0;
+ #endif
+- inode->u.ext3_i.i_block_group = i;
++ ei->i_block_group = i;
+
+- if (inode->u.ext3_i.i_flags & EXT3_SYNC_FL)
++ if (ei->i_flags & EXT3_SYNC_FL)
+ inode->i_flags |= S_SYNC;
+ if (IS_SYNC(inode))
+ handle->h_sync = 1;
+ insert_inode_hash(inode);
+- inode->i_generation = sb->u.ext3_sb.s_next_generation++;
++ inode->i_generation = sbi->s_next_generation++;
+
+- inode->u.ext3_i.i_state = EXT3_STATE_NEW;
++ ei->i_state = EXT3_STATE_NEW;
+ err = ext3_mark_inode_dirty(handle, inode);
+ if (err) goto fail;
+
+@@ -585,19 +589,19 @@ struct inode *ext3_orphan_get (struct su
+
+ unsigned long ext3_count_free_inodes (struct super_block * sb)
+ {
++ struct ext3_sb_info *sbi = EXT3_SB(sb);
++ struct ext3_super_block *es = sbi->s_es;
+ #ifdef EXT3FS_DEBUG
+- struct ext3_super_block * es;
+ unsigned long desc_count, bitmap_count, x;
+ int bitmap_nr;
+ struct ext3_group_desc * gdp;
+ int i;
+
+ lock_super (sb);
+- es = sb->u.ext3_sb.s_es;
+ desc_count = 0;
+ bitmap_count = 0;
+ gdp = NULL;
+- for (i = 0; i < sb->u.ext3_sb.s_groups_count; i++) {
++ for (i = 0; i < sbi->s_groups_count; i++) {
+ gdp = ext3_get_group_desc (sb, i, NULL);
+ if (!gdp)
+ continue;
+@@ -606,8 +610,8 @@ unsigned long ext3_count_free_inodes (st
+ if (bitmap_nr < 0)
+ continue;
+
+- x = ext3_count_free (sb->u.ext3_sb.s_inode_bitmap[bitmap_nr],
+- EXT3_INODES_PER_GROUP(sb) / 8);
++ x = ext3_count_free(sbi->s_inode_bitmap[bitmap_nr],
++ sbi->s_inodes_per_group / 8);
+ printk ("group %d: stored = %d, counted = %lu\n",
+ i, le16_to_cpu(gdp->bg_free_inodes_count), x);
+ bitmap_count += x;
+@@ -617,7 +621,7 @@ unsigned long ext3_count_free_inodes (st
+ unlock_super (sb);
+ return desc_count;
+ #else
+- return le32_to_cpu(sb->u.ext3_sb.s_es->s_free_inodes_count);
++ return le32_to_cpu(es->s_free_inodes_count);
+ #endif
+ }
+
+@@ -626,16 +630,18 @@ unsigned long ext3_count_free_inodes (st
+ void ext3_check_inodes_bitmap (struct super_block * sb)
+ {
+ struct ext3_super_block * es;
++ struct ext3_sb_info *sbi;
+ unsigned long desc_count, bitmap_count, x;
+ int bitmap_nr;
+ struct ext3_group_desc * gdp;
+ int i;
+
+- es = sb->u.ext3_sb.s_es;
++ sbi = EXT3_SB(sb);
++ es = sbi->s_es;
+ desc_count = 0;
+ bitmap_count = 0;
+ gdp = NULL;
+- for (i = 0; i < sb->u.ext3_sb.s_groups_count; i++) {
++ for (i = 0; i < sbi->s_groups_count; i++) {
+ gdp = ext3_get_group_desc (sb, i, NULL);
+ if (!gdp)
+ continue;
+@@ -644,7 +650,7 @@ void ext3_check_inodes_bitmap (struct su
+ if (bitmap_nr < 0)
+ continue;
+
+- x = ext3_count_free (sb->u.ext3_sb.s_inode_bitmap[bitmap_nr],
++ x = ext3_count_free (sbi->s_inode_bitmap[bitmap_nr],
+ EXT3_INODES_PER_GROUP(sb) / 8);
+ if (le16_to_cpu(gdp->bg_free_inodes_count) != x)
+ ext3_error (sb, "ext3_check_inodes_bitmap",
+--- ./fs/ext3/inode.c.orig Fri Apr 12 10:27:49 2002
++++ ./fs/ext3/inode.c Tue May 7 15:41:23 2002
+@@ -196,7 +196,7 @@ void ext3_delete_inode (struct inode * i
+ * (Well, we could do this if we need to, but heck - it works)
+ */
+ ext3_orphan_del(handle, inode);
+- inode->u.ext3_i.i_dtime = CURRENT_TIME;
++ EXT3_I(inode)->i_dtime = CURRENT_TIME;
+
+ /*
+ * One subtle ordering requirement: if anything has gone wrong
+@@ -220,13 +220,14 @@ no_delete:
+ void ext3_discard_prealloc (struct inode * inode)
+ {
+ #ifdef EXT3_PREALLOCATE
++ struct ext3_inode_info *ei = EXT3_I(inode);
+ lock_kernel();
+ /* Writer: ->i_prealloc* */
+- if (inode->u.ext3_i.i_prealloc_count) {
+- unsigned short total = inode->u.ext3_i.i_prealloc_count;
+- unsigned long block = inode->u.ext3_i.i_prealloc_block;
+- inode->u.ext3_i.i_prealloc_count = 0;
+- inode->u.ext3_i.i_prealloc_block = 0;
++ if (ei->i_prealloc_count) {
++ unsigned short total = ei->i_prealloc_count;
++ unsigned long block = ei->i_prealloc_block;
++ ei->i_prealloc_count = 0;
++ ei->i_prealloc_block = 0;
+ /* Writer: end */
+ ext3_free_blocks (inode, block, total);
+ }
+@@ -243,13 +244,15 @@ static int ext3_alloc_block (handle_t *h
+ unsigned long result;
+
+ #ifdef EXT3_PREALLOCATE
++ struct ext3_inode_info *ei = EXT3_I(inode);
++
+ /* Writer: ->i_prealloc* */
+- if (inode->u.ext3_i.i_prealloc_count &&
+- (goal == inode->u.ext3_i.i_prealloc_block ||
+- goal + 1 == inode->u.ext3_i.i_prealloc_block))
++ if (ei->i_prealloc_count &&
++ (goal == ei->i_prealloc_block ||
++ goal + 1 == ei->i_prealloc_block))
+ {
+- result = inode->u.ext3_i.i_prealloc_block++;
+- inode->u.ext3_i.i_prealloc_count--;
++ result = ei->i_prealloc_block++;
++ ei->i_prealloc_count--;
+ /* Writer: end */
+ ext3_debug ("preallocation hit (%lu/%lu).\n",
+ ++alloc_hits, ++alloc_attempts);
+@@ -259,8 +262,8 @@ static int ext3_alloc_block (handle_t *h
+ alloc_hits, ++alloc_attempts);
+ if (S_ISREG(inode->i_mode))
+ result = ext3_new_block (inode, goal,
+- &inode->u.ext3_i.i_prealloc_count,
+- &inode->u.ext3_i.i_prealloc_block, err);
++ &ei->i_prealloc_count,
++ &ei->i_prealloc_block, err);
+ else
+ result = ext3_new_block (inode, goal, 0, 0, err);
+ /*
+@@ -394,7 +397,7 @@ static Indirect *ext3_get_branch(struct
+
+ *err = 0;
+ /* i_data is not going away, no lock needed */
+- add_chain (chain, NULL, inode->u.ext3_i.i_data + *offsets);
++ add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
+ if (!p->key)
+ goto no_block;
+ while (--depth) {
+@@ -437,7 +440,8 @@ no_block:
+
+ static inline unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
+ {
+- u32 *start = ind->bh ? (u32*) ind->bh->b_data : inode->u.ext3_i.i_data;
++ struct ext3_inode_info *ei = EXT3_I(inode);
++ u32 *start = ind->bh ? (u32*) ind->bh->b_data : ei->i_data;
+ u32 *p;
+
+ /* Try to find previous block */
+@@ -453,9 +456,8 @@ static inline unsigned long ext3_find_ne
+ * It is going to be refered from inode itself? OK, just put it into
+ * the same cylinder group then.
+ */
+- return (inode->u.ext3_i.i_block_group *
+- EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
+- le32_to_cpu(inode->i_sb->u.ext3_sb.s_es->s_first_data_block);
++ return (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
++ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
+ }
+
+ /**
+@@ -474,14 +477,15 @@
+ static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
+ Indirect *partial, unsigned long *goal)
+ {
++ struct ext3_inode_info *ei = EXT3_I(inode);
+ /* Writer: ->i_next_alloc* */
+- if (block == inode->u.ext3_i.i_next_alloc_block + 1) {
+- inode->u.ext3_i.i_next_alloc_block++;
+- inode->u.ext3_i.i_next_alloc_goal++;
++ if (block == ei->i_next_alloc_block + 1) {
++ ei->i_next_alloc_block++;
++ ei->i_next_alloc_goal++;
+ }
+ #ifdef SEARCH_FROM_ZERO
+- inode->u.ext3_i.i_next_alloc_block = 0;
+- inode->u.ext3_i.i_next_alloc_goal = 0;
++ ei->i_next_alloc_block = 0;
++ ei->i_next_alloc_goal = 0;
+ #endif
+ /* Writer: end */
+ /* Reader: pointers, ->i_next_alloc* */
+@@ -490,8 +493,8 @@ static int ext3_find_goal(struct inode *
+ * try the heuristic for sequential allocation,
+ * failing that at least try to get decent locality.
+ */
+- if (block == inode->u.ext3_i.i_next_alloc_block)
+- *goal = inode->u.ext3_i.i_next_alloc_goal;
++ if (block == ei->i_next_alloc_block)
++ *goal = ei->i_next_alloc_goal;
+ if (!*goal)
+ *goal = ext3_find_near(inode, partial);
+ #ifdef SEARCH_FROM_ZERO
+@@ -619,6 +621,7 @@
+ {
+ int i;
+ int err = 0;
++ struct ext3_inode_info *ei = EXT3_I(inode);
+
+ /*
+ * If we're splicing into a [td]indirect block (as opposed to the
+@@ -641,11 +644,11 @@ static int ext3_splice_branch(handle_t *
+ /* That's it */
+
+ *where->p = where->key;
+- inode->u.ext3_i.i_next_alloc_block = block;
+- inode->u.ext3_i.i_next_alloc_goal = le32_to_cpu(where[num-1].key);
++ ei->i_next_alloc_block = block;
++ ei->i_next_alloc_goal = le32_to_cpu(where[num-1].key);
+ #ifdef SEARCH_FROM_ZERO
+- inode->u.ext3_i.i_next_alloc_block = 0;
+- inode->u.ext3_i.i_next_alloc_goal = 0;
++ ei->i_next_alloc_block = 0;
++ ei->i_next_alloc_goal = 0;
+ #endif
+ /* Writer: end */
+
+@@ -729,6 +732,7 @@
+ unsigned long goal;
+ int left;
+ int depth = ext3_block_to_path(inode, iblock, offsets);
++ struct ext3_inode_info *ei = EXT3_I(inode);
+ loff_t new_size;
+
+ J_ASSERT(handle != NULL || create == 0);
+@@ -782,7 +785,7 @@ out:
+ /*
+ * Block out ext3_truncate while we alter the tree
+ */
+- down_read(&inode->u.ext3_i.truncate_sem);
++ down_read(&ei->truncate_sem);
+ err = ext3_alloc_branch(handle, inode, left, goal,
+ offsets+(partial-chain), partial);
+
+@@ -794,7 +797,7 @@ out:
+ if (!err)
+ err = ext3_splice_branch(handle, inode, iblock, chain,
+ partial, left);
+- up_read(&inode->u.ext3_i.truncate_sem);
++ up_read(&ei->truncate_sem);
+ if (err == -EAGAIN)
+ goto changed;
+ if (err)
+@@ -807,8 +810,8 @@ out:
+ * truncate is in progress. It is racy between multiple parallel
+ * instances of get_block, but we have the BKL.
+ */
+- if (new_size > inode->u.ext3_i.i_disksize)
+- inode->u.ext3_i.i_disksize = new_size;
++ if (new_size > ei->i_disksize)
++ ei->i_disksize = new_size;
+
+ bh_result->b_state |= (1UL << BH_New);
+ goto got_it;
+@@ -921,7 +924,7 @@ struct buffer_head *ext3_bread(handle_t
+ struct buffer_head *tmp_bh;
+
+ for (i = 1;
+- inode->u.ext3_i.i_prealloc_count &&
++ EXT3_I(inode)->i_prealloc_count &&
+ i < EXT3_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
+ i++) {
+ /*
+@@ -1131,8 +1134,8 @@ static int ext3_commit_write(struct file
+ kunmap(page);
+ }
+ }
+- if (inode->i_size > inode->u.ext3_i.i_disksize) {
+- inode->u.ext3_i.i_disksize = inode->i_size;
++ if (inode->i_size > EXT3_I(inode)->i_disksize) {
++ EXT3_I(inode)->i_disksize = inode->i_size;
+ ret2 = ext3_mark_inode_dirty(handle, inode);
+ if (!ret)
+ ret = ret2;
+@@ -1832,7 +1835,8 @@ static void ext3_free_branches(handle_t
+ void ext3_truncate(struct inode * inode)
+ {
+ handle_t *handle;
+- u32 *i_data = inode->u.ext3_i.i_data;
++ struct ext3_inode_info *ei = EXT3_I(inode);
++ u32 *i_data = EXT3_I(inode)->i_data;
+ int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
+ int offsets[4];
+ Indirect chain[4];
+@@ -1884,13 +1887,13 @@ void ext3_truncate(struct inode * inode)
+ * on-disk inode. We do this via i_disksize, which is the value which
+ * ext3 *really* writes onto the disk inode.
+ */
+- inode->u.ext3_i.i_disksize = inode->i_size;
++ ei->i_disksize = inode->i_size;
+
+ /*
+ * From here we block out all ext3_get_block() callers who want to
+ * modify the block allocation tree.
+ */
+- down_write(&inode->u.ext3_i.truncate_sem);
++ down_write(&ei->truncate_sem);
+
+ if (n == 1) { /* direct blocks */
+ ext3_free_data(handle, inode, NULL, i_data+offsets[0],
+@@ -1954,7 +1957,7 @@ do_indirects:
+ case EXT3_TIND_BLOCK:
+ ;
+ }
+- up_write(&inode->u.ext3_i.truncate_sem);
++ up_write(&ei->truncate_sem);
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ ext3_mark_inode_dirty(handle, inode);
+
+@@ -1983,6 +1986,8 @@ out_stop:
+
+ int ext3_get_inode_loc (struct inode *inode, struct ext3_iloc *iloc)
+ {
++ struct super_block *sb = inode->i_sb;
++ struct ext3_sb_info *sbi = EXT3_SB(sb);
+ struct buffer_head *bh = 0;
+ unsigned long block;
+ unsigned long block_group;
+@@ -1997,23 +2010,19 @@ int ext3_get_inode_loc (struct inode *in
+ inode->i_ino != EXT3_JOURNAL_INO &&
+- inode->i_ino < EXT3_FIRST_INO(inode->i_sb)) ||
+- inode->i_ino > le32_to_cpu(
+- inode->i_sb->u.ext3_sb.s_es->s_inodes_count)) {
+- ext3_error (inode->i_sb, "ext3_get_inode_loc",
+- "bad inode number: %lu", inode->i_ino);
++ inode->i_ino < EXT3_FIRST_INO(sb)) ||
++ inode->i_ino > le32_to_cpu(sbi->s_es->s_inodes_count)) {
++ ext3_error (sb, __FUNCTION__, "bad inode #%lu", inode->i_ino);
+ goto bad_inode;
+ }
+- block_group = (inode->i_ino - 1) / EXT3_INODES_PER_GROUP(inode->i_sb);
+- if (block_group >= inode->i_sb->u.ext3_sb.s_groups_count) {
+- ext3_error (inode->i_sb, "ext3_get_inode_loc",
+- "group >= groups count");
++ block_group = (inode->i_ino - 1) / sbi->s_inodes_per_group;
++ if (block_group >= sbi->s_groups_count) {
++ ext3_error(sb, __FUNCTION__, "group >= groups count");
+ goto bad_inode;
+ }
+- group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(inode->i_sb);
+- desc = block_group & (EXT3_DESC_PER_BLOCK(inode->i_sb) - 1);
+- bh = inode->i_sb->u.ext3_sb.s_group_desc[group_desc];
++ group_desc = block_group >> sbi->s_desc_per_block_bits;
++ desc = block_group & (sbi->s_desc_per_block - 1);
++ bh = sbi->s_group_desc[group_desc];
+ if (!bh) {
+- ext3_error (inode->i_sb, "ext3_get_inode_loc",
+- "Descriptor not loaded");
++ ext3_error(sb, __FUNCTION__, "Descriptor not loaded");
+ goto bad_inode;
+ }
+
+@@ -2021,17 +2022,17 @@ int ext3_get_inode_loc (struct inode *in
+ /*
+ * Figure out the offset within the block group inode table
+ */
+- offset = ((inode->i_ino - 1) % EXT3_INODES_PER_GROUP(inode->i_sb)) *
+- EXT3_INODE_SIZE(inode->i_sb);
++ offset = ((inode->i_ino - 1) % sbi->s_inodes_per_group) *
++ sbi->s_inode_size;
+ block = le32_to_cpu(gdp[desc].bg_inode_table) +
+- (offset >> EXT3_BLOCK_SIZE_BITS(inode->i_sb));
+- if (!(bh = sb_bread(inode->i_sb, block))) {
+- ext3_error (inode->i_sb, "ext3_get_inode_loc",
++ (offset >> EXT3_BLOCK_SIZE_BITS(sb));
++ if (!(bh = sb_bread(sb, block))) {
++ ext3_error (sb, __FUNCTION__,
+ "unable to read inode block - "
+ "inode=%lu, block=%lu", inode->i_ino, block);
+ goto bad_inode;
+ }
+- offset &= (EXT3_BLOCK_SIZE(inode->i_sb) - 1);
++ offset &= (EXT3_BLOCK_SIZE(sb) - 1);
+
+ iloc->bh = bh;
+ iloc->raw_inode = (struct ext3_inode *) (bh->b_data + offset);
+@@ -2047,6 +2048,7 @@ void ext3_read_inode(struct inode * inod
+ {
+ struct ext3_iloc iloc;
+ struct ext3_inode *raw_inode;
++ struct ext3_inode_info *ei = EXT3_I(inode);
+ struct buffer_head *bh;
+ int block;
+
+@@ -2054,7 +2056,7 @@ void ext3_read_inode(struct inode * inod
+ goto bad_inode;
+ bh = iloc.bh;
+ raw_inode = iloc.raw_inode;
+- init_rwsem(&inode->u.ext3_i.truncate_sem);
++ init_rwsem(&ei->truncate_sem);
+ inode->i_mode = le16_to_cpu(raw_inode->i_mode);
+ inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+ inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+@@ -2067,7 +2069,7 @@ void ext3_read_inode(struct inode * inod
+ inode->i_atime = le32_to_cpu(raw_inode->i_atime);
+ inode->i_ctime = le32_to_cpu(raw_inode->i_ctime);
+ inode->i_mtime = le32_to_cpu(raw_inode->i_mtime);
+- inode->u.ext3_i.i_dtime = le32_to_cpu(raw_inode->i_dtime);
++ ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
+ /* We now have enough fields to check if the inode was active or not.
+ * This is needed because nfsd might try to access dead inodes
+ * the test is that same one that e2fsck uses
+@@ -2075,7 +2077,7 @@ void ext3_read_inode(struct inode * inod
+ */
+ if (inode->i_nlink == 0) {
+ if (inode->i_mode == 0 ||
+- !(inode->i_sb->u.ext3_sb.s_mount_state & EXT3_ORPHAN_FS)) {
++ !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
+ /* this inode is deleted */
+ brelse (bh);
+ goto bad_inode;
+@@ -2090,33 +2092,33 @@ void ext3_read_inode(struct inode * inod
+ * size */
+ inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
+ inode->i_version = ++event;
+- inode->u.ext3_i.i_flags = le32_to_cpu(raw_inode->i_flags);
++ ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+ #ifdef EXT3_FRAGMENTS
+- inode->u.ext3_i.i_faddr = le32_to_cpu(raw_inode->i_faddr);
+- inode->u.ext3_i.i_frag_no = raw_inode->i_frag;
+- inode->u.ext3_i.i_frag_size = raw_inode->i_fsize;
++ ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
++ ei->i_frag_no = raw_inode->i_frag;
++ ei->i_frag_size = raw_inode->i_fsize;
+ #endif
+- inode->u.ext3_i.i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
++ ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
+ if (!S_ISREG(inode->i_mode)) {
+- inode->u.ext3_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
++ ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
+ } else {
+ inode->i_size |=
+ ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
+ }
+- inode->u.ext3_i.i_disksize = inode->i_size;
++ ei->i_disksize = inode->i_size;
+ inode->i_generation = le32_to_cpu(raw_inode->i_generation);
+ #ifdef EXT3_PREALLOCATE
+- inode->u.ext3_i.i_prealloc_count = 0;
++ ei->i_prealloc_count = 0;
+ #endif
+- inode->u.ext3_i.i_block_group = iloc.block_group;
++ ei->i_block_group = iloc.block_group;
+
+ /*
+ * NOTE! The in-memory inode i_data array is in little-endian order
+ * even on big-endian machines: we do NOT byteswap the block numbers!
+ */
+ for (block = 0; block < EXT3_N_BLOCKS; block++)
+- inode->u.ext3_i.i_data[block] = iloc.raw_inode->i_block[block];
+- INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);
++ ei->i_data[block] = iloc.raw_inode->i_block[block];
++ INIT_LIST_HEAD(&ei->i_orphan);
+
+ brelse (iloc.bh);
+
+@@ -2143,17 +2145,17 @@ void ext3_read_inode(struct inode * inod
+ /* inode->i_attr_flags = 0; unused */
+- if (inode->u.ext3_i.i_flags & EXT3_SYNC_FL) {
++ if (ei->i_flags & EXT3_SYNC_FL) {
+ /* inode->i_attr_flags |= ATTR_FLAG_SYNCRONOUS; unused */
+ inode->i_flags |= S_SYNC;
+ }
+- if (inode->u.ext3_i.i_flags & EXT3_APPEND_FL) {
++ if (ei->i_flags & EXT3_APPEND_FL) {
+ /* inode->i_attr_flags |= ATTR_FLAG_APPEND; unused */
+ inode->i_flags |= S_APPEND;
+ }
+- if (inode->u.ext3_i.i_flags & EXT3_IMMUTABLE_FL) {
++ if (ei->i_flags & EXT3_IMMUTABLE_FL) {
+ /* inode->i_attr_flags |= ATTR_FLAG_IMMUTABLE; unused */
+ inode->i_flags |= S_IMMUTABLE;
+ }
+- if (inode->u.ext3_i.i_flags & EXT3_NOATIME_FL) {
++ if (ei->i_flags & EXT3_NOATIME_FL) {
+ /* inode->i_attr_flags |= ATTR_FLAG_NOATIME; unused */
+ inode->i_flags |= S_NOATIME;
+ }
+@@ -2175,6 +2177,7 @@ static int ext3_do_update_inode(handle_t
+ struct ext3_iloc *iloc)
+ {
+ struct ext3_inode *raw_inode = iloc->raw_inode;
++ struct ext3_inode_info *ei = EXT3_I(inode);
+ struct buffer_head *bh = iloc->bh;
+ int err = 0, rc, block;
+
+@@ -2192,7 +2195,7 @@ static int ext3_do_update_inode(handle_t
+ * Fix up interoperability with old kernels. Otherwise, old inodes get
+ * re-used with the upper 16 bits of the uid/gid intact
+ */
+- if(!inode->u.ext3_i.i_dtime) {
++ if(!ei->i_dtime) {
+ raw_inode->i_uid_high =
+ cpu_to_le16(high_16_bits(inode->i_uid));
+ raw_inode->i_gid_high =
+@@ -2210,34 +2213,33 @@ static int ext3_do_update_inode(handle_t
+ raw_inode->i_gid_high = 0;
+ }
+ raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+- raw_inode->i_size = cpu_to_le32(inode->u.ext3_i.i_disksize);
++ raw_inode->i_size = cpu_to_le32(ei->i_disksize);
+ raw_inode->i_atime = cpu_to_le32(inode->i_atime);
+ raw_inode->i_ctime = cpu_to_le32(inode->i_ctime);
+ raw_inode->i_mtime = cpu_to_le32(inode->i_mtime);
+ raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
+- raw_inode->i_dtime = cpu_to_le32(inode->u.ext3_i.i_dtime);
+- raw_inode->i_flags = cpu_to_le32(inode->u.ext3_i.i_flags);
++ raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
++ raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+ #ifdef EXT3_FRAGMENTS
+- raw_inode->i_faddr = cpu_to_le32(inode->u.ext3_i.i_faddr);
+- raw_inode->i_frag = inode->u.ext3_i.i_frag_no;
+- raw_inode->i_fsize = inode->u.ext3_i.i_frag_size;
++ raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
++ raw_inode->i_frag = ei->i_frag_no;
++ raw_inode->i_fsize = ei->i_frag_size;
+ #else
+ /* If we are not tracking these fields in the in-memory inode,
+ * then preserve them on disk, but still initialise them to zero
+ * for new inodes. */
+- if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) {
++ if (ei->i_state & EXT3_STATE_NEW) {
+ raw_inode->i_faddr = 0;
+ raw_inode->i_frag = 0;
+ raw_inode->i_fsize = 0;
+ }
+ #endif
+- raw_inode->i_file_acl = cpu_to_le32(inode->u.ext3_i.i_file_acl);
++ raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
+ if (!S_ISREG(inode->i_mode)) {
+- raw_inode->i_dir_acl = cpu_to_le32(inode->u.ext3_i.i_dir_acl);
++ raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
+ } else {
+- raw_inode->i_size_high =
+- cpu_to_le32(inode->u.ext3_i.i_disksize >> 32);
+- if (inode->u.ext3_i.i_disksize > 0x7fffffffULL) {
++ raw_inode->i_size_high = cpu_to_le32(ei->i_disksize >> 32);
++ if (ei->i_disksize > MAX_NON_LFS) {
+ struct super_block *sb = inode->i_sb;
+ if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
+ EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
+@@ -2247,7 +2249,7 @@ static int ext3_do_update_inode(handle_t
+ * created, add a flag to the superblock.
+ */
+ err = ext3_journal_get_write_access(handle,
+- sb->u.ext3_sb.s_sbh);
++ EXT3_SB(sb)->s_sbh);
+ if (err)
+ goto out_brelse;
+ ext3_update_dynamic_rev(sb);
+@@ -2256,7 +2258,7 @@ static int ext3_do_update_inode(handle_t
+ sb->s_dirt = 1;
+ handle->h_sync = 1;
+ err = ext3_journal_dirty_metadata(handle,
+- sb->u.ext3_sb.s_sbh);
++ EXT3_SB(sb)->s_sbh);
+ }
+ }
+ }
+@@ -2265,13 +2267,13 @@ static int ext3_do_update_inode(handle_t
+ raw_inode->i_block[0] =
+ cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
+ else for (block = 0; block < EXT3_N_BLOCKS; block++)
+- raw_inode->i_block[block] = inode->u.ext3_i.i_data[block];
++ raw_inode->i_block[block] = ei->i_data[block];
+
+ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+ rc = ext3_journal_dirty_metadata(handle, bh);
+ if (!err)
+ err = rc;
+- EXT3_I(inode)->i_state &= ~EXT3_STATE_NEW;
++ ei->i_state &= ~EXT3_STATE_NEW;
+
+ out_brelse:
+ brelse (bh);
+@@ -2379,7 +2381,7 @@ int ext3_setattr(struct dentry *dentry,
+ }
+
+ error = ext3_orphan_add(handle, inode);
+- inode->u.ext3_i.i_disksize = attr->ia_size;
++ EXT3_I(inode)->i_disksize = attr->ia_size;
+ rc = ext3_mark_inode_dirty(handle, inode);
+ if (!error)
+ error = rc;
+@@ -2622,9 +2624,9 @@ int ext3_change_inode_journal_flag(struc
+ */
+
+ if (val)
+- inode->u.ext3_i.i_flags |= EXT3_JOURNAL_DATA_FL;
++ EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
+ else
+- inode->u.ext3_i.i_flags &= ~EXT3_JOURNAL_DATA_FL;
++ EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
+
+ journal_unlock_updates(journal);
+
+--- ./fs/ext3/ioctl.c.orig Fri Apr 12 10:27:49 2002
++++ ./fs/ext3/ioctl.c Tue May 7 15:20:52 2002
+@@ -18,13 +18,14 @@
+ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
+ unsigned long arg)
+ {
++ struct ext3_inode_info *ei = EXT3_I(inode);
+ unsigned int flags;
+
+ ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);
+
+ switch (cmd) {
+ case EXT3_IOC_GETFLAGS:
+- flags = inode->u.ext3_i.i_flags & EXT3_FL_USER_VISIBLE;
++ flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
+ return put_user(flags, (int *) arg);
+ case EXT3_IOC_SETFLAGS: {
+ handle_t *handle = NULL;
+@@ -42,7 +42,7 @@ int ext3_ioctl (struct inode * inode, st
+ if (get_user(flags, (int *) arg))
+ return -EFAULT;
+
+- oldflags = inode->u.ext3_i.i_flags;
++ oldflags = ei->i_flags;
+
+ /* The JOURNAL_DATA flag is modifiable only by root */
+ jflag = flags & EXT3_JOURNAL_DATA_FL;
+@@ -79,7 +79,7 @@ int ext3_ioctl (struct inode * inode, st
+
+ flags = flags & EXT3_FL_USER_MODIFIABLE;
+ flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
+- inode->u.ext3_i.i_flags = flags;
++ ei->i_flags = flags;
+
+ if (flags & EXT3_SYNC_FL)
+ inode->i_flags |= S_SYNC;
+@@ -155,12 +155,12 @@ flags_err:
+ int ret = 0;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- add_wait_queue(&sb->u.ext3_sb.ro_wait_queue, &wait);
+- if (timer_pending(&sb->u.ext3_sb.turn_ro_timer)) {
++ add_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait);
++ if (timer_pending(&EXT3_SB(sb)->turn_ro_timer)) {
+ schedule();
+ ret = 1;
+ }
+- remove_wait_queue(&sb->u.ext3_sb.ro_wait_queue, &wait);
++ remove_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait);
+ return ret;
+ }
+ #endif
+--- ./fs/ext3/namei.c.orig Fri Apr 12 10:27:49 2002
++++ ./fs/ext3/namei.c Tue May 7 16:05:51 2002
+@@ -636,7 +636,7 @@ static struct buffer_head * ext3_find_en
+ }
+
+ nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
+- start = dir->u.ext3_i.i_dir_start_lookup;
++ start = EXT3_I(dir)->i_dir_start_lookup;
+ if (start >= nblocks)
+ start = 0;
+ block = start;
+@@ -677,7 +677,7 @@ restart:
+ i = search_dirblock(bh, dir, dentry,
+ block << EXT3_BLOCK_SIZE_BITS(sb), res_dir);
+ if (i == 1) {
+- dir->u.ext3_i.i_dir_start_lookup = block;
++ EXT3_I(dir)->i_dir_start_lookup = block;
+ ret = bh;
+ goto cleanup_and_exit;
+ } else {
+@@ -1419,7 +1419,7 @@ int ext3_orphan_add(handle_t *handle, st
+ int err = 0, rc;
+
+ lock_super(sb);
+- if (!list_empty(&inode->u.ext3_i.i_orphan))
++ if (!list_empty(&EXT3_I(inode)->i_orphan))
+ goto out_unlock;
+
+ /* Orphan handling is only valid for files with data blocks
+@@ -1430,8 +1430,8 @@ int ext3_orphan_add(handle_t *handle, st
+ J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
+
+- BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get_write_access");
+- err = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh);
++ BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
+ if (err)
+ goto out_unlock;
+
+@@ -1442,7 +1442,7 @@ int ext3_orphan_add(handle_t *handle, st
+ /* Insert this inode at the head of the on-disk orphan list... */
+ NEXT_ORPHAN(inode) = le32_to_cpu(EXT3_SB(sb)->s_es->s_last_orphan);
+ EXT3_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
+- err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh);
++ err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+ rc = ext3_mark_iloc_dirty(handle, inode, &iloc);
+ if (!err)
+ err = rc;
+@@ -1456,7 +1456,7 @@ int ext3_orphan_add(handle_t *handle, st
+ * This is safe: on error we're going to ignore the orphan list
+ * anyway on the next recovery. */
+ if (!err)
+- list_add(&inode->u.ext3_i.i_orphan, &EXT3_SB(sb)->s_orphan);
++ list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
+
+ jbd_debug(4, "superblock will point to %ld\n", inode->i_ino);
+ jbd_debug(4, "orphan inode %ld will point to %d\n",
+@@ -714,25 +770,25 @@
+ int ext3_orphan_del(handle_t *handle, struct inode *inode)
+ {
+ struct list_head *prev;
++ struct ext3_inode_info *ei = EXT3_I(inode);
+ struct ext3_sb_info *sbi;
+ unsigned long ino_next;
+ struct ext3_iloc iloc;
+ int err = 0;
+
+ lock_super(inode->i_sb);
+- if (list_empty(&inode->u.ext3_i.i_orphan)) {
++ if (list_empty(&ei->i_orphan)) {
+ unlock_super(inode->i_sb);
+ return 0;
+ }
+
+ ino_next = NEXT_ORPHAN(inode);
+- prev = inode->u.ext3_i.i_orphan.prev;
++ prev = ei->i_orphan.prev;
+ sbi = EXT3_SB(inode->i_sb);
+
+ jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
+
+- list_del(&inode->u.ext3_i.i_orphan);
+- INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);
++ list_del_init(&ei->i_orphan);
+
+ /* If we're on an error path, we may not have a valid
+ * transaction handle with which to update the orphan list on
+@@ -1520,8 +1520,7 @@ int ext3_orphan_del(handle_t *handle, st
+ err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
+ } else {
+ struct ext3_iloc iloc2;
+- struct inode *i_prev =
+- list_entry(prev, struct inode, u.ext3_i.i_orphan);
++ struct inode *i_prev = orphan_list_entry(prev);
+
+ jbd_debug(4, "orphan inode %lu will point to %lu\n",
+ i_prev->i_ino, ino_next);
+@@ -1695,10 +1695,10 @@ static int ext3_symlink (struct inode *
+ goto out_no_entry;
+ } else {
+ inode->i_op = &ext3_fast_symlink_inode_operations;
+- memcpy((char*)&inode->u.ext3_i.i_data,symname,l);
++ memcpy((char*)&EXT3_I(inode)->i_data,symname,l);
+ inode->i_size = l-1;
+ }
+- inode->u.ext3_i.i_disksize = inode->i_size;
++ EXT3_I(inode)->i_disksize = inode->i_size;
+ err = ext3_add_nondir(handle, dentry, inode);
+ ext3_mark_inode_dirty(handle, inode);
+ out_stop:
+--- ./fs/ext3/super.c.orig Fri Apr 12 10:27:49 2002
++++ ./fs/ext3/super.c Tue May 7 16:05:44 2002
+@@ -121,7 +121,7 @@ static int ext3_error_behaviour(struct s
+ /* If no overrides were specified on the mount, then fall back
+ * to the default behaviour set in the filesystem's superblock
+ * on disk. */
+- switch (le16_to_cpu(sb->u.ext3_sb.s_es->s_errors)) {
++ switch (le16_to_cpu(EXT3_SB(sb)->s_es->s_errors)) {
+ case EXT3_ERRORS_PANIC:
+ return EXT3_ERRORS_PANIC;
+ case EXT3_ERRORS_RO:
+@@ -269,9 +269,9 @@ void ext3_abort (struct super_block * sb
+ return;
+
+ printk (KERN_CRIT "Remounting filesystem read-only\n");
+- sb->u.ext3_sb.s_mount_state |= EXT3_ERROR_FS;
++ EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
+ sb->s_flags |= MS_RDONLY;
+- sb->u.ext3_sb.s_mount_opt |= EXT3_MOUNT_ABORT;
++ EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
+ journal_abort(EXT3_SB(sb)->s_journal, -EIO);
+ }
+
+@@ -377,8 +377,6 @@ static int ext3_blkdev_remove(struct ext3
+ return ret;
+ }
+
+-#define orphan_list_entry(l) list_entry((l), struct inode, u.ext3_i.i_orphan)
+-
+ static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
+ {
+ struct list_head *l;
+@@ -818,7 +818,7 @@ static void ext3_orphan_cleanup (struct
+ sb->s_flags &= ~MS_RDONLY;
+ }
+
+- if (sb->u.ext3_sb.s_mount_state & EXT3_ERROR_FS) {
++ if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
+ if (es->s_last_orphan)
+ jbd_debug(1, "Errors on filesystem, "
+ "clearing orphan list.\n");
+@@ -1463,12 +1463,14 @@ static void ext3_commit_super (struct su
+ struct ext3_super_block * es,
+ int sync)
+ {
++ struct buffer_head *sbh = EXT3_SB(sb)->s_sbh;
++
+ es->s_wtime = cpu_to_le32(CURRENT_TIME);
+- BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "marking dirty");
+- mark_buffer_dirty(sb->u.ext3_sb.s_sbh);
++ BUFFER_TRACE(sbh, "marking dirty");
++ mark_buffer_dirty(sbh);
+ if (sync) {
+- ll_rw_block(WRITE, 1, &sb->u.ext3_sb.s_sbh);
+- wait_on_buffer(sb->u.ext3_sb.s_sbh);
++ ll_rw_block(WRITE, 1, &sbh);
++ wait_on_buffer(sbh);
+ }
+ }
+
+@@ -1519,7 +1521,7 @@ static void ext3_clear_journal_err(struc
+ ext3_warning(sb, __FUNCTION__, "Marking fs in need of "
+ "filesystem check.");
+
+- sb->u.ext3_sb.s_mount_state |= EXT3_ERROR_FS;
++ EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
+ es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
+ ext3_commit_super (sb, es, 1);
+
+--- ./fs/ext3/symlink.c.orig Fri Apr 12 10:27:49 2002
++++ ./fs/ext3/symlink.c Tue May 7 15:25:39 2002
+@@ -23,13 +23,13 @@
+
+ static int ext3_readlink(struct dentry *dentry, char *buffer, int buflen)
+ {
+- char *s = (char *)dentry->d_inode->u.ext3_i.i_data;
+- return vfs_readlink(dentry, buffer, buflen, s);
++ struct ext3_inode_info *ei = EXT3_I(dentry->d_inode);
++ return vfs_readlink(dentry, buffer, buflen, (char *)ei->i_data);
+ }
+
+ static int ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
+ {
+- char *s = (char *)dentry->d_inode->u.ext3_i.i_data;
+- return vfs_follow_link(nd, s);
++ struct ext3_inode_info *ei = EXT3_I(dentry->d_inode);
++ return vfs_follow_link(nd, (char*)ei->i_data);
+ }
+
+--- ./include/linux/ext3_fs.h.orig Tue Apr 16 14:27:25 2002
++++ ./include/linux/ext3_fs.h Tue May 7 16:47:36 2002
+@@ -84,22 +84,25 @@
+ #define EXT3_MIN_BLOCK_SIZE 1024
+ #define EXT3_MAX_BLOCK_SIZE 4096
+ #define EXT3_MIN_BLOCK_LOG_SIZE 10
++
+ #ifdef __KERNEL__
+-# define EXT3_BLOCK_SIZE(s) ((s)->s_blocksize)
+-#else
+-# define EXT3_BLOCK_SIZE(s) (EXT3_MIN_BLOCK_SIZE << (s)->s_log_block_size)
+-#endif
+-#define EXT3_ADDR_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (__u32))
+-#ifdef __KERNEL__
+-# define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
+-#else
+-# define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10)
+-#endif
+-#ifdef __KERNEL__
+-#define EXT3_ADDR_PER_BLOCK_BITS(s) ((s)->u.ext3_sb.s_addr_per_block_bits)
+-#define EXT3_INODE_SIZE(s) ((s)->u.ext3_sb.s_inode_size)
+-#define EXT3_FIRST_INO(s) ((s)->u.ext3_sb.s_first_ino)
++#define EXT3_SB(sb) (&((sb)->u.ext3_sb))
++#define EXT3_I(inode) (&((inode)->u.ext3_i))
++
++#define EXT3_BLOCK_SIZE(s) ((s)->s_blocksize)
++#define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
++#define EXT3_ADDR_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_addr_per_block_bits)
++#define EXT3_INODE_SIZE(s) (EXT3_SB(s)->s_inode_size)
++#define EXT3_FIRST_INO(s) (EXT3_SB(s)->s_first_ino)
+ #else
++
++/* Assume that user mode programs are passing in an ext3fs superblock, not
++ * a kernel struct super_block. This will allow us to call the feature-test
++ * macros from user land. */
++#define EXT3_SB(sb) (sb)
++
++#define EXT3_BLOCK_SIZE(s) (EXT3_MIN_BLOCK_SIZE << (s)->s_log_block_size)
++#define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10)
+ #define EXT3_INODE_SIZE(s) (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
+ EXT3_GOOD_OLD_INODE_SIZE : \
+ (s)->s_inode_size)
+@@ -108,6 +110,7 @@
+ EXT3_GOOD_OLD_FIRST_INO : \
+ (s)->s_first_ino)
+ #endif
++#define EXT3_ADDR_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (__u32))
+
+ /*
+ * Macro-instructions used to manage fragments
+@@ -116,8 +120,8 @@
+ #define EXT3_MAX_FRAG_SIZE 4096
+ #define EXT3_MIN_FRAG_LOG_SIZE 10
+ #ifdef __KERNEL__
+-# define EXT3_FRAG_SIZE(s) ((s)->u.ext3_sb.s_frag_size)
+-# define EXT3_FRAGS_PER_BLOCK(s) ((s)->u.ext3_sb.s_frags_per_block)
++# define EXT3_FRAG_SIZE(s) (EXT3_SB(s)->s_frag_size)
++# define EXT3_FRAGS_PER_BLOCK(s) (EXT3_SB(s)->s_frags_per_block)
+ #else
+ # define EXT3_FRAG_SIZE(s) (EXT3_MIN_FRAG_SIZE << (s)->s_log_frag_size)
+ # define EXT3_FRAGS_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / EXT3_FRAG_SIZE(s))
+@@ -163,15 +167,13 @@
+ /*
+ * Macro-instructions used to manage group descriptors
+ */
++# define EXT3_BLOCKS_PER_GROUP(s) (EXT3_SB(s)->s_blocks_per_group)
++# define EXT3_INODES_PER_GROUP(s) (EXT3_SB(s)->s_inodes_per_group)
+ #ifdef __KERNEL__
+-# define EXT3_BLOCKS_PER_GROUP(s) ((s)->u.ext3_sb.s_blocks_per_group)
+-# define EXT3_DESC_PER_BLOCK(s) ((s)->u.ext3_sb.s_desc_per_block)
+-# define EXT3_INODES_PER_GROUP(s) ((s)->u.ext3_sb.s_inodes_per_group)
+-# define EXT3_DESC_PER_BLOCK_BITS(s) ((s)->u.ext3_sb.s_desc_per_block_bits)
++# define EXT3_DESC_PER_BLOCK(s) (EXT3_SB(s)->s_desc_per_block)
++# define EXT3_DESC_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_desc_per_block_bits)
+ #else
+-# define EXT3_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group)
+ # define EXT3_DESC_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_group_desc))
+-# define EXT3_INODES_PER_GROUP(s) ((s)->s_inodes_per_group)
+ #endif
+
+ /*
+@@ -344,7 +347,7 @@
+ #ifndef _LINUX_EXT2_FS_H
+ #define clear_opt(o, opt) o &= ~EXT3_MOUNT_##opt
+ #define set_opt(o, opt) o |= EXT3_MOUNT_##opt
+-#define test_opt(sb, opt) ((sb)->u.ext3_sb.s_mount_opt & \
++#define test_opt(sb, opt) (EXT3_SB(sb)->s_mount_opt & \
+ EXT3_MOUNT_##opt)
+ #else
+ #define EXT2_MOUNT_NOLOAD EXT3_MOUNT_NOLOAD
+@@ -441,17 +443,11 @@
+ /*EC*/ __u32 s_reserved[197]; /* Padding to the end of the block */
+ };
+
+-#ifdef __KERNEL__
+-#define EXT3_SB(sb) (&((sb)->u.ext3_sb))
+-#define EXT3_I(inode) (&((inode)->u.ext3_i))
+-#else
+-/* Assume that user mode programs are passing in an ext3fs superblock, not
+- * a kernel struct super_block. This will allow us to call the feature-test
+- * macros from user land. */
+-#define EXT3_SB(sb) (sb)
+-#endif
+-
+-#define NEXT_ORPHAN(inode) (inode)->u.ext3_i.i_dtime
++#define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
++static inline struct inode *orphan_list_entry(struct list_head *l)
++{
++ return list_entry(l, struct inode, u.ext3_i.i_orphan);
++}
+
+ /*
+ * Codes for operating systems
+--- ./include/linux/ext3_jbd.h.orig Tue May 7 14:44:08 2002
++++ ./include/linux/ext3_jbd.h Tue May 7 14:44:43 2002
+@@ -291,7 +291,7 @@
+ return 1;
+ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
+ return 1;
+- if (inode->u.ext3_i.i_flags & EXT3_JOURNAL_DATA_FL)
++ if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
+ return 1;
+ return 0;
+ }
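
The point of funnelling every access through EXT3_SB()/EXT3_I() in the patch above is that a later patch can re-point the macros (for example at u.generic_sbp and u.generic_ip, as done further down in this series) without touching any call site. Below is a minimal user-space sketch of that accessor-macro pattern; the structure and macro names are made up for illustration and are not the kernel's:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures, for illustration only. */
struct demo_fs_info { int s_groups_count; };
struct demo_super { union { struct demo_fs_info fs; } u; };

/* Old style: callers poke into the union directly and are tied to its layout.
 * New style: callers go through an accessor macro; only the macro body needs
 * to change if the private data later moves behind a generic pointer. */
#define DEMO_SB(sb) (&((sb)->u.fs))

int main(void)
{
    struct demo_super sb = { .u.fs.s_groups_count = 8 };

    /* Call sites written against the accessor keep working no matter how
     * DEMO_SB() is implemented underneath. */
    printf("groups = %d\n", DEMO_SB(&sb)->s_groups_count);
    return 0;
}
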
--- /dev/null
+===== fs/ext3/ialloc.c 1.26 vs edited =====
+--- 1.26/fs/ext3/ialloc.c Fri Feb 14 19:24:09 2003
++++ edited/fs/ext3/ialloc.c Sat Mar 8 01:20:55 2003
+@@ -195,6 +195,36 @@
+ }
+
+ /*
++ * @block_group: block group of inode
++ * @offset: relative offset of inode within @block_group
++ *
++ * Check whether any of the inodes in this disk block are in use.
++ *
++ * Caller must be holding superblock lock (group/bitmap read lock in
++ * future).
++ */
++int ext3_itable_block_used(struct super_block *sb, unsigned int block_group,
++ int offset)
++{
++ struct buffer_head *ibitmap = read_inode_bitmap(sb, block_group);
++ int inodes_per_block;
++ unsigned long inum, iend;
++
++ if (!ibitmap)
++ return 1;
++
++ inodes_per_block = sb->s_blocksize / EXT3_SB(sb)->s_inode_size;
++ inum = offset & ~(inodes_per_block - 1);
++ iend = inum + inodes_per_block;
++ for (; inum < iend; inum++) {
++ if (inum != offset && ext3_test_bit(inum, ibitmap->b_data))
++ return 1;
++ }
++
++ return 0;
++}
++
++/*
+ * There are two policies for allocating an inode. If the new inode is
+ * a directory, then a forward search is made for a block group with both
+ * free space and a low directory-to-inode ratio; if that fails, then of
+@@ -422,8 +452,9 @@
+ struct ext3_group_desc * gdp;
+ struct ext3_super_block * es;
+ struct ext3_inode_info *ei;
+- int err = 0;
++ struct ext3_iloc iloc;
+ struct inode *ret;
++ int err = 0;
+
+ /* Cannot create files in a deleted directory */
+ if (!dir || !dir->i_nlink)
+@@ -587,16 +618,23 @@
+ goto fail2;
+ }
+ err = ext3_init_acl(handle, inode, dir);
++ if (err)
++ goto fail3;
++
++ err = ext3_get_inode_loc_new(inode, &iloc, 1);
++ if (err)
++ goto fail3;
++
++ BUFFER_TRACE(iloc.bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, iloc.bh);
+ if (err) {
+- DQUOT_FREE_INODE(inode);
+- goto fail2;
+- }
+- err = ext3_mark_inode_dirty(handle, inode);
+- if (err) {
+- ext3_std_error(sb, err);
+- DQUOT_FREE_INODE(inode);
+- goto fail2;
+- }
++ brelse(iloc.bh);
++ iloc.bh = NULL;
++ goto fail3;
++ }
++ err = ext3_mark_iloc_dirty(handle, inode, &iloc);
++ if (err)
++ goto fail3;
+
+ ext3_debug("allocating inode %lu\n", inode->i_ino);
+ goto really_out;
+@@ -610,6 +648,9 @@
+ brelse(bitmap_bh);
+ return ret;
+
++fail3:
++ ext3_std_error(sb, err);
++ DQUOT_FREE_INODE(inode);
+ fail2:
+ inode->i_flags |= S_NOQUOTA;
+ inode->i_nlink = 0;
+===== fs/ext3/inode.c 1.62 vs edited =====
+--- 1.62/fs/ext3/inode.c Fri Feb 14 19:24:09 2003
++++ edited/fs/ext3/inode.c Sat Mar 8 02:10:39 2003
+@@ -2144,69 +2144,118 @@
+ unlock_kernel();
+ }
+
+-/*
+- * ext3_get_inode_loc returns with an extra refcount against the
+- * inode's underlying buffer_head on success.
+- */
++#define NUM_INODE_PREREAD 16
+
+-int ext3_get_inode_loc (struct inode *inode, struct ext3_iloc *iloc)
++/*
++ * ext3_get_inode_loc returns with an extra refcount against the inode's
++ * underlying buffer_head on success. If this is for a new inode allocation
++ * (new is non-zero) then we may be able to optimize away the read if there
++ * are no other in-use inodes in this inode table block. If we need to do
++ * a read, then read in a whole chunk of blocks to avoid blocking again soon
++ * if we are doing lots of creates/updates.
++ */
++int ext3_get_inode_loc_new(struct inode *inode, struct ext3_iloc *iloc, int new)
+ {
+- struct buffer_head *bh = 0;
++ struct buffer_head *bh[NUM_INODE_PREREAD];
++ struct super_block *sb = inode->i_sb;
++ struct ext3_sb_info *sbi = EXT3_SB(sb);
++ unsigned long ino = inode->i_ino;
+ unsigned long block;
+ unsigned long block_group;
+ unsigned long group_desc;
+ unsigned long desc;
+ unsigned long offset;
+ struct ext3_group_desc * gdp;
+-
+- if ((inode->i_ino != EXT3_ROOT_INO &&
+- inode->i_ino != EXT3_JOURNAL_INO &&
+- inode->i_ino < EXT3_FIRST_INO(inode->i_sb)) ||
+- inode->i_ino > le32_to_cpu(
+- EXT3_SB(inode->i_sb)->s_es->s_inodes_count)) {
+- ext3_error (inode->i_sb, "ext3_get_inode_loc",
+- "bad inode number: %lu", inode->i_ino);
++
++ if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO &&
++ ino < EXT3_FIRST_INO(sb)) ||
++ ino > le32_to_cpu(sbi->s_es->s_inodes_count)) {
++ ext3_error(sb, "ext3_get_inode_loc", "bad inode number: %lu",
++ ino);
+ goto bad_inode;
+ }
+- block_group = (inode->i_ino - 1) / EXT3_INODES_PER_GROUP(inode->i_sb);
+- if (block_group >= EXT3_SB(inode->i_sb)->s_groups_count) {
+- ext3_error (inode->i_sb, "ext3_get_inode_loc",
+- "group >= groups count");
++ block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
++ if (block_group >= EXT3_SB(sb)->s_groups_count) {
++ ext3_error(sb, "ext3_get_inode_loc", "group >= groups count");
+ goto bad_inode;
+ }
+- group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(inode->i_sb);
+- desc = block_group & (EXT3_DESC_PER_BLOCK(inode->i_sb) - 1);
+- bh = EXT3_SB(inode->i_sb)->s_group_desc[group_desc];
+- if (!bh) {
+- ext3_error (inode->i_sb, "ext3_get_inode_loc",
+- "Descriptor not loaded");
++ group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
++ desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
++ if (!sbi->s_group_desc[group_desc]) {
++ ext3_error(sb, "ext3_get_inode_loc", "Descriptor not loaded");
+ goto bad_inode;
+ }
+
+- gdp = (struct ext3_group_desc *) bh->b_data;
++ gdp = (struct ext3_group_desc *)(sbi->s_group_desc[group_desc]->b_data);
+ /*
+ * Figure out the offset within the block group inode table
+ */
+- offset = ((inode->i_ino - 1) % EXT3_INODES_PER_GROUP(inode->i_sb)) *
+- EXT3_INODE_SIZE(inode->i_sb);
++ offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb));
+ block = le32_to_cpu(gdp[desc].bg_inode_table) +
+- (offset >> EXT3_BLOCK_SIZE_BITS(inode->i_sb));
+- if (!(bh = sb_bread(inode->i_sb, block))) {
+- ext3_error (inode->i_sb, "ext3_get_inode_loc",
+- "unable to read inode block - "
+- "inode=%lu, block=%lu", inode->i_ino, block);
+- goto bad_inode;
++ (offset * sbi->s_inode_size >> EXT3_BLOCK_SIZE_BITS(sb));
++ bh[0] = sb_getblk(sb, block);
++ if (buffer_uptodate(bh[0]))
++ goto done;
++
++ /* If we don't really need to read this block, and it isn't already
++ * in memory, then we just zero it out. Otherwise, we keep the
++ * current block contents (deleted inode data) for posterity.
++ */
++ if (new && !ext3_itable_block_used(sb, block_group, offset)) {
++ lock_buffer(bh[0]);
++ memset(bh[0]->b_data, 0, bh[0]->b_size);
++ set_buffer_uptodate(bh[0]);
++ unlock_buffer(bh[0]);
++ } else {
++ unsigned long block_end, itable_end;
++ int count = 1;
++
++ itable_end = le32_to_cpu(gdp[desc].bg_inode_table) +
++ sbi->s_itb_per_group;
++ block_end = block + NUM_INODE_PREREAD;
++ if (block_end > itable_end)
++ block_end = itable_end;
++
++ for (; block < block_end; block++) {
++ bh[count] = sb_getblk(sb, block);
++ if (count && (buffer_uptodate(bh[count]) ||
++ buffer_locked(bh[count]))) {
++ __brelse(bh[count]);
++ } else
++ count++;
++ }
++
++ ll_rw_block(READ, count, bh);
++
++ /* Release all but the block we actually need (bh[0]) */
++ while (--count > 0)
++ __brelse(bh[count]);
++
++ wait_on_buffer(bh[0]);
++ if (!buffer_uptodate(bh[0])) {
++ ext3_error(sb, __FUNCTION__,
++ "unable to read inode block - "
++ "inode=%lu, block=%llu", ino,
++ (unsigned long long)bh[0]->b_blocknr);
++ goto bad_inode;
++ }
+ }
+- offset &= (EXT3_BLOCK_SIZE(inode->i_sb) - 1);
++done:
++ offset = (offset * sbi->s_inode_size) & (EXT3_BLOCK_SIZE(sb) - 1);
+
+- iloc->bh = bh;
+- iloc->raw_inode = (struct ext3_inode *) (bh->b_data + offset);
++ iloc->bh = bh[0];
++ iloc->raw_inode = (struct ext3_inode *)(bh[0]->b_data + offset);
+ iloc->block_group = block_group;
+-
++
+ return 0;
+-
++
+ bad_inode:
+ return -EIO;
++}
++
++int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
++{
++ return ext3_get_inode_loc_new(inode, iloc, 0);
+ }
+
+ void ext3_read_inode(struct inode * inode)
+===== include/linux/ext3_fs.h 1.22 vs edited =====
+--- 1.22/include/linux/ext3_fs.h Tue Jan 14 00:56:29 2003
++++ edited/include/linux/ext3_fs.h Sat Mar 8 01:56:28 2003
+@@ -719,6 +719,8 @@
+ extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
+ extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+
++extern int ext3_itable_block_used(struct super_block *, unsigned int, int);
++extern int ext3_get_inode_loc_new(struct inode *, struct ext3_iloc *, int);
+ extern int ext3_get_inode_loc (struct inode *, struct ext3_iloc *);
+ extern void ext3_read_inode (struct inode *);
+ extern void ext3_write_inode (struct inode *, int);
--- /dev/null
+Under rare conditions (filesystem corruption, really) it is possible
+for ext3_dirty_inode() to require _two_ blocks for the transaction: one
+for the inode and one to update the superblock - to set
+EXT3_FEATURE_RO_COMPAT_LARGE_FILE. This causes the filesystem to go
+BUG.
+
+So reserve an additional block for that eventuality.
+
+
+ fs/ext3/inode.c | 2 +-
+ 1 files changed, 1 insertion(+), 1 deletion(-)
+
+--- 25/fs/ext3/inode.c~ext3-transaction-reserved-blocks Sat Dec 14 18:28:21 2002
++++ 25-akpm/fs/ext3/inode.c Sat Dec 14 18:28:21 2002
+@@ -2698,7 +2698,7 @@ void ext3_dirty_inode(struct inode *inod
+ handle_t *handle;
+
+ lock_kernel();
+- handle = ext3_journal_start(inode, 1);
++ handle = ext3_journal_start(inode, 2);
+ if (IS_ERR(handle))
+ goto out;
+ if (current_handle &&
--- /dev/null
+--- linux/fs/ext3/namei.c.orig Fri Mar 14 14:11:58 2003
++++ linux/fs/ext3/namei.c Fri Mar 14 14:39:48 2003
+@@ -1406,8 +1409,8 @@
+ struct super_block *sb = inode->i_sb;
+ struct ext3_iloc iloc;
+ int err = 0, rc;
+-
+- lock_super(sb);
++
++ down(&EXT3_SB(sb)->s_orphan_lock);
+ if (!list_empty(&EXT3_I(inode)->i_orphan))
+ goto out_unlock;
+
+@@ -1455,7 +1458,7 @@
+ jbd_debug(4, "orphan inode %ld will point to %d\n",
+ inode->i_ino, NEXT_ORPHAN(inode));
+ out_unlock:
+- unlock_super(sb);
++ up(&EXT3_SB(sb)->s_orphan_lock);
+ ext3_std_error(inode->i_sb, err);
+ return err;
+ }
+@@ -1468,20 +1471,19 @@
+ {
+ struct list_head *prev;
+ struct ext3_inode_info *ei = EXT3_I(inode);
+- struct ext3_sb_info *sbi;
++ struct ext3_sb_info *sbi = EXT3_SB(inode->i_sb);
+ unsigned long ino_next;
+ struct ext3_iloc iloc;
+ int err = 0;
+
+- lock_super(inode->i_sb);
++ down(&sbi->s_orphan_lock);
+ if (list_empty(&ei->i_orphan)) {
+- unlock_super(inode->i_sb);
++ up(&sbi->s_orphan_lock);
+ return 0;
+ }
+
+ ino_next = NEXT_ORPHAN(inode);
+ prev = ei->i_orphan.prev;
+- sbi = EXT3_SB(inode->i_sb);
+
+ jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
+
+@@ -1525,10 +1527,10 @@
+ if (err)
+ goto out_brelse;
+
+-out_err:
++out_err:
+ ext3_std_error(inode->i_sb, err);
+ out:
+- unlock_super(inode->i_sb);
++ up(&sbi->s_orphan_lock);
+ return err;
+
+ out_brelse:
+--- linux/fs/ext3/super.c.orig Fri Mar 14 14:11:58 2003
++++ linux/fs/ext3/super.c Fri Mar 14 14:36:00 2003
+@@ -1134,6 +1314,7 @@
+ */
+ sb->s_op = &ext3_sops;
+ INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
++ sema_init(&sbi->s_orphan_lock, 1);
+
+ sb->s_root = 0;
+
+--- linux/include/linux/ext3_fs_sb.h.orig Tue Feb 11 16:34:33 2003
++++ linux/include/linux/ext3_fs_sb.h Fri Mar 14 14:30:11 2003
+@@ -67,6 +69,7 @@
+ struct inode * s_journal_inode;
+ struct journal_s * s_journal;
+ struct list_head s_orphan;
++ struct semaphore s_orphan_lock;
+ unsigned long s_commit_interval;
+ struct block_device *journal_bdev;
+ #ifdef CONFIG_JBD_DEBUG
--- /dev/null
+--- ./fs/ext3/inode.c.orig Wed Mar 12 02:44:06 2003
++++ ./fs/ext3/inode.c Wed Mar 12 11:55:20 2003
+@@ -99,7 +99,35 @@ int ext3_forget(handle_t *handle, int is
+ return err;
+ }
+
+-/*
++/*
++ * Work out how many blocks we need to progress with the next chunk of a
++ * truncate transaction.
++ */
++
++static unsigned long blocks_for_truncate(struct inode *inode)
++{
++ unsigned long needed;
++
++ needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
++
++ /* Give ourselves just enough room to cope with inodes in which
++ * i_blocks is corrupt: we've seen disk corruptions in the past
++ * which resulted in random data in an inode which looked enough
++ * like a regular file for ext3 to try to delete it. Things
++ * will go a bit crazy if that happens, but at least we should
++ * try not to panic the whole kernel. */
++ if (needed < 2)
++ needed = 2;
++
++ /* But we need to bound the transaction so we don't overflow the
++ * journal. */
++ if (needed > EXT3_MAX_TRANS_DATA)
++ needed = EXT3_MAX_TRANS_DATA;
++
++ return EXT3_DATA_TRANS_BLOCKS + needed;
++}
++
++/*
+ * Truncate transactions can be complex and absolutely huge. So we need to
+ * be able to restart the transaction at a conventient checkpoint to make
+ * sure we don't overflow the journal.
+@@ -110,19 +138,14 @@ int ext3_forget(handle_t *handle, int is
+ * transaction in the top-level truncate loop. --sct
+ */
+
+-static handle_t *start_transaction(struct inode *inode)
++static handle_t *start_transaction(struct inode *inode)
+ {
+- long needed;
+ handle_t *result;
+-
+- needed = inode->i_blocks;
+- if (needed > EXT3_MAX_TRANS_DATA)
+- needed = EXT3_MAX_TRANS_DATA;
+-
+- result = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS + needed);
++
++ result = ext3_journal_start(inode, blocks_for_truncate(inode));
+ if (!IS_ERR(result))
+ return result;
+-
++
+ ext3_std_error(inode->i_sb, PTR_ERR(result));
+ return result;
+ }
+@@ -135,14 +158,9 @@ static handle_t *start_transaction(struc
+ */
+ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
+ {
+- long needed;
+-
+ if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
+ return 0;
+- needed = inode->i_blocks;
+- if (needed > EXT3_MAX_TRANS_DATA)
+- needed = EXT3_MAX_TRANS_DATA;
+- if (!ext3_journal_extend(handle, EXT3_RESERVE_TRANS_BLOCKS + needed))
++ if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
+ return 0;
+ return 1;
+ }
+@@ -154,11 +172,8 @@ static int try_to_extend_transaction(han
+ */
+ static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
+ {
+- long needed = inode->i_blocks;
+- if (needed > EXT3_MAX_TRANS_DATA)
+- needed = EXT3_MAX_TRANS_DATA;
+ jbd_debug(2, "restarting handle %p\n", handle);
+- return ext3_journal_restart(handle, EXT3_DATA_TRANS_BLOCKS + needed);
++ return ext3_journal_restart(handle, blocks_for_truncate(inode));
+ }
+
+ /*
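
blocks_for_truncate() above centralizes the credit estimate used by start_transaction(), try_to_extend_transaction() and ext3_journal_test_restart(): convert i_blocks (512-byte sectors) to filesystem blocks, clamp between a floor of 2 (in case i_blocks is corrupt) and EXT3_MAX_TRANS_DATA, then add the fixed EXT3_DATA_TRANS_BLOCKS overhead. A user-space sketch of the same arithmetic, with stand-in constants since the real values depend on the journalling mode:

#include <stdio.h>

/* Stand-in values; the real EXT3_DATA_TRANS_BLOCKS / EXT3_MAX_TRANS_DATA
 * are defined in ext3_jbd.h and depend on the data journalling mode. */
#define DEMO_DATA_TRANS_BLOCKS 8
#define DEMO_MAX_TRANS_DATA    64

/* Mirror of the clamping logic: convert 512-byte i_blocks to fs blocks,
 * never ask for fewer than 2 credits (corrupt i_blocks) and never for more
 * than the journal can hold in one transaction. */
static unsigned long demo_blocks_for_truncate(unsigned long i_blocks,
                                              unsigned int blocksize_bits)
{
    unsigned long needed = i_blocks >> (blocksize_bits - 9);

    if (needed < 2)
        needed = 2;
    if (needed > DEMO_MAX_TRANS_DATA)
        needed = DEMO_MAX_TRANS_DATA;

    return DEMO_DATA_TRANS_BLOCKS + needed;
}

int main(void)
{
    /* A 4KB-block file of 1000 sectors is clamped; a tiny file gets the floor. */
    printf("%lu %lu\n",
           demo_blocks_for_truncate(1000, 12),   /* 8 + 64 = 72 */
           demo_blocks_for_truncate(1, 12));     /* 8 +  2 = 10 */
    return 0;
}
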
--- /dev/null
+From adilger@clusterfs.com Mon Dec 2 10:26:44 2002
+Date: Mon, 2 Dec 2002 10:26:44 -0700
+From: Andreas Dilger <adilger@clusterfs.com>
+To: Lustre LLNL Mailing list <lc-lustre@llnl.gov>,
+ Lustre Development Mailing List <lustre-devel@lists.sourceforge.net>
+Subject: Re: data corrupting bug in 2.4.20 ext3, data=journal
+
+Here is the new, improved fix for the bug where ext3 discards data at umount,
+discovered late last week. It should be used instead of the previous ext3 fix.
+
+Sadly, this is completely unrelated to the problems Mike is having with
+ext3 under UML, since it is an unmount-time problem.
+
+----- Forwarded message from "Stephen C. Tweedie" <sct@redhat.com> -----
+The attached patch seems to fix things for me.
+
+Cheers,
+ Stephen
+
+
+--- linux-2.4-ext3merge/fs/ext3/super.c.=K0027=.orig 2002-12-02 15:35:13.000000000 +0000
++++ linux-2.4-ext3merge/fs/ext3/super.c 2002-12-02 15:35:14.000000000 +0000
+@@ -1640,7 +1640,12 @@
+ sb->s_dirt = 0;
+ target = log_start_commit(EXT3_SB(sb)->s_journal, NULL);
+
+- if (do_sync_supers) {
++ /*
++ * Tricky --- if we are unmounting, the write really does need
++ * to be synchronous. We can detect that by looking for NULL in
++ * sb->s_root.
++ */
++ if (do_sync_supers || !sb->s_root) {
+ unlock_super(sb);
+ log_wait_commit(EXT3_SB(sb)->s_journal, target);
+ lock_super(sb);
+
+
+----- End forwarded message -----
+
+Cheers, Andreas
+--
+Andreas Dilger
+http://sourceforge.net/projects/ext2resize/
+http://www-mddsp.enel.ucalgary.ca/People/adilger/
+
+
--- /dev/null
+
+
+If ext3_add_nondir() fails, it will do an iput() of the inode. But we
+continue to run ext3_mark_inode_dirty() against the potentially-freed
+inode. This oopses when slab poisoning is enabled.
+
+Fix it so that we only run ext3_mark_inode_dirty() if the inode was
+successfully instantiated.
+
+This bug was added in 2.4.20-pre9.
+
+
+ fs/ext3/namei.c | 11 +++++------
+ 1 files changed, 5 insertions(+), 6 deletions(-)
+
+--- 24/fs/ext3/namei.c~ext3-use-after-free Sun Dec 15 11:27:50 2002
++++ 24-akpm/fs/ext3/namei.c Sun Dec 15 11:27:50 2002
+@@ -429,8 +429,11 @@ static int ext3_add_nondir(handle_t *han
+ {
+ int err = ext3_add_entry(handle, dentry, inode);
+ if (!err) {
+- d_instantiate(dentry, inode);
+- return 0;
++ err = ext3_mark_inode_dirty(handle, inode);
++ if (err == 0) {
++ d_instantiate(dentry, inode);
++ return 0;
++ }
+ }
+ ext3_dec_count(handle, inode);
+ iput(inode);
+@@ -465,7 +468,6 @@ static int ext3_create (struct inode * d
+ inode->i_fop = &ext3_file_operations;
+ inode->i_mapping->a_ops = &ext3_aops;
+ err = ext3_add_nondir(handle, dentry, inode);
+- ext3_mark_inode_dirty(handle, inode);
+ }
+ ext3_journal_stop(handle, dir);
+ return err;
+@@ -490,7 +492,6 @@ static int ext3_mknod (struct inode * di
+ if (!IS_ERR(inode)) {
+ init_special_inode(inode, mode, rdev);
+ err = ext3_add_nondir(handle, dentry, inode);
+- ext3_mark_inode_dirty(handle, inode);
+ }
+ ext3_journal_stop(handle, dir);
+ return err;
+@@ -934,7 +935,6 @@ static int ext3_symlink (struct inode *
+ }
+ inode->u.ext3_i.i_disksize = inode->i_size;
+ err = ext3_add_nondir(handle, dentry, inode);
+- ext3_mark_inode_dirty(handle, inode);
+ out_stop:
+ ext3_journal_stop(handle, dir);
+ return err;
+@@ -971,7 +971,6 @@ static int ext3_link (struct dentry * ol
+ atomic_inc(&inode->i_count);
+
+ err = ext3_add_nondir(handle, dentry, inode);
+- ext3_mark_inode_dirty(handle, inode);
+ ext3_journal_stop(handle, dir);
+ return err;
+ }
+
+_
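
The bug fixed above is a general ownership-ordering problem: ext3_add_nondir() consumes the inode reference on failure, so any further use by the caller touches freed memory. The following stand-alone toy illustrates the pattern and the fixed ordering; the names are invented for the example and are not the ext3 functions:

#include <stdlib.h>
#include <stdio.h>

/* Toy model of the ordering bug: add() consumes the object on failure,
 * so the caller must not touch it afterwards. */
struct obj { int dirty; };

static int add(struct obj *o, int fail)
{
    if (fail) {
        free(o);            /* like iput() dropping the last reference */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct obj *o = malloc(sizeof(*o));
    if (!o)
        return 1;

    /* Buggy order:   err = add(o, 1); o->dirty = 1;   <- use after free.
     * Fixed order: only touch the object once add() reports success. */
    if (add(o, 0) == 0) {
        o->dirty = 1;       /* safe: we still own the object */
        printf("instantiated, dirty=%d\n", o->dirty);
        free(o);
    }
    return 0;
}
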
--- /dev/null
+--- linux-2.4.17/fs/ext3/super.c.orig Fri Dec 21 10:41:55 2001
++++ linux-2.4.17/fs/ext3/super.c Fri Mar 22 11:00:41 2002
+@@ -1742,7 +1742,7 @@
+ unregister_filesystem(&ext3_fs_type);
+ }
+
+-EXPORT_NO_SYMBOLS;
++EXPORT_SYMBOL(ext3_bread);
+
+ MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
+ MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
--- /dev/null
+--- ./include/linux/ext3_fs.h.orig Tue May 7 17:06:03 2002
++++ ./include/linux/ext3_fs.h Tue May 7 17:07:11 2002
+@@ -17,6 +17,8 @@
+ #define _LINUX_EXT3_FS_H
+
+ #include <linux/types.h>
++#include <linux/ext3_fs_sb.h>
++#include <linux/ext3_fs_i.h>
+
+ /*
+ * The second extended filesystem constants/structures
+@@ -86,8 +88,8 @@
+ #define EXT3_MIN_BLOCK_LOG_SIZE 10
+
+ #ifdef __KERNEL__
+-#define EXT3_SB(sb) (&((sb)->u.ext3_sb))
+-#define EXT3_I(inode) (&((inode)->u.ext3_i))
++#define EXT3_SB(sb) ((struct ext3_sb_info *)&((sb)->u.generic_sbp))
++#define EXT3_I(inode) ((struct ext3_inode_info *)&((inode)->u.generic_ip))
+
+ #define EXT3_BLOCK_SIZE(s) ((s)->s_blocksize)
+ #define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
+@@ -447,7 +447,9 @@
+ #define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
+ static inline struct inode *orphan_list_entry(struct list_head *l)
+ {
+- return list_entry(l, struct inode, u.ext3_i.i_orphan);
++ return ((struct inode *)((char *)l -
++ (unsigned long)(offsetof(struct inode, u.generic_ip) +
++ offsetof(struct ext3_inode_info, i_orphan))));
+ }
+
+ /*
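
The open-coded pointer arithmetic in the new orphan_list_entry() is the usual list_entry()/container_of() trick: subtract the embedded member's total offset from the member's address to recover the containing inode, here going through u.generic_ip. A self-contained user-space sketch of the same offsetof() arithmetic, using made-up structures rather than the real struct inode:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct demo_inode_info { int flags; struct list_head i_orphan; };
struct demo_inode { unsigned long i_ino; struct demo_inode_info info; };

/* Given a pointer to the embedded i_orphan list head, walk back over the
 * combined offsets to get the enclosing demo_inode. */
#define demo_container(ptr) \
    ((struct demo_inode *)((char *)(ptr) - \
        (offsetof(struct demo_inode, info) + \
         offsetof(struct demo_inode_info, i_orphan))))

int main(void)
{
    struct demo_inode inode = { .i_ino = 12 };
    struct list_head *l = &inode.info.i_orphan;

    printf("recovered ino = %lu\n", demo_container(l)->i_ino);
    return 0;
}
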
--- /dev/null
+--- linux/include/linux/ext3_fs.h.orig Fri Mar 14 18:09:02 2003
++++ linux/include/linux/ext3_fs.h Fri Mar 14 18:10:20 2003
+@@ -190,7 +192,8 @@
+ */
+ #define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */
+ #define EXT3_STATE_NEW 0x00000002 /* inode is newly created */
++#define EXT3_STATE_DELETE 0x00000010 /* deferred delete inode */
+
+ /*
+ * ioctl commands
+--- linux/include/linux/ext3_fs_sb.h.orig Tue Feb 11 16:34:33 2003
++++ linux/include/linux/ext3_fs_sb.h Mon Mar 10 14:42:07 2003
+@@ -29,6 +29,8 @@
+
+ #define EXT3_MAX_GROUP_LOADED 32
+
++#define EXT3_DELETE_THREAD
++
+ /*
+ * third extended-fs super-block data in memory
+ */
+@@ -73,7 +75,15 @@
+ struct timer_list turn_ro_timer; /* For turning read-only (crash simulation) */
+ wait_queue_head_t ro_wait_queue; /* For people waiting for the fs to go read-only */
+ #endif
++#ifdef EXT3_DELETE_THREAD
++ spinlock_t s_delete_lock;
++ struct list_head s_delete_list;
++ unsigned long s_delete_blocks;
++ unsigned long s_delete_inodes;
++ wait_queue_head_t s_delete_thread_queue;
++ wait_queue_head_t s_delete_waiter_queue;
++#endif
+ };
+
+ #endif /* _LINUX_EXT3_FS_SB */
+--- linux/fs/ext3/super.c.orig Wed Mar 12 14:05:30 2003
++++ linux/fs/ext3/super.c Thu Mar 13 19:05:26 2003
+@@ -396,6 +396,207 @@
+ }
+ }
+
++#ifdef EXT3_DELETE_THREAD
++/*
++ * Delete inodes in a loop until there are no more to be deleted.
++ * Normally, we run in the background doing the deletes and sleeping again,
++ * and clients just add new inodes to be deleted onto the end of the list.
++ * If someone is concerned about free space (e.g. block allocation or similar)
++ * then they can sleep on s_delete_waiter_queue and be woken up when space
++ * has been freed.
++ */
++int ext3_delete_thread(void *data)
++{
++ struct super_block *sb = data;
++ struct ext3_sb_info *sbi = EXT3_SB(sb);
++ struct task_struct *tsk = current;
++
++ /* Almost like daemonize, but not quite */
++ exit_mm(current);
++ tsk->session = 1;
++ tsk->pgrp = 1;
++ tsk->tty = NULL;
++ exit_files(current);
++ reparent_to_init();
++
++ sprintf(tsk->comm, "kdelext3-%s", kdevname(sb->s_dev));
++ sigfillset(&tsk->blocked);
++
++ tsk->flags |= PF_KERNTHREAD;
++
++ INIT_LIST_HEAD(&sbi->s_delete_list);
++ wake_up(&sbi->s_delete_waiter_queue);
++ printk(KERN_INFO "EXT3-fs: delete thread on %s started\n",
++ kdevname(sb->s_dev));
++
++ /* main loop */
++ for (;;) {
++ sleep_on(&sbi->s_delete_thread_queue);
++ printk(KERN_DEBUG "%s woken up: %lu inodes, %lu blocks\n",
++ tsk->comm, sbi->s_delete_inodes, sbi->s_delete_blocks);
++
++ spin_lock(&sbi->s_delete_lock);
++ if (list_empty(&sbi->s_delete_list)) {
++ memset(&sbi->s_delete_list, 0,
++ sizeof(sbi->s_delete_list));
++ spin_unlock(&sbi->s_delete_lock);
++ printk(KERN_DEBUG "ext3 delete thread on %s exiting\n",
++ kdevname(sb->s_dev));
++ wake_up(&sbi->s_delete_waiter_queue);
++ break;
++ }
++
++ while (!list_empty(&sbi->s_delete_list)) {
++ struct inode *inode=list_entry(sbi->s_delete_list.next,
++ struct inode, i_dentry);
++ unsigned long blocks = inode->i_blocks >>
++ (inode->i_blkbits - 9);
++
++ list_del_init(&inode->i_dentry);
++ spin_unlock(&sbi->s_delete_lock);
++ printk(KERN_DEBUG "%s delete ino %lu blk %lu\n",
++ tsk->comm, inode->i_ino, blocks);
++
++ iput(inode);
++
++ spin_lock(&sbi->s_delete_lock);
++ sbi->s_delete_blocks -= blocks;
++ sbi->s_delete_inodes--;
++ }
++ if (sbi->s_delete_blocks != 0 || sbi->s_delete_inodes != 0)
++ printk(KERN_WARNING
++ "%lu blocks and %lu left on list?\n",
++ sbi->s_delete_blocks, sbi->s_delete_inodes);
++ sbi->s_delete_blocks = 0;
++ sbi->s_delete_inodes = 0;
++ spin_unlock(&sbi->s_delete_lock);
++ wake_up(&sbi->s_delete_waiter_queue);
++ }
++
++ return 0;
++}
++
++static void ext3_start_delete_thread(struct super_block *sb)
++{
++ struct ext3_sb_info *sbi = EXT3_SB(sb);
++ int rc;
++
++ spin_lock_init(&sbi->s_delete_lock);
++ memset(&sbi->s_delete_list, 0, sizeof(sbi->s_delete_list));
++ init_waitqueue_head(&sbi->s_delete_thread_queue);
++ init_waitqueue_head(&sbi->s_delete_waiter_queue);
++ sbi->s_delete_blocks = 0;
++ sbi->s_delete_inodes = 0;
++ rc = kernel_thread(ext3_delete_thread, sb, CLONE_VM | CLONE_FILES);
++ if (rc < 0)
++ printk(KERN_ERR "EXT3-fs: cannot start delete thread: rc %d\n",
++ rc);
++ else
++ wait_event(sbi->s_delete_waiter_queue, sbi->s_delete_list.next);
++}
++
++static void ext3_stop_delete_thread(struct ext3_sb_info *sbi)
++{
++ wake_up(&sbi->s_delete_thread_queue);
++ wait_event(sbi->s_delete_waiter_queue, list_empty(&sbi->s_delete_list));
++}
++
++/* Instead of playing games with the inode flags, destruction, etc we just
++ * duplicate the inode data locally and put it on a list for the truncate
++ * thread. We need large parts of the inode struct in order to complete
++ * the truncate and unlink, so we may as well just copy the whole thing.
++ *
++ * If we have any problem deferring the delete, just delete it right away.
++ * If we defer it, we also mark how many blocks it would free, so that we
++ * can keep the statfs data correct, and we know if we should sleep on the
++ * truncate thread when we run out of space.
++ *
++ * One shouldn't consider this duplicate an "inode", as it isn't really
++ * visible to the VFS, but rather a data struct that holds truncate data.
++ *
++ * In 2.5 this can be done much more cleanly by just registering a "drop"
++ * method in the super_operations struct.
++ */
++static void ext3_delete_inode_thread(struct inode *old_inode)
++{
++ struct ext3_sb_info *sbi = EXT3_SB(old_inode->i_sb);
++ struct inode *new_inode;
++ unsigned long blocks = old_inode->i_blocks >> (old_inode->i_blkbits-9);
++
++ if (is_bad_inode(old_inode)) {
++ clear_inode(old_inode);
++ return;
++ }
++
++ /* We may want to delete the inode immediately and not defer it */
++ if (IS_SYNC(old_inode) || blocks <= EXT3_NDIR_BLOCKS ||
++ !sbi->s_delete_list.next) {
++ ext3_delete_inode(old_inode);
++ return;
++ }
++
++ if (EXT3_I(old_inode)->i_state & EXT3_STATE_DELETE) {
++ ext3_debug("doing deferred inode %lu delete (%lu blocks)\n",
++ old_inode->i_ino, blocks);
++ ext3_delete_inode(old_inode);
++ return;
++ }
++
++ /* We can iget this inode again here, because our caller has unhashed
++ * old_inode, so new_inode will be in a different inode struct.
++ *
++ * We need to ensure that the i_orphan pointers in the other inodes
++ * point at the new inode copy instead of the old one so the orphan
++ * list doesn't get corrupted when the old orphan inode is freed.
++ */
++ down(&sbi->s_orphan_lock);
++
++ EXT3_SB(old_inode->i_sb)->s_mount_state |= EXT3_ORPHAN_FS;
++ new_inode = iget(old_inode->i_sb, old_inode->i_ino);
++ EXT3_SB(old_inode->i_sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
++ if (is_bad_inode(new_inode)) {
++ printk(KERN_WARNING "read bad inode %lu\n", old_inode->i_ino);
++ iput(new_inode);
++ new_inode = NULL;
++ }
++ if (!new_inode) {
++ up(&sbi->s_orphan_lock);
++ ext3_debug("delete inode %lu directly (bad read)\n",
++ old_inode->i_ino);
++ ext3_delete_inode(old_inode);
++ return;
++ }
++ J_ASSERT(new_inode != old_inode);
++
++ J_ASSERT(!list_empty(&EXT3_I(old_inode)->i_orphan));
++ /* Ugh. We need to insert new_inode into the same spot on the list
++ * as old_inode was, to ensure the in-memory orphan list is still
++ * the same as the on-disk orphan list.
++ */
++ EXT3_I(new_inode)->i_orphan = EXT3_I(old_inode)->i_orphan;
++ EXT3_I(new_inode)->i_orphan.next->prev = &EXT3_I(new_inode)->i_orphan;
++ EXT3_I(new_inode)->i_orphan.prev->next = &EXT3_I(new_inode)->i_orphan;
++ EXT3_I(new_inode)->i_state |= EXT3_STATE_DELETE;
++ up(&sbi->s_orphan_lock);
++
++ clear_inode(old_inode);
++
++ printk(KERN_DEBUG "delete inode %lu (%lu blocks) by thread\n",
++ new_inode->i_ino, blocks);
++ spin_lock(&sbi->s_delete_lock);
++ J_ASSERT(list_empty(&new_inode->i_dentry));
++ list_add_tail(&new_inode->i_dentry, &sbi->s_delete_list);
++ sbi->s_delete_blocks += blocks;
++ sbi->s_delete_inodes++;
++ spin_unlock(&sbi->s_delete_lock);
++
++ wake_up(&sbi->s_delete_thread_queue);
++}
++#else
++#define ext3_start_delete_thread(sbi) do {} while(0)
++#define ext3_stop_delete_thread(sbi) do {} while(0)
++#endif /* EXT3_DELETE_THREAD */
++
+ void ext3_put_super (struct super_block * sb)
+ {
+ struct ext3_sb_info *sbi = EXT3_SB(sb);
+@@ -403,6 +578,7 @@
+ kdev_t j_dev = sbi->s_journal->j_dev;
+ int i;
+
++ ext3_stop_delete_thread(sbi);
+ ext3_xattr_put_super(sb);
+ journal_destroy(sbi->s_journal);
+ if (!(sb->s_flags & MS_RDONLY)) {
+@@ -451,7 +627,11 @@
+ write_inode: ext3_write_inode, /* BKL not held. Don't need */
+ dirty_inode: ext3_dirty_inode, /* BKL not held. We take it */
+ put_inode: ext3_put_inode, /* BKL not held. Don't need */
++#ifdef EXT3_DELETE_THREAD
++ delete_inode: ext3_delete_inode_thread,/* BKL not held. We take it */
++#else
+ delete_inode: ext3_delete_inode, /* BKL not held. We take it */
++#endif
+ put_super: ext3_put_super, /* BKL held */
+ write_super: ext3_write_super, /* BKL held */
+ write_super_lockfs: ext3_write_super_lockfs, /* BKL not held. Take it */
+@@ -1205,6 +1385,7 @@
+ }
+
+ ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
++ ext3_start_delete_thread(sb);
+ /*
+ * akpm: core read_super() calls in here with the superblock locked.
+ * That deadlocks, because orphan cleanup needs to lock the superblock
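The comment at the top of ext3_delete_thread() notes that code worried about free space can sleep on s_delete_waiter_queue until a pass of deferred deletes completes. A sketch of such a caller, using only the fields this patch adds; the function itself is hypothetical and not part of the patch.

#ifdef EXT3_DELETE_THREAD
/* Hypothetical caller: block until the delete thread has drained its
 * backlog (it wakes s_delete_waiter_queue after each pass).  The unlocked
 * read of s_delete_blocks mirrors the style of ext3_start_delete_thread(). */
static void ext3_wait_for_deferred_deletes(struct super_block *sb)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);

	if (sbi->s_delete_blocks == 0)
		return;				/* nothing queued */

	wake_up(&sbi->s_delete_thread_queue);	/* kick the thread */
	wait_event(sbi->s_delete_waiter_queue, sbi->s_delete_blocks == 0);
}
#endif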
--- /dev/null
+--- linux/fs/ext3/namei.c.orig Thu Jan 30 01:15:13 2003
++++ linux/fs/ext3/namei.c Sat Feb 1 00:33:46 2003
+@@ -710,6 +710,24 @@
+ return ret;
+ }
+
++static int ext3_find_inode(struct inode *inode, unsigned long ino,
++ void *opaque)
++{
++ const char *name = NULL;
++ int len = 0;
++
++ if (opaque) {
++ struct dentry *dentry = opaque;
++ name = dentry->d_name.name;
++ len = dentry->d_name.len;
++ }
++ printk(KERN_INFO "finding inode %s:%lu (%p) count %d (%p = %*s)\n",
++ kdevname(inode->i_dev), ino, inode, atomic_read(&inode->i_count),
++ opaque, len, name ? name : "");
++
++ return 1;
++}
++
+ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry)
+ {
+ struct inode * inode;
+@@ -724,8 +742,8 @@
+ if (bh) {
+ unsigned long ino = le32_to_cpu(de->inode);
+ brelse (bh);
+- inode = iget(dir->i_sb, ino);
++ inode = iget4(dir->i_sb, ino, ext3_find_inode, dentry);
+
+ if (!inode)
+ return ERR_PTR(-EACCES);
+--- linux/fs/ext3/inode.c.orig Thu Jan 30 01:15:13 2003
++++ linux/fs/ext3/inode.c Sat Feb 1 00:34:45 2003
+@@ -166,6 +166,9 @@
+ */
+ void ext3_put_inode (struct inode * inode)
+ {
++ printk(KERN_INFO "putting inode %s:%lu (%p) count %d\n",
++ kdevname(inode->i_dev), inode->i_ino, inode,
++ atomic_read(&inode->i_count));
+ ext3_discard_prealloc (inode);
+ }
+
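The lookup hunk above only swaps iget() for iget4() so that a debugging callback sees every candidate inode; ext3_find_inode() logs and always returns 1, which under the 2.4 iget4() convention means "accept this cached inode". A purely hypothetical example of a callback that actually filters, to show which way the return value points.

/* Hypothetical find_actor for iget4(): return non-zero to accept the
 * cached candidate, 0 to keep searching.  Not part of the patch above. */
static int example_find_actor(struct inode *inode, unsigned long ino,
			      void *opaque)
{
	struct dentry *dentry = opaque;	/* passed through from ext3_lookup */

	if (dentry == NULL)
		return 1;		/* nothing to compare against */
	if (is_bad_inode(inode))
		return 0;		/* skip inodes being torn down */
	return 1;
}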
--- /dev/null
+--- linux-2.4.17/fs/ext3/super.c.orig Fri Dec 21 10:41:55 2001
++++ linux-2.4.17/fs/ext3/super.c Fri Mar 22 11:00:41 2002
+@@ -1344,10 +1342,10 @@
+ printk(KERN_ERR "EXT3-fs: I/O error on journal device\n");
+ goto out_journal;
+ }
+- if (ntohl(journal->j_superblock->s_nr_users) != 1) {
++ if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
+ printk(KERN_ERR "EXT3-fs: External journal has more than one "
+ "user (unsupported) - %d\n",
+- ntohl(journal->j_superblock->s_nr_users));
++ be32_to_cpu(journal->j_superblock->s_nr_users));
+ goto out_journal;
+ }
+ EXT3_SB(sb)->journal_bdev = bdev;
+@@ -1560,6 +1560,7 @@
+ unlock_kernel();
+ return ret;
+ }
++EXPORT_SYMBOL(ext3_force_commit); /* here to avoid potential patch collisions */
+
+ /*
+ * Ext3 always journals updates to the superblock itself, so we don't
--- /dev/null
+diff -ru lustre-head/fs/ext3/ialloc.c lustre/fs/ext3/ialloc.c
+--- lustre-head/fs/ext3/ialloc.c Mon Dec 23 10:02:58 2002
++++ lustre/fs/ext3/ialloc.c Mon Dec 23 09:46:20 2002
+@@ -289,6 +289,37 @@
+ }
+
+ /*
++ * @block_group: block group of inode
++ * @offset: relative offset of inode within @block_group
++ *
++ * Check whether any of the inodes in this disk block are in use.
++ *
++ * Caller must be holding superblock lock (group/bitmap read lock in future).
++ */
++int ext3_itable_block_used(struct super_block *sb, unsigned int block_group,
++ int offset)
++{
++ int bitmap_nr = load_inode_bitmap(sb, block_group);
++ int inodes_per_block;
++ unsigned long inum, iend;
++ struct buffer_head *ibitmap;
++
++ if (bitmap_nr < 0)
++ return 1;
++
++ inodes_per_block = sb->s_blocksize / EXT3_SB(sb)->s_inode_size;
++ inum = offset & ~(inodes_per_block - 1);
++ iend = inum + inodes_per_block;
++ ibitmap = EXT3_SB(sb)->s_inode_bitmap[bitmap_nr];
++ for (; inum < iend; inum++) {
++ if (inum != offset && ext3_test_bit(inum, ibitmap->b_data))
++ return 1;
++ }
++
++ return 0;
++}
++
++/*
+ * There are two policies for allocating an inode. If the new inode is
+ * a directory, then a forward search is made for a block group with both
+ * free space and a low directory-to-inode ratio; if that fails, then of
+@@ -312,6 +343,7 @@
+ struct ext3_group_desc * gdp;
+ struct ext3_group_desc * tmp;
+ struct ext3_super_block * es;
++ struct ext3_iloc iloc;
+ int err = 0;
+
+ /* Cannot create files in a deleted directory */
+@@ -505,7 +538,7 @@
+ ei->i_prealloc_count = 0;
+ #endif
+ ei->i_block_group = i;
+-
++
+ if (ei->i_flags & EXT3_SYNC_FL)
+ inode->i_flags |= S_SYNC;
+ if (IS_SYNC(inode))
+@@ -514,9 +547,18 @@
+ inode->i_generation = sbi->s_next_generation++;
+
+ ei->i_state = EXT3_STATE_NEW;
+- err = ext3_mark_inode_dirty(handle, inode);
++ err = ext3_get_inode_loc_new(inode, &iloc, 1);
+ if (err) goto fail;
+-
++ BUFFER_TRACE(iloc->bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, iloc.bh);
++ if (err) {
++ brelse(iloc.bh);
++ iloc.bh = NULL;
++ goto fail;
++ }
++ err = ext3_mark_iloc_dirty(handle, inode, &iloc);
++ if (err) goto fail;
++
+ unlock_super (sb);
+ if(DQUOT_ALLOC_INODE(inode)) {
+ DQUOT_DROP(inode);
+diff -ru lustre-head/fs/ext3/inode.c lustre/fs/ext3/inode.c
+--- lustre-head/fs/ext3/inode.c Mon Dec 23 10:02:58 2002
++++ lustre/fs/ext3/inode.c Mon Dec 23 09:50:25 2002
+@@ -2011,23 +1994,28 @@
+ ext3_journal_stop(handle, inode);
+ }
+
+-/*
+- * ext3_get_inode_loc returns with an extra refcount against the
+- * inode's underlying buffer_head on success.
+- */
++#define NUM_INODE_PREREAD 16
+
+-int ext3_get_inode_loc (struct inode *inode, struct ext3_iloc *iloc)
++/*
++ * ext3_get_inode_loc returns with an extra refcount against the inode's
++ * underlying buffer_head on success. If this is for a new inode allocation
++ * (new is non-zero) then we may be able to optimize away the read if there
++ * are no other in-use inodes in this inode table block. If we need to do
++ * a read, then read in a whole chunk of blocks to avoid blocking again soon
++ * if we are doing lots of creates/updates.
++ */
++int ext3_get_inode_loc_new(struct inode *inode, struct ext3_iloc *iloc, int new)
+ {
+ struct super_block *sb = inode->i_sb;
+ struct ext3_sb_info *sbi = EXT3_SB(sb);
+- struct buffer_head *bh = 0;
++ struct buffer_head *bh[NUM_INODE_PREREAD];
+ unsigned long block;
+ unsigned long block_group;
+ unsigned long group_desc;
+ unsigned long desc;
+ unsigned long offset;
+ struct ext3_group_desc * gdp;
+-
++
+ if ((inode->i_ino != EXT3_ROOT_INO &&
+ inode->i_ino != EXT3_JOURNAL_INO &&
+ inode->i_ino < EXT3_FIRST_INO(sb)) ||
+@@ -2042,38 +2034,86 @@
+ }
+ group_desc = block_group >> sbi->s_desc_per_block_bits;
+ desc = block_group & (sbi->s_desc_per_block - 1);
+- bh = sbi->s_group_desc[group_desc];
+- if (!bh) {
++ if (!sbi->s_group_desc[group_desc]) {
+ ext3_error(sb, __FUNCTION__, "Descriptor not loaded");
+ goto bad_inode;
+ }
+
+- gdp = (struct ext3_group_desc *) bh->b_data;
++ gdp = (struct ext3_group_desc *)(sbi->s_group_desc[group_desc]->b_data);
++
+ /*
+ * Figure out the offset within the block group inode table
+ */
+- offset = ((inode->i_ino - 1) % sbi->s_inodes_per_group) *
+- sbi->s_inode_size;
++ offset = ((inode->i_ino - 1) % sbi->s_inodes_per_group);
++
+ block = le32_to_cpu(gdp[desc].bg_inode_table) +
+- (offset >> EXT3_BLOCK_SIZE_BITS(sb));
+- if (!(bh = sb_bread(sb, block))) {
+- ext3_error (sb, __FUNCTION__,
+- "unable to read inode block - "
+- "inode=%lu, block=%lu", inode->i_ino, block);
+- goto bad_inode;
++ (offset * sbi->s_inode_size >> EXT3_BLOCK_SIZE_BITS(sb));
++
++ bh[0] = sb_getblk(sb, block);
++ if (buffer_uptodate(bh[0]))
++ goto done;
++
++ /* If we don't really need to read this block, and it isn't already
++ * in memory, then we just zero it out. Otherwise, we keep the
++ * current block contents (deleted inode data) for posterity.
++ */
++ if (new && !ext3_itable_block_used(sb, block_group, offset)) {
++ lock_buffer(bh[0]);
++ memset(bh[0]->b_data, 0, bh[0]->b_size);
++ mark_buffer_uptodate(bh[0], 1);
++ unlock_buffer(bh[0]);
++ } else {
++ unsigned long block_end, itable_end;
++ int count = 1;
++
++ itable_end = le32_to_cpu(gdp[desc].bg_inode_table) +
++ sbi->s_itb_per_group;
++ block_end = block + NUM_INODE_PREREAD;
++ if (block_end > itable_end)
++ block_end = itable_end;
++
++ for (; block < block_end; block++) {
++ bh[count] = sb_getblk(sb, block);
++ if (count && (buffer_uptodate(bh[count]) ||
++ buffer_locked(bh[count]))) {
++ __brelse(bh[count]);
++ } else
++ count++;
++ }
++
++ ll_rw_block(READ, count, bh);
++
++ /* Release all but the block we actually need (bh[0]) */
++ while (--count > 0)
++ __brelse(bh[count]);
++
++ wait_on_buffer(bh[0]);
++ if (!buffer_uptodate(bh[0])) {
++ ext3_error(sb, __FUNCTION__,
++ "unable to read inode block - "
++ "inode=%lu, block=%lu", inode->i_ino,
++ bh[0]->b_blocknr);
++ goto bad_inode;
++ }
+ }
+- offset &= (EXT3_BLOCK_SIZE(sb) - 1);
++ done:
++ offset = (offset * sbi->s_inode_size) & (EXT3_BLOCK_SIZE(sb) - 1);
+
+- iloc->bh = bh;
+- iloc->raw_inode = (struct ext3_inode *) (bh->b_data + offset);
++ iloc->bh = bh[0];
++ iloc->raw_inode = (struct ext3_inode *)(bh[0]->b_data + offset);
+ iloc->block_group = block_group;
+-
++
+ return 0;
+-
++
+ bad_inode:
+ return -EIO;
+ }
+
++int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
++{
++ return ext3_get_inode_loc_new(inode, iloc, 0);
++}
++
+ void ext3_read_inode(struct inode * inode)
+ {
+ struct ext3_iloc iloc;
+diff -ru include/linux/ext3_fs.h.orig include/linux/ext3_fs.h
+--- lustre/include/linux/ext3_fs.h.orig Sat Mar 8 01:23:09 2003
++++ lustre/include/linux/ext3_fs.h Sat Mar 8 01:24:31 2003
+@@ -642,6 +646,8 @@
+ extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
+ extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+
++extern int ext3_itable_block_used(struct super_block *sb, unsigned int, int);
++extern int ext3_get_inode_loc_new(struct inode *, struct ext3_iloc *, int);
+ extern int ext3_get_inode_loc (struct inode *, struct ext3_iloc *);
+ extern void ext3_read_inode (struct inode *);
+ extern void ext3_write_inode (struct inode *, int);
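ext3_get_inode_loc_new() above first turns an inode number into a group, an inode-table block, and a byte offset within that block, and only then decides whether the block must be read (or pre-read in NUM_INODE_PREREAD chunks). The arithmetic pulled out on its own, with made-up filesystem parameters for illustration.

/* Standalone sketch of the inode-location arithmetic used above.
 * All numbers below are hypothetical, not taken from any superblock. */
#include <stdio.h>

int main(void)
{
	unsigned long ino              = 50000;	/* inode to locate */
	unsigned long inodes_per_group = 32768;
	unsigned long inode_size       = 128;
	unsigned long block_size       = 4096;
	unsigned long itable_start     = 1234;	/* gdp->bg_inode_table */

	unsigned long group  = (ino - 1) / inodes_per_group;
	unsigned long offset = (ino - 1) % inodes_per_group;
	unsigned long block  = itable_start + offset * inode_size / block_size;
	unsigned long byte   = (offset * inode_size) % block_size;

	printf("ino %lu: group %lu, itable block %lu, byte offset %lu\n",
	       ino, group, block, byte);
	return 0;
}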
--- /dev/null
+--- linux/fs/ext3/inode.orig.c 2002-12-29 18:48:56.000000000 +0800
++++ linux/fs/ext3/inode.c 2002-12-29 19:17:24.000000000 +0800
+@@ -2728,3 +2728,85 @@
+ * here, in ext3_aops_journal_start() to ensure that the forthcoming "see if we
+ * need to extend" test in ext3_prepare_write() succeeds.
+ */
++
++/* for each block: 1 ind + 1 dind + 1 tind
++ * for each block: 3 bitmap blocks
++ * for each block: 3 group descriptor blocks
++ * 1 inode block
++ * 1 superblock
++ * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
++ * ((1+1+1) * 3 * nblocks) + 1 + 1 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
++ *
++ * XXX assuming:
++ * (1) fs logical block size == page size
++ * (2) ext3 in writeback mode
++ */
++static inline int ext3_san_write_trans_blocks(int nblocks)
++{
++ int ret;
++
++ ret = (1 + 1 + 1) * 3 * nblocks + 1 + 1;
++
++#ifdef CONFIG_QUOTA
++ ret += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
++#endif
++
++ return ret;
++}
++
++/* Allocate blocks for an inode without creating any buffer/page
++ * for data I/O; set the inode size if the file is extended.
++ *
++ * @inode: target inode
++ * @blocks: array of logical block numbers
++ * @nblocks: how many blocks need to be allocated
++ * @newsize: new file size we should set
++ *
++ * return: 0 on success, error code otherwise
++ * (*blocks) contains the physical block numbers allocated
++ *
++ * XXX this assumes the fs block size == page size
++ */
++int ext3_prep_san_write(struct inode *inode, long *blocks,
++ int nblocks, loff_t newsize)
++{
++ handle_t *handle;
++ struct buffer_head bh_tmp;
++ int needed_blocks;
++ int i, ret = 0, ret2;
++
++ needed_blocks = ext3_san_write_trans_blocks(nblocks);
++
++ lock_kernel();
++ handle = ext3_journal_start(inode, needed_blocks);
++ if (IS_ERR(handle)) {
++ unlock_kernel();
++ return PTR_ERR(handle);
++ }
++ unlock_kernel();
++
++ /* alloc blocks one by one */
++ for (i = 0; i < nblocks; i++) {
++ ret = ext3_get_block_handle(handle, inode, blocks[i],
++ &bh_tmp, 1);
++ if (ret)
++ break;
++
++ blocks[i] = bh_tmp.b_blocknr;
++ }
++
++ /* set inode size if needed */
++ if (!ret && (newsize > inode->i_size)) {
++ inode->i_size = newsize;
++ ext3_mark_inode_dirty(handle, inode);
++ }
++
++ lock_kernel();
++ ret2 = ext3_journal_stop(handle, inode);
++ unlock_kernel();
++
++ if (!ret)
++ ret = ret2;
++ return ret;
++}
++EXPORT_SYMBOL(ext3_prep_san_write);
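A worked instance of the credit estimate above: for a hypothetical 16-block request, ext3_san_write_trans_blocks() reserves (1+1+1) * 3 * 16 + 1 + 1 = 146 journal credits, plus 2 * EXT3_SINGLEDATA_TRANS_BLOCKS when CONFIG_QUOTA is enabled.

/* Worked example (hypothetical request size): credits needed to
 * journal a 16-block SAN write, quota disabled. */
static inline int san_write_credits_for_16_blocks(void)
{
	return (1 + 1 + 1) * 3 * 16 + 1 + 1;	/* = 146 */
}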
--- /dev/null
+--- lustre/extN-clean/namei.c 2002-12-30 05:56:09.000000000 -0500
++++ lustre/extN/namei.c 2002-12-30 06:29:39.000000000 -0500
+@@ -1224,7 +1224,8 @@
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+
+- inode = extN_new_inode (handle, dir, mode);
++ inode = extN_new_inode (handle, dir, mode,
++ (unsigned long)dentry->d_fsdata);
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ inode->i_op = &extN_file_inode_operations;
+@@ -1254,7 +1254,8 @@
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+
+- inode = extN_new_inode (handle, dir, mode);
++ inode = extN_new_inode (handle, dir, mode,
++ (unsigned long)dentry->d_fsdata);
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ init_special_inode(inode, mode, rdev);
+@@ -1286,7 +1286,8 @@
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+
+- inode = extN_new_inode (handle, dir, S_IFDIR | mode);
++ inode = extN_new_inode (handle, dir, S_IFDIR | mode,
++ (unsigned long)dentry->d_fsdata);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_stop;
+@@ -1680,7 +1681,8 @@
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+
+- inode = extN_new_inode (handle, dir, S_IFLNK|S_IRWXUGO);
++ inode = extN_new_inode (handle, dir, S_IFLNK|S_IRWXUGO,
++ (unsigned long)dentry->d_fsdata);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_stop;
+--- lustre/extN-clean/ialloc.c 2002-12-28 23:56:42.000000000 -0500
++++ lustre/extN/ialloc.c 2002-12-30 06:29:39.000000000 -0500
+@@ -329,8 +329,8 @@
+ * For other inodes, search forward from the parent directory's block
+ * group to find a free inode.
+ */
+-struct inode * extN_new_inode (handle_t *handle,
+- const struct inode * dir, int mode)
++struct inode *extN_new_inode(handle_t *handle, const struct inode *dir,
++ int mode, unsigned long goal)
+ {
+ struct super_block * sb;
+ struct buffer_head * bh;
+@@ -360,6 +361,38 @@
+
+ lock_super (sb);
+ es = sbi->s_es;
++
++ if (goal) {
++ i = (goal - 1) / EXTN_INODES_PER_GROUP(sb);
++ j = (goal - 1) % EXTN_INODES_PER_GROUP(sb);
++ gdp = extN_get_group_desc(sb, i, &bh2);
++
++ bitmap_nr = load_inode_bitmap (sb, i);
++ if (bitmap_nr < 0)
++ goto fail;
++
++ bh = sbi->s_inode_bitmap[bitmap_nr];
++
++ BUFFER_TRACE(bh, "get_write_access");
++ err = extN_journal_get_write_access(handle, bh);
++ if (err) goto fail;
++
++ if (extN_set_bit(j, bh->b_data)) {
++ printk(KERN_ERR "goal inode %lu unavailable\n", goal);
++ /* Oh well, we tried. */
++ goto repeat;
++ }
++
++ BUFFER_TRACE(bh, "call extN_journal_dirty_metadata");
++ err = extN_journal_dirty_metadata(handle, bh);
++ if (err) goto fail;
++
++ /* We've shortcircuited the allocation system successfully,
++ * now finish filling in the inode.
++ */
++ goto have_bit_and_group;
++ }
++
+ repeat:
+ gdp = NULL;
+ i = 0;
+@@ -474,6 +509,7 @@
+ }
+ goto repeat;
+ }
++have_bit_and_group:
+ j += i * sbi->s_inodes_per_group + 1;
+ if (j < sbi->s_first_ino || j > le32_to_cpu(es->s_inodes_count)) {
+ extN_error (sb, "extN_new_inode",
+--- lustre/extN-clean/ioctl.c 2002-12-28 23:56:42.000000000 -0500
++++ lustre/extN/ioctl.c 2002-12-30 06:29:39.000000000 -0500
+@@ -24,6 +24,31 @@
+ extN_debug ("cmd = %u, arg = %lu\n", cmd, arg);
+
+ switch (cmd) {
++ case EXTN_IOC_CREATE_INUM: {
++ char name[32];
++ struct dentry *dchild, *dparent;
++ int rc = 0;
++
++ dparent = list_entry(inode->i_dentry.next, struct dentry,
++ d_alias);
++ snprintf(name, sizeof name, "%lu", arg);
++ dchild = lookup_one_len(name, dparent, strlen(name));
++ if (dchild->d_inode) {
++ printk(KERN_ERR "%*s/%lu already exists (ino %lu)\n",
++ dparent->d_name.len, dparent->d_name.name, arg,
++ dchild->d_inode->i_ino);
++ rc = -EEXIST;
++ } else {
++ dchild->d_fsdata = (void *)arg;
++ rc = vfs_create(inode, dchild, 0644);
++ if (rc)
++ printk(KERN_ERR "vfs_create: %d\n", rc);
++ else if (dchild->d_inode->i_ino != arg)
++ rc = -EEXIST;
++ }
++ dput(dchild);
++ return rc;
++ }
+ case EXTN_IOC_GETFLAGS:
+ flags = ei->i_flags & EXTN_FL_USER_VISIBLE;
+ return put_user(flags, (int *) arg);
+--- lustre/include/linux/extN_fs.h~ 2002-12-30 06:01:43.000000000 -0500
++++ lustre/include/linux/extN_fs.h 2002-12-30 06:02:51.000000000 -0500
+@@ -200,6 +200,7 @@
+ #define EXTN_IOC_SETFLAGS _IOW('f', 2, long)
+ #define EXTN_IOC_GETVERSION _IOR('f', 3, long)
+ #define EXTN_IOC_SETVERSION _IOW('f', 4, long)
++/* EXTN_IOC_CREATE_INUM at bottom of file (visible to kernel and user). */
+ #define EXTN_IOC_GETVERSION_OLD _IOR('v', 1, long)
+ #define EXTN_IOC_SETVERSION_OLD _IOW('v', 2, long)
+ #ifdef CONFIG_JBD_DEBUG
+@@ -632,7 +633,8 @@
+ extern int extN_sync_file (struct file *, struct dentry *, int);
+
+ /* ialloc.c */
+-extern struct inode * extN_new_inode (handle_t *, const struct inode *, int);
++extern struct inode * extN_new_inode (handle_t *, const struct inode *, int,
++ unsigned long);
+ extern void extN_free_inode (handle_t *, struct inode *);
+ extern struct inode * extN_orphan_get (struct super_block *, ino_t);
+ extern unsigned long extN_count_free_inodes (struct super_block *);
+@@ -714,4 +716,6 @@
+
+ #endif /* __KERNEL__ */
+
++#define EXTN_IOC_CREATE_INUM _IOW('f', 5, long)
++
+ #endif /* _LINUX_EXTN_FS_H */
--- /dev/null
+ fs/ext3/ialloc.c | 36 +++++++++++++++++++++++++++++++++++-
+ fs/ext3/ioctl.c | 25 +++++++++++++++++++++++++
+ fs/ext3/namei.c | 12 ++++++++----
+ include/linux/ext3_fs.h | 5 ++++-
+ 4 files changed, 72 insertions(+), 6 deletions(-)
+
+--- linux-2.4.18-18/fs/ext3/namei.c~extN-wantedi Sat Apr 5 02:51:31 2003
++++ linux-2.4.18-18-braam/fs/ext3/namei.c Sat Apr 5 02:51:31 2003
+@@ -1212,7 +1212,8 @@ static int ext3_create (struct inode * d
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+
+- inode = ext3_new_inode (handle, dir, mode);
++ inode = ext3_new_inode (handle, dir, mode,
++ (unsigned long)dentry->d_fsdata);
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ inode->i_op = &ext3_file_inode_operations;
+@@ -1240,7 +1241,8 @@ static int ext3_mknod (struct inode * di
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+
+- inode = ext3_new_inode (handle, dir, mode);
++ inode = ext3_new_inode (handle, dir, mode,
++ (unsigned long)dentry->d_fsdata);
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ init_special_inode(inode, mode, rdev);
+@@ -1270,7 +1272,8 @@ static int ext3_mkdir(struct inode * dir
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+
+- inode = ext3_new_inode (handle, dir, S_IFDIR | mode);
++ inode = ext3_new_inode (handle, dir, S_IFDIR | mode,
++ (unsigned long)dentry->d_fsdata);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_stop;
+@@ -1663,7 +1666,8 @@ static int ext3_symlink (struct inode *
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+
+- inode = ext3_new_inode (handle, dir, S_IFLNK|S_IRWXUGO);
++ inode = ext3_new_inode (handle, dir, S_IFLNK|S_IRWXUGO,
++ (unsigned long)dentry->d_fsdata);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_stop;
+--- linux-2.4.18-18/fs/ext3/ialloc.c~extN-wantedi Sat Apr 5 02:51:31 2003
++++ linux-2.4.18-18-braam/fs/ext3/ialloc.c Sat Apr 5 02:51:31 2003
+@@ -330,7 +330,8 @@ int ext3_itable_block_used(struct super_
+ * group to find a free inode.
+ */
+ struct inode * ext3_new_inode (handle_t *handle,
+- const struct inode * dir, int mode)
++ const struct inode * dir, int mode,
++ unsigned long goal)
+ {
+ struct super_block * sb;
+ struct buffer_head * bh;
+@@ -360,6 +361,38 @@ struct inode * ext3_new_inode (handle_t
+
+ lock_super (sb);
+ es = sbi->s_es;
++
++ if (goal) {
++ i = (goal - 1) / EXT3_INODES_PER_GROUP(sb);
++ j = (goal - 1) % EXT3_INODES_PER_GROUP(sb);
++ gdp = ext3_get_group_desc(sb, i, &bh2);
++
++ bitmap_nr = load_inode_bitmap (sb, i);
++ if (bitmap_nr < 0)
++ goto fail;
++
++ bh = sbi->s_inode_bitmap[bitmap_nr];
++
++ BUFFER_TRACE(bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, bh);
++ if (err) goto fail;
++
++ if (ext3_set_bit(j, bh->b_data)) {
++ printk(KERN_ERR "goal inode %lu unavailable\n", goal);
++ /* Oh well, we tried. */
++ goto repeat;
++ }
++
++ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
++ err = ext3_journal_dirty_metadata(handle, bh);
++ if (err) goto fail;
++
++ /* We've shortcircuited the allocation system successfully,
++ * now finish filling in the inode.
++ */
++ goto have_bit_and_group;
++ }
++
+ repeat:
+ gdp = NULL;
+ i = 0;
+@@ -474,6 +507,7 @@ repeat:
+ }
+ goto repeat;
+ }
++ have_bit_and_group:
+ j += i * sbi->s_inodes_per_group + 1;
+ if (j < sbi->s_first_ino || j > le32_to_cpu(es->s_inodes_count)) {
+ ext3_error (sb, "ext3_new_inode",
+--- linux-2.4.18-18/fs/ext3/ioctl.c~extN-wantedi Sat Apr 5 02:51:31 2003
++++ linux-2.4.18-18-braam/fs/ext3/ioctl.c Sat Apr 5 02:51:31 2003
+@@ -24,6 +24,31 @@ int ext3_ioctl (struct inode * inode, st
+ ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);
+
+ switch (cmd) {
++ case EXT3_IOC_CREATE_INUM: {
++ char name[32];
++ struct dentry *dchild, *dparent;
++ int rc = 0;
++
++ dparent = list_entry(inode->i_dentry.next, struct dentry,
++ d_alias);
++ snprintf(name, sizeof name, "%lu", arg);
++ dchild = lookup_one_len(name, dparent, strlen(name));
++ if (dchild->d_inode) {
++ printk(KERN_ERR "%*s/%lu already exists (ino %lu)\n",
++ dparent->d_name.len, dparent->d_name.name, arg,
++ dchild->d_inode->i_ino);
++ rc = -EEXIST;
++ } else {
++ dchild->d_fsdata = (void *)arg;
++ rc = vfs_create(inode, dchild, 0644);
++ if (rc)
++ printk(KERN_ERR "vfs_create: %d\n", rc);
++ else if (dchild->d_inode->i_ino != arg)
++ rc = -EEXIST;
++ }
++ dput(dchild);
++ return rc;
++ }
+ case EXT3_IOC_GETFLAGS:
+ flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
+ return put_user(flags, (int *) arg);
+--- linux-2.4.18-18/include/linux/ext3_fs.h~extN-wantedi Sat Apr 5 02:51:31 2003
++++ linux-2.4.18-18-braam/include/linux/ext3_fs.h Sat Apr 5 02:52:43 2003
+@@ -198,6 +198,7 @@ struct ext3_group_desc
+ #define EXT3_IOC_SETFLAGS _IOW('f', 2, long)
+ #define EXT3_IOC_GETVERSION _IOR('f', 3, long)
+ #define EXT3_IOC_SETVERSION _IOW('f', 4, long)
++/* EXT3_IOC_CREATE_INUM at bottom of file (visible to kernel and user). */
+ #define EXT3_IOC_GETVERSION_OLD _IOR('v', 1, long)
+ #define EXT3_IOC_SETVERSION_OLD _IOW('v', 2, long)
+ #ifdef CONFIG_JBD_DEBUG
+@@ -628,7 +629,8 @@ extern int ext3_check_dir_entry(const ch
+ extern int ext3_sync_file (struct file *, struct dentry *, int);
+
+ /* ialloc.c */
+-extern struct inode * ext3_new_inode (handle_t *, const struct inode *, int);
++extern struct inode * ext3_new_inode (handle_t *, const struct inode *, int,
++ unsigned long);
+ extern void ext3_free_inode (handle_t *, struct inode *);
+ extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
+ extern unsigned long ext3_count_free_inodes (struct super_block *);
+@@ -712,4 +714,5 @@ extern struct inode_operations ext3_fast
+
+ #endif /* __KERNEL__ */
+
++#define EXT3_IOC_CREATE_INUM _IOW('f', 5, long)
+ #endif /* _LINUX_EXT3_FS_H */
+
+_
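From user space, the EXT3_IOC_CREATE_INUM handler above is driven by issuing the ioctl on an open directory with the desired inode number as the argument; the handler creates a file named after that number inside the directory, or fails with EEXIST if the name or the inode is already taken. A hedged sketch of the call; the path and inode number are made up.

/* Userspace sketch: request a file with a specific inode number. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define EXT3_IOC_CREATE_INUM _IOW('f', 5, long)	/* matches the header hunk */

int main(void)
{
	long wanted = 12345;			/* hypothetical inode number */
	int fd = open("/mnt/ext3/somedir", O_RDONLY);

	if (fd < 0 || ioctl(fd, EXT3_IOC_CREATE_INUM, wanted) < 0)
		perror("EXT3_IOC_CREATE_INUM");
	if (fd >= 0)
		close(fd);
	return 0;
}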
--- /dev/null
+# This is a BitKeeper generated patch for the following project:
+# Project Name: Linux kernel tree
+# This patch format is intended for GNU patch command version 2.5 or higher.
+# This patch includes the following deltas:
+# ChangeSet 1.775 -> 1.783
+# fs/ext3/namei.c 1.2 -> 1.6
+# include/linux/ext3_fs.h 1.4 -> 1.9
+# fs/ext2/inode.c 1.15 -> 1.16
+# lib/rbtree.c 1.2 -> 1.3
+# fs/ext3/Makefile 1.2 -> 1.3
+# include/linux/ext3_jbd.h 1.2 -> 1.3
+# include/linux/ext2_fs.h 1.6 -> 1.9
+# include/linux/ext2_fs_sb.h 1.1 -> 1.2
+# include/linux/rbtree.h 1.1 -> 1.2
+# include/linux/ext2_fs_i.h 1.4 -> 1.5
+# fs/ext3/ialloc.c 1.4 -> 1.5
+# fs/ext2/ialloc.c 1.8 -> 1.10
+# fs/ext3/dir.c 1.1 -> 1.3
+# fs/ext2/super.c 1.9 -> 1.13
+# fs/ext3/inode.c 1.10 -> 1.11
+# fs/ext3/super.c 1.7 -> 1.12
+# include/linux/ext3_fs_sb.h 1.2 -> 1.4
+# fs/ext3/file.c 1.3 -> 1.4
+# (new) -> 1.1 fs/ext3/hash.c
+#
+# The following is the BitKeeper ChangeSet Log
+# --------------------------------------------
+# 02/11/06 tytso@snap.thunk.org 1.776
+# Port patch-ext3-dxdir-2.4.19-4 to 2.4.20-rc1
+# --------------------------------------------
+# 02/11/07 tytso@snap.thunk.org 1.777
+# Add '.' and '..' entries to be returned by readdir of htree directories
+#
+# This patch from Chris Li adds '.' and '..' to the rbtree so that they
+# are properly returned by readdir.
+# --------------------------------------------
+# 02/11/07 tytso@snap.thunk.org 1.778
+# Check for failed kmalloc() in ext3_htree_store_dirent()
+#
+# This patch checks for a failed kmalloc() in ext3_htree_store_dirent(),
+# and passes the error up to its caller, ext3_htree_fill_tree().
+# --------------------------------------------
+# 02/11/07 tytso@snap.thunk.org 1.779
+# Fix ext3 htree rename bug.
+#
+# This fixes an ext3 htree bug pointed out by Christopher Li; if
+# adding the new name to the directory causes a split, this can cause
+# the directory entry containing the old name to move to another
+# block, and then the removal of the old name will fail.
+# --------------------------------------------
+# 02/11/07 tytso@snap.thunk.org 1.780
+# Default mount options from superblock for ext2/3 filesystems
+#
+# This patch adds support for default mount options to be stored in the
+# superblock, so they don't have to be specified on the mount command line
+# (or in /etc/fstab). While I was in the code, I also cleaned up the
+# handling of how mount options are processed in the ext2 and ext3
+# filesystems.
+#
+# Most mount options are now processed *after* the superblock has been
+# read in. This allows for a much cleaner handling of those default mount
+# option parameters that were already stored in the superblock: the
+# resuid, resgid, and s_errors fields were handled using some fairly gross
+# special cases. Now the only mount option which is processed first is
+# the sb option, which specifies the location of the superblock. This
+# allows the handling of all of the default mount parameters to be much
+# more cleanly and more generally handled.
+#
+# This does change the behaviour from earlier kernels, in that if the sb
+# mount option is specified, it must be specified *first*. However, this
+# option is rarely used, and if it is, it generally is specified first, so
+# this seems to be a reasonable restriction.
+# --------------------------------------------
+# 02/11/07 tytso@snap.thunk.org 1.781
+# Ext2/3 forward compatibility: on-line resizing
+#
+# This patch allows forward compatibility with future filesystems which
+# are dynamically grown by using an alternate algorithm for storing the
+# block group descriptors. It's also a bit more efficient, in that it
+# uses just a little bit less disk space. Currently, the ext2 filesystem
+# format requires either relocating the inode table, or reserving space in
+# before doing the on-line resize. The new scheme, which is documented in
+# "Planned Extensions to the Ext2/3 Filesystem", by Stephen Tweedie and I
+# (see: http://e2fsprogs.sourceforge.net/extensions-ext23)
+# --------------------------------------------
+# 02/11/07 tytso@snap.thunk.org 1.782
+# Ext2/3 forward compatibility: inode size
+#
+# This patch allows filesystems with expanded inodes to be mounted.
+# (compatibility feature flags will be used to control whether or not the
+# filesystem should be mounted in case the new inode fields will result in
+# compatibility issues). This allows for future compatibility with newer
+# versions of ext2fs.
+# --------------------------------------------
+# 02/11/07 tytso@snap.thunk.org 1.783
+# Orlov block allocator for ext2/3
+#
+# This is Al's implementation of the Orlov block allocator for ext2/3.
+#
+# At least doubles the throughput for the traverse-a-kernel-tree
+# test and is well tested.
+# --------------------------------------------
+#
+diff -Nru a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
+--- a/fs/ext2/ialloc.c Thu Nov 7 11:58:05 2002
++++ b/fs/ext2/ialloc.c Thu Nov 7 11:58:05 2002
+@@ -17,6 +17,7 @@
+ #include <linux/ext2_fs.h>
+ #include <linux/locks.h>
+ #include <linux/quotaops.h>
++#include <linux/random.h>
+
+
+ /*
+@@ -228,8 +229,7 @@
+ * For other inodes, search forward from the parent directory\'s block
+ * group to find a free inode.
+ */
+-
+-static int find_group_dir(struct super_block *sb, int parent_group)
++static int find_group_dir(struct super_block *sb, const struct inode *parent)
+ {
+ struct ext2_super_block * es = sb->u.ext2_sb.s_es;
+ int ngroups = sb->u.ext2_sb.s_groups_count;
+@@ -262,8 +262,140 @@
+ return best_group;
+ }
+
+-static int find_group_other(struct super_block *sb, int parent_group)
++/*
++ * Orlov's allocator for directories.
++ *
++ * We always try to spread first-level directories.
++ *
++ * If there are blockgroups with both free inodes and free blocks counts
++ * not worse than average we return one with smallest directory count.
++ * Otherwise we simply return a random group.
++ *
++ * For the rest rules look so:
++ *
++ * It's OK to put directory into a group unless
++ * it has too many directories already (max_dirs) or
++ * it has too few free inodes left (min_inodes) or
++ * it has too few free blocks left (min_blocks) or
++ * it's already running too large debt (max_debt).
++ * Parent's group is preferred; if it doesn't satisfy these
++ * conditions we search cyclically through the rest. If none
++ * of the groups look good we just look for a group with more
++ * free inodes than average (starting at parent's group).
++ *
++ * Debt is incremented each time we allocate a directory and decremented
++ * when we allocate an inode, within 0--255.
++ */
++
++#define INODE_COST 64
++#define BLOCK_COST 256
++
++static int find_group_orlov(struct super_block *sb, const struct inode *parent)
+ {
++ int parent_group = parent->u.ext2_i.i_block_group;
++ struct ext2_sb_info *sbi = EXT2_SB(sb);
++ struct ext2_super_block *es = sbi->s_es;
++ int ngroups = sbi->s_groups_count;
++ int inodes_per_group = EXT2_INODES_PER_GROUP(sb);
++ int avefreei = le32_to_cpu(es->s_free_inodes_count) / ngroups;
++ int avefreeb = le32_to_cpu(es->s_free_blocks_count) / ngroups;
++ int blocks_per_dir;
++ int ndirs = sbi->s_dir_count;
++ int max_debt, max_dirs, min_blocks, min_inodes;
++ int group = -1, i;
++ struct ext2_group_desc *desc;
++ struct buffer_head *bh;
++
++ if ((parent == sb->s_root->d_inode) ||
++ (parent->i_flags & EXT2_TOPDIR_FL)) {
++ struct ext2_group_desc *best_desc = NULL;
++ struct buffer_head *best_bh = NULL;
++ int best_ndir = inodes_per_group;
++ int best_group = -1;
++
++ get_random_bytes(&group, sizeof(group));
++ parent_group = (unsigned)group % ngroups;
++ for (i = 0; i < ngroups; i++) {
++ group = (parent_group + i) % ngroups;
++ desc = ext2_get_group_desc (sb, group, &bh);
++ if (!desc || !desc->bg_free_inodes_count)
++ continue;
++ if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
++ continue;
++ if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
++ continue;
++ if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
++ continue;
++ best_group = group;
++ best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
++ best_desc = desc;
++ best_bh = bh;
++ }
++ if (best_group >= 0) {
++ desc = best_desc;
++ bh = best_bh;
++ group = best_group;
++ goto found;
++ }
++ goto fallback;
++ }
++
++ blocks_per_dir = (le32_to_cpu(es->s_blocks_count) -
++ le32_to_cpu(es->s_free_blocks_count)) / ndirs;
++
++ max_dirs = ndirs / ngroups + inodes_per_group / 16;
++ min_inodes = avefreei - inodes_per_group / 4;
++ min_blocks = avefreeb - EXT2_BLOCKS_PER_GROUP(sb) / 4;
++
++ max_debt = EXT2_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, BLOCK_COST);
++ if (max_debt * INODE_COST > inodes_per_group)
++ max_debt = inodes_per_group / INODE_COST;
++ if (max_debt > 255)
++ max_debt = 255;
++ if (max_debt == 0)
++ max_debt = 1;
++
++ for (i = 0; i < ngroups; i++) {
++ group = (parent_group + i) % ngroups;
++ desc = ext2_get_group_desc (sb, group, &bh);
++ if (!desc || !desc->bg_free_inodes_count)
++ continue;
++ if (sbi->s_debts[group] >= max_debt)
++ continue;
++ if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
++ continue;
++ if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
++ continue;
++ if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
++ continue;
++ goto found;
++ }
++
++fallback:
++ for (i = 0; i < ngroups; i++) {
++ group = (parent_group + i) % ngroups;
++ desc = ext2_get_group_desc (sb, group, &bh);
++ if (!desc || !desc->bg_free_inodes_count)
++ continue;
++ if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
++ goto found;
++ }
++
++ return -1;
++
++found:
++ desc->bg_free_inodes_count =
++ cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) - 1);
++ desc->bg_used_dirs_count =
++ cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) + 1);
++ sbi->s_dir_count++;
++ mark_buffer_dirty(bh);
++ return group;
++}
++
++static int find_group_other(struct super_block *sb, const struct inode *parent)
++{
++ int parent_group = parent->u.ext2_i.i_block_group;
+ int ngroups = sb->u.ext2_sb.s_groups_count;
+ struct ext2_group_desc *desc;
+ struct buffer_head *bh;
+@@ -331,10 +463,13 @@
+ lock_super (sb);
+ es = sb->u.ext2_sb.s_es;
+ repeat:
+- if (S_ISDIR(mode))
+- group = find_group_dir(sb, dir->u.ext2_i.i_block_group);
+- else
+- group = find_group_other(sb, dir->u.ext2_i.i_block_group);
++ if (S_ISDIR(mode)) {
++ if (test_opt (sb, OLDALLOC))
++ group = find_group_dir(sb, dir);
++ else
++ group = find_group_orlov(sb, dir);
++ } else
++ group = find_group_other(sb, dir);
+
+ err = -ENOSPC;
+ if (group == -1)
+@@ -368,6 +503,15 @@
+
+ es->s_free_inodes_count =
+ cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
++
++ if (S_ISDIR(mode)) {
++ if (EXT2_SB(sb)->s_debts[group] < 255)
++ EXT2_SB(sb)->s_debts[group]++;
++ } else {
++ if (EXT2_SB(sb)->s_debts[group])
++ EXT2_SB(sb)->s_debts[group]--;
++ }
++
+ mark_buffer_dirty(sb->u.ext2_sb.s_sbh);
+ sb->s_dirt = 1;
+ inode->i_uid = current->fsuid;
+@@ -385,7 +529,7 @@
+ inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
+ inode->i_blocks = 0;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+- inode->u.ext2_i.i_new_inode = 1;
++ inode->u.ext2_i.i_state = EXT2_STATE_NEW;
+ inode->u.ext2_i.i_flags = dir->u.ext2_i.i_flags;
+ if (S_ISLNK(mode))
+ inode->u.ext2_i.i_flags &= ~(EXT2_IMMUTABLE_FL|EXT2_APPEND_FL);
+@@ -469,6 +613,21 @@
+ #else
+ return le32_to_cpu(sb->u.ext2_sb.s_es->s_free_inodes_count);
+ #endif
++}
++
++/* Called at mount-time, super-block is locked */
++unsigned long ext2_count_dirs (struct super_block * sb)
++{
++ unsigned long count = 0;
++ int i;
++
++ for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
++ struct ext2_group_desc *gdp = ext2_get_group_desc (sb, i, NULL);
++ if (!gdp)
++ continue;
++ count += le16_to_cpu(gdp->bg_used_dirs_count);
++ }
++ return count;
+ }
+
+ #ifdef CONFIG_EXT2_CHECK
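The rules in the find_group_orlov() comment reduce to four per-group thresholds (max_dirs, min_inodes, min_blocks, max_debt). A standalone sketch of how they come out for a hypothetical filesystem shape, mirroring the expressions in the hunk above; none of the numbers come from a real superblock.

/* Sketch of the Orlov threshold computation, hypothetical numbers only. */
#include <stdio.h>

#define INODE_COST 64
#define BLOCK_COST 256

int main(void)
{
	int ngroups          = 100;
	int inodes_per_group = 16384;
	int blocks_per_group = 32768;
	int total_blocks     = 3200000;
	int free_inodes      = 1200000, free_blocks = 2000000, ndirs = 5000;

	int avefreei = free_inodes / ngroups;
	int avefreeb = free_blocks / ngroups;
	int blocks_per_dir = (total_blocks - free_blocks) / ndirs;

	int max_dirs   = ndirs / ngroups + inodes_per_group / 16;
	int min_inodes = avefreei - inodes_per_group / 4;
	int min_blocks = avefreeb - blocks_per_group / 4;
	int max_debt   = blocks_per_group /
			 (blocks_per_dir > BLOCK_COST ? blocks_per_dir : BLOCK_COST);

	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt == 0)
		max_debt = 1;

	printf("max_dirs=%d min_inodes=%d min_blocks=%d max_debt=%d\n",
	       max_dirs, min_inodes, min_blocks, max_debt);
	return 0;
}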
+diff -Nru a/fs/ext2/inode.c b/fs/ext2/inode.c
+--- a/fs/ext2/inode.c Thu Nov 7 11:58:05 2002
++++ b/fs/ext2/inode.c Thu Nov 7 11:58:05 2002
+@@ -955,6 +955,7 @@
+ else
+ inode->u.ext2_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
+ inode->i_generation = le32_to_cpu(raw_inode->i_generation);
++ inode->u.ext2_i.i_state = 0;
+ inode->u.ext2_i.i_prealloc_count = 0;
+ inode->u.ext2_i.i_block_group = block_group;
+
+@@ -1061,6 +1062,11 @@
+ offset &= EXT2_BLOCK_SIZE(inode->i_sb) - 1;
+ raw_inode = (struct ext2_inode *) (bh->b_data + offset);
+
++ /* For fields not tracked in the in-memory inode,
++ * initialise them to zero for new inodes. */
++ if (inode->u.ext2_i.i_state & EXT2_STATE_NEW)
++ memset(raw_inode, 0, EXT2_SB(inode->i_sb)->s_inode_size);
++
+ raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+ if(!(test_opt(inode->i_sb, NO_UID32))) {
+ raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
+@@ -1133,6 +1139,7 @@
+ err = -EIO;
+ }
+ }
++ inode->u.ext2_i.i_state &= ~EXT2_STATE_NEW;
+ brelse (bh);
+ return err;
+ }
+diff -Nru a/fs/ext2/super.c b/fs/ext2/super.c
+--- a/fs/ext2/super.c Thu Nov 7 11:58:05 2002
++++ b/fs/ext2/super.c Thu Nov 7 11:58:05 2002
+@@ -48,16 +48,12 @@
+ va_start (args, fmt);
+ vsprintf (error_buf, fmt, args);
+ va_end (args);
+- if (test_opt (sb, ERRORS_PANIC) ||
+- (le16_to_cpu(sb->u.ext2_sb.s_es->s_errors) == EXT2_ERRORS_PANIC &&
+- !test_opt (sb, ERRORS_CONT) && !test_opt (sb, ERRORS_RO)))
++ if (test_opt (sb, ERRORS_PANIC))
+ panic ("EXT2-fs panic (device %s): %s: %s\n",
+ bdevname(sb->s_dev), function, error_buf);
+ printk (KERN_CRIT "EXT2-fs error (device %s): %s: %s\n",
+ bdevname(sb->s_dev), function, error_buf);
+- if (test_opt (sb, ERRORS_RO) ||
+- (le16_to_cpu(sb->u.ext2_sb.s_es->s_errors) == EXT2_ERRORS_RO &&
+- !test_opt (sb, ERRORS_CONT) && !test_opt (sb, ERRORS_PANIC))) {
++ if (test_opt (sb, ERRORS_RO)) {
+ printk ("Remounting filesystem read-only\n");
+ sb->s_flags |= MS_RDONLY;
+ }
+@@ -136,6 +132,7 @@
+ if (sb->u.ext2_sb.s_group_desc[i])
+ brelse (sb->u.ext2_sb.s_group_desc[i]);
+ kfree(sb->u.ext2_sb.s_group_desc);
++ kfree(sb->u.ext2_sb.s_debts);
+ for (i = 0; i < EXT2_MAX_GROUP_LOADED; i++)
+ if (sb->u.ext2_sb.s_inode_bitmap[i])
+ brelse (sb->u.ext2_sb.s_inode_bitmap[i]);
+@@ -158,12 +155,61 @@
+ remount_fs: ext2_remount,
+ };
+
++static unsigned long get_sb_block(void **data)
++{
++ unsigned long sb_block;
++ char *options = (char *) *data;
++
++ if (!options || strncmp(options, "sb=", 3) != 0)
++ return 1; /* Default location */
++ options += 3;
++ sb_block = simple_strtoul(options, &options, 0);
++ if (*options && *options != ',') {
++ printk("EXT2-fs: Invalid sb specification: %s\n",
++ (char *) *data);
++ return 1;
++ }
++ if (*options == ',')
++ options++;
++ *data = (void *) options;
++ return sb_block;
++}
++
++static int want_value(char *value, char *option)
++{
++ if (!value || !*value) {
++ printk(KERN_NOTICE "EXT2-fs: the %s option needs an argument\n",
++ option);
++ return -1;
++ }
++ return 0;
++}
++
++static int want_null_value(char *value, char *option)
++{
++ if (*value) {
++ printk(KERN_NOTICE "EXT2-fs: Invalid %s argument: %s\n",
++ option, value);
++ return -1;
++ }
++ return 0;
++}
++
++static int want_numeric(char *value, char *option, unsigned long *number)
++{
++ if (want_value(value, option))
++ return -1;
++ *number = simple_strtoul(value, &value, 0);
++ if (want_null_value(value, option))
++ return -1;
++ return 0;
++}
++
+ /*
+ * This function has been shamelessly adapted from the msdos fs
+ */
+-static int parse_options (char * options, unsigned long * sb_block,
+- unsigned short *resuid, unsigned short * resgid,
+- unsigned long * mount_options)
++static int parse_options (char * options,
++ struct ext2_sb_info *sbi)
+ {
+ char * this_char;
+ char * value;
+@@ -176,22 +222,22 @@
+ if ((value = strchr (this_char, '=')) != NULL)
+ *value++ = 0;
+ if (!strcmp (this_char, "bsddf"))
+- clear_opt (*mount_options, MINIX_DF);
++ clear_opt (sbi->s_mount_opt, MINIX_DF);
+ else if (!strcmp (this_char, "nouid32")) {
+- set_opt (*mount_options, NO_UID32);
++ set_opt (sbi->s_mount_opt, NO_UID32);
+ }
+ else if (!strcmp (this_char, "check")) {
+ if (!value || !*value || !strcmp (value, "none"))
+- clear_opt (*mount_options, CHECK);
++ clear_opt (sbi->s_mount_opt, CHECK);
+ else
+ #ifdef CONFIG_EXT2_CHECK
+- set_opt (*mount_options, CHECK);
++ set_opt (sbi->s_mount_opt, CHECK);
+ #else
+ printk("EXT2 Check option not supported\n");
+ #endif
+ }
+ else if (!strcmp (this_char, "debug"))
+- set_opt (*mount_options, DEBUG);
++ set_opt (sbi->s_mount_opt, DEBUG);
+ else if (!strcmp (this_char, "errors")) {
+ if (!value || !*value) {
+ printk ("EXT2-fs: the errors option requires "
+@@ -199,19 +245,19 @@
+ return 0;
+ }
+ if (!strcmp (value, "continue")) {
+- clear_opt (*mount_options, ERRORS_RO);
+- clear_opt (*mount_options, ERRORS_PANIC);
+- set_opt (*mount_options, ERRORS_CONT);
++ clear_opt (sbi->s_mount_opt, ERRORS_RO);
++ clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
++ set_opt (sbi->s_mount_opt, ERRORS_CONT);
+ }
+ else if (!strcmp (value, "remount-ro")) {
+- clear_opt (*mount_options, ERRORS_CONT);
+- clear_opt (*mount_options, ERRORS_PANIC);
+- set_opt (*mount_options, ERRORS_RO);
++ clear_opt (sbi->s_mount_opt, ERRORS_CONT);
++ clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
++ set_opt (sbi->s_mount_opt, ERRORS_RO);
+ }
+ else if (!strcmp (value, "panic")) {
+- clear_opt (*mount_options, ERRORS_CONT);
+- clear_opt (*mount_options, ERRORS_RO);
+- set_opt (*mount_options, ERRORS_PANIC);
++ clear_opt (sbi->s_mount_opt, ERRORS_CONT);
++ clear_opt (sbi->s_mount_opt, ERRORS_RO);
++ set_opt (sbi->s_mount_opt, ERRORS_PANIC);
+ }
+ else {
+ printk ("EXT2-fs: Invalid errors option: %s\n",
+@@ -221,53 +267,30 @@
+ }
+ else if (!strcmp (this_char, "grpid") ||
+ !strcmp (this_char, "bsdgroups"))
+- set_opt (*mount_options, GRPID);
++ set_opt (sbi->s_mount_opt, GRPID);
+ else if (!strcmp (this_char, "minixdf"))
+- set_opt (*mount_options, MINIX_DF);
++ set_opt (sbi->s_mount_opt, MINIX_DF);
+ else if (!strcmp (this_char, "nocheck"))
+- clear_opt (*mount_options, CHECK);
++ clear_opt (sbi->s_mount_opt, CHECK);
+ else if (!strcmp (this_char, "nogrpid") ||
+ !strcmp (this_char, "sysvgroups"))
+- clear_opt (*mount_options, GRPID);
++ clear_opt (sbi->s_mount_opt, GRPID);
+ else if (!strcmp (this_char, "resgid")) {
+- if (!value || !*value) {
+- printk ("EXT2-fs: the resgid option requires "
+- "an argument\n");
++ unsigned long v;
++ if (want_numeric(value, "resgid", &v))
+ return 0;
+- }
+- *resgid = simple_strtoul (value, &value, 0);
+- if (*value) {
+- printk ("EXT2-fs: Invalid resgid option: %s\n",
+- value);
+- return 0;
+- }
++ sbi->s_resgid = v;
+ }
+ else if (!strcmp (this_char, "resuid")) {
+- if (!value || !*value) {
+- printk ("EXT2-fs: the resuid option requires "
+- "an argument");
++ unsigned long v;
++ if (want_numeric(value, "resuid", &v))
+ return 0;
+- }
+- *resuid = simple_strtoul (value, &value, 0);
+- if (*value) {
+- printk ("EXT2-fs: Invalid resuid option: %s\n",
+- value);
+- return 0;
+- }
+- }
+- else if (!strcmp (this_char, "sb")) {
+- if (!value || !*value) {
+- printk ("EXT2-fs: the sb option requires "
+- "an argument");
+- return 0;
+- }
+- *sb_block = simple_strtoul (value, &value, 0);
+- if (*value) {
+- printk ("EXT2-fs: Invalid sb option: %s\n",
+- value);
+- return 0;
+- }
++ sbi->s_resuid = v;
+ }
++ else if (!strcmp (this_char, "oldalloc"))
++ set_opt (sbi->s_mount_opt, OLDALLOC);
++ else if (!strcmp (this_char, "orlov"))
++ clear_opt (sbi->s_mount_opt, OLDALLOC);
+ /* Silently ignore the quota options */
+ else if (!strcmp (this_char, "grpquota")
+ || !strcmp (this_char, "noquota")
+@@ -397,17 +420,37 @@
+ return res;
+ }
+
++static unsigned long descriptor_loc(struct super_block *sb,
++ unsigned long logic_sb_block,
++ int nr)
++{
++ struct ext2_sb_info *sbi = EXT2_SB(sb);
++ unsigned long bg, first_data_block, first_meta_bg;
++ int has_super = 0;
++
++ first_data_block = le32_to_cpu(sbi->s_es->s_first_data_block);
++ first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
++
++ if (!EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_META_BG) ||
++ nr < first_meta_bg)
++ return (logic_sb_block + nr + 1);
++ bg = sbi->s_desc_per_block * nr;
++ if (ext2_bg_has_super(sb, bg))
++ has_super = 1;
++ return (first_data_block + has_super + (bg * sbi->s_blocks_per_group));
++}
++
+ struct super_block * ext2_read_super (struct super_block * sb, void * data,
+ int silent)
+ {
+ struct buffer_head * bh;
++ struct ext2_sb_info * sbi = EXT2_SB(sb);
+ struct ext2_super_block * es;
+- unsigned long sb_block = 1;
+- unsigned short resuid = EXT2_DEF_RESUID;
+- unsigned short resgid = EXT2_DEF_RESGID;
+- unsigned long logic_sb_block = 1;
+- unsigned long offset = 0;
++ unsigned long sb_block = get_sb_block(&data);
++ unsigned long block, logic_sb_block;
++ unsigned long offset;
+ kdev_t dev = sb->s_dev;
++ unsigned long def_mount_opts;
+ int blocksize = BLOCK_SIZE;
+ int db_count;
+ int i, j;
+@@ -423,12 +466,6 @@
+ if(blocksize < BLOCK_SIZE )
+ blocksize = BLOCK_SIZE;
+
+- sb->u.ext2_sb.s_mount_opt = 0;
+- if (!parse_options ((char *) data, &sb_block, &resuid, &resgid,
+- &sb->u.ext2_sb.s_mount_opt)) {
+- return NULL;
+- }
+-
+ if (set_blocksize(dev, blocksize) < 0) {
+ printk ("EXT2-fs: unable to set blocksize %d\n", blocksize);
+ return NULL;
+@@ -436,14 +473,11 @@
+ sb->s_blocksize = blocksize;
+
+ /*
+- * If the superblock doesn't start on a sector boundary,
+- * calculate the offset. FIXME(eric) this doesn't make sense
+- * that we would have to do this.
++ * If the superblock doesn't start on a hardware sector boundary,
++ * calculate the offset.
+ */
+- if (blocksize != BLOCK_SIZE) {
+- logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
+- offset = (sb_block*BLOCK_SIZE) % blocksize;
+- }
++ logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
++ offset = (sb_block*BLOCK_SIZE) % blocksize;
+
+ if (!(bh = sb_bread(sb, logic_sb_block))) {
+ printk ("EXT2-fs: unable to read superblock\n");
+@@ -462,6 +496,27 @@
+ bdevname(dev));
+ goto failed_mount;
+ }
++
++ /* Set defaults before we parse the mount options */
++ def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
++ if (def_mount_opts & EXT2_DEFM_DEBUG)
++ set_opt(sbi->s_mount_opt, DEBUG);
++ if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
++ set_opt(sbi->s_mount_opt, GRPID);
++ if (def_mount_opts & EXT2_DEFM_UID16)
++ set_opt(sbi->s_mount_opt, NO_UID32);
++
++ if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
++ set_opt(sbi->s_mount_opt, ERRORS_PANIC);
++ else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_RO)
++ set_opt(sbi->s_mount_opt, ERRORS_RO);
++
++ sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
++ sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
++
++ if (!parse_options ((char *) data, sbi))
++ goto failed_mount;
++
+ if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
+ (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
+ EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
+@@ -522,14 +577,16 @@
+ }
+
+ if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
+- sb->u.ext2_sb.s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
+- sb->u.ext2_sb.s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
++ sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
++ sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
+ } else {
+- sb->u.ext2_sb.s_inode_size = le16_to_cpu(es->s_inode_size);
+- sb->u.ext2_sb.s_first_ino = le32_to_cpu(es->s_first_ino);
+- if (sb->u.ext2_sb.s_inode_size != EXT2_GOOD_OLD_INODE_SIZE) {
++ sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
++ sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
++ if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
++ (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
++ (sbi->s_inode_size > blocksize)) {
+ printk ("EXT2-fs: unsupported inode size: %d\n",
+- sb->u.ext2_sb.s_inode_size);
++ sbi->s_inode_size);
+ goto failed_mount;
+ }
+ }
+@@ -550,14 +607,6 @@
+ sb->u.ext2_sb.s_desc_per_block = sb->s_blocksize /
+ sizeof (struct ext2_group_desc);
+ sb->u.ext2_sb.s_sbh = bh;
+- if (resuid != EXT2_DEF_RESUID)
+- sb->u.ext2_sb.s_resuid = resuid;
+- else
+- sb->u.ext2_sb.s_resuid = le16_to_cpu(es->s_def_resuid);
+- if (resgid != EXT2_DEF_RESGID)
+- sb->u.ext2_sb.s_resgid = resgid;
+- else
+- sb->u.ext2_sb.s_resgid = le16_to_cpu(es->s_def_resgid);
+ sb->u.ext2_sb.s_mount_state = le16_to_cpu(es->s_state);
+ sb->u.ext2_sb.s_addr_per_block_bits =
+ log2 (EXT2_ADDR_PER_BLOCK(sb));
+@@ -610,14 +659,22 @@
+ printk ("EXT2-fs: not enough memory\n");
+ goto failed_mount;
+ }
++ sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
++ GFP_KERNEL);
++ if (!sbi->s_debts) {
++ printk ("EXT2-fs: not enough memory\n");
++ goto failed_mount_group_desc;
++ }
++ memset(sbi->s_debts, 0, sbi->s_groups_count * sizeof(*sbi->s_debts));
+ for (i = 0; i < db_count; i++) {
+- sb->u.ext2_sb.s_group_desc[i] = sb_bread(sb, logic_sb_block + i + 1);
+- if (!sb->u.ext2_sb.s_group_desc[i]) {
++ block = descriptor_loc(sb, logic_sb_block, i);
++ sbi->s_group_desc[i] = sb_bread(sb, block);
++ if (!sbi->s_group_desc[i]) {
+ for (j = 0; j < i; j++)
+- brelse (sb->u.ext2_sb.s_group_desc[j]);
+- kfree(sb->u.ext2_sb.s_group_desc);
++ brelse (sbi->s_group_desc[j]);
++ kfree(sbi->s_group_desc);
+ printk ("EXT2-fs: unable to read group descriptors\n");
+- goto failed_mount;
++ goto failed_mount_group_desc;
+ }
+ }
+ if (!ext2_check_descriptors (sb)) {
+@@ -634,6 +691,7 @@
+ sb->u.ext2_sb.s_loaded_inode_bitmaps = 0;
+ sb->u.ext2_sb.s_loaded_block_bitmaps = 0;
+ sb->u.ext2_sb.s_gdb_count = db_count;
++ sb->u.ext2_sb.s_dir_count = ext2_count_dirs(sb);
+ /*
+ * set up enough so that it can read an inode
+ */
+@@ -654,7 +712,10 @@
+ failed_mount2:
+ for (i = 0; i < db_count; i++)
+ brelse(sb->u.ext2_sb.s_group_desc[i]);
++failed_mount_group_desc:
+ kfree(sb->u.ext2_sb.s_group_desc);
++ if (sb->u.ext2_sb.s_debts)
++ kfree(sb->u.ext2_sb.s_debts);
+ failed_mount:
+ brelse(bh);
+ return NULL;
+@@ -709,24 +770,16 @@
+
+ int ext2_remount (struct super_block * sb, int * flags, char * data)
+ {
++ struct ext2_sb_info * sbi = EXT2_SB(sb);
+ struct ext2_super_block * es;
+- unsigned short resuid = sb->u.ext2_sb.s_resuid;
+- unsigned short resgid = sb->u.ext2_sb.s_resgid;
+- unsigned long new_mount_opt;
+- unsigned long tmp;
+
+ /*
+ * Allow the "check" option to be passed as a remount option.
+ */
+- new_mount_opt = sb->u.ext2_sb.s_mount_opt;
+- if (!parse_options (data, &tmp, &resuid, &resgid,
+- &new_mount_opt))
++ if (!parse_options (data, sbi))
+ return -EINVAL;
+
+- sb->u.ext2_sb.s_mount_opt = new_mount_opt;
+- sb->u.ext2_sb.s_resuid = resuid;
+- sb->u.ext2_sb.s_resgid = resgid;
+- es = sb->u.ext2_sb.s_es;
++ es = sbi->s_es;
+ if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+ return 0;
+ if (*flags & MS_RDONLY) {
+diff -Nru a/fs/ext3/Makefile b/fs/ext3/Makefile
+--- a/fs/ext3/Makefile Thu Nov 7 11:58:05 2002
++++ b/fs/ext3/Makefile Thu Nov 7 11:58:05 2002
+@@ -10,7 +10,7 @@
+ O_TARGET := ext3.o
+
+ obj-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+- ioctl.o namei.o super.o symlink.o
++ ioctl.o namei.o super.o symlink.o hash.o
+ obj-m := $(O_TARGET)
+
+ include $(TOPDIR)/Rules.make
+diff -Nru a/fs/ext3/dir.c b/fs/ext3/dir.c
+--- a/fs/ext3/dir.c Thu Nov 7 11:58:05 2002
++++ b/fs/ext3/dir.c Thu Nov 7 11:58:05 2002
+@@ -21,12 +21,16 @@
+ #include <linux/fs.h>
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
++#include <linux/slab.h>
++#include <linux/rbtree.h>
+
+ static unsigned char ext3_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+ };
+
+ static int ext3_readdir(struct file *, void *, filldir_t);
++static int ext3_dx_readdir(struct file * filp,
++ void * dirent, filldir_t filldir);
+
+ struct file_operations ext3_dir_operations = {
+ read: generic_read_dir,
+@@ -35,6 +39,17 @@
+ fsync: ext3_sync_file, /* BKL held */
+ };
+
++
++static unsigned char get_dtype(struct super_block *sb, int filetype)
++{
++ if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE) ||
++ (filetype >= EXT3_FT_MAX))
++ return DT_UNKNOWN;
++
++ return (ext3_filetype_table[filetype]);
++}
++
++
+ int ext3_check_dir_entry (const char * function, struct inode * dir,
+ struct ext3_dir_entry_2 * de,
+ struct buffer_head * bh,
+@@ -79,6 +94,16 @@
+
+ sb = inode->i_sb;
+
++ if (is_dx(inode)) {
++ err = ext3_dx_readdir(filp, dirent, filldir);
++ if (err != ERR_BAD_DX_DIR)
++ return err;
++ /*
++ * We don't set the inode dirty flag since it's not
++ * critical that it get flushed back to the disk.
++ */
++ EXT3_I(filp->f_dentry->d_inode)->i_flags &= ~EXT3_INDEX_FL;
++ }
+ stored = 0;
+ bh = NULL;
+ offset = filp->f_pos & (sb->s_blocksize - 1);
+@@ -162,18 +187,12 @@
+ * during the copy operation.
+ */
+ unsigned long version = filp->f_version;
+- unsigned char d_type = DT_UNKNOWN;
+
+- if (EXT3_HAS_INCOMPAT_FEATURE(sb,
+- EXT3_FEATURE_INCOMPAT_FILETYPE)
+- && de->file_type < EXT3_FT_MAX)
+- d_type =
+- ext3_filetype_table[de->file_type];
+ error = filldir(dirent, de->name,
+ de->name_len,
+ filp->f_pos,
+ le32_to_cpu(de->inode),
+- d_type);
++ get_dtype(sb, de->file_type));
+ if (error)
+ break;
+ if (version != filp->f_version)
+@@ -188,3 +207,272 @@
+ UPDATE_ATIME(inode);
+ return 0;
+ }
++
++#ifdef CONFIG_EXT3_INDEX
++/*
++ * These functions convert from the major/minor hash to an f_pos
++ * value.
++ *
++ * Currently we only use the major hash number. This is unfortunate, but
++ * on 32-bit machines, the same VFS interface is used for lseek and
++ * llseek, so if we use the 64 bit offset, then the 32-bit versions of
++ * lseek/telldir/seekdir will blow out spectacularly, and from within
++ * the ext2 low-level routine, we don't know if we're being called by
++ * a 64-bit version of the system call or the 32-bit version of the
++ * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
++ * cookie. Sigh.
++ */
++#define hash2pos(major, minor) (major >> 1)
++#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
++#define pos2min_hash(pos) (0)
++
++/*
++ * This structure holds the nodes of the red-black tree used to store
++ * the directory entry in hash order.
++ */
++struct fname {
++ __u32 hash;
++ __u32 minor_hash;
++ rb_node_t rb_hash;
++ struct fname *next;
++ __u32 inode;
++ __u8 name_len;
++ __u8 file_type;
++ char name[0];
++};
++
++/*
++ * This function implements a non-recursive way of freeing all of the
++ * nodes in the red-black tree.
++ */
++static void free_rb_tree_fname(rb_root_t *root)
++{
++ rb_node_t *n = root->rb_node;
++ rb_node_t *parent;
++ struct fname *fname;
++
++ while (n) {
++ /* Do the node's children first */
++ if ((n)->rb_left) {
++ n = n->rb_left;
++ continue;
++ }
++ if (n->rb_right) {
++ n = n->rb_right;
++ continue;
++ }
++ /*
++ * The node has no children; free it, and then zero
++ * out parent's link to it. Finally go to the
++ * beginning of the loop and try to free the parent
++ * node.
++ */
++ parent = n->rb_parent;
++ fname = rb_entry(n, struct fname, rb_hash);
++ kfree(fname);
++ if (!parent)
++ root->rb_node = 0;
++ else if (parent->rb_left == n)
++ parent->rb_left = 0;
++ else if (parent->rb_right == n)
++ parent->rb_right = 0;
++ n = parent;
++ }
++ root->rb_node = 0;
++}
++
++
++struct dir_private_info *create_dir_info(loff_t pos)
++{
++ struct dir_private_info *p;
++
++ p = kmalloc(sizeof(struct dir_private_info), GFP_KERNEL);
++ if (!p)
++ return NULL;
++ p->root.rb_node = 0;
++ p->curr_node = 0;
++ p->extra_fname = 0;
++ p->last_pos = 0;
++ p->curr_hash = pos2maj_hash(pos);
++ p->curr_minor_hash = pos2min_hash(pos);
++ p->next_hash = 0;
++ return p;
++}
++
++void ext3_htree_free_dir_info(struct dir_private_info *p)
++{
++ free_rb_tree_fname(&p->root);
++ kfree(p);
++}
++
++/*
++ * Given a directory entry, enter it into the fname rb tree.
++ */
++int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
++ __u32 minor_hash,
++ struct ext3_dir_entry_2 *dirent)
++{
++ rb_node_t **p, *parent = NULL;
++ struct fname * fname, *new_fn;
++ struct dir_private_info *info;
++ int len;
++
++ info = (struct dir_private_info *) dir_file->private_data;
++ p = &info->root.rb_node;
++
++ /* Create and allocate the fname structure */
++ len = sizeof(struct fname) + dirent->name_len + 1;
++ new_fn = kmalloc(len, GFP_KERNEL);
++ if (!new_fn)
++ return -ENOMEM;
++ memset(new_fn, 0, len);
++ new_fn->hash = hash;
++ new_fn->minor_hash = minor_hash;
++ new_fn->inode = le32_to_cpu(dirent->inode);
++ new_fn->name_len = dirent->name_len;
++ new_fn->file_type = dirent->file_type;
++ memcpy(new_fn->name, dirent->name, dirent->name_len);
++ new_fn->name[dirent->name_len] = 0;
++
++ while (*p) {
++ parent = *p;
++ fname = rb_entry(parent, struct fname, rb_hash);
++
++ /*
++ * If the hash and minor hash match up, then we put
++ * them on a linked list. This rarely happens...
++ */
++ if ((new_fn->hash == fname->hash) &&
++ (new_fn->minor_hash == fname->minor_hash)) {
++ new_fn->next = fname->next;
++ fname->next = new_fn;
++ return 0;
++ }
++
++ if (new_fn->hash < fname->hash)
++ p = &(*p)->rb_left;
++ else if (new_fn->hash > fname->hash)
++ p = &(*p)->rb_right;
++ else if (new_fn->minor_hash < fname->minor_hash)
++ p = &(*p)->rb_left;
++ else /* if (new_fn->minor_hash > fname->minor_hash) */
++ p = &(*p)->rb_right;
++ }
++
++ rb_link_node(&new_fn->rb_hash, parent, p);
++ rb_insert_color(&new_fn->rb_hash, &info->root);
++ return 0;
++}
++
++
++
++/*
++ * This is a helper function for ext3_dx_readdir. It calls filldir
++ * for all entries on the fname linked list. (Normally there is only
++ * one entry on the linked list, unless there are 62 bit hash collisions.)
++ */
++static int call_filldir(struct file * filp, void * dirent,
++ filldir_t filldir, struct fname *fname)
++{
++ struct dir_private_info *info = filp->private_data;
++ loff_t curr_pos;
++ struct inode *inode = filp->f_dentry->d_inode;
++ struct super_block * sb;
++ int error;
++
++ sb = inode->i_sb;
++
++ if (!fname) {
++ printk("call_filldir: called with null fname?!?\n");
++ return 0;
++ }
++ curr_pos = hash2pos(fname->hash, fname->minor_hash);
++ while (fname) {
++ error = filldir(dirent, fname->name,
++ fname->name_len, curr_pos,
++ fname->inode,
++ get_dtype(sb, fname->file_type));
++ if (error) {
++ filp->f_pos = curr_pos;
++ info->extra_fname = fname->next;
++ return error;
++ }
++ fname = fname->next;
++ }
++ return 0;
++}
++
++static int ext3_dx_readdir(struct file * filp,
++ void * dirent, filldir_t filldir)
++{
++ struct dir_private_info *info = filp->private_data;
++ struct inode *inode = filp->f_dentry->d_inode;
++ struct fname *fname;
++ int ret;
++
++ if (!info) {
++ info = create_dir_info(filp->f_pos);
++ if (!info)
++ return -ENOMEM;
++ filp->private_data = info;
++ }
++
++ /* Someone has messed with f_pos; reset the world */
++ if (info->last_pos != filp->f_pos) {
++ free_rb_tree_fname(&info->root);
++ info->curr_node = 0;
++ info->extra_fname = 0;
++ info->curr_hash = pos2maj_hash(filp->f_pos);
++ info->curr_minor_hash = pos2min_hash(filp->f_pos);
++ }
++
++ /*
++ * If there are any leftover names on the hash collision
++ * chain, return them first.
++ */
++ if (info->extra_fname &&
++ call_filldir(filp, dirent, filldir, info->extra_fname))
++ goto finished;
++
++ if (!info->curr_node)
++ info->curr_node = rb_get_first(&info->root);
++
++ while (1) {
++ /*
++ * Fill the rbtree if we have no more entries,
++ * or the inode has changed since we last read in the
++ * cached entries.
++ */
++ if ((!info->curr_node) ||
++ (filp->f_version != inode->i_version)) {
++ info->curr_node = 0;
++ free_rb_tree_fname(&info->root);
++ filp->f_version = inode->i_version;
++ ret = ext3_htree_fill_tree(filp, info->curr_hash,
++ info->curr_minor_hash,
++ &info->next_hash);
++ if (ret < 0)
++ return ret;
++ if (ret == 0)
++ break;
++ info->curr_node = rb_get_first(&info->root);
++ }
++
++ fname = rb_entry(info->curr_node, struct fname, rb_hash);
++ info->curr_hash = fname->hash;
++ info->curr_minor_hash = fname->minor_hash;
++ if (call_filldir(filp, dirent, filldir, fname))
++ break;
++
++ info->curr_node = rb_get_next(info->curr_node);
++ if (!info->curr_node) {
++ info->curr_hash = info->next_hash;
++ info->curr_minor_hash = 0;
++ }
++ }
++finished:
++ info->last_pos = filp->f_pos;
++ UPDATE_ATIME(inode);
++ return 0;
++}
++#endif
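The hash2pos()/pos2maj_hash() macros above define the telldir cookie: only the major hash survives the round trip through f_pos, and its low bit is always clear because ext3fs_dirhash() (hash.c, below) stores hash & ~1. A stand-alone user-space sketch of that round trip; the sample values are invented and none of this is part of the patch:

#include <stdio.h>

#define hash2pos(major, minor)  ((major) >> 1)
#define pos2maj_hash(pos)       (((pos) << 1) & 0xffffffff)
#define pos2min_hash(pos)       (0)

int main(void)
{
        unsigned long major = 0x9e3779b8;   /* example major hash, low bit clear */
        unsigned long minor = 0x12345678;   /* example minor hash */
        unsigned long pos = hash2pos(major, minor);

        (void) minor;                       /* the cookie cannot carry the minor hash */
        printf("f_pos           = %#lx\n", pos);
        printf("recovered major = %#lx\n", pos2maj_hash(pos));
        printf("recovered minor = %d\n", pos2min_hash(pos));
        return 0;
}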
+diff -Nru a/fs/ext3/file.c b/fs/ext3/file.c
+--- a/fs/ext3/file.c Thu Nov 7 11:58:05 2002
++++ b/fs/ext3/file.c Thu Nov 7 11:58:05 2002
+@@ -35,6 +35,9 @@
+ {
+ if (filp->f_mode & FMODE_WRITE)
+ ext3_discard_prealloc (inode);
++ if (is_dx(inode) && filp->private_data)
++ ext3_htree_free_dir_info(filp->private_data);
++
+ return 0;
+ }
+
+diff -Nru a/fs/ext3/hash.c b/fs/ext3/hash.c
+--- /dev/null Wed Dec 31 16:00:00 1969
++++ b/fs/ext3/hash.c Thu Nov 7 11:58:05 2002
+@@ -0,0 +1,215 @@
++/*
++ * linux/fs/ext3/hash.c
++ *
++ * Copyright (C) 2002 by Theodore Ts'o
++ *
++ * This file is released under the GPL v2.
++ *
++ * This file may be redistributed under the terms of the GNU Public
++ * License.
++ */
++
++#include <linux/fs.h>
++#include <linux/jbd.h>
++#include <linux/sched.h>
++#include <linux/ext3_fs.h>
++
++#define DELTA 0x9E3779B9
++
++static void TEA_transform(__u32 buf[4], __u32 const in[])
++{
++ __u32 sum = 0;
++ __u32 b0 = buf[0], b1 = buf[1];
++ __u32 a = in[0], b = in[1], c = in[2], d = in[3];
++ int n = 16;
++
++ do {
++ sum += DELTA;
++ b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
++ b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
++ } while(--n);
++
++ buf[0] += b0;
++ buf[1] += b1;
++}
++
++/* F, G and H are basic MD4 functions: selection, majority, parity */
++#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
++#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
++#define H(x, y, z) ((x) ^ (y) ^ (z))
++
++/*
++ * The generic round function. The application is so specific that
++ * we don't bother protecting all the arguments with parens, as is generally
++ * good macro practice, in favor of extra legibility.
++ * Rotation is separate from addition to prevent recomputation
++ */
++#define ROUND(f, a, b, c, d, x, s) \
++ (a += f(b, c, d) + x, a = (a << s) | (a >> (32-s)))
++#define K1 0
++#define K2 013240474631UL
++#define K3 015666365641UL
++
++/*
++ * Basic cut-down MD4 transform. Returns only 32 bits of result.
++ */
++static void halfMD4Transform (__u32 buf[4], __u32 const in[])
++{
++ __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
++
++ /* Round 1 */
++ ROUND(F, a, b, c, d, in[0] + K1, 3);
++ ROUND(F, d, a, b, c, in[1] + K1, 7);
++ ROUND(F, c, d, a, b, in[2] + K1, 11);
++ ROUND(F, b, c, d, a, in[3] + K1, 19);
++ ROUND(F, a, b, c, d, in[4] + K1, 3);
++ ROUND(F, d, a, b, c, in[5] + K1, 7);
++ ROUND(F, c, d, a, b, in[6] + K1, 11);
++ ROUND(F, b, c, d, a, in[7] + K1, 19);
++
++ /* Round 2 */
++ ROUND(G, a, b, c, d, in[1] + K2, 3);
++ ROUND(G, d, a, b, c, in[3] + K2, 5);
++ ROUND(G, c, d, a, b, in[5] + K2, 9);
++ ROUND(G, b, c, d, a, in[7] + K2, 13);
++ ROUND(G, a, b, c, d, in[0] + K2, 3);
++ ROUND(G, d, a, b, c, in[2] + K2, 5);
++ ROUND(G, c, d, a, b, in[4] + K2, 9);
++ ROUND(G, b, c, d, a, in[6] + K2, 13);
++
++ /* Round 3 */
++ ROUND(H, a, b, c, d, in[3] + K3, 3);
++ ROUND(H, d, a, b, c, in[7] + K3, 9);
++ ROUND(H, c, d, a, b, in[2] + K3, 11);
++ ROUND(H, b, c, d, a, in[6] + K3, 15);
++ ROUND(H, a, b, c, d, in[1] + K3, 3);
++ ROUND(H, d, a, b, c, in[5] + K3, 9);
++ ROUND(H, c, d, a, b, in[0] + K3, 11);
++ ROUND(H, b, c, d, a, in[4] + K3, 15);
++
++ buf[0] += a;
++ buf[1] += b;
++ buf[2] += c;
++ buf[3] += d;
++}
++
++#undef ROUND
++#undef F
++#undef G
++#undef H
++#undef K1
++#undef K2
++#undef K3
++
++/* The old legacy hash */
++static __u32 dx_hack_hash (const char *name, int len)
++{
++ __u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
++ while (len--) {
++ __u32 hash = hash1 + (hash0 ^ (*name++ * 7152373));
++
++ if (hash & 0x80000000) hash -= 0x7fffffff;
++ hash1 = hash0;
++ hash0 = hash;
++ }
++ return (hash0 << 1);
++}
++
++static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
++{
++ __u32 pad, val;
++ int i;
++
++ pad = (__u32)len | ((__u32)len << 8);
++ pad |= pad << 16;
++
++ val = pad;
++ if (len > num*4)
++ len = num * 4;
++ for (i=0; i < len; i++) {
++ if ((i % 4) == 0)
++ val = pad;
++ val = msg[i] + (val << 8);
++ if ((i % 4) == 3) {
++ *buf++ = val;
++ val = pad;
++ num--;
++ }
++ }
++ if (--num >= 0)
++ *buf++ = val;
++ while (--num >= 0)
++ *buf++ = pad;
++}
++
++/*
++ * Returns the hash of a filename. If len is 0 and name is NULL, then
++ * this function can be used to test whether or not a hash version is
++ * supported.
++ *
++ * The seed is a 4-longword (32 bits each) "secret" which can be used to
++ * uniquify a hash. If the seed is all zeros, then some default seed
++ * may be used.
++ *
++ * A particular hash version specifies whether or not the seed is
++ * represented, and whether or not the returned hash is 32 bits or 64
++ * bits. 32 bit hashes will return 0 for the minor hash.
++ */
++int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
++{
++ __u32 hash;
++ __u32 minor_hash = 0;
++ const char *p;
++ int i;
++ __u32 in[8], buf[4];
++
++ /* Initialize the default seed for the hash checksum functions */
++ buf[0] = 0x67452301;
++ buf[1] = 0xefcdab89;
++ buf[2] = 0x98badcfe;
++ buf[3] = 0x10325476;
++
++ /* Check to see if the seed is all zeros */
++ if (hinfo->seed) {
++ for (i=0; i < 4; i++) {
++ if (hinfo->seed[i])
++ break;
++ }
++ if (i < 4)
++ memcpy(buf, hinfo->seed, sizeof(buf));
++ }
++
++ switch (hinfo->hash_version) {
++ case DX_HASH_LEGACY:
++ hash = dx_hack_hash(name, len);
++ break;
++ case DX_HASH_HALF_MD4:
++ p = name;
++ while (len > 0) {
++ str2hashbuf(p, len, in, 8);
++ halfMD4Transform(buf, in);
++ len -= 32;
++ p += 32;
++ }
++ minor_hash = buf[2];
++ hash = buf[1];
++ break;
++ case DX_HASH_TEA:
++ p = name;
++ while (len > 0) {
++ str2hashbuf(p, len, in, 4);
++ TEA_transform(buf, in);
++ len -= 16;
++ p += 16;
++ }
++ hash = buf[0];
++ minor_hash = buf[1];
++ break;
++ default:
++ hinfo->hash = 0;
++ return -1;
++ }
++ hinfo->hash = hash & ~1;
++ hinfo->minor_hash = minor_hash;
++ return 0;
++}
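Since DX_HASH_LEGACY needs nothing from the kernel, it is convenient to reproduce off-line when comparing on-disk index hashes. A stand-alone copy for that purpose; the main() driver and the sample name are illustrative only and not part of the patch:

#include <stdio.h>

/* same legacy hash as above */
static unsigned int dx_hack_hash(const char *name, int len)
{
        unsigned int hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;

        while (len--) {
                unsigned int hash = hash1 + (hash0 ^ (*name++ * 7152373));

                if (hash & 0x80000000)
                        hash -= 0x7fffffff;
                hash1 = hash0;
                hash0 = hash;
        }
        return hash0 << 1;
}

int main(void)
{
        const char *name = "lost+found";

        /* the index stores hash & ~1, so mask the low bit here as well */
        printf("%s -> %#x\n", name, dx_hack_hash(name, 10) & ~1u);
        return 0;
}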
+diff -Nru a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
+--- a/fs/ext3/ialloc.c Thu Nov 7 11:58:05 2002
++++ b/fs/ext3/ialloc.c Thu Nov 7 11:58:05 2002
+@@ -21,6 +21,7 @@
+ #include <linux/string.h>
+ #include <linux/locks.h>
+ #include <linux/quotaops.h>
++#include <linux/random.h>
+
+ #include <asm/bitops.h>
+ #include <asm/byteorder.h>
+@@ -293,6 +294,199 @@
+ * the groups with above-average free space, that group with the fewest
+ * directories already is chosen.
+ *
++ * For other inodes, search forward from the parent directory's block
++ * group to find a free inode.
++ */
++static int find_group_dir(struct super_block *sb, const struct inode *parent)
++{
++ struct ext3_super_block * es = EXT3_SB(sb)->s_es;
++ int ngroups = EXT3_SB(sb)->s_groups_count;
++ int avefreei = le32_to_cpu(es->s_free_inodes_count) / ngroups;
++ struct ext3_group_desc *desc, *best_desc = NULL;
++ struct buffer_head *bh;
++ int group, best_group = -1;
++
++ for (group = 0; group < ngroups; group++) {
++ desc = ext3_get_group_desc (sb, group, &bh);
++ if (!desc || !desc->bg_free_inodes_count)
++ continue;
++ if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
++ continue;
++ if (!best_desc ||
++ (le16_to_cpu(desc->bg_free_blocks_count) >
++ le16_to_cpu(best_desc->bg_free_blocks_count))) {
++ best_group = group;
++ best_desc = desc;
++ }
++ }
++ return best_group;
++}
++
++/*
++ * Orlov's allocator for directories.
++ *
++ * We always try to spread first-level directories.
++ *
++ * If there are block groups with both free inode and free block counts
++ * not worse than average, we return the one with the smallest directory
++ * count. Otherwise we simply return a random group.
++ *
++ * For the remaining directories, the rules are:
++ *
++ * It's OK to put a directory into a group unless
++ * it has too many directories already (max_dirs) or
++ * it has too few free inodes left (min_inodes) or
++ * it has too few free blocks left (min_blocks) or
++ * it's already running too large a debt (max_debt).
++ * Parent's group is preferred; if it doesn't satisfy these
++ * conditions we search cyclically through the rest. If none
++ * of the groups look good we just look for a group with more
++ * free inodes than average (starting at parent's group).
++ *
++ * Debt is incremented each time we allocate a directory and decremented
++ * when we allocate an inode, within 0--255.
++ */
++
++#define INODE_COST 64
++#define BLOCK_COST 256
++
++static int find_group_orlov(struct super_block *sb, const struct inode *parent)
++{
++ int parent_group = EXT3_I(parent)->i_block_group;
++ struct ext3_sb_info *sbi = EXT3_SB(sb);
++ struct ext3_super_block *es = sbi->s_es;
++ int ngroups = sbi->s_groups_count;
++ int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
++ int avefreei = le32_to_cpu(es->s_free_inodes_count) / ngroups;
++ int avefreeb = le32_to_cpu(es->s_free_blocks_count) / ngroups;
++ int blocks_per_dir;
++ int ndirs = sbi->s_dir_count;
++ int max_debt, max_dirs, min_blocks, min_inodes;
++ int group = -1, i;
++ struct ext3_group_desc *desc;
++ struct buffer_head *bh;
++
++ if ((parent == sb->s_root->d_inode) ||
++ (EXT3_I(parent)->i_flags & EXT3_TOPDIR_FL)) {
++ int best_ndir = inodes_per_group;
++ int best_group = -1;
++
++ get_random_bytes(&group, sizeof(group));
++ parent_group = (unsigned)group % ngroups;
++ for (i = 0; i < ngroups; i++) {
++ group = (parent_group + i) % ngroups;
++ desc = ext3_get_group_desc (sb, group, &bh);
++ if (!desc || !desc->bg_free_inodes_count)
++ continue;
++ if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
++ continue;
++ if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
++ continue;
++ if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
++ continue;
++ best_group = group;
++ best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
++ }
++ if (best_group >= 0)
++ return best_group;
++ goto fallback;
++ }
++
++ blocks_per_dir = (le32_to_cpu(es->s_blocks_count) -
++ le32_to_cpu(es->s_free_blocks_count)) / ndirs;
++
++ max_dirs = ndirs / ngroups + inodes_per_group / 16;
++ min_inodes = avefreei - inodes_per_group / 4;
++ min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
++
++ max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, BLOCK_COST);
++ if (max_debt * INODE_COST > inodes_per_group)
++ max_debt = inodes_per_group / INODE_COST;
++ if (max_debt > 255)
++ max_debt = 255;
++ if (max_debt == 0)
++ max_debt = 1;
++
++ for (i = 0; i < ngroups; i++) {
++ group = (parent_group + i) % ngroups;
++ desc = ext3_get_group_desc (sb, group, &bh);
++ if (!desc || !desc->bg_free_inodes_count)
++ continue;
++ if (sbi->s_debts[group] >= max_debt)
++ continue;
++ if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
++ continue;
++ if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
++ continue;
++ if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
++ continue;
++ return group;
++ }
++
++fallback:
++ for (i = 0; i < ngroups; i++) {
++ group = (parent_group + i) % ngroups;
++ desc = ext3_get_group_desc (sb, group, &bh);
++ if (!desc || !desc->bg_free_inodes_count)
++ continue;
++ if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
++ return group;
++ }
++
++ return -1;
++}
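To see how the debt clamp above behaves, here is the arithmetic for an assumed filesystem with 32768 blocks and 16384 inodes per group and a running average of 128 blocks per directory; all figures are invented for illustration and this is not code from the patch:

#include <stdio.h>

#define INODE_COST 64
#define BLOCK_COST 256

static int max(int a, int b) { return a > b ? a : b; }

int main(void)
{
        int blocks_per_group = 32768, inodes_per_group = 16384;
        int blocks_per_dir = 128;               /* assumed current average */
        int max_debt;

        max_debt = blocks_per_group / max(blocks_per_dir, BLOCK_COST);
        if (max_debt * INODE_COST > inodes_per_group)
                max_debt = inodes_per_group / INODE_COST;
        if (max_debt > 255)
                max_debt = 255;
        if (max_debt == 0)
                max_debt = 1;
        printf("max_debt = %d\n", max_debt);    /* 128 with these figures */
        return 0;
}

A group is then skipped for new directories once its debt counter (incremented per directory, decremented per regular inode, per the comment above find_group_orlov) reaches that value.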
++
++static int find_group_other(struct super_block *sb, const struct inode *parent)
++{
++ int parent_group = EXT3_I(parent)->i_block_group;
++ int ngroups = EXT3_SB(sb)->s_groups_count;
++ struct ext3_group_desc *desc;
++ struct buffer_head *bh;
++ int group, i;
++
++ /*
++ * Try to place the inode in its parent directory
++ */
++ group = parent_group;
++ desc = ext3_get_group_desc (sb, group, &bh);
++ if (desc && le16_to_cpu(desc->bg_free_inodes_count))
++ return group;
++
++ /*
++ * Use a quadratic hash to find a group with a
++ * free inode
++ */
++ for (i = 1; i < ngroups; i <<= 1) {
++ group += i;
++ if (group >= ngroups)
++ group -= ngroups;
++ desc = ext3_get_group_desc (sb, group, &bh);
++ if (desc && le16_to_cpu(desc->bg_free_inodes_count))
++ return group;
++ }
++
++ /*
++ * That failed: try linear search for a free inode
++ */
++ group = parent_group + 1;
++ for (i = 2; i < ngroups; i++) {
++ if (++group >= ngroups)
++ group = 0;
++ desc = ext3_get_group_desc (sb, group, &bh);
++ if (desc && le16_to_cpu(desc->bg_free_inodes_count))
++ return group;
++ }
++
++ return -1;
++}
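The probe above starts at the parent's group and visits offsets +1, +3, +7, +15, ... (mod ngroups) before the linear fallback. A throwaway print of that sequence for an assumed 32-group filesystem with parent group 5 (not part of the patch):

#include <stdio.h>

int main(void)
{
        int ngroups = 32, group = 5, i;

        for (i = 1; i < ngroups; i <<= 1) {
                group += i;
                if (group >= ngroups)
                        group -= ngroups;
                printf("probe group %d\n", group);
        }
        return 0;                       /* prints 6, 8, 12, 20, 4 */
}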
++
++/*
++ * There are two policies for allocating an inode. If the new inode is
++ * a directory, then a forward search is made for a block group with both
++ * free space and a low directory-to-inode ratio; if that fails, then of
++ * the groups with above-average free space, that group with the fewest
++ * directories already is chosen.
++ *
+ * For other inodes, search forward from the parent directory's block
+ * group to find a free inode.
+ */
+@@ -302,11 +496,11 @@
+ struct super_block * sb;
+ struct buffer_head * bh;
+ struct buffer_head * bh2;
+- int i, j, avefreei;
++ int group;
++ ino_t ino;
+ struct inode * inode;
+ int bitmap_nr;
+ struct ext3_group_desc * gdp;
+- struct ext3_group_desc * tmp;
+ struct ext3_super_block * es;
+ int err = 0;
+
+@@ -323,94 +517,36 @@
+ lock_super (sb);
+ es = sb->u.ext3_sb.s_es;
+ repeat:
+- gdp = NULL;
+- i = 0;
+-
+ if (S_ISDIR(mode)) {
+- avefreei = le32_to_cpu(es->s_free_inodes_count) /
+- sb->u.ext3_sb.s_groups_count;
+- if (!gdp) {
+- for (j = 0; j < sb->u.ext3_sb.s_groups_count; j++) {
+- struct buffer_head *temp_buffer;
+- tmp = ext3_get_group_desc (sb, j, &temp_buffer);
+- if (tmp &&
+- le16_to_cpu(tmp->bg_free_inodes_count) &&
+- le16_to_cpu(tmp->bg_free_inodes_count) >=
+- avefreei) {
+- if (!gdp || (le16_to_cpu(tmp->bg_free_blocks_count) >
+- le16_to_cpu(gdp->bg_free_blocks_count))) {
+- i = j;
+- gdp = tmp;
+- bh2 = temp_buffer;
+- }
+- }
+- }
+- }
+- } else {
+- /*
+- * Try to place the inode in its parent directory
+- */
+- i = dir->u.ext3_i.i_block_group;
+- tmp = ext3_get_group_desc (sb, i, &bh2);
+- if (tmp && le16_to_cpu(tmp->bg_free_inodes_count))
+- gdp = tmp;
++ if (test_opt (sb, OLDALLOC))
++ group = find_group_dir(sb, dir);
+ else
+- {
+- /*
+- * Use a quadratic hash to find a group with a
+- * free inode
+- */
+- for (j = 1; j < sb->u.ext3_sb.s_groups_count; j <<= 1) {
+- i += j;
+- if (i >= sb->u.ext3_sb.s_groups_count)
+- i -= sb->u.ext3_sb.s_groups_count;
+- tmp = ext3_get_group_desc (sb, i, &bh2);
+- if (tmp &&
+- le16_to_cpu(tmp->bg_free_inodes_count)) {
+- gdp = tmp;
+- break;
+- }
+- }
+- }
+- if (!gdp) {
+- /*
+- * That failed: try linear search for a free inode
+- */
+- i = dir->u.ext3_i.i_block_group + 1;
+- for (j = 2; j < sb->u.ext3_sb.s_groups_count; j++) {
+- if (++i >= sb->u.ext3_sb.s_groups_count)
+- i = 0;
+- tmp = ext3_get_group_desc (sb, i, &bh2);
+- if (tmp &&
+- le16_to_cpu(tmp->bg_free_inodes_count)) {
+- gdp = tmp;
+- break;
+- }
+- }
+- }
+- }
+-
++ group = find_group_orlov(sb, dir);
++ } else
++ group = find_group_other(sb, dir);
++
+ err = -ENOSPC;
+- if (!gdp)
++ if (group == -1)
+ goto out;
+
+ err = -EIO;
+- bitmap_nr = load_inode_bitmap (sb, i);
++ bitmap_nr = load_inode_bitmap (sb, group);
+ if (bitmap_nr < 0)
+ goto fail;
+
+ bh = sb->u.ext3_sb.s_inode_bitmap[bitmap_nr];
++ gdp = ext3_get_group_desc (sb, group, &bh2);
+
+- if ((j = ext3_find_first_zero_bit ((unsigned long *) bh->b_data,
++ if ((ino = ext3_find_first_zero_bit ((unsigned long *) bh->b_data,
+ EXT3_INODES_PER_GROUP(sb))) <
+ EXT3_INODES_PER_GROUP(sb)) {
+ BUFFER_TRACE(bh, "get_write_access");
+ err = ext3_journal_get_write_access(handle, bh);
+ if (err) goto fail;
+
+- if (ext3_set_bit (j, bh->b_data)) {
++ if (ext3_set_bit (ino, bh->b_data)) {
+ ext3_error (sb, "ext3_new_inode",
+- "bit already set for inode %d", j);
++ "bit already set for inode %lu", ino);
+ goto repeat;
+ }
+ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+@@ -420,7 +556,7 @@
+ if (le16_to_cpu(gdp->bg_free_inodes_count) != 0) {
+ ext3_error (sb, "ext3_new_inode",
+ "Free inodes count corrupted in group %d",
+- i);
++ group);
+ /* Is it really ENOSPC? */
+ err = -ENOSPC;
+ if (sb->s_flags & MS_RDONLY)
+@@ -436,11 +572,11 @@
+ }
+ goto repeat;
+ }
+- j += i * EXT3_INODES_PER_GROUP(sb) + 1;
+- if (j < EXT3_FIRST_INO(sb) || j > le32_to_cpu(es->s_inodes_count)) {
++ ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
++ if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
+ ext3_error (sb, "ext3_new_inode",
+ "reserved inode or inode > inodes count - "
+- "block_group = %d,inode=%d", i, j);
++ "block_group = %d, inode=%lu", group, ino);
+ err = -EIO;
+ goto fail;
+ }
+@@ -450,9 +586,11 @@
+ if (err) goto fail;
+ gdp->bg_free_inodes_count =
+ cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+- if (S_ISDIR(mode))
++ if (S_ISDIR(mode)) {
+ gdp->bg_used_dirs_count =
+ cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
++ EXT3_SB(sb)->s_dir_count++;
++ }
+ BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
+ err = ext3_journal_dirty_metadata(handle, bh2);
+ if (err) goto fail;
+@@ -478,7 +616,7 @@
+ inode->i_gid = current->fsgid;
+ inode->i_mode = mode;
+
+- inode->i_ino = j;
++ inode->i_ino = ino;
+ /* This is the optimal IO size (for stat), not the fs block size */
+ inode->i_blksize = PAGE_SIZE;
+ inode->i_blocks = 0;
+@@ -498,7 +636,7 @@
+ #ifdef EXT3_PREALLOCATE
+ inode->u.ext3_i.i_prealloc_count = 0;
+ #endif
+- inode->u.ext3_i.i_block_group = i;
++ inode->u.ext3_i.i_block_group = group;
+
+ if (inode->u.ext3_i.i_flags & EXT3_SYNC_FL)
+ inode->i_flags |= S_SYNC;
+@@ -620,6 +758,21 @@
+ #else
+ return le32_to_cpu(sb->u.ext3_sb.s_es->s_free_inodes_count);
+ #endif
++}
++
++/* Called at mount-time, super-block is locked */
++unsigned long ext3_count_dirs (struct super_block * sb)
++{
++ unsigned long count = 0;
++ int i;
++
++ for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
++ struct ext3_group_desc *gdp = ext3_get_group_desc (sb, i, NULL);
++ if (!gdp)
++ continue;
++ count += le16_to_cpu(gdp->bg_used_dirs_count);
++ }
++ return count;
+ }
+
+ #ifdef CONFIG_EXT3_CHECK
+diff -Nru a/fs/ext3/inode.c b/fs/ext3/inode.c
+--- a/fs/ext3/inode.c Thu Nov 7 11:58:05 2002
++++ b/fs/ext3/inode.c Thu Nov 7 11:58:05 2002
+@@ -2194,6 +2194,11 @@
+ if (err)
+ goto out_brelse;
+ }
++ /* For fields not tracked in the in-memory inode,
++ * initialise them to zero for new inodes. */
++ if (EXT3_I(inode)->i_state & EXT3_STATE_NEW)
++ memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
++
+ raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+ if(!(test_opt(inode->i_sb, NO_UID32))) {
+ raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
+@@ -2231,15 +2236,6 @@
+ raw_inode->i_faddr = cpu_to_le32(inode->u.ext3_i.i_faddr);
+ raw_inode->i_frag = inode->u.ext3_i.i_frag_no;
+ raw_inode->i_fsize = inode->u.ext3_i.i_frag_size;
+-#else
+- /* If we are not tracking these fields in the in-memory inode,
+- * then preserve them on disk, but still initialise them to zero
+- * for new inodes. */
+- if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) {
+- raw_inode->i_faddr = 0;
+- raw_inode->i_frag = 0;
+- raw_inode->i_fsize = 0;
+- }
+ #endif
+ raw_inode->i_file_acl = cpu_to_le32(inode->u.ext3_i.i_file_acl);
+ if (!S_ISREG(inode->i_mode)) {
+diff -Nru a/fs/ext3/namei.c b/fs/ext3/namei.c
+--- a/fs/ext3/namei.c Thu Nov 7 11:58:05 2002
++++ b/fs/ext3/namei.c Thu Nov 7 11:58:05 2002
+@@ -16,6 +16,12 @@
+ * David S. Miller (davem@caip.rutgers.edu), 1995
+ * Directory entry file type support and forward compatibility hooks
+ * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
++ * Hash Tree Directory indexing (c)
++ * Daniel Phillips, 2001
++ * Hash Tree Directory indexing porting
++ * Christopher Li, 2002
++ * Hash Tree Directory indexing cleanup
++ * Theodore Ts'o, 2002
+ */
+
+ #include <linux/fs.h>
+@@ -38,6 +44,642 @@
+ #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+ #define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
+
++static struct buffer_head *ext3_append(handle_t *handle,
++ struct inode *inode,
++ u32 *block, int *err)
++{
++ struct buffer_head *bh;
++
++ *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
++
++ if ((bh = ext3_bread(handle, inode, *block, 1, err))) {
++ inode->i_size += inode->i_sb->s_blocksize;
++ EXT3_I(inode)->i_disksize = inode->i_size;
++ ext3_journal_get_write_access(handle,bh);
++ }
++ return bh;
++}
++
++#ifndef assert
++#define assert(test) J_ASSERT(test)
++#endif
++
++#ifndef swap
++#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
++#endif
++
++typedef struct { u32 v; } le_u32;
++typedef struct { u16 v; } le_u16;
++
++#ifdef DX_DEBUG
++#define dxtrace(command) command
++#else
++#define dxtrace(command)
++#endif
++
++struct fake_dirent
++{
++ /*le*/u32 inode;
++ /*le*/u16 rec_len;
++ u8 name_len;
++ u8 file_type;
++};
++
++struct dx_countlimit
++{
++ le_u16 limit;
++ le_u16 count;
++};
++
++struct dx_entry
++{
++ le_u32 hash;
++ le_u32 block;
++};
++
++/*
++ * dx_root_info is laid out so that if it should somehow get overlaid by a
++ * dirent the two low bits of the hash version will be zero. Therefore, the
++ * hash version mod 4 should never be 0. Sincerely, the paranoia department.
++ */
++
++struct dx_root
++{
++ struct fake_dirent dot;
++ char dot_name[4];
++ struct fake_dirent dotdot;
++ char dotdot_name[4];
++ struct dx_root_info
++ {
++ le_u32 reserved_zero;
++ u8 hash_version;
++ u8 info_length; /* 8 */
++ u8 indirect_levels;
++ u8 unused_flags;
++ }
++ info;
++ struct dx_entry entries[0];
++};
++
++struct dx_node
++{
++ struct fake_dirent fake;
++ struct dx_entry entries[0];
++};
++
++
++struct dx_frame
++{
++ struct buffer_head *bh;
++ struct dx_entry *entries;
++ struct dx_entry *at;
++};
++
++struct dx_map_entry
++{
++ u32 hash;
++ u32 offs;
++};
++
++#ifdef CONFIG_EXT3_INDEX
++static inline unsigned dx_get_block (struct dx_entry *entry);
++static void dx_set_block (struct dx_entry *entry, unsigned value);
++static inline unsigned dx_get_hash (struct dx_entry *entry);
++static void dx_set_hash (struct dx_entry *entry, unsigned value);
++static unsigned dx_get_count (struct dx_entry *entries);
++static unsigned dx_get_limit (struct dx_entry *entries);
++static void dx_set_count (struct dx_entry *entries, unsigned value);
++static void dx_set_limit (struct dx_entry *entries, unsigned value);
++static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
++static unsigned dx_node_limit (struct inode *dir);
++static struct dx_frame *dx_probe(struct dentry *dentry,
++ struct inode *dir,
++ struct dx_hash_info *hinfo,
++ struct dx_frame *frame,
++ int *err);
++static void dx_release (struct dx_frame *frames);
++static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
++ struct dx_hash_info *hinfo, struct dx_map_entry map[]);
++static void dx_sort_map(struct dx_map_entry *map, unsigned count);
++static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to,
++ struct dx_map_entry *offsets, int count);
++static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size);
++static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
++static int ext3_htree_next_block(struct inode *dir, __u32 hash,
++ struct dx_frame *frame,
++ struct dx_frame *frames, int *err,
++ __u32 *start_hash);
++static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
++ struct ext3_dir_entry_2 **res_dir, int *err);
++static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
++ struct inode *inode);
++
++/*
++ * Future: use high four bits of block for coalesce-on-delete flags
++ * Mask them off for now.
++ */
++
++static inline unsigned dx_get_block (struct dx_entry *entry)
++{
++ return le32_to_cpu(entry->block.v) & 0x00ffffff;
++}
++
++static inline void dx_set_block (struct dx_entry *entry, unsigned value)
++{
++ entry->block.v = cpu_to_le32(value);
++}
++
++static inline unsigned dx_get_hash (struct dx_entry *entry)
++{
++ return le32_to_cpu(entry->hash.v);
++}
++
++static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
++{
++ entry->hash.v = cpu_to_le32(value);
++}
++
++static inline unsigned dx_get_count (struct dx_entry *entries)
++{
++ return le16_to_cpu(((struct dx_countlimit *) entries)->count.v);
++}
++
++static inline unsigned dx_get_limit (struct dx_entry *entries)
++{
++ return le16_to_cpu(((struct dx_countlimit *) entries)->limit.v);
++}
++
++static inline void dx_set_count (struct dx_entry *entries, unsigned value)
++{
++ ((struct dx_countlimit *) entries)->count.v = cpu_to_le16(value);
++}
++
++static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
++{
++ ((struct dx_countlimit *) entries)->limit.v = cpu_to_le16(value);
++}
++
++static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
++{
++ unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
++ EXT3_DIR_REC_LEN(2) - infosize;
++ return 0? 20: entry_space / sizeof(struct dx_entry);
++}
++
++static inline unsigned dx_node_limit (struct inode *dir)
++{
++ unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
++ return 0? 22: entry_space / sizeof(struct dx_entry);
++}
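For reference, with a 4096-byte block, sizeof(struct dx_entry) == 8 and the usual EXT3_DIR_REC_LEN() rounding (name_len + 8 rounded up to a multiple of 4, as in ext3_fs.h), the limits above work out to 508 entries in the root and 511 per interior node, so even one indirect level indexes a few hundred thousand leaf blocks. A quick check of that arithmetic, under the stated assumptions and not taken from the patch:

#include <stdio.h>

#define REC_LEN(name_len)       (((name_len) + 8 + 3) & ~3)

int main(void)
{
        unsigned blocksize = 4096, infosize = 8, entry_size = 8;

        printf("root limit: %u\n",
               (blocksize - REC_LEN(1) - REC_LEN(2) - infosize) / entry_size);
        printf("node limit: %u\n",
               (blocksize - REC_LEN(0)) / entry_size);
        return 0;                       /* 508 and 511 */
}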
++
++/*
++ * Debug
++ */
++#ifdef DX_DEBUG
++struct stats
++{
++ unsigned names;
++ unsigned space;
++ unsigned bcount;
++};
++
++static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_entry_2 *de,
++ int size, int show_names)
++{
++ unsigned names = 0, space = 0;
++ char *base = (char *) de;
++ struct dx_hash_info h = *hinfo;
++
++ printk("names: ");
++ while ((char *) de < base + size)
++ {
++ if (de->inode)
++ {
++ if (show_names)
++ {
++ int len = de->name_len;
++ char *name = de->name;
++ while (len--) printk("%c", *name++);
++ ext3fs_dirhash(de->name, de->name_len, &h);
++ printk(":%x.%u ", h.hash,
++ ((char *) de - base));
++ }
++ space += EXT3_DIR_REC_LEN(de->name_len);
++ names++;
++ }
++ de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
++ }
++ printk("(%i)\n", names);
++ return (struct stats) { names, space, 1 };
++}
++
++struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
++ struct dx_entry *entries, int levels)
++{
++ unsigned blocksize = dir->i_sb->s_blocksize;
++ unsigned count = dx_get_count (entries), names = 0, space = 0, i;
++ unsigned bcount = 0;
++ struct buffer_head *bh;
++ int err;
++ printk("%i indexed blocks...\n", count);
++ for (i = 0; i < count; i++, entries++)
++ {
++ u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
++ u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
++ struct stats stats;
++ printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
++ if (!(bh = ext3_bread (NULL,dir, block, 0,&err))) continue;
++ stats = levels?
++ dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
++ dx_show_leaf(hinfo, (struct ext3_dir_entry_2 *) bh->b_data, blocksize, 0);
++ names += stats.names;
++ space += stats.space;
++ bcount += stats.bcount;
++ brelse (bh);
++ }
++ if (bcount)
++ printk("%snames %u, fullness %u (%u%%)\n", levels?"":" ",
++ names, space/bcount,(space/bcount)*100/blocksize);
++ return (struct stats) { names, space, bcount};
++}
++#endif /* DX_DEBUG */
++
++/*
++ * Probe for a directory leaf block to search.
++ *
++ * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
++ * error in the directory index, and the caller should fall back to
++ * searching the directory normally. The callers of dx_probe **MUST**
++ * check for this error code, and make sure it never gets reflected
++ * back to userspace.
++ */
++static struct dx_frame *
++dx_probe(struct dentry *dentry, struct inode *dir,
++ struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
++{
++ unsigned count, indirect;
++ struct dx_entry *at, *entries, *p, *q, *m;
++ struct dx_root *root;
++ struct buffer_head *bh;
++ struct dx_frame *frame = frame_in;
++ u32 hash;
++
++ frame->bh = NULL;
++ if (dentry)
++ dir = dentry->d_parent->d_inode;
++ if (!(bh = ext3_bread (NULL,dir, 0, 0, err)))
++ goto fail;
++ root = (struct dx_root *) bh->b_data;
++ if (root->info.hash_version != DX_HASH_TEA &&
++ root->info.hash_version != DX_HASH_HALF_MD4 &&
++ root->info.hash_version != DX_HASH_LEGACY) {
++ ext3_warning(dir->i_sb, __FUNCTION__,
++ "Unrecognised inode hash code %d",
++ root->info.hash_version);
++ brelse(bh);
++ *err = ERR_BAD_DX_DIR;
++ goto fail;
++ }
++ hinfo->hash_version = root->info.hash_version;
++ hinfo->seed = dir->i_sb->u.ext3_sb.s_hash_seed;
++ if (dentry)
++ ext3fs_dirhash(dentry->d_name.name, dentry->d_name.len, hinfo);
++ hash = hinfo->hash;
++
++ if (root->info.unused_flags & 1) {
++ ext3_warning(dir->i_sb, __FUNCTION__,
++ "Unimplemented inode hash flags: %#06x",
++ root->info.unused_flags);
++ brelse(bh);
++ *err = ERR_BAD_DX_DIR;
++ goto fail;
++ }
++
++ if ((indirect = root->info.indirect_levels) > 1) {
++ ext3_warning(dir->i_sb, __FUNCTION__,
++ "Unimplemented inode hash depth: %#06x",
++ root->info.indirect_levels);
++ brelse(bh);
++ *err = ERR_BAD_DX_DIR;
++ goto fail;
++ }
++
++ entries = (struct dx_entry *) (((char *)&root->info) +
++ root->info.info_length);
++ assert(dx_get_limit(entries) == dx_root_limit(dir,
++ root->info.info_length));
++ dxtrace (printk("Look up %x", hash));
++ while (1)
++ {
++ count = dx_get_count(entries);
++ assert (count && count <= dx_get_limit(entries));
++ p = entries + 1;
++ q = entries + count - 1;
++ while (p <= q)
++ {
++ m = p + (q - p)/2;
++ dxtrace(printk("."));
++ if (dx_get_hash(m) > hash)
++ q = m - 1;
++ else
++ p = m + 1;
++ }
++
++ if (0) // linear search cross check
++ {
++ unsigned n = count - 1;
++ at = entries;
++ while (n--)
++ {
++ dxtrace(printk(","));
++ if (dx_get_hash(++at) > hash)
++ {
++ at--;
++ break;
++ }
++ }
++ assert (at == p - 1);
++ }
++
++ at = p - 1;
++ dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
++ frame->bh = bh;
++ frame->entries = entries;
++ frame->at = at;
++ if (!indirect--) return frame;
++ if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err)))
++ goto fail2;
++ at = entries = ((struct dx_node *) bh->b_data)->entries;
++ assert (dx_get_limit(entries) == dx_node_limit (dir));
++ frame++;
++ }
++fail2:
++ while (frame >= frame_in) {
++ brelse(frame->bh);
++ frame--;
++ }
++fail:
++ return NULL;
++}
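The loop in dx_probe() above is a binary search for the last index entry whose hash is less than or equal to the hash being probed; entry 0 doubles as the count/limit header, which is why the scan runs over entries + 1 .. entries + count - 1. A stand-alone sketch with invented hashes and block numbers (not part of the patch):

#include <stdio.h>

struct dx_entry { unsigned int hash; unsigned int block; };

int main(void)
{
        /* entry 0 stands in for the count/limit header, hash treated as 0 */
        struct dx_entry entries[4] = {
                { 0x00000000, 1 }, { 0x30000000, 5 },
                { 0x60000000, 9 }, { 0x90000000, 3 },
        };
        unsigned int hash = 0x55555554;
        struct dx_entry *p = entries + 1, *q = entries + 3, *m, *at;

        while (p <= q) {
                m = p + (q - p) / 2;
                if (m->hash > hash)
                        q = m - 1;
                else
                        p = m + 1;
        }
        at = p - 1;
        printf("hash %#x -> leaf block %u\n", hash, at->block);  /* block 5 */
        return 0;
}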
++
++static void dx_release (struct dx_frame *frames)
++{
++ if (frames[0].bh == NULL)
++ return;
++
++ if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
++ brelse(frames[1].bh);
++ brelse(frames[0].bh);
++}
++
++/*
++ * This function increments the frame pointer to search the next leaf
++ * block, and reads in the necessary intervening nodes if the search
++ * should be necessary. Whether or not the search is necessary is
++ * controlled by the hash parameter. If the hash value is even, then
++ * the search is only continued if the next block starts with that
++ * hash value. This is used if we are searching for a specific file.
++ *
++ * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
++ *
++ * This function returns 1 if the caller should continue to search,
++ * or 0 if it should not. If there is an error reading one of the
++ * index blocks, it will return -1.
++ *
++ * If start_hash is non-null, it will be filled in with the starting
++ * hash of the next page.
++ */
++static int ext3_htree_next_block(struct inode *dir, __u32 hash,
++ struct dx_frame *frame,
++ struct dx_frame *frames, int *err,
++ __u32 *start_hash)
++{
++ struct dx_frame *p;
++ struct buffer_head *bh;
++ int num_frames = 0;
++ __u32 bhash;
++
++ *err = ENOENT;
++ p = frame;
++ /*
++ * Find the next leaf page by incrementing the frame pointer.
++ * If we run out of entries in the interior node, loop around and
++ * increment pointer in the parent node. When we break out of
++ * this loop, num_frames indicates the number of interior
++ * nodes that need to be read.
++ */
++ while (1) {
++ if (++(p->at) < p->entries + dx_get_count(p->entries))
++ break;
++ if (p == frames)
++ return 0;
++ num_frames++;
++ p--;
++ }
++
++ /*
++ * If the hash is 1, then continue only if the next page has a
++ * continuation hash of any value. This is used for readdir
++ * handling. Otherwise, check to see if the hash matches the
++ * desired continuation hash. If it doesn't, return since
++ * there's no point to read in the successive index pages.
++ */
++ bhash = dx_get_hash(p->at);
++ if (start_hash)
++ *start_hash = bhash;
++ if ((hash & 1) == 0) {
++ if ((bhash & ~1) != hash)
++ return 0;
++ }
++ /*
++ * If the hash is HASH_NB_ALWAYS, we always go to the next
++ * block so no check is necessary
++ */
++ while (num_frames--) {
++ if (!(bh = ext3_bread(NULL, dir, dx_get_block(p->at),
++ 0, err)))
++ return -1; /* Failure */
++ p++;
++ brelse (p->bh);
++ p->bh = bh;
++ p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
++ }
++ return 1;
++}
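Note how the hash argument above does double duty: an odd value (HASH_NB_ALWAYS) always advances to the next leaf, while an even value, i.e. a specific name lookup, advances only when the next index entry is a collision continuation of the same hash (do_split below sets that low bit when one hash straddles two blocks). A minimal sketch of the test with invented values, not code from the patch:

#include <stdio.h>

int main(void)
{
        unsigned int hash = 0x5a5a5a5a;         /* even: looking up one name */
        unsigned int next_bhash = 0x5a5a5a5b;   /* low bit set: same hash
                                                 * continues in the next block */

        if ((hash & 1) == 0 && (next_bhash & ~1) != hash)
                printf("stop: the next leaf cannot contain this name\n");
        else
                printf("continue into the next leaf block\n");
        return 0;                               /* prints "continue ..." */
}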
++
++
++/*
++ * p is at least 6 bytes before the end of page
++ */
++static inline struct ext3_dir_entry_2 *ext3_next_entry(struct ext3_dir_entry_2 *p)
++{
++ return (struct ext3_dir_entry_2 *)((char*)p + le16_to_cpu(p->rec_len));
++}
++
++/*
++ * This function fills a red-black tree with information from a
++ * directory. We start scanning the directory in hash order, starting
++ * at start_hash and start_minor_hash.
++ *
++ * This function returns the number of entries inserted into the tree,
++ * or a negative error code.
++ */
++int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
++ __u32 start_minor_hash, __u32 *next_hash)
++{
++ struct dx_hash_info hinfo;
++ struct buffer_head *bh;
++ struct ext3_dir_entry_2 *de, *top;
++ struct dx_frame frames[2], *frame;
++ struct inode *dir;
++ int block, err;
++ int count = 0;
++ int ret;
++ __u32 hashval;
++
++ dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
++ start_minor_hash));
++ dir = dir_file->f_dentry->d_inode;
++ hinfo.hash = start_hash;
++ hinfo.minor_hash = 0;
++ frame = dx_probe(0, dir_file->f_dentry->d_inode, &hinfo, frames, &err);
++ if (!frame)
++ return err;
++
++ /* Add '.' and '..' from the htree header */
++ if (!start_hash && !start_minor_hash) {
++ de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
++ if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0)
++ goto errout;
++ de = ext3_next_entry(de);
++ if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0)
++ goto errout;
++ count += 2;
++ }
++
++ while (1) {
++ block = dx_get_block(frame->at);
++ dxtrace(printk("Reading block %d\n", block));
++ if (!(bh = ext3_bread (NULL, dir, block, 0, &err)))
++ goto errout;
++
++ de = (struct ext3_dir_entry_2 *) bh->b_data;
++ top = (struct ext3_dir_entry_2 *) ((char *) de + dir->i_sb->s_blocksize -
++ EXT3_DIR_REC_LEN(0));
++ for (; de < top; de = ext3_next_entry(de)) {
++ ext3fs_dirhash(de->name, de->name_len, &hinfo);
++ if ((hinfo.hash < start_hash) ||
++ ((hinfo.hash == start_hash) &&
++ (hinfo.minor_hash < start_minor_hash)))
++ continue;
++ if ((err = ext3_htree_store_dirent(dir_file,
++ hinfo.hash, hinfo.minor_hash, de)) != 0)
++ goto errout;
++ count++;
++ }
++ brelse (bh);
++ hashval = ~1;
++ ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
++ frame, frames, &err, &hashval);
++ if (next_hash)
++ *next_hash = hashval;
++ if (ret == -1)
++ goto errout;
++ /*
++ * Stop if: (a) there are no more entries, or
++ * (b) we have inserted at least one entry and the
++ * next hash value is not a continuation
++ */
++ if ((ret == 0) ||
++ (count && ((hashval & 1) == 0)))
++ break;
++ }
++ dx_release(frames);
++ dxtrace(printk("Fill tree: returned %d entries\n", count));
++ return count;
++errout:
++ dx_release(frames);
++ return (err);
++}
++
++
++/*
++ * Directory block splitting, compacting
++ */
++
++static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
++ struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
++{
++ int count = 0;
++ char *base = (char *) de;
++ struct dx_hash_info h = *hinfo;
++
++ while ((char *) de < base + size)
++ {
++ if (de->name_len && de->inode) {
++ ext3fs_dirhash(de->name, de->name_len, &h);
++ map_tail--;
++ map_tail->hash = h.hash;
++ map_tail->offs = (u32) ((char *) de - base);
++ count++;
++ }
++ /* XXX: do we need to check rec_len == 0 case? -Chris */
++ de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
++ }
++ return count;
++}
++
++static void dx_sort_map (struct dx_map_entry *map, unsigned count)
++{
++ struct dx_map_entry *p, *q, *top = map + count - 1;
++ int more;
++ /* Combsort until bubble sort doesn't suck */
++ while (count > 2)
++ {
++ count = count*10/13;
++ if (count - 9 < 2) /* 9, 10 -> 11 */
++ count = 11;
++ for (p = top, q = p - count; q >= map; p--, q--)
++ if (p->hash < q->hash)
++ swap(*p, *q);
++ }
++ /* Garden variety bubble sort */
++ do {
++ more = 0;
++ q = top;
++ while (q-- > map)
++ {
++ if (q[1].hash >= q[0].hash)
++ continue;
++ swap(*(q+1), *q);
++ more = 1;
++ }
++ } while(more);
++}
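dx_sort_map() shrinks the gap by the usual comb-sort factor of 1.3 and bumps gaps of 9 or 10 up to 11 before the final bubble pass cleans up. A throwaway print of the gap sequence for an assumed 100-entry map (illustrative only):

#include <stdio.h>

int main(void)
{
        unsigned count = 100;

        while (count > 2) {
                count = count * 10 / 13;
                if (count - 9 < 2)      /* 9, 10 -> 11, as above */
                        count = 11;
                printf("gap %u\n", count);
        }
        return 0;       /* 76 58 44 33 25 19 14 11 8 6 4 3 2 */
}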
++
++static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
++{
++ struct dx_entry *entries = frame->entries;
++ struct dx_entry *old = frame->at, *new = old + 1;
++ int count = dx_get_count(entries);
++
++ assert(count < dx_get_limit(entries));
++ assert(old < entries + count);
++ memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
++ dx_set_hash(new, hash);
++ dx_set_block(new, block);
++ dx_set_count(entries, count + 1);
++}
++#endif
++
++
++static void ext3_update_dx_flag(struct inode *inode)
++{
++ if (!EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
++ EXT3_FEATURE_COMPAT_DIR_INDEX))
++ EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
++}
++
+ /*
+ * NOTE! unlike strncmp, ext3_match returns 1 for success, 0 for failure.
+ *
+@@ -94,6 +736,7 @@
+ return 0;
+ }
+
++
+ /*
+ * ext3_find_entry()
+ *
+@@ -105,6 +748,8 @@
+ * The returned buffer_head has ->b_count elevated. The caller is expected
+ * to brelse() it when appropriate.
+ */
++
++
+ static struct buffer_head * ext3_find_entry (struct dentry *dentry,
+ struct ext3_dir_entry_2 ** res_dir)
+ {
+@@ -119,12 +764,32 @@
+ int num = 0;
+ int nblocks, i, err;
+ struct inode *dir = dentry->d_parent->d_inode;
++ int namelen;
++ const u8 *name;
++ unsigned blocksize;
+
+ *res_dir = NULL;
+ sb = dir->i_sb;
+-
++ blocksize = sb->s_blocksize;
++ namelen = dentry->d_name.len;
++ name = dentry->d_name.name;
++ if (namelen > EXT3_NAME_LEN)
++ return NULL;
++#ifdef CONFIG_EXT3_INDEX
++ if (is_dx(dir)) {
++ bh = ext3_dx_find_entry(dentry, res_dir, &err);
++ /*
++ * On success, or if the error was file not found,
++ * return. Otherwise, fall back to doing a search the
++ * old fashioned way.
++ */
++ if (bh || (err != ERR_BAD_DX_DIR))
++ return bh;
++ dxtrace(printk("ext3_find_entry: dx failed, falling back\n"));
++ }
++#endif
+ nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
+- start = dir->u.ext3_i.i_dir_start_lookup;
++ start = EXT3_I(dir)->i_dir_start_lookup;
+ if (start >= nblocks)
+ start = 0;
+ block = start;
+@@ -165,7 +830,7 @@
+ i = search_dirblock(bh, dir, dentry,
+ block << EXT3_BLOCK_SIZE_BITS(sb), res_dir);
+ if (i == 1) {
+- dir->u.ext3_i.i_dir_start_lookup = block;
++ EXT3_I(dir)->i_dir_start_lookup = block;
+ ret = bh;
+ goto cleanup_and_exit;
+ } else {
+@@ -196,6 +861,66 @@
+ return ret;
+ }
+
++#ifdef CONFIG_EXT3_INDEX
++static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
++ struct ext3_dir_entry_2 **res_dir, int *err)
++{
++ struct super_block * sb;
++ struct dx_hash_info hinfo;
++ u32 hash;
++ struct dx_frame frames[2], *frame;
++ struct ext3_dir_entry_2 *de, *top;
++ struct buffer_head *bh;
++ unsigned long block;
++ int retval;
++ int namelen = dentry->d_name.len;
++ const u8 *name = dentry->d_name.name;
++ struct inode *dir = dentry->d_parent->d_inode;
++
++ sb = dir->i_sb;
++ if (!(frame = dx_probe (dentry, 0, &hinfo, frames, err)))
++ return NULL;
++ hash = hinfo.hash;
++ do {
++ block = dx_get_block(frame->at);
++ if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
++ goto errout;
++ de = (struct ext3_dir_entry_2 *) bh->b_data;
++ top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
++ EXT3_DIR_REC_LEN(0));
++ for (; de < top; de = ext3_next_entry(de))
++ if (ext3_match (namelen, name, de)) {
++ if (!ext3_check_dir_entry("ext3_find_entry",
++ dir, de, bh,
++ (block<<EXT3_BLOCK_SIZE_BITS(sb))
++ +((char *)de - bh->b_data))) {
++ brelse (bh);
++ goto errout;
++ }
++ *res_dir = de;
++ dx_release (frames);
++ return bh;
++ }
++ brelse (bh);
++ /* Check to see if we should continue to search */
++ retval = ext3_htree_next_block(dir, hash, frame,
++ frames, err, 0);
++ if (retval == -1) {
++ ext3_warning(sb, __FUNCTION__,
++ "error reading index page in directory #%lu",
++ dir->i_ino);
++ goto errout;
++ }
++ } while (retval == 1);
++
++ *err = -ENOENT;
++errout:
++ dxtrace(printk("%s not found\n", name));
++ dx_release (frames);
++ return NULL;
++}
++#endif
++
+ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry)
+ {
+ struct inode * inode;
+@@ -212,8 +937,9 @@
+ brelse (bh);
+ inode = iget(dir->i_sb, ino);
+
+- if (!inode)
++ if (!inode) {
+ return ERR_PTR(-EACCES);
++ }
+ }
+ d_add(dentry, inode);
+ return NULL;
+@@ -237,6 +963,300 @@
+ de->file_type = ext3_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
+ }
+
++#ifdef CONFIG_EXT3_INDEX
++static struct ext3_dir_entry_2 *
++dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
++{
++ unsigned rec_len = 0;
++
++ while (count--) {
++ struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *) (from + map->offs);
++ rec_len = EXT3_DIR_REC_LEN(de->name_len);
++ memcpy (to, de, rec_len);
++ ((struct ext3_dir_entry_2 *) to)->rec_len = cpu_to_le16(rec_len);
++ de->inode = 0;
++ map++;
++ to += rec_len;
++ }
++ return (struct ext3_dir_entry_2 *) (to - rec_len);
++}
++
++static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size)
++{
++ struct ext3_dir_entry_2 *next, *to, *prev, *de = (struct ext3_dir_entry_2 *) base;
++ unsigned rec_len = 0;
++
++ prev = to = de;
++ while ((char*)de < base + size) {
++ next = (struct ext3_dir_entry_2 *) ((char *) de +
++ le16_to_cpu(de->rec_len));
++ if (de->inode && de->name_len) {
++ rec_len = EXT3_DIR_REC_LEN(de->name_len);
++ if (de > to)
++ memmove(to, de, rec_len);
++ to->rec_len = cpu_to_le16(rec_len);
++ prev = to;
++ to = (struct ext3_dir_entry_2 *) (((char *) to) + rec_len);
++ }
++ de = next;
++ }
++ return prev;
++}
++
++static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
++ struct buffer_head **bh,struct dx_frame *frame,
++ struct dx_hash_info *hinfo, int *error)
++{
++ unsigned blocksize = dir->i_sb->s_blocksize;
++ unsigned count, continued;
++ struct buffer_head *bh2;
++ u32 newblock;
++ u32 hash2;
++ struct dx_map_entry *map;
++ char *data1 = (*bh)->b_data, *data2;
++ unsigned split;
++ struct ext3_dir_entry_2 *de = NULL, *de2;
++ int err;
++
++ bh2 = ext3_append (handle, dir, &newblock, error);
++ if (!(bh2)) {
++ brelse(*bh);
++ *bh = NULL;
++ goto errout;
++ }
++
++ BUFFER_TRACE(*bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, *bh);
++ if (err) {
++ journal_error:
++ brelse(*bh);
++ brelse(bh2);
++ *bh = NULL;
++ ext3_std_error(dir->i_sb, err);
++ goto errout;
++ }
++ BUFFER_TRACE(frame->bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, frame->bh);
++ if (err)
++ goto journal_error;
++
++ data2 = bh2->b_data;
++
++ /* create map in the end of data2 block */
++ map = (struct dx_map_entry *) (data2 + blocksize);
++ count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
++ blocksize, hinfo, map);
++ map -= count;
++ split = count/2; // need to adjust to actual middle
++ dx_sort_map (map, count);
++ hash2 = map[split].hash;
++ continued = hash2 == map[split - 1].hash;
++ dxtrace(printk("Split block %i at %x, %i/%i\n",
++ dx_get_block(frame->at), hash2, split, count-split));
++
++ /* Fancy dance to stay within two buffers */
++ de2 = dx_move_dirents(data1, data2, map + split, count - split);
++ de = dx_pack_dirents(data1,blocksize);
++ de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
++ de2->rec_len = cpu_to_le16(data2 + blocksize - (char *) de2);
++ dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data1, blocksize, 1));
++ dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data2, blocksize, 1));
++
++ /* Which block gets the new entry? */
++ if (hinfo->hash >= hash2)
++ {
++ swap(*bh, bh2);
++ de = de2;
++ }
++ dx_insert_block (frame, hash2 + continued, newblock);
++ err = ext3_journal_dirty_metadata (handle, bh2);
++ if (err)
++ goto journal_error;
++ err = ext3_journal_dirty_metadata (handle, frame->bh);
++ if (err)
++ goto journal_error;
++ brelse (bh2);
++ dxtrace(dx_show_index ("frame", frame->entries));
++errout:
++ return de;
++}
++#endif
++
++
++/*
++ * Add a new entry into a directory (leaf) block. If de is non-NULL,
++ * it points to a directory entry which is guaranteed to be large
++ * enough for the new directory entry. If de is NULL, then
++ * add_dirent_to_buf will attempt to search the directory block for
++ * space. It will return -ENOSPC if no space is available, -EIO if the
++ * directory block is corrupted, and -EEXIST if the entry already exists.
++ *
++ * NOTE! bh is NOT released in the case where ENOSPC is returned. In
++ * all other cases bh is released.
++ */
++static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
++ struct inode *inode, struct ext3_dir_entry_2 *de,
++ struct buffer_head * bh)
++{
++ struct inode *dir = dentry->d_parent->d_inode;
++ const char *name = dentry->d_name.name;
++ int namelen = dentry->d_name.len;
++ unsigned long offset = 0;
++ unsigned short reclen;
++ int nlen, rlen, err;
++ char *top;
++
++ reclen = EXT3_DIR_REC_LEN(namelen);
++ if (!de) {
++ de = (struct ext3_dir_entry_2 *)bh->b_data;
++ top = bh->b_data + dir->i_sb->s_blocksize - reclen;
++ while ((char *) de <= top) {
++ if (!ext3_check_dir_entry("ext3_add_entry", dir, de,
++ bh, offset)) {
++ brelse (bh);
++ return -EIO;
++ }
++ if (ext3_match (namelen, name, de)) {
++ brelse (bh);
++ return -EEXIST;
++ }
++ nlen = EXT3_DIR_REC_LEN(de->name_len);
++ rlen = le16_to_cpu(de->rec_len);
++ if ((de->inode? rlen - nlen: rlen) >= reclen)
++ break;
++ de = (struct ext3_dir_entry_2 *)((char *)de + rlen);
++ offset += rlen;
++ }
++ if ((char *) de > top)
++ return -ENOSPC;
++ }
++ BUFFER_TRACE(bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, bh);
++ if (err) {
++ ext3_std_error(dir->i_sb, err);
++ brelse(bh);
++ return err;
++ }
++
++ /* By now the buffer is marked for journaling */
++ nlen = EXT3_DIR_REC_LEN(de->name_len);
++ rlen = le16_to_cpu(de->rec_len);
++ if (de->inode) {
++ struct ext3_dir_entry_2 *de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen);
++ de1->rec_len = cpu_to_le16(rlen - nlen);
++ de->rec_len = cpu_to_le16(nlen);
++ de = de1;
++ }
++ de->file_type = EXT3_FT_UNKNOWN;
++ if (inode) {
++ de->inode = cpu_to_le32(inode->i_ino);
++ ext3_set_de_type(dir->i_sb, de, inode->i_mode);
++ } else
++ de->inode = 0;
++ de->name_len = namelen;
++ memcpy (de->name, name, namelen);
++ /*
++ * XXX shouldn't update any times until successful
++ * completion of syscall, but too many callers depend
++ * on this.
++ *
++ * XXX similarly, too many callers depend on
++ * ext3_new_inode() setting the times, but error
++ * recovery deletes the inode, so the worst that can
++ * happen is that the times are slightly out of date
++ * and/or different from the directory change time.
++ */
++ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
++ ext3_update_dx_flag(dir);
++ dir->i_version = ++event;
++ ext3_mark_inode_dirty(handle, dir);
++ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
++ err = ext3_journal_dirty_metadata(handle, bh);
++ if (err)
++ ext3_std_error(dir->i_sb, err);
++ brelse(bh);
++ return 0;
++}
++
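
The space search in add_dirent_to_buf() treats an entry as usable either when it is unused (inode == 0) and its record is long enough, or when a live entry's rec_len has enough slack beyond what its own name needs, in which case the record is split in two. A small standalone restatement of that test; the DIR_REC_LEN macro below mirrors EXT3_DIR_REC_LEN's 8-byte header and 4-byte rounding:

#include <stdio.h>

/* name length + 8-byte dirent header, rounded up to 4 bytes */
#define DIR_REC_LEN(name_len)  (((name_len) + 8 + 3) & ~3)

/* Can a new name of 'namelen' bytes be carved out of an existing entry
 * whose on-disk record length is 'rlen' and whose own name is
 * 'cur_namelen' bytes long ('in_use' says whether it points at an inode)? */
static int entry_has_room(int in_use, int cur_namelen, int rlen, int namelen)
{
    int nlen   = DIR_REC_LEN(cur_namelen);  /* space the current name needs */
    int reclen = DIR_REC_LEN(namelen);      /* space the new name needs     */

    return (in_use ? rlen - nlen : rlen) >= reclen;
}

int main(void)
{
    printf("%d\n", entry_has_room(0, 16, 24, 11));  /* 1: unused 24-byte slot */
    printf("%d\n", entry_has_room(1,  3, 52, 11));  /* 1: split a padded record */
    printf("%d\n", entry_has_room(1,  3, 20, 11));  /* 0: no slack left */
    return 0;
}
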
++#ifdef CONFIG_EXT3_INDEX
++/*
++ * This converts a one block unindexed directory to a 3 block indexed
++ * directory, and adds the dentry to the indexed directory.
++ */
++static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
++ struct inode *inode, struct buffer_head *bh)
++{
++ struct inode *dir = dentry->d_parent->d_inode;
++ const char *name = dentry->d_name.name;
++ int namelen = dentry->d_name.len;
++ struct buffer_head *bh2;
++ struct dx_root *root;
++ struct dx_frame frames[2], *frame;
++ struct dx_entry *entries;
++ struct ext3_dir_entry_2 *de, *de2;
++ char *data1, *top;
++ unsigned len;
++ int retval;
++ unsigned blocksize;
++ struct dx_hash_info hinfo;
++ u32 block;
++
++ blocksize = dir->i_sb->s_blocksize;
++ dxtrace(printk("Creating index\n"));
++ retval = ext3_journal_get_write_access(handle, bh);
++ if (retval) {
++ ext3_std_error(dir->i_sb, retval);
++ brelse(bh);
++ return retval;
++ }
++ root = (struct dx_root *) bh->b_data;
++
++ EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
++ bh2 = ext3_append (handle, dir, &block, &retval);
++ if (!(bh2)) {
++ brelse(bh);
++ return retval;
++ }
++ data1 = bh2->b_data;
++
++ /* The 0th block becomes the root, move the dirents out */
++ de = (struct ext3_dir_entry_2 *) &root->info;
++ len = ((char *) root) + blocksize - (char *) de;
++ memcpy (data1, de, len);
++ de = (struct ext3_dir_entry_2 *) data1;
++ top = data1 + len;
++ while (((char *) de2=(char*)de+le16_to_cpu(de->rec_len)) < top)
++ de = de2;
++ de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
++ /* Initialize the root; the dot dirents already exist */
++ de = (struct ext3_dir_entry_2 *) (&root->dotdot);
++ de->rec_len = cpu_to_le16(blocksize - EXT3_DIR_REC_LEN(2));
++ memset (&root->info, 0, sizeof(root->info));
++ root->info.info_length = sizeof(root->info);
++ root->info.hash_version = dir->i_sb->u.ext3_sb.s_def_hash_version;
++ entries = root->entries;
++ dx_set_block (entries, 1);
++ dx_set_count (entries, 1);
++ dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
++
++ /* Initialize as for dx_probe */
++ hinfo.hash_version = root->info.hash_version;
++ hinfo.seed = dir->i_sb->u.ext3_sb.s_hash_seed;
++ ext3fs_dirhash(name, namelen, &hinfo);
++ frame = frames;
++ frame->entries = entries;
++ frame->at = entries;
++ frame->bh = bh;
++ bh = bh2;
++ de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
++ dx_release (frames);
++ if (!(de))
++ return retval;
++
++ return add_dirent_to_buf(handle, dentry, inode, de, bh);
++}
++#endif
++
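
make_indexed_dir() above leaves ".", ".." and the dx_root_info header in block 0, copies every other dirent into a freshly appended block, and lets do_split() cut that block in two, so a converted directory starts life as one root plus two leaves. The sketch below reproduces the dx_root_limit() arithmetic to show how many index entries the root block can then hold, assuming the 8-byte dx_entry and 4-byte dirent rounding used in this patch:

#include <stdio.h>

#define DIR_REC_LEN(name_len)  (((name_len) + 8 + 3) & ~3)

/* Index entries usable in the root block: whatever is left after ".",
 * ".." and the 8-byte dx_root_info header, divided by sizeof(dx_entry). */
static unsigned root_index_capacity(unsigned blocksize, unsigned infosize)
{
    unsigned entry_space = blocksize - DIR_REC_LEN(1)   /* "."  */
                                     - DIR_REC_LEN(2)   /* ".." */
                                     - infosize;
    return entry_space / 8;             /* sizeof(struct dx_entry) == 8 */
}

int main(void)
{
    /* With 4096-byte blocks and the 8-byte info header the root can
     * reference roughly 508 leaf or index blocks. */
    printf("%u\n", root_index_capacity(4096, 8));
    return 0;
}
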
+ /*
+ * ext3_add_entry()
+ *
+@@ -247,127 +1267,198 @@
+ * may not sleep between calling this and putting something into
+ * the entry, as someone else might have used it while you slept.
+ */
+-
+-/*
+- * AKPM: the journalling code here looks wrong on the error paths
+- */
+ static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
+ struct inode *inode)
+ {
+ struct inode *dir = dentry->d_parent->d_inode;
+- const char *name = dentry->d_name.name;
+- int namelen = dentry->d_name.len;
+ unsigned long offset;
+- unsigned short rec_len;
+ struct buffer_head * bh;
+- struct ext3_dir_entry_2 * de, * de1;
++ struct ext3_dir_entry_2 *de;
+ struct super_block * sb;
+ int retval;
++#ifdef CONFIG_EXT3_INDEX
++ int dx_fallback=0;
++#endif
++ unsigned blocksize;
++ unsigned nlen, rlen;
++ u32 block, blocks;
+
+ sb = dir->i_sb;
+-
+- if (!namelen)
++ blocksize = sb->s_blocksize;
++ if (!dentry->d_name.len)
+ return -EINVAL;
+- bh = ext3_bread (handle, dir, 0, 0, &retval);
++#ifdef CONFIG_EXT3_INDEX
++ if (is_dx(dir)) {
++ retval = ext3_dx_add_entry(handle, dentry, inode);
++ if (!retval || (retval != ERR_BAD_DX_DIR))
++ return retval;
++ EXT3_I(dir)->i_flags &= ~EXT3_INDEX_FL;
++ dx_fallback++;
++ ext3_mark_inode_dirty(handle, dir);
++ }
++#endif
++ blocks = dir->i_size >> sb->s_blocksize_bits;
++ for (block = 0, offset = 0; block < blocks; block++) {
++ bh = ext3_bread(handle, dir, block, 0, &retval);
++ if(!bh)
++ return retval;
++ retval = add_dirent_to_buf(handle, dentry, inode, 0, bh);
++ if (retval != -ENOSPC)
++ return retval;
++
++#ifdef CONFIG_EXT3_INDEX
++ if (blocks == 1 && !dx_fallback &&
++ EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
++ return make_indexed_dir(handle, dentry, inode, bh);
++#endif
++ brelse(bh);
++ }
++ bh = ext3_append(handle, dir, &block, &retval);
+ if (!bh)
+ return retval;
+- rec_len = EXT3_DIR_REC_LEN(namelen);
+- offset = 0;
+ de = (struct ext3_dir_entry_2 *) bh->b_data;
+- while (1) {
+- if ((char *)de >= sb->s_blocksize + bh->b_data) {
+- brelse (bh);
+- bh = NULL;
+- bh = ext3_bread (handle, dir,
+- offset >> EXT3_BLOCK_SIZE_BITS(sb), 1, &retval);
+- if (!bh)
+- return retval;
+- if (dir->i_size <= offset) {
+- if (dir->i_size == 0) {
+- brelse(bh);
+- return -ENOENT;
+- }
++ de->inode = 0;
++ de->rec_len = cpu_to_le16(rlen = blocksize);
++ nlen = 0;
++ return add_dirent_to_buf(handle, dentry, inode, de, bh);
++}
+
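
The rewritten ext3_add_entry() above is a three-step fallback: an indexed directory goes through ext3_dx_add_entry(); if that reports ERR_BAD_DX_DIR the index flag is dropped and the linear scan handles this insert; and a one-block unindexed directory that runs out of room is promoted by make_indexed_dir(). The sketch below restates that control flow with the kernel calls replaced by hypothetical stubs:

#include <errno.h>
#include <stdio.h>

#define ERR_BAD_DX_DIR  (-75000)   /* same value this patch defines */

/* Hypothetical stubs standing in for the kernel helpers; each returns 0
 * on success or a negative errno. */
static int dx_add(const char *name)               { (void)name; return ERR_BAD_DX_DIR; }
static int linear_add(const char *name, int blk)  { (void)name; (void)blk; return -ENOSPC; }
static int promote_to_index(const char *name)     { (void)name; return 0; }
static int append_block_and_add(const char *name) { (void)name; return 0; }

static int add_entry(const char *name, int is_indexed, int nblocks)
{
    int dx_fallback = 0, block, rc;

    if (is_indexed) {
        rc = dx_add(name);
        if (rc != ERR_BAD_DX_DIR)
            return rc;              /* success, or a real error */
        dx_fallback = 1;            /* corrupt index: use the linear path */
    }
    for (block = 0; block < nblocks; block++) {
        rc = linear_add(name, block);
        if (rc != -ENOSPC)
            return rc;
        if (nblocks == 1 && !dx_fallback)
            return promote_to_index(name);  /* one full block: build htree */
    }
    return append_block_and_add(name);      /* grow the directory instead */
}

int main(void)
{
    printf("%d\n", add_entry("newfile", 0, 1));  /* 0: promoted to an htree */
    return 0;
}
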
+- ext3_debug ("creating next block\n");
++#ifdef CONFIG_EXT3_INDEX
++/*
++ * Returns 0 for success, or a negative error value
++ */
++static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
++ struct inode *inode)
++{
++ struct dx_frame frames[2], *frame;
++ struct dx_entry *entries, *at;
++ struct dx_hash_info hinfo;
++ struct buffer_head * bh;
++ struct inode *dir = dentry->d_parent->d_inode;
++ struct super_block * sb = dir->i_sb;
++ struct ext3_dir_entry_2 *de;
++ int err;
+
+- BUFFER_TRACE(bh, "get_write_access");
+- ext3_journal_get_write_access(handle, bh);
+- de = (struct ext3_dir_entry_2 *) bh->b_data;
+- de->inode = 0;
+- de->rec_len = le16_to_cpu(sb->s_blocksize);
+- dir->u.ext3_i.i_disksize =
+- dir->i_size = offset + sb->s_blocksize;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
+- ext3_mark_inode_dirty(handle, dir);
+- } else {
++ frame = dx_probe(dentry, 0, &hinfo, frames, &err);
++ if (!frame)
++ return err;
++ entries = frame->entries;
++ at = frame->at;
+
+- ext3_debug ("skipping to next block\n");
++ if (!(bh = ext3_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
++ goto cleanup;
+
+- de = (struct ext3_dir_entry_2 *) bh->b_data;
+- }
+- }
+- if (!ext3_check_dir_entry ("ext3_add_entry", dir, de, bh,
+- offset)) {
+- brelse (bh);
+- return -ENOENT;
+- }
+- if (ext3_match (namelen, name, de)) {
+- brelse (bh);
+- return -EEXIST;
++ BUFFER_TRACE(bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, bh);
++ if (err)
++ goto journal_error;
++
++ err = add_dirent_to_buf(handle, dentry, inode, 0, bh);
++ if (err != -ENOSPC) {
++ bh = 0;
++ goto cleanup;
++ }
++
++ /* Block full, should compress but for now just split */
++ dxtrace(printk("using %u of %u node entries\n",
++ dx_get_count(entries), dx_get_limit(entries)));
++ /* Need to split index? */
++ if (dx_get_count(entries) == dx_get_limit(entries)) {
++ u32 newblock;
++ unsigned icount = dx_get_count(entries);
++ int levels = frame - frames;
++ struct dx_entry *entries2;
++ struct dx_node *node2;
++ struct buffer_head *bh2;
++
++ if (levels && (dx_get_count(frames->entries) ==
++ dx_get_limit(frames->entries))) {
++ ext3_warning(sb, __FUNCTION__,
++ "Directory index full!\n");
++ err = -ENOSPC;
++ goto cleanup;
+ }
+- if ((le32_to_cpu(de->inode) == 0 &&
+- le16_to_cpu(de->rec_len) >= rec_len) ||
+- (le16_to_cpu(de->rec_len) >=
+- EXT3_DIR_REC_LEN(de->name_len) + rec_len)) {
+- BUFFER_TRACE(bh, "get_write_access");
+- ext3_journal_get_write_access(handle, bh);
+- /* By now the buffer is marked for journaling */
+- offset += le16_to_cpu(de->rec_len);
+- if (le32_to_cpu(de->inode)) {
+- de1 = (struct ext3_dir_entry_2 *) ((char *) de +
+- EXT3_DIR_REC_LEN(de->name_len));
+- de1->rec_len =
+- cpu_to_le16(le16_to_cpu(de->rec_len) -
+- EXT3_DIR_REC_LEN(de->name_len));
+- de->rec_len = cpu_to_le16(
+- EXT3_DIR_REC_LEN(de->name_len));
+- de = de1;
++ bh2 = ext3_append (handle, dir, &newblock, &err);
++ if (!(bh2))
++ goto cleanup;
++ node2 = (struct dx_node *)(bh2->b_data);
++ entries2 = node2->entries;
++ node2->fake.rec_len = cpu_to_le16(sb->s_blocksize);
++ node2->fake.inode = 0;
++ BUFFER_TRACE(frame->bh, "get_write_access");
++ err = ext3_journal_get_write_access(handle, frame->bh);
++ if (err)
++ goto journal_error;
++ if (levels) {
++ unsigned icount1 = icount/2, icount2 = icount - icount1;
++ unsigned hash2 = dx_get_hash(entries + icount1);
++ dxtrace(printk("Split index %i/%i\n", icount1, icount2));
++
++ BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
++ err = ext3_journal_get_write_access(handle,
++ frames[0].bh);
++ if (err)
++ goto journal_error;
++
++ memcpy ((char *) entries2, (char *) (entries + icount1),
++ icount2 * sizeof(struct dx_entry));
++ dx_set_count (entries, icount1);
++ dx_set_count (entries2, icount2);
++ dx_set_limit (entries2, dx_node_limit(dir));
++
++ /* Which index block gets the new entry? */
++ if (at - entries >= icount1) {
++ frame->at = at = at - entries - icount1 + entries2;
++ frame->entries = entries = entries2;
++ swap(frame->bh, bh2);
+ }
+- de->file_type = EXT3_FT_UNKNOWN;
+- if (inode) {
+- de->inode = cpu_to_le32(inode->i_ino);
+- ext3_set_de_type(dir->i_sb, de, inode->i_mode);
+- } else
+- de->inode = 0;
+- de->name_len = namelen;
+- memcpy (de->name, name, namelen);
+- /*
+- * XXX shouldn't update any times until successful
+- * completion of syscall, but too many callers depend
+- * on this.
+- *
+- * XXX similarly, too many callers depend on
+- * ext3_new_inode() setting the times, but error
+- * recovery deletes the inode, so the worst that can
+- * happen is that the times are slightly out of date
+- * and/or different from the directory change time.
+- */
+- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
+- dir->i_version = ++event;
+- ext3_mark_inode_dirty(handle, dir);
+- BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+- ext3_journal_dirty_metadata(handle, bh);
+- brelse(bh);
+- return 0;
++ dx_insert_block (frames + 0, hash2, newblock);
++ dxtrace(dx_show_index ("node", frames[1].entries));
++ dxtrace(dx_show_index ("node",
++ ((struct dx_node *) bh2->b_data)->entries));
++ err = ext3_journal_dirty_metadata(handle, bh2);
++ if (err)
++ goto journal_error;
++ brelse (bh2);
++ } else {
++ dxtrace(printk("Creating second level index...\n"));
++ memcpy((char *) entries2, (char *) entries,
++ icount * sizeof(struct dx_entry));
++ dx_set_limit(entries2, dx_node_limit(dir));
++
++ /* Set up root */
++ dx_set_count(entries, 1);
++ dx_set_block(entries + 0, newblock);
++ ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
++
++ /* Add new access path frame */
++ frame = frames + 1;
++ frame->at = at = at - entries + entries2;
++ frame->entries = entries = entries2;
++ frame->bh = bh2;
++ err = ext3_journal_get_write_access(handle,
++ frame->bh);
++ if (err)
++ goto journal_error;
+ }
+- offset += le16_to_cpu(de->rec_len);
+- de = (struct ext3_dir_entry_2 *)
+- ((char *) de + le16_to_cpu(de->rec_len));
++ ext3_journal_dirty_metadata(handle, frames[0].bh);
+ }
+- brelse (bh);
+- return -ENOSPC;
++ de = do_split(handle, dir, &bh, frame, &hinfo, &err);
++ if (!de)
++ goto cleanup;
++ err = add_dirent_to_buf(handle, dentry, inode, de, bh);
++ bh = 0;
++ goto cleanup;
++
++journal_error:
++ ext3_std_error(dir->i_sb, err);
++cleanup:
++ if (bh)
++ brelse(bh);
++ dx_release(frames);
++ return err;
+ }
++#endif
+
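
ext3_dx_add_entry() above grows the index in two ways. With two levels already present, a full leaf parent is split down the middle and the median hash is inserted into the root (the "levels" branch); with only a root, the whole root array is copied into a new node, the root is rewritten to point at that node, and indirect_levels becomes 1. A small sketch of the first case on plain arrays, assuming the 8-byte {hash, block} entry layout:

#include <stdio.h>
#include <string.h>

struct dx_ent { unsigned hash, block; };

/* Split a full index node in two: keep the lower half in 'node', move the
 * upper half into 'node2', and return the hash the parent must record for
 * node2 (compare with the levels != 0 branch of ext3_dx_add_entry). */
static unsigned split_index_node(struct dx_ent *node, unsigned *count,
                                 struct dx_ent *node2, unsigned *count2)
{
    unsigned icount1 = *count / 2, icount2 = *count - icount1;

    memcpy(node2, node + icount1, icount2 * sizeof(*node2));
    *count = icount1;
    *count2 = icount2;
    return node2[0].hash;        /* key for dx_insert_block() in the parent */
}

int main(void)
{
    struct dx_ent node[4] = { {0, 1}, {0x40, 2}, {0x80, 3}, {0xc0, 4} };
    struct dx_ent node2[4];
    unsigned count = 4, count2;
    unsigned key = split_index_node(node, &count, node2, &count2);

    printf("lower=%u upper=%u parent key=%#x\n", count, count2, key);
    return 0;
}
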
+ /*
+ * ext3_delete_entry deletes a directory entry by merging it with the
+@@ -451,9 +1542,11 @@
+ struct inode * inode;
+ int err;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 3);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -478,9 +1571,11 @@
+ struct inode *inode;
+ int err;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 3);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -507,9 +1602,11 @@
+ if (dir->i_nlink >= EXT3_LINK_MAX)
+ return -EMLINK;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 3);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -521,7 +1618,7 @@
+
+ inode->i_op = &ext3_dir_inode_operations;
+ inode->i_fop = &ext3_dir_operations;
+- inode->i_size = inode->u.ext3_i.i_disksize = inode->i_sb->s_blocksize;
++ inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+ inode->i_blocks = 0;
+ dir_block = ext3_bread (handle, inode, 0, 1, &err);
+ if (!dir_block) {
+@@ -554,21 +1651,19 @@
+ inode->i_mode |= S_ISGID;
+ ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_entry (handle, dentry, inode);
+- if (err)
+- goto out_no_entry;
++ if (err) {
++ inode->i_nlink = 0;
++ ext3_mark_inode_dirty(handle, inode);
++ iput (inode);
++ goto out_stop;
++ }
+ dir->i_nlink++;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(dir);
+ ext3_mark_inode_dirty(handle, dir);
+ d_instantiate(dentry, inode);
+ out_stop:
+ ext3_journal_stop(handle, dir);
+ return err;
+-
+-out_no_entry:
+- inode->i_nlink = 0;
+- ext3_mark_inode_dirty(handle, inode);
+- iput (inode);
+- goto out_stop;
+ }
+
+ /*
+@@ -655,7 +1750,7 @@
+ int err = 0, rc;
+
+ lock_super(sb);
+- if (!list_empty(&inode->u.ext3_i.i_orphan))
++ if (!list_empty(&EXT3_I(inode)->i_orphan))
+ goto out_unlock;
+
+ /* Orphan handling is only valid for files with data blocks
+@@ -696,7 +1791,7 @@
+ * This is safe: on error we're going to ignore the orphan list
+ * anyway on the next recovery. */
+ if (!err)
+- list_add(&inode->u.ext3_i.i_orphan, &EXT3_SB(sb)->s_orphan);
++ list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
+
+ jbd_debug(4, "superblock will point to %ld\n", inode->i_ino);
+ jbd_debug(4, "orphan inode %ld will point to %d\n",
+@@ -714,25 +1809,26 @@
+ int ext3_orphan_del(handle_t *handle, struct inode *inode)
+ {
+ struct list_head *prev;
++ struct ext3_inode_info *ei = EXT3_I(inode);
+ struct ext3_sb_info *sbi;
+ ino_t ino_next;
+ struct ext3_iloc iloc;
+ int err = 0;
+
+ lock_super(inode->i_sb);
+- if (list_empty(&inode->u.ext3_i.i_orphan)) {
++ if (list_empty(&ei->i_orphan)) {
+ unlock_super(inode->i_sb);
+ return 0;
+ }
+
+ ino_next = NEXT_ORPHAN(inode);
+- prev = inode->u.ext3_i.i_orphan.prev;
++ prev = ei->i_orphan.prev;
+ sbi = EXT3_SB(inode->i_sb);
+
+ jbd_debug(4, "remove inode %ld from orphan list\n", inode->i_ino);
+
+- list_del(&inode->u.ext3_i.i_orphan);
+- INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);
++ list_del(&ei->i_orphan);
++ INIT_LIST_HEAD(&ei->i_orphan);
+
+ /* If we're on an error path, we may not have a valid
+ * transaction handle with which to update the orphan list on
+@@ -793,8 +1889,9 @@
+ handle_t *handle;
+
+ handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS);
+- if (IS_ERR(handle))
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ retval = -ENOENT;
+ bh = ext3_find_entry (dentry, &de);
+@@ -832,7 +1929,7 @@
+ dir->i_nlink--;
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ ext3_mark_inode_dirty(handle, inode);
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(dir);
+ ext3_mark_inode_dirty(handle, dir);
+
+ end_rmdir:
+@@ -850,8 +1947,9 @@
+ handle_t *handle;
+
+ handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS);
+- if (IS_ERR(handle))
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -878,7 +1976,7 @@
+ if (retval)
+ goto end_unlink;
+ dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(dir);
+ ext3_mark_inode_dirty(handle, dir);
+ inode->i_nlink--;
+ if (!inode->i_nlink)
+@@ -904,9 +2002,11 @@
+ if (l > dir->i_sb->s_blocksize)
+ return -ENAMETOOLONG;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 5);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -916,7 +2016,7 @@
+ if (IS_ERR(inode))
+ goto out_stop;
+
+- if (l > sizeof (inode->u.ext3_i.i_data)) {
++ if (l > sizeof (EXT3_I(inode)->i_data)) {
+ inode->i_op = &page_symlink_inode_operations;
+ inode->i_mapping->a_ops = &ext3_aops;
+ /*
+@@ -925,25 +2025,23 @@
+ * i_size in generic_commit_write().
+ */
+ err = block_symlink(inode, symname, l);
+- if (err)
+- goto out_no_entry;
++ if (err) {
++ ext3_dec_count(handle, inode);
++ ext3_mark_inode_dirty(handle, inode);
++ iput (inode);
++ goto out_stop;
++ }
+ } else {
+ inode->i_op = &ext3_fast_symlink_inode_operations;
+- memcpy((char*)&inode->u.ext3_i.i_data,symname,l);
++ memcpy((char*)&EXT3_I(inode)->i_data,symname,l);
+ inode->i_size = l-1;
+ }
+- inode->u.ext3_i.i_disksize = inode->i_size;
++ EXT3_I(inode)->i_disksize = inode->i_size;
+ err = ext3_add_nondir(handle, dentry, inode);
+ ext3_mark_inode_dirty(handle, inode);
+ out_stop:
+ ext3_journal_stop(handle, dir);
+ return err;
+-
+-out_no_entry:
+- ext3_dec_count(handle, inode);
+- ext3_mark_inode_dirty(handle, inode);
+- iput (inode);
+- goto out_stop;
+ }
+
+ static int ext3_link (struct dentry * old_dentry,
+@@ -956,12 +2054,15 @@
+ if (S_ISDIR(inode->i_mode))
+ return -EPERM;
+
+- if (inode->i_nlink >= EXT3_LINK_MAX)
++ if (inode->i_nlink >= EXT3_LINK_MAX) {
+ return -EMLINK;
++ }
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+@@ -995,9 +2096,11 @@
+
+ old_bh = new_bh = dir_bh = NULL;
+
+- handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS + 2);
+- if (IS_ERR(handle))
++ handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
++ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
++ }
+
+ if (IS_SYNC(old_dir) || IS_SYNC(new_dir))
+ handle->h_sync = 1;
+@@ -1070,14 +2173,33 @@
+ /*
+ * ok, that's it
+ */
+- ext3_delete_entry(handle, old_dir, old_de, old_bh);
++ retval = ext3_delete_entry(handle, old_dir, old_de, old_bh);
++ if (retval == -ENOENT) {
++ /*
++ * old_de could have moved out from under us.
++ */
++ struct buffer_head *old_bh2;
++ struct ext3_dir_entry_2 *old_de2;
++
++ old_bh2 = ext3_find_entry(old_dentry, &old_de2);
++ if (old_bh2) {
++ retval = ext3_delete_entry(handle, old_dir,
++ old_de2, old_bh2);
++ brelse(old_bh2);
++ }
++ }
++ if (retval) {
++ ext3_warning(old_dir->i_sb, "ext3_rename",
++ "Deleting old file (%lu), %d, error=%d",
++ old_dir->i_ino, old_dir->i_nlink, retval);
++ }
+
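
The retry added to ext3_rename() above exists because inserting the new name may have split or compacted the leaf that held old_de, leaving the buffer and offset from the earlier ext3_find_entry() stale; when the first delete comes back -ENOENT, the entry is simply looked up again in its new location. The pattern, reduced to hypothetical stubs:

#include <errno.h>
#include <stdio.h>

/* Stand-ins only: delete_cached() works on the dirent location found
 * before the new name was inserted; refind_and_delete() repeats the
 * lookup, as the rename path above does when that location went stale. */
static int delete_cached(void)     { return -ENOENT; }  /* leaf was split */
static int refind_and_delete(void) { return 0; }

int main(void)
{
    int rc = delete_cached();

    if (rc == -ENOENT)             /* old_de moved during the htree insert */
        rc = refind_and_delete();
    if (rc)
        fprintf(stderr, "rename: could not remove old entry: %d\n", rc);
    return rc;
}
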
+ if (new_inode) {
+ new_inode->i_nlink--;
+ new_inode->i_ctime = CURRENT_TIME;
+ }
+ old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
+- old_dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(old_dir);
+ if (dir_bh) {
+ BUFFER_TRACE(dir_bh, "get_write_access");
+ ext3_journal_get_write_access(handle, dir_bh);
+@@ -1089,7 +2211,7 @@
+ new_inode->i_nlink--;
+ } else {
+ new_dir->i_nlink++;
+- new_dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(new_dir);
+ ext3_mark_inode_dirty(handle, new_dir);
+ }
+ }
+diff -Nru a/fs/ext3/super.c b/fs/ext3/super.c
+--- a/fs/ext3/super.c Thu Nov 7 11:58:05 2002
++++ b/fs/ext3/super.c Thu Nov 7 11:58:05 2002
+@@ -106,32 +106,6 @@
+
+ static char error_buf[1024];
+
+-/* Determine the appropriate response to ext3_error on a given filesystem */
+-
+-static int ext3_error_behaviour(struct super_block *sb)
+-{
+- /* First check for mount-time options */
+- if (test_opt (sb, ERRORS_PANIC))
+- return EXT3_ERRORS_PANIC;
+- if (test_opt (sb, ERRORS_RO))
+- return EXT3_ERRORS_RO;
+- if (test_opt (sb, ERRORS_CONT))
+- return EXT3_ERRORS_CONTINUE;
+-
+- /* If no overrides were specified on the mount, then fall back
+- * to the default behaviour set in the filesystem's superblock
+- * on disk. */
+- switch (le16_to_cpu(sb->u.ext3_sb.s_es->s_errors)) {
+- case EXT3_ERRORS_PANIC:
+- return EXT3_ERRORS_PANIC;
+- case EXT3_ERRORS_RO:
+- return EXT3_ERRORS_RO;
+- default:
+- break;
+- }
+- return EXT3_ERRORS_CONTINUE;
+-}
+-
+ /* Deal with the reporting of failure conditions on a filesystem such as
+ * inconsistencies detected or read IO failures.
+ *
+@@ -157,18 +131,15 @@
+ if (sb->s_flags & MS_RDONLY)
+ return;
+
+- if (ext3_error_behaviour(sb) != EXT3_ERRORS_CONTINUE) {
+- EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
+- journal_abort(EXT3_SB(sb)->s_journal, -EIO);
+- }
+-
+- if (ext3_error_behaviour(sb) == EXT3_ERRORS_PANIC)
++ if (test_opt (sb, ERRORS_PANIC))
+ panic ("EXT3-fs (device %s): panic forced after error\n",
+ bdevname(sb->s_dev));
+-
+- if (ext3_error_behaviour(sb) == EXT3_ERRORS_RO) {
++ if (test_opt (sb, ERRORS_RO)) {
+ printk (KERN_CRIT "Remounting filesystem read-only\n");
+ sb->s_flags |= MS_RDONLY;
++ } else {
++ EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
++ journal_abort(EXT3_SB(sb)->s_journal, -EIO);
+ }
+
+ ext3_commit_super(sb, es, 1);
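
The super.c hunk above folds ext3_error_behaviour() away: the on-disk s_errors default is now translated into ERRORS_PANIC / ERRORS_RO mount flags at mount time (see the ext3_read_super change further down), so the error path only has to test the mount options, and anything that is neither "panic" nor "remount-ro" aborts the journal and continues. A condensed view of the resulting decision order:

#include <stdio.h>

/* Mirror of the simplified ext3_handle_error() decision: the mount options
 * already reflect either an explicit errors= option or the superblock's
 * s_errors default, so only two flags need testing. */
static const char *on_error(int opt_panic, int opt_ro)
{
    if (opt_panic)
        return "panic";
    if (opt_ro)
        return "remount read-only";
    return "abort journal, continue";   /* the errors=continue case */
}

int main(void)
{
    printf("%s\n", on_error(0, 1));
    printf("%s\n", on_error(0, 0));
    return 0;
}
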
+@@ -258,7 +229,7 @@
+ vsprintf (error_buf, fmt, args);
+ va_end (args);
+
+- if (ext3_error_behaviour(sb) == EXT3_ERRORS_PANIC)
++ if (test_opt (sb, ERRORS_PANIC))
+ panic ("EXT3-fs panic (device %s): %s: %s\n",
+ bdevname(sb->s_dev), function, error_buf);
+
+@@ -416,6 +387,7 @@
+ for (i = 0; i < sbi->s_gdb_count; i++)
+ brelse(sbi->s_group_desc[i]);
+ kfree(sbi->s_group_desc);
++ kfree(sbi->s_debts);
+ for (i = 0; i < EXT3_MAX_GROUP_LOADED; i++)
+ brelse(sbi->s_inode_bitmap[i]);
+ for (i = 0; i < EXT3_MAX_GROUP_LOADED; i++)
+@@ -490,17 +462,32 @@
+ return 0;
+ }
+
++static unsigned long get_sb_block(void **data)
++{
++ unsigned long sb_block;
++ char *options = (char *) *data;
++
++ if (!options || strncmp(options, "sb=", 3) != 0)
++ return 1; /* Default location */
++ options += 3;
++ sb_block = simple_strtoul(options, &options, 0);
++ if (*options && *options != ',') {
++ printk("EXT3-fs: Invalid sb specification: %s\n",
++ (char *) *data);
++ return 1;
++ }
++ if (*options == ',')
++ options++;
++ *data = (void *) options;
++ return sb_block;
++}
++
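
get_sb_block() above peels a leading "sb=N" off the option string before the superblock is read, because the alternate superblock location has to be known before any other option can matter; N is in 1 kB units, so "sb=8193" typically names the first backup superblock on a 1 kB-block filesystem. A userspace imitation of the parse:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Imitation of get_sb_block(): consume a leading "sb=N" from the mount
 * option string, returning N, or 1 (the default location) otherwise. */
static unsigned long parse_sb_block(char **options)
{
    char *opts = *options, *end;
    unsigned long sb_block;

    if (!opts || strncmp(opts, "sb=", 3) != 0)
        return 1;
    sb_block = strtoul(opts + 3, &end, 0);
    if (*end && *end != ',') {
        fprintf(stderr, "invalid sb specification: %s\n", opts);
        return 1;
    }
    if (*end == ',')
        end++;
    *options = end;             /* hand the rest to the normal parser */
    return sb_block;
}

int main(void)
{
    char buf[] = "sb=8193,errors=remount-ro";
    char *opts = buf;

    printf("superblock at %lu, remaining options: %s\n",
           parse_sb_block(&opts), opts);
    return 0;
}
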
+ /*
+ * This function has been shamelessly adapted from the msdos fs
+ */
+-static int parse_options (char * options, unsigned long * sb_block,
+- struct ext3_sb_info *sbi,
+- unsigned long * inum,
+- int is_remount)
+-{
+- unsigned long *mount_options = &sbi->s_mount_opt;
+- uid_t *resuid = &sbi->s_resuid;
+- gid_t *resgid = &sbi->s_resgid;
++static int parse_options (char * options, struct ext3_sb_info *sbi,
++ unsigned long * inum, int is_remount)
++{
+ char * this_char;
+ char * value;
+
+@@ -512,42 +499,42 @@
+ if ((value = strchr (this_char, '=')) != NULL)
+ *value++ = 0;
+ if (!strcmp (this_char, "bsddf"))
+- clear_opt (*mount_options, MINIX_DF);
++ clear_opt (sbi->s_mount_opt, MINIX_DF);
+ else if (!strcmp (this_char, "nouid32")) {
+- set_opt (*mount_options, NO_UID32);
++ set_opt (sbi->s_mount_opt, NO_UID32);
+ }
+ else if (!strcmp (this_char, "abort"))
+- set_opt (*mount_options, ABORT);
++ set_opt (sbi->s_mount_opt, ABORT);
+ else if (!strcmp (this_char, "check")) {
+ if (!value || !*value || !strcmp (value, "none"))
+- clear_opt (*mount_options, CHECK);
++ clear_opt (sbi->s_mount_opt, CHECK);
+ else
+ #ifdef CONFIG_EXT3_CHECK
+- set_opt (*mount_options, CHECK);
++ set_opt (sbi->s_mount_opt, CHECK);
+ #else
+ printk(KERN_ERR
+ "EXT3 Check option not supported\n");
+ #endif
+ }
+ else if (!strcmp (this_char, "debug"))
+- set_opt (*mount_options, DEBUG);
++ set_opt (sbi->s_mount_opt, DEBUG);
+ else if (!strcmp (this_char, "errors")) {
+ if (want_value(value, "errors"))
+ return 0;
+ if (!strcmp (value, "continue")) {
+- clear_opt (*mount_options, ERRORS_RO);
+- clear_opt (*mount_options, ERRORS_PANIC);
+- set_opt (*mount_options, ERRORS_CONT);
++ clear_opt (sbi->s_mount_opt, ERRORS_RO);
++ clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
++ set_opt (sbi->s_mount_opt, ERRORS_CONT);
+ }
+ else if (!strcmp (value, "remount-ro")) {
+- clear_opt (*mount_options, ERRORS_CONT);
+- clear_opt (*mount_options, ERRORS_PANIC);
+- set_opt (*mount_options, ERRORS_RO);
++ clear_opt (sbi->s_mount_opt, ERRORS_CONT);
++ clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
++ set_opt (sbi->s_mount_opt, ERRORS_RO);
+ }
+ else if (!strcmp (value, "panic")) {
+- clear_opt (*mount_options, ERRORS_CONT);
+- clear_opt (*mount_options, ERRORS_RO);
+- set_opt (*mount_options, ERRORS_PANIC);
++ clear_opt (sbi->s_mount_opt, ERRORS_CONT);
++ clear_opt (sbi->s_mount_opt, ERRORS_RO);
++ set_opt (sbi->s_mount_opt, ERRORS_PANIC);
+ }
+ else {
+ printk (KERN_ERR
+@@ -558,29 +545,25 @@
+ }
+ else if (!strcmp (this_char, "grpid") ||
+ !strcmp (this_char, "bsdgroups"))
+- set_opt (*mount_options, GRPID);
++ set_opt (sbi->s_mount_opt, GRPID);
+ else if (!strcmp (this_char, "minixdf"))
+- set_opt (*mount_options, MINIX_DF);
++ set_opt (sbi->s_mount_opt, MINIX_DF);
+ else if (!strcmp (this_char, "nocheck"))
+- clear_opt (*mount_options, CHECK);
++ clear_opt (sbi->s_mount_opt, CHECK);
+ else if (!strcmp (this_char, "nogrpid") ||
+ !strcmp (this_char, "sysvgroups"))
+- clear_opt (*mount_options, GRPID);
++ clear_opt (sbi->s_mount_opt, GRPID);
+ else if (!strcmp (this_char, "resgid")) {
+ unsigned long v;
+ if (want_numeric(value, "resgid", &v))
+ return 0;
+- *resgid = v;
++ sbi->s_resgid = v;
+ }
+ else if (!strcmp (this_char, "resuid")) {
+ unsigned long v;
+ if (want_numeric(value, "resuid", &v))
+ return 0;
+- *resuid = v;
+- }
+- else if (!strcmp (this_char, "sb")) {
+- if (want_numeric(value, "sb", sb_block))
+- return 0;
++ sbi->s_resuid = v;
+ }
+ #ifdef CONFIG_JBD_DEBUG
+ else if (!strcmp (this_char, "ro-after")) {
+@@ -611,12 +594,12 @@
+ if (want_value(value, "journal"))
+ return 0;
+ if (!strcmp (value, "update"))
+- set_opt (*mount_options, UPDATE_JOURNAL);
++ set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
+ else if (want_numeric(value, "journal", inum))
+ return 0;
+ }
+ else if (!strcmp (this_char, "noload"))
+- set_opt (*mount_options, NOLOAD);
++ set_opt (sbi->s_mount_opt, NOLOAD);
+ else if (!strcmp (this_char, "data")) {
+ int data_opt = 0;
+
+@@ -635,7 +618,7 @@
+ return 0;
+ }
+ if (is_remount) {
+- if ((*mount_options & EXT3_MOUNT_DATA_FLAGS) !=
++ if ((sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS) !=
+ data_opt) {
+ printk(KERN_ERR
+ "EXT3-fs: cannot change data "
+@@ -643,8 +626,8 @@
+ return 0;
+ }
+ } else {
+- *mount_options &= ~EXT3_MOUNT_DATA_FLAGS;
+- *mount_options |= data_opt;
++ sbi->s_mount_opt &= ~EXT3_MOUNT_DATA_FLAGS;
++ sbi->s_mount_opt |= data_opt;
+ }
+ } else if (!strcmp (this_char, "commit")) {
+ unsigned long v;
+@@ -707,6 +690,7 @@
+ es->s_mtime = cpu_to_le32(CURRENT_TIME);
+ ext3_update_dynamic_rev(sb);
+ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
++
+ ext3_commit_super (sb, es, 1);
+ if (test_opt (sb, DEBUG))
+ printk (KERN_INFO
+@@ -717,6 +701,7 @@
+ EXT3_BLOCKS_PER_GROUP(sb),
+ EXT3_INODES_PER_GROUP(sb),
+ sbi->s_mount_opt);
++
+ printk(KERN_INFO "EXT3 FS " EXT3FS_VERSION ", " EXT3FS_DATE " on %s, ",
+ bdevname(sb->s_dev));
+ if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
+@@ -890,17 +875,39 @@
+ return res;
+ }
+
++static unsigned long descriptor_loc(struct super_block *sb,
++ unsigned long logic_sb_block,
++ int nr)
++{
++ struct ext3_sb_info *sbi = EXT3_SB(sb);
++ unsigned long bg, first_data_block, first_meta_bg;
++ int has_super = 0;
++
++ first_data_block = le32_to_cpu(sbi->s_es->s_first_data_block);
++ first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
++
++ if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
++ nr < first_meta_bg)
++ return (logic_sb_block + nr + 1);
++ bg = sbi->s_desc_per_block * nr;
++ if (ext3_bg_has_super(sb, bg))
++ has_super = 1;
++ return (first_data_block + has_super + (bg * sbi->s_blocks_per_group));
++}
++
++
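
descriptor_loc() above implements the META_BG placement: descriptor blocks below s_first_meta_bg keep the classic packed layout right after the superblock, later ones live at the front of the block group they describe, behind that group's superblock backup if it has one. A standalone copy of the arithmetic, with the feature test and ext3_bg_has_super() reduced to plain parameters:

#include <stdio.h>

static unsigned long desc_block(unsigned long logic_sb_block, unsigned long nr,
                                unsigned long first_data_block,
                                unsigned long first_meta_bg,
                                unsigned long desc_per_block,
                                unsigned long blocks_per_group,
                                int meta_bg, int bg_has_super)
{
    unsigned long bg;

    if (!meta_bg || nr < first_meta_bg)
        return logic_sb_block + nr + 1;          /* classic packed layout */
    bg = desc_per_block * nr;                    /* first group covered by nr */
    return first_data_block + (bg_has_super ? 1 : 0) + bg * blocks_per_group;
}

int main(void)
{
    /* 4 kB blocks: 32-byte descriptors, so 128 per block, 32768 blocks/group */
    printf("%lu\n", desc_block(0, 2, 0, 3, 128, 32768, 1, 1));  /* packed: 3 */
    printf("%lu\n", desc_block(0, 5, 0, 3, 128, 32768, 1, 0));  /* in its group */
    return 0;
}
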
+ struct super_block * ext3_read_super (struct super_block * sb, void * data,
+ int silent)
+ {
+ struct buffer_head * bh;
+ struct ext3_super_block *es = 0;
+ struct ext3_sb_info *sbi = EXT3_SB(sb);
+- unsigned long sb_block = 1;
+- unsigned long logic_sb_block = 1;
+- unsigned long offset = 0;
++ unsigned long sb_block = get_sb_block(&data);
++ unsigned long block, logic_sb_block;
++ unsigned long offset;
+ unsigned long journal_inum = 0;
+ kdev_t dev = sb->s_dev;
++ unsigned long def_mount_opts;
+ int blocksize;
+ int hblock;
+ int db_count;
+@@ -922,14 +929,6 @@
+ if (blocksize < hblock)
+ blocksize = hblock;
+
+- sbi->s_mount_opt = 0;
+- sbi->s_resuid = EXT3_DEF_RESUID;
+- sbi->s_resgid = EXT3_DEF_RESGID;
+- if (!parse_options ((char *) data, &sb_block, sbi, &journal_inum, 0)) {
+- sb->s_dev = 0;
+- goto out_fail;
+- }
+-
+ sb->s_blocksize = blocksize;
+ set_blocksize (dev, blocksize);
+
+@@ -937,10 +936,8 @@
+ * The ext3 superblock will not be buffer aligned for other than 1kB
+ * block sizes. We need to calculate the offset from buffer start.
+ */
+- if (blocksize != EXT3_MIN_BLOCK_SIZE) {
+- logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
+- offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
+- }
++ logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
++ offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
+
+ if (!(bh = sb_bread(sb, logic_sb_block))) {
+ printk (KERN_ERR "EXT3-fs: unable to read superblock\n");
+@@ -960,6 +957,34 @@
+ bdevname(dev));
+ goto failed_mount;
+ }
++
++ /* Set defaults before we parse the mount options */
++ def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
++ sbi->s_mount_opt = 0;
++ if (def_mount_opts & EXT3_DEFM_DEBUG)
++ set_opt(sbi->s_mount_opt, DEBUG);
++ if (def_mount_opts & EXT3_DEFM_BSDGROUPS)
++ set_opt(sbi->s_mount_opt, GRPID);
++ if (def_mount_opts & EXT3_DEFM_UID16)
++ set_opt(sbi->s_mount_opt, NO_UID32);
++ if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
++ sbi->s_mount_opt |= EXT3_MOUNT_JOURNAL_DATA;
++ else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
++ sbi->s_mount_opt |= EXT3_MOUNT_ORDERED_DATA;
++ else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
++ sbi->s_mount_opt |= EXT3_MOUNT_WRITEBACK_DATA;
++
++ if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
++ set_opt(sbi->s_mount_opt, ERRORS_PANIC);
++ else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_RO)
++ set_opt(sbi->s_mount_opt, ERRORS_RO);
++
++ sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
++ sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
++
++ if (!parse_options ((char *) data, sbi, &journal_inum, 0))
++ goto failed_mount;
++
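
The block above seeds s_mount_opt from the new s_default_mount_opts superblock field and from the on-disk s_errors value before parse_options() runs, so a plain mount picks up the defaults written into the superblock while explicit options still win. A tiny sketch of the idea; the OPT_* values below are placeholders, not the kernel's EXT3_MOUNT_* constants:

#include <stdio.h>

/* EXT3_DEFM_* bits stored in the new s_default_mount_opts field. */
#define DEFM_DEBUG      0x0001
#define DEFM_BSDGROUPS  0x0002
#define DEFM_UID16      0x0010

enum { OPT_DEBUG = 0x1, OPT_GRPID = 0x2, OPT_NO_UID32 = 0x4 };

/* Seed the in-memory mount options from the superblock defaults; options
 * parsed afterwards by parse_options() override these. */
static unsigned seed_mount_opts(unsigned defm)
{
    unsigned opt = 0;

    if (defm & DEFM_DEBUG)     opt |= OPT_DEBUG;
    if (defm & DEFM_BSDGROUPS) opt |= OPT_GRPID;
    if (defm & DEFM_UID16)     opt |= OPT_NO_UID32;
    return opt;
}

int main(void)
{
    printf("%#x\n", seed_mount_opts(DEFM_BSDGROUPS | DEFM_UID16));  /* 0x6 */
    return 0;
}
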
+ if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
+ (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
+ EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
+@@ -1036,7 +1061,9 @@
+ } else {
+ sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
+ sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+- if (sbi->s_inode_size != EXT3_GOOD_OLD_INODE_SIZE) {
++ if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) ||
++ (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
++ (sbi->s_inode_size > blocksize)) {
+ printk (KERN_ERR
+ "EXT3-fs: unsupported inode size: %d\n",
+ sbi->s_inode_size);
+@@ -1059,13 +1086,12 @@
+ sbi->s_itb_per_group = sbi->s_inodes_per_group /sbi->s_inodes_per_block;
+ sbi->s_desc_per_block = blocksize / sizeof(struct ext3_group_desc);
+ sbi->s_sbh = bh;
+- if (sbi->s_resuid == EXT3_DEF_RESUID)
+- sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
+- if (sbi->s_resgid == EXT3_DEF_RESGID)
+- sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
+ sbi->s_mount_state = le16_to_cpu(es->s_state);
+ sbi->s_addr_per_block_bits = log2(EXT3_ADDR_PER_BLOCK(sb));
+ sbi->s_desc_per_block_bits = log2(EXT3_DESC_PER_BLOCK(sb));
++ for (i=0; i < 4; i++)
++ sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
++ sbi->s_def_hash_version = es->s_def_hash_version;
+
+ if (sbi->s_blocks_per_group > blocksize * 8) {
+ printk (KERN_ERR
+@@ -1098,8 +1124,16 @@
+ printk (KERN_ERR "EXT3-fs: not enough memory\n");
+ goto failed_mount;
+ }
++ sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
++ GFP_KERNEL);
++ if (!sbi->s_debts) {
++ printk ("EXT3-fs: not enough memory\n");
++ goto failed_mount2;
++ }
++ memset(sbi->s_debts, 0, sbi->s_groups_count * sizeof(*sbi->s_debts));
+ for (i = 0; i < db_count; i++) {
+- sbi->s_group_desc[i] = sb_bread(sb, logic_sb_block + i + 1);
++ block = descriptor_loc(sb, logic_sb_block, i);
++ sbi->s_group_desc[i] = sb_bread(sb, block);
+ if (!sbi->s_group_desc[i]) {
+ printk (KERN_ERR "EXT3-fs: "
+ "can't read group descriptor %d\n", i);
+@@ -1120,6 +1154,7 @@
+ sbi->s_loaded_inode_bitmaps = 0;
+ sbi->s_loaded_block_bitmaps = 0;
+ sbi->s_gdb_count = db_count;
++ sbi->s_dir_count = ext3_count_dirs(sb);
+ get_random_bytes(&sbi->s_next_generation, sizeof(u32));
+ /*
+ * set up enough so that it can read an inode
+@@ -1223,6 +1258,8 @@
+ failed_mount3:
+ journal_destroy(sbi->s_journal);
+ failed_mount2:
++ if (sbi->s_debts)
++ kfree(sbi->s_debts);
+ for (i = 0; i < db_count; i++)
+ brelse(sbi->s_group_desc[i]);
+ kfree(sbi->s_group_desc);
+@@ -1651,7 +1688,7 @@
+ /*
+ * Allow the "check" option to be passed as a remount option.
+ */
+- if (!parse_options(data, &tmp, sbi, &tmp, 1))
++ if (!parse_options(data, sbi, &tmp, 1))
+ return -EINVAL;
+
+ if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
+diff -Nru a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
+--- a/include/linux/ext2_fs.h Thu Nov 7 11:58:05 2002
++++ b/include/linux/ext2_fs.h Thu Nov 7 11:58:05 2002
+@@ -198,10 +198,11 @@
+ #define EXT2_ECOMPR_FL 0x00000800 /* Compression error */
+ /* End compression flags --- maybe not all used */
+ #define EXT2_BTREE_FL 0x00001000 /* btree format dir */
++#define EXT2_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
+ #define EXT2_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
+
+-#define EXT2_FL_USER_VISIBLE 0x00001FFF /* User visible flags */
+-#define EXT2_FL_USER_MODIFIABLE 0x000000FF /* User modifiable flags */
++#define EXT2_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
++#define EXT2_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
+
+ /*
+ * ioctl commands
+@@ -307,6 +308,7 @@
+ * Mount flags
+ */
+ #define EXT2_MOUNT_CHECK 0x0001 /* Do mount-time checks */
++#define EXT2_MOUNT_OLDALLOC 0x0002 /* Don't use the new Orlov allocator */
+ #define EXT2_MOUNT_GRPID 0x0004 /* Create files with directory's group */
+ #define EXT2_MOUNT_DEBUG 0x0008 /* Some debugging messages */
+ #define EXT2_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */
+@@ -392,7 +394,20 @@
+ __u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate*/
+ __u8 s_prealloc_dir_blocks; /* Nr to preallocate for dirs */
+ __u16 s_padding1;
+- __u32 s_reserved[204]; /* Padding to the end of the block */
++ /*
++ * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
++ */
++ __u8 s_journal_uuid[16]; /* uuid of journal superblock */
++ __u32 s_journal_inum; /* inode number of journal file */
++ __u32 s_journal_dev; /* device number of journal file */
++ __u32 s_last_orphan; /* start of list of inodes to delete */
++ __u32 s_hash_seed[4]; /* HTREE hash seed */
++ __u8 s_def_hash_version; /* Default hash version to use */
++ __u8 s_reserved_char_pad;
++ __u16 s_reserved_word_pad;
++ __u32 s_default_mount_opts;
++ __u32 s_first_meta_bg; /* First metablock block group */
++ __u32 s_reserved[190]; /* Padding to the end of the block */
+ };
+
+ #ifdef __KERNEL__
+@@ -464,10 +479,12 @@
+ #define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002
+ #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004
+ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008
++#define EXT2_FEATURE_INCOMPAT_META_BG 0x0010
+ #define EXT2_FEATURE_INCOMPAT_ANY 0xffffffff
+
+ #define EXT2_FEATURE_COMPAT_SUPP 0
+-#define EXT2_FEATURE_INCOMPAT_SUPP EXT2_FEATURE_INCOMPAT_FILETYPE
++#define EXT2_FEATURE_INCOMPAT_SUPP (EXT2_FEATURE_INCOMPAT_FILETYPE| \
++ EXT2_FEATURE_INCOMPAT_META_BG)
+ #define EXT2_FEATURE_RO_COMPAT_SUPP (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+ EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
+ EXT2_FEATURE_RO_COMPAT_BTREE_DIR)
+@@ -475,10 +492,18 @@
+ #define EXT2_FEATURE_INCOMPAT_UNSUPPORTED ~EXT2_FEATURE_INCOMPAT_SUPP
+
+ /*
+- * Default values for user and/or group using reserved blocks
++ * Default mount options
+ */
+-#define EXT2_DEF_RESUID 0
+-#define EXT2_DEF_RESGID 0
++#define EXT2_DEFM_DEBUG 0x0001
++#define EXT2_DEFM_BSDGROUPS 0x0002
++#define EXT2_DEFM_XATTR_USER 0x0004
++#define EXT2_DEFM_ACL 0x0008
++#define EXT2_DEFM_UID16 0x0010
++ /* Not used by ext2, but reserved for use by ext3 */
++#define EXT3_DEFM_JMODE 0x0060
++#define EXT3_DEFM_JMODE_DATA 0x0020
++#define EXT3_DEFM_JMODE_ORDERED 0x0040
++#define EXT3_DEFM_JMODE_WBACK 0x0060
+
+ /*
+ * Structure of a directory entry
+@@ -576,6 +601,7 @@
+ extern struct inode * ext2_new_inode (const struct inode *, int);
+ extern void ext2_free_inode (struct inode *);
+ extern unsigned long ext2_count_free_inodes (struct super_block *);
++extern unsigned long ext2_count_dirs (struct super_block *);
+ extern void ext2_check_inodes_bitmap (struct super_block *);
+ extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
+
+diff -Nru a/include/linux/ext2_fs_i.h b/include/linux/ext2_fs_i.h
+--- a/include/linux/ext2_fs_i.h Thu Nov 7 11:58:05 2002
++++ b/include/linux/ext2_fs_i.h Thu Nov 7 11:58:05 2002
+@@ -25,6 +25,7 @@
+ __u32 i_faddr;
+ __u8 i_frag_no;
+ __u8 i_frag_size;
++ __u16 i_state;
+ __u32 i_file_acl;
+ __u32 i_dir_acl;
+ __u32 i_dtime;
+@@ -34,7 +35,11 @@
+ __u32 i_prealloc_block;
+ __u32 i_prealloc_count;
+ __u32 i_dir_start_lookup;
+- int i_new_inode:1; /* Is a freshly allocated inode */
+ };
++
++/*
++ * Inode dynamic state flags
++ */
++#define EXT2_STATE_NEW 0x00000001 /* inode is newly created */
+
+ #endif /* _LINUX_EXT2_FS_I */
+diff -Nru a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
+--- a/include/linux/ext2_fs_sb.h Thu Nov 7 11:58:05 2002
++++ b/include/linux/ext2_fs_sb.h Thu Nov 7 11:58:05 2002
+@@ -56,6 +56,8 @@
+ int s_desc_per_block_bits;
+ int s_inode_size;
+ int s_first_ino;
++ unsigned long s_dir_count;
++ u8 *s_debts;
+ };
+
+ #endif /* _LINUX_EXT2_FS_SB */
+diff -Nru a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
+--- a/include/linux/ext3_fs.h Thu Nov 7 11:58:05 2002
++++ b/include/linux/ext3_fs.h Thu Nov 7 11:58:05 2002
+@@ -40,6 +40,11 @@
+ #define EXT3FS_VERSION "2.4-0.9.19"
+
+ /*
++ * Always enable hashed directories
++ */
++#define CONFIG_EXT3_INDEX
++
++/*
+ * Debug code
+ */
+ #ifdef EXT3FS_DEBUG
+@@ -203,10 +208,11 @@
+ #define EXT3_INDEX_FL 0x00001000 /* hash-indexed directory */
+ #define EXT3_IMAGIC_FL 0x00002000 /* AFS directory */
+ #define EXT3_JOURNAL_DATA_FL 0x00004000 /* file data should be journaled */
++#define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
+ #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
+
+-#define EXT3_FL_USER_VISIBLE 0x00005FFF /* User visible flags */
+-#define EXT3_FL_USER_MODIFIABLE 0x000000FF /* User modifiable flags */
++#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
++#define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
+
+ /*
+ * Inode dynamic state flags
+@@ -325,6 +331,7 @@
+ * Mount flags
+ */
+ #define EXT3_MOUNT_CHECK 0x0001 /* Do mount-time checks */
++#define EXT3_MOUNT_OLDALLOC 0x0002 /* Don't use the new Orlov allocator */
+ #define EXT3_MOUNT_GRPID 0x0004 /* Create files with directory's group */
+ #define EXT3_MOUNT_DEBUG 0x0008 /* Some debugging messages */
+ #define EXT3_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */
+@@ -437,8 +444,13 @@
+ /*E0*/ __u32 s_journal_inum; /* inode number of journal file */
+ __u32 s_journal_dev; /* device number of journal file */
+ __u32 s_last_orphan; /* start of list of inodes to delete */
+-
+-/*EC*/ __u32 s_reserved[197]; /* Padding to the end of the block */
++ __u32 s_hash_seed[4]; /* HTREE hash seed */
++ __u8 s_def_hash_version; /* Default hash version to use */
++ __u8 s_reserved_char_pad;
++ __u16 s_reserved_word_pad;
++ __u32 s_default_mount_opts;
++ __u32 s_first_meta_bg; /* First metablock block group */
++ __u32 s_reserved[190]; /* Padding to the end of the block */
+ };
+
+ #ifdef __KERNEL__
+@@ -511,19 +523,28 @@
+ #define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002
+ #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
+ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
++#define EXT3_FEATURE_INCOMPAT_META_BG 0x0010
+
+ #define EXT3_FEATURE_COMPAT_SUPP 0
+ #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
+- EXT3_FEATURE_INCOMPAT_RECOVER)
++ EXT3_FEATURE_INCOMPAT_RECOVER| \
++ EXT3_FEATURE_INCOMPAT_META_BG)
+ #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+ EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
+ EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
+
+ /*
+- * Default values for user and/or group using reserved blocks
++ * Default mount options
+ */
+-#define EXT3_DEF_RESUID 0
+-#define EXT3_DEF_RESGID 0
++#define EXT3_DEFM_DEBUG 0x0001
++#define EXT3_DEFM_BSDGROUPS 0x0002
++#define EXT3_DEFM_XATTR_USER 0x0004
++#define EXT3_DEFM_ACL 0x0008
++#define EXT3_DEFM_UID16 0x0010
++#define EXT3_DEFM_JMODE 0x0060
++#define EXT3_DEFM_JMODE_DATA 0x0020
++#define EXT3_DEFM_JMODE_ORDERED 0x0040
++#define EXT3_DEFM_JMODE_WBACK 0x0060
+
+ /*
+ * Structure of a directory entry
+@@ -575,9 +596,46 @@
+ #define EXT3_DIR_ROUND (EXT3_DIR_PAD - 1)
+ #define EXT3_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT3_DIR_ROUND) & \
+ ~EXT3_DIR_ROUND)
++/*
++ * Hash Tree Directory indexing
++ * (c) Daniel Phillips, 2001
++ */
++
++#ifdef CONFIG_EXT3_INDEX
++ #define is_dx(dir) (EXT3_HAS_COMPAT_FEATURE(dir->i_sb, \
++ EXT3_FEATURE_COMPAT_DIR_INDEX) && \
++ (EXT3_I(dir)->i_flags & EXT3_INDEX_FL))
++#define EXT3_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT3_LINK_MAX)
++#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
++#else
++ #define is_dx(dir) 0
++#define EXT3_DIR_LINK_MAX(dir) ((dir)->i_nlink >= EXT3_LINK_MAX)
++#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2)
++#endif
++
++/* Legal values for the dx_root hash_version field: */
++
++#define DX_HASH_LEGACY 0
++#define DX_HASH_HALF_MD4 1
++#define DX_HASH_TEA 2
++
++/* hash info structure used by the directory hash */
++struct dx_hash_info
++{
++ u32 hash;
++ u32 minor_hash;
++ int hash_version;
++ u32 *seed;
++};
+
+ #ifdef __KERNEL__
+ /*
++ * Control parameters used by ext3_htree_next_block
++ */
++#define HASH_NB_ALWAYS 1
++
++
++/*
+ * Describe an inode's exact location on disk and in memory
+ */
+ struct ext3_iloc
+@@ -587,6 +645,27 @@
+ unsigned long block_group;
+ };
+
++
++/*
++ * This structure is stuffed into the struct file's private_data field
++ * for directories. It is where we put information so that we can do
++ * readdir operations in hash tree order.
++ */
++struct dir_private_info {
++ rb_root_t root;
++ rb_node_t *curr_node;
++ struct fname *extra_fname;
++ loff_t last_pos;
++ __u32 curr_hash;
++ __u32 curr_minor_hash;
++ __u32 next_hash;
++};
++
++/*
++ * Special error return code only used by dx_probe() and its callers.
++ */
++#define ERR_BAD_DX_DIR -75000
++
+ /*
+ * Function prototypes
+ */
+@@ -614,16 +693,26 @@
+
+ /* dir.c */
+ extern int ext3_check_dir_entry(const char *, struct inode *,
+- struct ext3_dir_entry_2 *, struct buffer_head *,
+- unsigned long);
++ struct ext3_dir_entry_2 *,
++ struct buffer_head *, unsigned long);
++extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
++ __u32 minor_hash,
++ struct ext3_dir_entry_2 *dirent);
++extern void ext3_htree_free_dir_info(struct dir_private_info *p);
++
+ /* fsync.c */
+ extern int ext3_sync_file (struct file *, struct dentry *, int);
+
++/* hash.c */
++extern int ext3fs_dirhash(const char *name, int len, struct
++ dx_hash_info *hinfo);
++
+ /* ialloc.c */
+ extern struct inode * ext3_new_inode (handle_t *, const struct inode *, int);
+ extern void ext3_free_inode (handle_t *, struct inode *);
+ extern struct inode * ext3_orphan_get (struct super_block *, ino_t);
+ extern unsigned long ext3_count_free_inodes (struct super_block *);
++extern unsigned long ext3_count_dirs (struct super_block *);
+ extern void ext3_check_inodes_bitmap (struct super_block *);
+ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
+
+@@ -650,6 +739,8 @@
+ /* namei.c */
+ extern int ext3_orphan_add(handle_t *, struct inode *);
+ extern int ext3_orphan_del(handle_t *, struct inode *);
++extern int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
++ __u32 start_minor_hash, __u32 *next_hash);
+
+ /* super.c */
+ extern void ext3_error (struct super_block *, const char *, const char *, ...)
+diff -Nru a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
+--- a/include/linux/ext3_fs_sb.h Thu Nov 7 11:58:05 2002
++++ b/include/linux/ext3_fs_sb.h Thu Nov 7 11:58:05 2002
+@@ -62,6 +62,10 @@
+ int s_inode_size;
+ int s_first_ino;
+ u32 s_next_generation;
++ u32 s_hash_seed[4];
++ int s_def_hash_version;
++ unsigned long s_dir_count;
++ u8 *s_debts;
+
+ /* Journaling */
+ struct inode * s_journal_inode;
+diff -Nru a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
+--- a/include/linux/ext3_jbd.h Thu Nov 7 11:58:05 2002
++++ b/include/linux/ext3_jbd.h Thu Nov 7 11:58:05 2002
+@@ -63,6 +63,8 @@
+
+ #define EXT3_RESERVE_TRANS_BLOCKS 12
+
++#define EXT3_INDEX_EXTRA_TRANS_BLOCKS 8
++
+ int
+ ext3_mark_iloc_dirty(handle_t *handle,
+ struct inode *inode,
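
EXT3_INDEX_EXTRA_TRANS_BLOCKS is the extra journal credit budget the namei hunks above add to ext3_journal_start() wherever a directory entry may be added or moved: an htree insert can dirty the leaf it lands in, a second leaf created by a split, and one or more index blocks, which the old EXT3_DATA_TRANS_BLOCKS + 3 reservation did not cover. A sketch of the new sizing, with the base value left as a parameter since it is defined elsewhere in ext3_jbd.h:

#include <stdio.h>

#define EXT3_INDEX_EXTRA_TRANS_BLOCKS  8   /* as defined above */

/* Credits requested by ext3_create/ext3_mknod/ext3_mkdir after this patch
 * (ext3_symlink uses + 5 instead of + 3); 'data_trans_blocks' stands in
 * for EXT3_DATA_TRANS_BLOCKS. */
static int dir_insert_credits(int data_trans_blocks)
{
    return data_trans_blocks + EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3;
}

int main(void)
{
    /* purely illustrative base value; the real one comes from ext3_jbd.h */
    printf("%d credits\n", dir_insert_credits(12));
    return 0;
}
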
+diff -Nru a/include/linux/rbtree.h b/include/linux/rbtree.h
+--- a/include/linux/rbtree.h Thu Nov 7 11:58:05 2002
++++ b/include/linux/rbtree.h Thu Nov 7 11:58:05 2002
+@@ -120,6 +120,8 @@
+
+ extern void rb_insert_color(rb_node_t *, rb_root_t *);
+ extern void rb_erase(rb_node_t *, rb_root_t *);
++extern rb_node_t *rb_get_first(rb_root_t *root);
++extern rb_node_t *rb_get_next(rb_node_t *n);
+
+ static inline void rb_link_node(rb_node_t * node, rb_node_t * parent, rb_node_t ** rb_link)
+ {
+diff -Nru a/lib/rbtree.c b/lib/rbtree.c
+--- a/lib/rbtree.c Thu Nov 7 11:58:05 2002
++++ b/lib/rbtree.c Thu Nov 7 11:58:05 2002
+@@ -17,6 +17,8 @@
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ linux/lib/rbtree.c
++
++ rb_get_first and rb_get_next written by Theodore Ts'o, 9/8/2002
+ */
+
+ #include <linux/rbtree.h>
+@@ -294,3 +296,43 @@
+ __rb_erase_color(child, parent, root);
+ }
+ EXPORT_SYMBOL(rb_erase);
++
++/*
++ * This function returns the first node (in sort order) of the tree.
++ */
++rb_node_t *rb_get_first(rb_root_t *root)
++{
++ rb_node_t *n;
++
++ n = root->rb_node;
++ if (!n)
++ return 0;
++ while (n->rb_left)
++ n = n->rb_left;
++ return n;
++}
++EXPORT_SYMBOL(rb_get_first);
++
++/*
++ * Given a node, this function will return the next node in the tree.
++ */
++rb_node_t *rb_get_next(rb_node_t *n)
++{
++ rb_node_t *parent;
++
++ if (n->rb_right) {
++ n = n->rb_right;
++ while (n->rb_left)
++ n = n->rb_left;
++ return n;
++ } else {
++ while ((parent = n->rb_parent)) {
++ if (n == parent->rb_left)
++ return parent;
++ n = parent;
++ }
++ return 0;
++ }
++}
++EXPORT_SYMBOL(rb_get_next);
++
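
rb_get_first() and rb_get_next() give lib/rbtree.c an in-order walk, which the ext3 readdir path uses (via the rb_root_t in dir_private_info above) to hand entries back in hash order. They only touch the parent/left/right linkage, so the idea can be shown with a self-contained copy of the same logic, with rb_node_t and the colour bookkeeping replaced by a plain struct:

#include <stdio.h>
#include <stddef.h>

struct node {
    struct node *parent, *left, *right;
    int key;
};

/* Same logic as rb_get_first(): leftmost node of the tree. */
static struct node *first(struct node *root)
{
    if (!root)
        return NULL;
    while (root->left)
        root = root->left;
    return root;
}

/* Same logic as rb_get_next(): in-order successor. */
static struct node *next(struct node *n)
{
    struct node *parent;

    if (n->right) {
        n = n->right;
        while (n->left)
            n = n->left;
        return n;
    }
    while ((parent = n->parent)) {
        if (n == parent->left)
            return parent;
        n = parent;
    }
    return NULL;
}

int main(void)
{
    /* hand-built tree:   20
     *                   /  \
     *                 10    30   */
    struct node n10 = { NULL, NULL, NULL, 10 };
    struct node n30 = { NULL, NULL, NULL, 30 };
    struct node n20 = { NULL, &n10, &n30, 20 };
    struct node *p;

    n10.parent = n30.parent = &n20;
    for (p = first(&n20); p; p = next(p))
        printf("%d ", p->key);          /* prints: 10 20 30 */
    printf("\n");
    return 0;
}
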
--- /dev/null
+--- ./fs/ext3/super.c 2002/03/05 06:18:59 2.1
++++ ./fs/ext3/super.c 2002/03/05 06:26:56
+@@ -529,6 +529,12 @@
+ "EXT3 Check option not supported\n");
+ #endif
+ }
++ else if (!strcmp (this_char, "index"))
++#ifdef CONFIG_EXT3_INDEX
++ set_opt (*mount_options, INDEX);
++#else
++ printk("EXT3 index option not supported\n");
++#endif
+ else if (!strcmp (this_char, "debug"))
+ set_opt (*mount_options, DEBUG);
+ else if (!strcmp (this_char, "errors")) {
+@@ -702,6 +708,12 @@
+ es->s_mtime = cpu_to_le32(CURRENT_TIME);
+ ext3_update_dynamic_rev(sb);
+ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
++
++ if (test_opt(sb, INDEX))
++ EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX);
++ else if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
++ set_opt (EXT3_SB(sb)->s_mount_opt, INDEX);
++
+ ext3_commit_super (sb, es, 1);
+ if (test_opt (sb, DEBUG))
+ printk (KERN_INFO
+--- ./fs/ext3/namei.c 2002/03/05 06:18:59 2.1
++++ ./fs/ext3/namei.c 2002/03/06 00:13:18
+@@ -16,6 +16,10 @@
+ * David S. Miller (davem@caip.rutgers.edu), 1995
+ * Directory entry file type support and forward compatibility hooks
+ * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
++ * Hash Tree Directory indexing (c)
++ * Daniel Phillips, 2001
++ * Hash Tree Directory indexing porting
++ * Christopher Li, 2002
+ */
+
+ #include <linux/fs.h>
+@@ -33,7 +33,7 @@
+ #include <linux/string.h>
+ #include <linux/locks.h>
+ #include <linux/quotaops.h>
+-
++#include <linux/slab.h>
+
+ /*
+ * define how far ahead to read directories while searching them.
+@@ -38,6 +42,437 @@
+ #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+ #define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
+
++static struct buffer_head *ext3_append(handle_t *handle,
++ struct inode *inode,
++ u32 *block, int *err)
++{
++ struct buffer_head *bh;
++
++ *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
++
++ if ((bh = ext3_bread(handle, inode, *block, 1, err))) {
++ inode->i_size += inode->i_sb->s_blocksize;
++ EXT3_I(inode)->i_disksize = inode->i_size;
++ ext3_journal_get_write_access(handle,bh);
++ }
++ return bh;
++}
++
++#ifndef assert
++#define assert(test) J_ASSERT(test)
++#endif
++
++#ifndef swap
++#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
++#endif
++
++typedef struct { u32 v; } le_u32;
++typedef struct { u16 v; } le_u16;
++
++#define dxtrace_on(command) command
++#define dxtrace_off(command)
++
++struct fake_dirent
++{
++ /*le*/u32 inode;
++ /*le*/u16 rec_len;
++ u8 name_len;
++ u8 file_type;
++};
++
++struct dx_countlimit
++{
++ le_u16 limit;
++ le_u16 count;
++};
++
++struct dx_entry
++{
++ le_u32 hash;
++ le_u32 block;
++};
++
++/*
++ * dx_root_info is laid out so that if it should somehow get overlaid by a
++ * dirent the two low bits of the hash version will be zero. Therefore, the
++ * hash version mod 4 should never be 0. Sincerely, the paranoia department.
++ */
++
++struct dx_root
++{
++ struct fake_dirent dot;
++ char dot_name[4];
++ struct fake_dirent dotdot;
++ char dotdot_name[4];
++ struct dx_root_info
++ {
++ le_u32 reserved_zero;
++ u8 hash_version; /* 0 now, 1 at release */
++ u8 info_length; /* 8 */
++ u8 indirect_levels;
++ u8 unused_flags;
++ }
++ info;
++ struct dx_entry entries[0];
++};
++
++struct dx_node
++{
++ struct fake_dirent fake;
++ struct dx_entry entries[0];
++};
++
++
++struct dx_frame
++{
++ struct buffer_head *bh;
++ struct dx_entry *entries;
++ struct dx_entry *at;
++};
++
++struct dx_map_entry
++{
++ u32 hash;
++ u32 offs;
++};
++
++typedef struct ext3_dir_entry_2 ext3_dirent;
++static inline unsigned dx_get_block (struct dx_entry *entry);
++static void dx_set_block (struct dx_entry *entry, unsigned value);
++static inline unsigned dx_get_hash (struct dx_entry *entry);
++static void dx_set_hash (struct dx_entry *entry, unsigned value);
++static unsigned dx_get_count (struct dx_entry *entries);
++static unsigned dx_get_limit (struct dx_entry *entries);
++static void dx_set_count (struct dx_entry *entries, unsigned value);
++static void dx_set_limit (struct dx_entry *entries, unsigned value);
++static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
++static unsigned dx_node_limit (struct inode *dir);
++static unsigned dx_hack_hash (const u8 *name, int len);
++static struct dx_frame *dx_probe (struct inode *dir, u32 hash, struct dx_frame *frame);
++static void dx_release (struct dx_frame *frames);
++static int dx_make_map (ext3_dirent *de, int size, struct dx_map_entry map[]);
++static void dx_sort_map(struct dx_map_entry *map, unsigned count);
++static ext3_dirent *dx_copy_dirents (char *from, char *to,
++ struct dx_map_entry *map, int count);
++static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
++
++
++#ifdef CONFIG_EXT3_INDEX
++/*
++ * Future: use high four bits of block for coalesce-on-delete flags.
++ * Mask them off for now.
++ */
++
++static inline unsigned dx_get_block (struct dx_entry *entry)
++{
++ return le32_to_cpu(entry->block.v) & 0x00ffffff;
++}
++
++static inline void dx_set_block (struct dx_entry *entry, unsigned value)
++{
++ entry->block.v = cpu_to_le32(value);
++}
++
++static inline unsigned dx_get_hash (struct dx_entry *entry)
++{
++ return le32_to_cpu(entry->hash.v);
++}
++
++static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
++{
++ entry->hash.v = cpu_to_le32(value);
++}
++
++static inline unsigned dx_get_count (struct dx_entry *entries)
++{
++ return le16_to_cpu(((struct dx_countlimit *) entries)->count.v);
++}
++
++static inline unsigned dx_get_limit (struct dx_entry *entries)
++{
++ return le16_to_cpu(((struct dx_countlimit *) entries)->limit.v);
++}
++
++static inline void dx_set_count (struct dx_entry *entries, unsigned value)
++{
++ ((struct dx_countlimit *) entries)->count.v = cpu_to_le16(value);
++}
++
++static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
++{
++ ((struct dx_countlimit *) entries)->limit.v = cpu_to_le16(value);
++}
++
++static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
++{
++ unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
++ EXT3_DIR_REC_LEN(2) - infosize;
++ return 0? 20: entry_space / sizeof(struct dx_entry);
++}
++
++static inline unsigned dx_node_limit (struct inode *dir)
++{
++ unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
++ return 0? 22: entry_space / sizeof(struct dx_entry);
++}
++
++/* Hash function - not bad, but still looking for an ideal default */
++
++static unsigned dx_hack_hash (const u8 *name, int len)
++{
++ u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
++ while (len--)
++ {
++ u32 hash = hash1 + (hash0 ^ (*name++ * 7152373));
++ if (hash & 0x80000000) hash -= 0x7fffffff;
++ hash1 = hash0;
++ hash0 = hash;
++ }
++ return hash0;
++}
++
++#define dx_hash(s,n) (dx_hack_hash(s,n) << 1)
++
++/*
++ * Debug
++ */
++#ifdef DX_DEBUG
++#define dxtrace dxtrace_on
++static void dx_show_index (char * label, struct dx_entry *entries)
++{
++ int i, n = dx_get_count (entries);
++ printk("%s index ", label);
++ for (i = 0; i < n; i++)
++ {
++ printk("%x->%u ", i? dx_get_hash(entries + i): 0, dx_get_block(entries + i));
++ }
++ printk("\n");
++}
++
++struct stats
++{
++ unsigned names;
++ unsigned space;
++ unsigned bcount;
++};
++
++static struct stats dx_show_leaf (ext3_dirent *de, int size, int show_names)
++{
++ unsigned names = 0, space = 0;
++ char *base = (char *) de;
++ printk("names: ");
++ while ((char *) de < base + size)
++ {
++ if (de->inode)
++ {
++ if (show_names)
++ {
++ int len = de->name_len;
++ char *name = de->name;
++ while (len--) printk("%c", *name++);
++ printk(":%x.%u ", dx_hash (de->name, de->name_len), ((char *) de - base));
++ }
++ space += EXT3_DIR_REC_LEN(de->name_len);
++ names++;
++ }
++ de = (ext3_dirent *) ((char *) de + le16_to_cpu(de->rec_len));
++ }
++ printk("(%i)\n", names);
++ return (struct stats) { names, space, 1 };
++}
++
++struct stats dx_show_entries (struct inode *dir, struct dx_entry *entries, int levels)
++{
++ unsigned blocksize = dir->i_sb->s_blocksize;
++ unsigned count = dx_get_count (entries), names = 0, space = 0, i;
++ unsigned bcount = 0;
++ struct buffer_head *bh;
++ int err;
++ printk("%i indexed blocks...\n", count);
++ for (i = 0; i < count; i++, entries++)
++ {
++ u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
++ u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
++ struct stats stats;
++ printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
++ if (!(bh = ext3_bread (NULL,dir, block, 0,&err))) continue;
++ stats = levels?
++ dx_show_entries (dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
++ dx_show_leaf ((ext3_dirent *) bh->b_data, blocksize, 0);
++ names += stats.names;
++ space += stats.space;
++ bcount += stats.bcount;
++ brelse (bh);
++ }
++ if (bcount)
++ printk("%snames %u, fullness %u (%u%%)\n", levels?"":" ",
++ names, space/bcount,(space/bcount)*100/blocksize);
++ return (struct stats) { names, space, bcount};
++}
++#else
++#define dxtrace dxtrace_off
++#endif
++
++/*
++ * Probe for a directory leaf block to search
++ */
++
++static struct dx_frame *
++dx_probe(struct inode *dir, u32 hash, struct dx_frame *frame_in)
++{
++ unsigned count, indirect;
++ struct dx_entry *at, *entries, *p, *q, *m;
++ struct dx_root *root;
++ struct buffer_head *bh;
++ struct dx_frame *frame = frame_in;
++ int err;
++
++ frame->bh = NULL;
++ if (!(bh = ext3_bread(NULL, dir, 0, 0, &err)))
++ goto fail;
++ root = (struct dx_root *) bh->b_data;
++ if (root->info.hash_version > 0 || root->info.unused_flags & 1) {
++ brelse(bh);
++ goto fail;
++ }
++ if ((indirect = root->info.indirect_levels) > 1) {
++ brelse(bh);
++ goto fail;
++ }
++ entries = (struct dx_entry *) (((char *) &root->info) + root->info.info_length);
++ assert (dx_get_limit(entries) == dx_root_limit(dir, root->info.info_length));
++ dxtrace (printk("Look up %x", hash));
++ while (1)
++ {
++ count = dx_get_count(entries);
++ assert (count && count <= dx_get_limit(entries));
++ p = entries + 1;
++ q = entries + count - 1;
++ while (p <= q)
++ {
++ m = p + (q - p)/2;
++ dxtrace(printk("."));
++ if (dx_get_hash(m) > hash)
++ q = m - 1;
++ else
++ p = m + 1;
++ }
++
++ if (0) // linear search cross check
++ {
++ unsigned n = count - 1;
++ at = entries;
++ while (n--)
++ {
++ dxtrace(printk(","));
++ if (dx_get_hash(++at) > hash)
++ {
++ at--;
++ break;
++ }
++ }
++ assert (at == p - 1);
++ }
++
++ at = p - 1;
++ dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
++ frame->bh = bh;
++ frame->entries = entries;
++ frame->at = at;
++ if (!indirect--) return frame;
++ if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0,&err)))
++ goto fail2;
++ at = entries = ((struct dx_node *) bh->b_data)->entries;
++ assert (dx_get_limit(entries) == dx_node_limit (dir));
++ frame++;
++ }
++fail2:
++ while (frame >= frame_in) {
++ brelse(frame->bh);
++ frame--;
++ }
++fail:
++ return NULL;
++}
++
++static void dx_release (struct dx_frame *frames)
++{
++ if (frames[0].bh == NULL)
++ return;
++
++ if (((struct dx_root *)frames[0].bh->b_data)->info.indirect_levels)
++ brelse (frames[1].bh);
++ brelse (frames[0].bh);
++}
++
++/*
++ * Directory block splitting, compacting
++ */
++
++static int dx_make_map (ext3_dirent *de, int size, struct dx_map_entry map[])
++{
++ int count = 0;
++ char *base = (char *) de;
++ while ((char *) de < base + size) {
++ if (de->name_len && de->inode) {
++ map[count].hash = dx_hash (de->name, de->name_len);
++ map[count].offs = (u32) ((char *) de - base);
++ count++;
++ }
++ de = (ext3_dirent *) ((char *) de + le16_to_cpu(de->rec_len));
++ }
++ return count;
++}
++
++static void dx_sort_map (struct dx_map_entry *map, unsigned count)
++{
++ struct dx_map_entry *p, *q, *top = map + count - 1;
++ int more;
++ /* Combsort until bubble sort doesn't suck */
++ while (count > 2)
++ {
++ count = count*10/13;
++ if (count - 9 < 2) /* 9, 10 -> 11 */
++ count = 11;
++ for (p = top, q = p - count; q >= map; p--, q--)
++ if (p->hash < q->hash)
++ swap(*p, *q);
++ }
++ /* Garden variety bubble sort */
++ do {
++ more = 0;
++ q = top;
++ while (q-- > map)
++ {
++ if (q[1].hash >= q[0].hash)
++ continue;
++ swap(*(q+1), *q);
++ more = 1;
++ }
++ } while(more);
++}
++
++static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
++{
++ struct dx_entry *entries = frame->entries;
++ struct dx_entry *old = frame->at, *new = old + 1;
++ int count = dx_get_count(entries);
++
++ assert(count < dx_get_limit(entries));
++ assert(old < entries + count);
++ memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
++ dx_set_hash(new, hash);
++ dx_set_block(new, block);
++ dx_set_count(entries, count + 1);
++}
++#endif
++
++static void ext3_update_dx_flag(struct inode *inode)
++{
++ if (!test_opt(inode->i_sb, INDEX))
++ EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
++}
++
+ /*
+ * NOTE! unlike strncmp, ext3_match returns 1 for success, 0 for failure.
+ *
+@@ -95,6 +529,15 @@
+ }
+
+ /*
++ * p is at least 6 bytes before the end of page
++ */
++static inline ext3_dirent *ext3_next_entry(ext3_dirent *p)
++{
++ return (ext3_dirent *)((char*)p + le16_to_cpu(p->rec_len));
++}
++
++
++/*
+ * ext3_find_entry()
+ *
+ * finds an entry in the specified directory with the wanted name. It
+@@ -105,6 +548,8 @@
+ * The returned buffer_head has ->b_count elevated. The caller is expected
+ * to brelse() it when appropriate.
+ */
++
++
+ static struct buffer_head * ext3_find_entry (struct dentry *dentry,
+ struct ext3_dir_entry_2 ** res_dir)
+ {
+@@ -119,10 +564,70 @@
+ int num = 0;
+ int nblocks, i, err;
+ struct inode *dir = dentry->d_parent->d_inode;
++ ext3_dirent *de, *top;
+
+ *res_dir = NULL;
+ sb = dir->i_sb;
++ if (dentry->d_name.len > EXT3_NAME_LEN)
++ return NULL;
++ if (ext3_dx && is_dx(dir)) {
++ u32 hash = dx_hash(dentry->d_name.name, dentry->d_name.len);
++ struct dx_frame frames[2], *frame;
++ if (!(frame = dx_probe (dir, hash, frames)))
++ return NULL;
++dxnext:
++ block = dx_get_block(frame->at);
++ if (!(bh = ext3_bread (NULL,dir, block, 0, &err)))
++ goto dxfail;
++ de = (ext3_dirent *) bh->b_data;
++ top = (ext3_dirent *) ((char *) de + sb->s_blocksize -
++ EXT3_DIR_REC_LEN(0));
++ for (; de < top; de = ext3_next_entry(de))
++ if (ext3_match(dentry->d_name.len, dentry->d_name.name, de)) {
++ if (!ext3_check_dir_entry("ext3_find_entry",
++ dir, de, bh,
++ (block<<EXT3_BLOCK_SIZE_BITS(sb))
++ +((char *)de - bh->b_data))) {
++ brelse (bh);
++ goto dxfail;
++ }
++ *res_dir = de;
++ goto dxfound;
++ }
++ brelse (bh);
++ /* Same hash continues in next block? Search on. */
++ if (++(frame->at) == frame->entries + dx_get_count(frame->entries))
++ {
++ struct buffer_head *bh2;
++ if (frame == frames)
++ goto dxfail;
++ if (++(frames->at) == frames->entries + dx_get_count(frames->entries))
++ goto dxfail;
++ /* should omit read if not continued */
++ if (!(bh2 = ext3_bread (NULL, dir,
++ dx_get_block(frames->at),
++ 0, &err)))
++ goto dxfail;
++ brelse (frame->bh);
++ frame->bh = bh2;
++ frame->at = frame->entries = ((struct dx_node *) bh2->b_data)->entries;
++ /* Subtle: the 0th entry has the count, find the hash in frame above */
++ if ((dx_get_hash(frames->at) & -2) == hash)
++ goto dxnext;
++ goto dxfail;
++ }
++ if ((dx_get_hash(frame->at) & -2) == hash)
++ goto dxnext;
++dxfail:
++ dxtrace(printk("%s not found\n", name));
++ dx_release (frames);
++ return NULL;
++dxfound:
++ dx_release (frames);
++ return bh;
+
++ }
++
+ nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
+ start = dir->u.ext3_i.i_dir_start_lookup;
+ if (start >= nblocks)
+@@ -237,6 +748,90 @@
+ de->file_type = ext3_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
+ }
+
++static ext3_dirent *
++dx_copy_dirents (char *from, char *to, struct dx_map_entry *map, int count)
++{
++ unsigned rec_len = 0;
++
++ while (count--) {
++ ext3_dirent *de = (ext3_dirent *) (from + map->offs);
++ rec_len = EXT3_DIR_REC_LEN(de->name_len);
++ memcpy (to, de, rec_len);
++ ((ext3_dirent *) to)->rec_len = rec_len;
++ to += rec_len;
++ map++;
++ }
++ return (ext3_dirent *) (to - rec_len);
++}
++
++#ifdef CONFIG_EXT3_INDEX
++static ext3_dirent *do_split(handle_t *handle, struct inode *dir,
++ struct buffer_head **bh,struct dx_frame *frame,
++ u32 hash, int *error)
++{
++ unsigned count;
++ struct buffer_head *bh2;
++ u32 newblock;
++ u32 hash2;
++ struct dx_map_entry *map;
++ char *data1 = (*bh)->b_data, *data2, *data3;
++ unsigned split;
++ ext3_dirent *de, *de2;
++
++ bh2 = ext3_append (handle, dir, &newblock, error);
++ if (!(bh2))
++ {
++ brelse(*bh);
++ *bh = NULL;
++ return (ext3_dirent *)bh2;
++ }
++
++ BUFFER_TRACE(*bh, "get_write_access");
++ ext3_journal_get_write_access(handle, *bh);
++ BUFFER_TRACE(frame->bh, "get_write_access");
++ ext3_journal_get_write_access(handle, frame->bh);
++
++ data2 = bh2->b_data;
++
++ map = kmalloc(sizeof(*map) * PAGE_CACHE_SIZE/EXT3_DIR_REC_LEN(1) + 1,
++ GFP_KERNEL);
++ if (!map)
++ panic("no memory for do_split\n");
++ count = dx_make_map((ext3_dirent *)data1, dir->i_sb->s_blocksize, map);
++ split = count/2; // need to adjust to actual middle
++ dx_sort_map (map, count);
++ hash2 = map[split].hash;
++ dxtrace(printk("Split block %i at %x, %i/%i\n",
++ dx_get_block(frame->at), hash2, split, count-split));
++
++ /* Fancy dance to stay within two buffers */
++ de2 = dx_copy_dirents (data1, data2, map + split, count - split);
++ data3 = (char *) de2 + de2->rec_len;
++ de = dx_copy_dirents (data1, data3, map, split);
++ memcpy(data1, data3, (char *) de + de->rec_len - data3);
++ de = (ext3_dirent *) ((char *) de - data3 + data1); // relocate de
++ de->rec_len = cpu_to_le16(data1 + dir->i_sb->s_blocksize - (char *)de);
++ de2->rec_len = cpu_to_le16(data2 + dir->i_sb->s_blocksize-(char *)de2);
++ dxtrace(dx_show_leaf((ext3_dirent *)data1, dir->i_sb->s_blocksize, 1));
++ dxtrace(dx_show_leaf((ext3_dirent *)data2, dir->i_sb->s_blocksize, 1));
++
++ /* Which block gets the new entry? */
++ if (hash >= hash2)
++ {
++ swap(*bh, bh2);
++ de = de2;
++ }
++ dx_insert_block(frame, hash2 + (hash2 == map[split-1].hash), newblock);
++ ext3_journal_dirty_metadata (handle, bh2);
++ brelse (bh2);
++ ext3_journal_dirty_metadata (handle, frame->bh);
++ dxtrace(dx_show_index ("frame", frame->entries));
++ kfree(map);
++ return de;
++}
++#endif
++
++
+ /*
+ * ext3_add_entry()
+ *
+@@ -255,118 +849,278 @@
+ struct inode *inode)
+ {
+ struct inode *dir = dentry->d_parent->d_inode;
+- const char *name = dentry->d_name.name;
+- int namelen = dentry->d_name.len;
+ unsigned long offset;
+- unsigned short rec_len;
+ struct buffer_head * bh;
+- struct ext3_dir_entry_2 * de, * de1;
+- struct super_block * sb;
++ ext3_dirent *de;
++ struct super_block * sb = dir->i_sb;
+ int retval;
++ unsigned short reclen = EXT3_DIR_REC_LEN(dentry->d_name.len);
+
+- sb = dir->i_sb;
++ unsigned nlen, rlen;
++ u32 block, blocks;
++ char *top;
+
+- if (!namelen)
++ if (!dentry->d_name.len)
+ return -EINVAL;
+- bh = ext3_bread (handle, dir, 0, 0, &retval);
+- if (!bh)
+- return retval;
+- rec_len = EXT3_DIR_REC_LEN(namelen);
+- offset = 0;
+- de = (struct ext3_dir_entry_2 *) bh->b_data;
+- while (1) {
+- if ((char *)de >= sb->s_blocksize + bh->b_data) {
+- brelse (bh);
+- bh = NULL;
+- bh = ext3_bread (handle, dir,
+- offset >> EXT3_BLOCK_SIZE_BITS(sb), 1, &retval);
+- if (!bh)
+- return retval;
+- if (dir->i_size <= offset) {
+- if (dir->i_size == 0) {
+- brelse(bh);
+- return -ENOENT;
++ if (ext3_dx && is_dx(dir)) {
++ struct dx_frame frames[2], *frame;
++ struct dx_entry *entries, *at;
++ u32 hash;
++ char *data1;
++
++ hash = dx_hash(dentry->d_name.name, dentry->d_name.len);
++ /* FIXME: do something if dx_probe() fails here */
++ frame = dx_probe(dir, hash, frames);
++ entries = frame->entries;
++ at = frame->at;
++
++ if (!(bh = ext3_bread(handle,dir, dx_get_block(at), 0,&retval)))
++ goto dxfail1;
++
++ BUFFER_TRACE(bh, "get_write_access");
++ ext3_journal_get_write_access(handle, bh);
++
++ data1 = bh->b_data;
++ de = (ext3_dirent *) data1;
++ top = data1 + (0? 200: sb->s_blocksize);
++ while ((char *) de < top)
++ {
++ /* FIXME: check EEXIST and dir */
++ nlen = EXT3_DIR_REC_LEN(de->name_len);
++ rlen = le16_to_cpu(de->rec_len);
++ if ((de->inode? rlen - nlen: rlen) >= reclen)
++ goto dx_add;
++ de = (ext3_dirent *) ((char *) de + rlen);
++ }
++ /* Block full, should compress but for now just split */
++ dxtrace(printk("using %u of %u node entries\n",
++ dx_get_count(entries), dx_get_limit(entries)));
++ /* Need to split index? */
++ if (dx_get_count(entries) == dx_get_limit(entries))
++ {
++ u32 newblock;
++ unsigned icount = dx_get_count(entries);
++ int levels = frame - frames;
++ struct dx_entry *entries2;
++ struct dx_node *node2;
++ struct buffer_head *bh2;
++ if (levels && dx_get_count(frames->entries) == dx_get_limit(frames->entries))
++ goto dxfull;
++ bh2 = ext3_append (handle, dir, &newblock, &retval);
++ if (!(bh2))
++ goto dxfail2;
++ node2 = (struct dx_node *)(bh2->b_data);
++ entries2 = node2->entries;
++ node2->fake.rec_len = cpu_to_le16(sb->s_blocksize);
++ node2->fake.inode = 0;
++ BUFFER_TRACE(frame->bh, "get_write_access");
++ ext3_journal_get_write_access(handle, frame->bh);
++ if (levels)
++ {
++ unsigned icount1 = icount/2, icount2 = icount - icount1;
++ unsigned hash2 = dx_get_hash(entries + icount1);
++ dxtrace(printk("Split index %i/%i\n", icount1, icount2));
++
++ BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
++ ext3_journal_get_write_access(handle, frames[0].bh);
++
++ memcpy ((char *) entries2, (char *) (entries + icount1),
++ icount2 * sizeof(struct dx_entry));
++ dx_set_count (entries, icount1);
++ dx_set_count (entries2, icount2);
++ dx_set_limit (entries2, dx_node_limit(dir));
++
++ /* Which index block gets the new entry? */
++ if (at - entries >= icount1) {
++ frame->at = at = at - entries - icount1 + entries2;
++ frame->entries = entries = entries2;
++ swap(frame->bh, bh2);
+ }
+-
+- ext3_debug ("creating next block\n");
+-
+- BUFFER_TRACE(bh, "get_write_access");
+- ext3_journal_get_write_access(handle, bh);
+- de = (struct ext3_dir_entry_2 *) bh->b_data;
+- de->inode = 0;
+- de->rec_len = le16_to_cpu(sb->s_blocksize);
+- dir->u.ext3_i.i_disksize =
+- dir->i_size = offset + sb->s_blocksize;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
+- ext3_mark_inode_dirty(handle, dir);
++ dx_insert_block (frames + 0, hash2, newblock);
++ dxtrace(dx_show_index ("node", frames[1].entries));
++ dxtrace(dx_show_index ("node",
++ ((struct dx_node *) bh2->b_data)->entries));
++ ext3_journal_dirty_metadata(handle, bh2);
++ brelse (bh2);
+ } else {
+-
+- ext3_debug ("skipping to next block\n");
+-
+- de = (struct ext3_dir_entry_2 *) bh->b_data;
++ dxtrace(printk("Creating second level index...\n"));
++ memcpy((char *) entries2, (char *) entries,
++ icount * sizeof(struct dx_entry));
++ dx_set_limit(entries2, dx_node_limit(dir));
++
++ /* Set up root */
++ dx_set_count(entries, 1);
++ dx_set_block(entries + 0, newblock);
++ ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
++
++ /* Add new access path frame */
++ frame = frames + 1;
++ frame->at = at = at - entries + entries2;
++ frame->entries = entries = entries2;
++ frame->bh = bh2;
++ ext3_journal_get_write_access(handle, frame->bh);
+ }
++ ext3_journal_dirty_metadata(handle, frames[0].bh);
+ }
+- if (!ext3_check_dir_entry ("ext3_add_entry", dir, de, bh,
+- offset)) {
+- brelse (bh);
+- return -ENOENT;
+- }
+- if (ext3_match (namelen, name, de)) {
++ de = do_split(handle, dir, &bh, frame, hash, &retval);
++ dx_release (frames);
++ if (!(de))
++ goto fail;
++ nlen = EXT3_DIR_REC_LEN(de->name_len);
++ rlen = le16_to_cpu(de->rec_len);
++ goto add;
++
++dx_add:
++ dx_release (frames);
++ goto add;
++
++dxfull:
++ ext3_warning(sb, __FUNCTION__, "Directory index full!\n");
++ retval = -ENOSPC;
++dxfail2:
++ brelse(bh);
++dxfail1:
++ dx_release (frames);
++ goto fail1;
++ }
++
++ blocks = dir->i_size >> sb->s_blocksize_bits;
++ for (block = 0, offset = 0; block < blocks; block++) {
++ bh = ext3_bread(handle, dir, block, 0, &retval);
++ if(!bh)
++ return retval;
++ de = (ext3_dirent *)bh->b_data;
++ top = bh->b_data + sb->s_blocksize - reclen;
++ while ((char *) de <= top) {
++ if (!ext3_check_dir_entry("ext3_add_entry", dir, de,
++ bh, offset)) {
++ brelse (bh);
++ return -EIO;
++ }
++ if (ext3_match(dentry->d_name.len,dentry->d_name.name,de)) {
+ brelse (bh);
+ return -EEXIST;
+- }
+- if ((le32_to_cpu(de->inode) == 0 &&
+- le16_to_cpu(de->rec_len) >= rec_len) ||
+- (le16_to_cpu(de->rec_len) >=
+- EXT3_DIR_REC_LEN(de->name_len) + rec_len)) {
+- BUFFER_TRACE(bh, "get_write_access");
+- ext3_journal_get_write_access(handle, bh);
+- /* By now the buffer is marked for journaling */
+- offset += le16_to_cpu(de->rec_len);
+- if (le32_to_cpu(de->inode)) {
+- de1 = (struct ext3_dir_entry_2 *) ((char *) de +
+- EXT3_DIR_REC_LEN(de->name_len));
+- de1->rec_len =
+- cpu_to_le16(le16_to_cpu(de->rec_len) -
+- EXT3_DIR_REC_LEN(de->name_len));
+- de->rec_len = cpu_to_le16(
+- EXT3_DIR_REC_LEN(de->name_len));
+- de = de1;
+ }
+- de->file_type = EXT3_FT_UNKNOWN;
+- if (inode) {
+- de->inode = cpu_to_le32(inode->i_ino);
+- ext3_set_de_type(dir->i_sb, de, inode->i_mode);
+- } else
+- de->inode = 0;
+- de->name_len = namelen;
+- memcpy (de->name, name, namelen);
+- /*
+- * XXX shouldn't update any times until successful
+- * completion of syscall, but too many callers depend
+- * on this.
+- *
+- * XXX similarly, too many callers depend on
+- * ext3_new_inode() setting the times, but error
+- * recovery deletes the inode, so the worst that can
+- * happen is that the times are slightly out of date
+- * and/or different from the directory change time.
+- */
+- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
+- dir->i_version = ++event;
+- ext3_mark_inode_dirty(handle, dir);
+- BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+- ext3_journal_dirty_metadata(handle, bh);
++ nlen = EXT3_DIR_REC_LEN(de->name_len);
++ rlen = le16_to_cpu(de->rec_len);
++ if ((de->inode ? rlen - nlen: rlen) >= reclen)
++ goto add;
++ de = (ext3_dirent *)((char *)de + rlen);
++ offset += rlen;
++ }
++ if (ext3_dx && blocks == 1 && test_opt(sb, INDEX))
++ goto dx_make_index;
++ brelse(bh);
++ }
++ bh = ext3_append(handle, dir, &block, &retval);
++ if (!bh)
++ return retval;
++ de = (ext3_dirent *) bh->b_data;
++ de->inode = 0;
++ de->rec_len = cpu_to_le16(rlen = sb->s_blocksize);
++ nlen = 0;
++ goto add;
++
++add:
++ BUFFER_TRACE(bh, "get_write_access");
++ ext3_journal_get_write_access(handle, bh);
++ /* By now the buffer is marked for journaling */
++ if (de->inode) {
++ ext3_dirent *de1 = (ext3_dirent *)((char *)de + nlen);
++ de1->rec_len = cpu_to_le16(rlen - nlen);
++ de->rec_len = cpu_to_le16(nlen);
++ de = de1;
++ }
++ de->file_type = EXT3_FT_UNKNOWN;
++ if (inode) {
++ de->inode = cpu_to_le32(inode->i_ino);
++ ext3_set_de_type(dir->i_sb, de, inode->i_mode);
++ } else
++ de->inode = 0;
++ de->name_len = dentry->d_name.len;
++ memcpy (de->name, dentry->d_name.name, dentry->d_name.len);
++ /*
++ * XXX shouldn't update any times until successful
++ * completion of syscall, but too many callers depend
++ * on this.
++ *
++ * XXX similarly, too many callers depend on
++ * ext3_new_inode() setting the times, but error
++ * recovery deletes the inode, so the worst that can
++ * happen is that the times are slightly out of date
++ * and/or different from the directory change time.
++ */
++ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
++ ext3_update_dx_flag(dir);
++ dir->i_version = ++event;
++ ext3_mark_inode_dirty(handle, dir);
++ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
++ ext3_journal_dirty_metadata(handle, bh);
++ brelse(bh);
++ return 0;
++
++dx_make_index:
++ {
++ struct buffer_head *bh2;
++ struct dx_root *root;
++ struct dx_frame frames[2], *frame;
++ struct dx_entry *entries;
++ ext3_dirent *de2;
++ char *data1;
++ unsigned len;
++ u32 hash;
++
++ dxtrace(printk("Creating index\n"));
++ ext3_journal_get_write_access(handle, bh);
++ root = (struct dx_root *) bh->b_data;
++
++ EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
++ bh2 = ext3_append (handle, dir, &block, &retval);
++ if (!(bh2))
++ {
+ brelse(bh);
+- return 0;
++ return retval;
+ }
+- offset += le16_to_cpu(de->rec_len);
+- de = (struct ext3_dir_entry_2 *)
+- ((char *) de + le16_to_cpu(de->rec_len));
++ data1 = bh2->b_data;
++
++ /* The 0th block becomes the root, move the dirents out */
++ de = (ext3_dirent *) &root->info;
++ len = ((char *) root) + sb->s_blocksize - (char *) de;
++ memcpy (data1, de, len);
++ de = (ext3_dirent *) data1;
++ top = data1 + len;
++ while (((char *) de2=(char*)de+le16_to_cpu(de->rec_len)) < top)
++ de = de2;
++ de->rec_len = cpu_to_le16(data1 + sb->s_blocksize - (char *)de);
++ /* Initialize the root; the dot dirents already exist */
++ de = (ext3_dirent *) (&root->dotdot);
++ de->rec_len = cpu_to_le16(sb->s_blocksize-EXT3_DIR_REC_LEN(2));
++ memset (&root->info, 0, sizeof(root->info));
++ root->info.info_length = sizeof(root->info);
++ entries = root->entries;
++ dx_set_block (entries, 1);
++ dx_set_count (entries, 1);
++ dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
++
++ /* Initialize as for dx_probe */
++ hash = dx_hash (dentry->d_name.name, dentry->d_name.len);
++ frame = frames;
++ frame->entries = entries;
++ frame->at = entries;
++ frame->bh = bh;
++ bh = bh2;
++ de = do_split(handle,dir, &bh, frame, hash, &retval);
++ dx_release (frames);
++ if (!(de))
++ return retval;
++ nlen = EXT3_DIR_REC_LEN(de->name_len);
++ rlen = le16_to_cpu(de->rec_len);
++ goto add;
+ }
+- brelse (bh);
+- return -ENOSPC;
++fail1:
++ return retval;
++fail:
++ return -ENOENT;
+ }
+
+ /*
+@@ -451,7 +1212,8 @@
+ struct inode * inode;
+ int err;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 3);
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+@@ -478,7 +1240,8 @@
+ struct inode *inode;
+ int err;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 3);
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+@@ -507,7 +1270,8 @@
+ if (dir->i_nlink >= EXT3_LINK_MAX)
+ return -EMLINK;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 3);
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+@@ -550,7 +1320,7 @@
+ if (err)
+ goto out_no_entry;
+ dir->i_nlink++;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(dir);
+ ext3_mark_inode_dirty(handle, dir);
+ d_instantiate(dentry, inode);
+ out_stop:
+@@ -832,7 +1596,7 @@
+ ext3_mark_inode_dirty(handle, inode);
+ dir->i_nlink--;
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(dir);
+ ext3_mark_inode_dirty(handle, dir);
+
+ end_rmdir:
+@@ -878,7 +1642,7 @@
+ if (retval)
+ goto end_unlink;
+ dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+- dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(dir);
+ ext3_mark_inode_dirty(handle, dir);
+ inode->i_nlink--;
+ if (!inode->i_nlink)
+@@ -904,7 +1668,8 @@
+ if (l > dir->i_sb->s_blocksize)
+ return -ENAMETOOLONG;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS + 5);
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+@@ -959,7 +1724,8 @@
+ if (inode->i_nlink >= EXT3_LINK_MAX)
+ return -EMLINK;
+
+- handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS);
++ handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+@@ -995,7 +1761,8 @@
+
+ old_bh = new_bh = dir_bh = NULL;
+
+- handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS + 2);
++ handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+@@ -1077,7 +1844,7 @@
+ new_inode->i_ctime = CURRENT_TIME;
+ }
+ old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
+- old_dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(old_dir);
+ if (dir_bh) {
+ BUFFER_TRACE(dir_bh, "get_write_access");
+ ext3_journal_get_write_access(handle, dir_bh);
+@@ -1089,7 +1856,7 @@
+ new_inode->i_nlink--;
+ } else {
+ new_dir->i_nlink++;
+- new_dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
++ ext3_update_dx_flag(new_dir);
+ ext3_mark_inode_dirty(handle, new_dir);
+ }
+ }
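
The ext3_find_entry() and ext3_add_entry() changes above both funnel through
dx_probe(), which binary-searches the sorted (hash, block) pairs of an index
node and falls back to the next leaf when a hash chain spills over. A minimal
user-space sketch of that search loop (simplified types and names, not the
kernel structures):

        struct dx_pair { unsigned hash; unsigned block; };

        /*
         * Return the rightmost pair whose hash is <= the wanted hash.
         * Pair 0 is skipped because, on disk, it carries the count/limit
         * words instead of a hash -- the same convention dx_probe() uses.
         */
        static struct dx_pair *dx_lookup(struct dx_pair *entries,
                                         unsigned count, unsigned hash)
        {
                struct dx_pair *p = entries + 1;
                struct dx_pair *q = entries + count - 1;
                struct dx_pair *m;

                while (p <= q) {
                        m = p + (q - p) / 2;
                        if (m->hash > hash)
                                q = m - 1;
                        else
                                p = m + 1;
                }
                return p - 1;
        }
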
+--- ./include/linux/ext3_fs.h 2002/03/05 06:18:59 2.1
++++ ./include/linux/ext3_fs.h 2002/03/05 06:26:56
+@@ -339,6 +339,7 @@
+ #define EXT3_MOUNT_WRITEBACK_DATA 0x0C00 /* No data ordering */
+ #define EXT3_MOUNT_UPDATE_JOURNAL 0x1000 /* Update the journal format */
+ #define EXT3_MOUNT_NO_UID32 0x2000 /* Disable 32-bit UIDs */
++#define EXT3_MOUNT_INDEX 0x4000 /* Enable directory index */
+
+ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
+ #ifndef _LINUX_EXT2_FS_H
+@@ -575,6 +576,24 @@
+ #define EXT3_DIR_ROUND (EXT3_DIR_PAD - 1)
+ #define EXT3_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT3_DIR_ROUND) & \
+ ~EXT3_DIR_ROUND)
++/*
++ * Hash Tree Directory indexing
++ * (c) Daniel Phillips, 2001
++ */
++
++#define CONFIG_EXT3_INDEX
++
++#ifdef CONFIG_EXT3_INDEX
++ enum {ext3_dx = 1};
++ #define is_dx(dir) (EXT3_I(dir)->i_flags & EXT3_INDEX_FL)
++#define EXT3_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT3_LINK_MAX)
++#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
++#else
++ enum {ext3_dx = 0};
++ #define is_dx(dir) 0
++#define EXT3_DIR_LINK_MAX(dir) ((dir)->i_nlink >= EXT3_LINK_MAX)
++#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2)
++#endif
+
+ #ifdef __KERNEL__
+ /*
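
EXT3_DIR_REC_LEN() in the hunk above pads every directory entry to a 4-byte
boundary (EXT3_DIR_PAD is 4 in ext3, so EXT3_DIR_ROUND is 3; that constant is
assumed here, it is not part of this hunk), with 8 bytes covering the fixed
inode, rec_len, name_len and file_type fields. A quick worked check of the
rounding, and of where the index limits come from:

        #define DIR_PAD         4               /* assumed EXT3_DIR_PAD */
        #define DIR_ROUND       (DIR_PAD - 1)
        #define DIR_REC_LEN(name_len)   (((name_len) + 8 + DIR_ROUND) & \
                                         ~DIR_ROUND)

        /*
         * DIR_REC_LEN(1) == 12, DIR_REC_LEN(2) == 12, DIR_REC_LEN(10) == 20.
         * dx_root_limit() in the namei.c hunk subtracts the first two for
         * the "." and ".." entries before dividing the rest of the block
         * into dx_entry slots (assumed to be two 32-bit words each).
         */
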
+--- ./include/linux/ext3_jbd.h 2002/03/05 06:18:59 2.1
++++ ./include/linux/ext3_jbd.h 2002/03/05 06:33:54
+@@ -63,6 +63,8 @@
+
+ #define EXT3_RESERVE_TRANS_BLOCKS 12
+
++#define EXT3_INDEX_EXTRA_TRANS_BLOCKS 8
++
+ int
+ ext3_mark_iloc_dirty(handle_t *handle,
+ struct inode *inode,
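
Every index operation is keyed by dx_hack_hash() from the namei.c hunk,
shifted left one bit by the dx_hash() wrapper so the low bit can mark a hash
chain that continues into the following leaf block (the "& -2" masks in
ext3_find_entry). A standalone rendering of the hash for experimentation,
kept line-for-line with the patch but built with fixed-width user-space types
(an illustration, not the kernel code):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        static uint32_t dx_hack_hash(const uint8_t *name, int len)
        {
                uint32_t hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;

                while (len--) {
                        uint32_t hash = hash1 + (hash0 ^ (*name++ * 7152373));

                        if (hash & 0x80000000)
                                hash -= 0x7fffffff;
                        hash1 = hash0;
                        hash0 = hash;
                }
                return hash0;
        }

        int main(void)
        {
                const char *name = "lost+found";

                /* Low bit of the on-disk key is reserved, hence the << 1. */
                printf("%08x\n", dx_hack_hash((const uint8_t *)name,
                                              strlen(name)) << 1);
                return 0;
        }
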
--- /dev/null
+ fs/ext3/ialloc.c | 2
+ fs/ext3/inode.c | 29 -
+ fs/ext3/namei.c | 12
+ fs/ext3/super.c | 21
+ fs/ext3/xattr.c | 1247 +++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/ext3_fs.h | 46 -
+ include/linux/ext3_jbd.h | 8
+ include/linux/ext3_xattr.h | 155 +++++
+ include/linux/xattr.h | 15
+ 9 files changed, 1486 insertions(+), 49 deletions(-)
+
+--- linux-2.4.18-18/fs/ext3/ialloc.c~linux-2.4.18ea-0.8.26 Sat Apr 5 02:54:56 2003
++++ linux-2.4.18-18-braam/fs/ext3/ialloc.c Sat Apr 5 03:03:06 2003
+@@ -17,6 +17,7 @@
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
+ #include <linux/ext3_jbd.h>
++#include <linux/ext3_xattr.h>
+ #include <linux/stat.h>
+ #include <linux/string.h>
+ #include <linux/locks.h>
+@@ -216,6 +217,7 @@ void ext3_free_inode (handle_t *handle,
+ * as writing the quota to disk may need the lock as well.
+ */
+ DQUOT_INIT(inode);
++ ext3_xattr_drop_inode(handle, inode);
+ DQUOT_FREE_INODE(inode);
+ DQUOT_DROP(inode);
+
+--- linux-2.4.18-18/fs/ext3/inode.c~linux-2.4.18ea-0.8.26 Sat Apr 5 02:54:56 2003
++++ linux-2.4.18-18-braam/fs/ext3/inode.c Sat Apr 5 03:03:06 2003
+@@ -39,6 +39,18 @@
+ */
+ #undef SEARCH_FROM_ZERO
+
++/*
++ * Test whether an inode is a fast symlink.
++ */
++static inline int ext3_inode_is_fast_symlink(struct inode *inode)
++{
++ int ea_blocks = EXT3_I(inode)->i_file_acl ?
++ (inode->i_sb->s_blocksize >> 9) : 0;
++
++ return (S_ISLNK(inode->i_mode) &&
++ inode->i_blocks - ea_blocks == 0);
++}
++
+ /* The ext3 forget function must perform a revoke if we are freeing data
+ * which has been journaled. Metadata (eg. indirect blocks) must be
+ * revoked in all cases.
+@@ -48,7 +60,7 @@
+ * still needs to be revoked.
+ */
+
+-static int ext3_forget(handle_t *handle, int is_metadata,
++int ext3_forget(handle_t *handle, int is_metadata,
+ struct inode *inode, struct buffer_head *bh,
+ int blocknr)
+ {
+@@ -164,9 +176,7 @@ void ext3_delete_inode (struct inode * i
+ {
+ handle_t *handle;
+
+- if (is_bad_inode(inode) ||
+- inode->i_ino == EXT3_ACL_IDX_INO ||
+- inode->i_ino == EXT3_ACL_DATA_INO)
++ if (is_bad_inode(inode))
+ goto no_delete;
+
+ lock_kernel();
+@@ -1861,6 +1871,8 @@ void ext3_truncate(struct inode * inode)
+ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
+ return;
++ if (ext3_inode_is_fast_symlink(inode))
++ return;
+ if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ return;
+
+@@ -2008,8 +2020,6 @@ int ext3_get_inode_loc (struct inode *in
+ struct ext3_group_desc * gdp;
+
+ if ((inode->i_ino != EXT3_ROOT_INO &&
+- inode->i_ino != EXT3_ACL_IDX_INO &&
+- inode->i_ino != EXT3_ACL_DATA_INO &&
+ inode->i_ino != EXT3_JOURNAL_INO &&
+ inode->i_ino < EXT3_FIRST_INO(inode->i_sb)) ||
+ inode->i_ino > le32_to_cpu(
+@@ -2136,10 +2146,7 @@ void ext3_read_inode(struct inode * inod
+
+ brelse (iloc.bh);
+
+- if (inode->i_ino == EXT3_ACL_IDX_INO ||
+- inode->i_ino == EXT3_ACL_DATA_INO)
+- /* Nothing to do */ ;
+- else if (S_ISREG(inode->i_mode)) {
++ if (S_ISREG(inode->i_mode)) {
+ inode->i_op = &ext3_file_inode_operations;
+ inode->i_fop = &ext3_file_operations;
+ inode->i_mapping->a_ops = &ext3_aops;
+@@ -2147,7 +2154,7 @@ void ext3_read_inode(struct inode * inod
+ inode->i_op = &ext3_dir_inode_operations;
+ inode->i_fop = &ext3_dir_operations;
+ } else if (S_ISLNK(inode->i_mode)) {
+- if (!inode->i_blocks)
++ if (ext3_inode_is_fast_symlink(inode))
+ inode->i_op = &ext3_fast_symlink_inode_operations;
+ else {
+ inode->i_op = &page_symlink_inode_operations;
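
ext3_inode_is_fast_symlink() above has to account for the extended-attribute
block introduced by this patch: a fast symlink stores its target inside the
inode, so the only sectors it may own are the ones backing i_file_acl. A
small model of the test (hypothetical parameter names, user-space only):

        /*
         * i_blocks counts 512-byte sectors; an EA block contributes
         * blocksize >> 9 of them.  With a 1024-byte block size a fast
         * symlink that also carries xattrs therefore shows i_blocks == 2.
         */
        static int is_fast_symlink(int is_lnk, unsigned long i_blocks,
                                   unsigned blocksize, int has_ea_block)
        {
                unsigned long ea_sectors = has_ea_block ? blocksize >> 9 : 0;

                return is_lnk && i_blocks - ea_sectors == 0;
        }
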
+--- linux-2.4.18-18/fs/ext3/namei.c~linux-2.4.18ea-0.8.26 Sat Apr 5 02:54:56 2003
++++ linux-2.4.18-18-braam/fs/ext3/namei.c Sat Apr 5 03:03:06 2003
+@@ -27,6 +27,7 @@
+ #include <linux/sched.h>
+ #include <linux/ext3_fs.h>
+ #include <linux/ext3_jbd.h>
++#include <linux/ext3_xattr.h>
+ #include <linux/fcntl.h>
+ #include <linux/stat.h>
+ #include <linux/string.h>
+@@ -1183,6 +1184,7 @@ static int ext3_add_nondir(handle_t *han
+ d_instantiate(dentry, inode);
+ return 0;
+ }
++ ext3_xattr_drop_inode(handle, inode);
+ ext3_dec_count(handle, inode);
+ iput(inode);
+ return err;
+@@ -1268,15 +1270,14 @@ static int ext3_mkdir(struct inode * dir
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+
+- inode = ext3_new_inode (handle, dir, S_IFDIR);
++ inode = ext3_new_inode (handle, dir, S_IFDIR | mode);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_stop;
+
+ inode->i_op = &ext3_dir_inode_operations;
+ inode->i_fop = &ext3_dir_operations;
+- inode->i_size = inode->u.ext3_i.i_disksize = inode->i_sb->s_blocksize;
+- inode->i_blocks = 0;
++ inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+ dir_block = ext3_bread (handle, inode, 0, 1, &err);
+ if (!dir_block) {
+ inode->i_nlink--; /* is this nlink == 0? */
+@@ -1303,9 +1304,6 @@ static int ext3_mkdir(struct inode * dir
+ BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
+ ext3_journal_dirty_metadata(handle, dir_block);
+ brelse (dir_block);
+- inode->i_mode = S_IFDIR | mode;
+- if (dir->i_mode & S_ISGID)
+- inode->i_mode |= S_ISGID;
+ ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_entry (handle, dentry, inode);
+ if (err)
+@@ -1671,7 +1669,7 @@ static int ext3_symlink (struct inode *
+ if (IS_ERR(inode))
+ goto out_stop;
+
+- if (l > sizeof (inode->u.ext3_i.i_data)) {
++ if (l > sizeof(EXT3_I(inode)->i_data)) {
+ inode->i_op = &page_symlink_inode_operations;
+ inode->i_mapping->a_ops = &ext3_aops;
+ /*
+--- linux-2.4.18-18/fs/ext3/super.c~linux-2.4.18ea-0.8.26 Sat Apr 5 02:54:56 2003
++++ linux-2.4.18-18-braam/fs/ext3/super.c Sat Apr 5 03:03:06 2003
+@@ -24,6 +24,7 @@
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
+ #include <linux/ext3_jbd.h>
++#include <linux/ext3_xattr.h>
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/locks.h>
+@@ -404,6 +405,7 @@ void ext3_put_super (struct super_block
+ kdev_t j_dev = sbi->s_journal->j_dev;
+ int i;
+
++ ext3_xattr_put_super(sb);
+ journal_destroy(sbi->s_journal);
+ if (!(sb->s_flags & MS_RDONLY)) {
+ EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
+@@ -1748,14 +1750,25 @@ int ext3_statfs (struct super_block * sb
+
+ static DECLARE_FSTYPE_DEV(ext3_fs_type, "ext3", ext3_read_super);
+
+-static int __init init_ext3_fs(void)
++static void exit_ext3_fs(void)
+ {
+- return register_filesystem(&ext3_fs_type);
++ unregister_filesystem(&ext3_fs_type);
++ exit_ext3_xattr_user();
++ exit_ext3_xattr();
+ }
+
+-static void __exit exit_ext3_fs(void)
++static int __init init_ext3_fs(void)
+ {
+- unregister_filesystem(&ext3_fs_type);
++ int error = init_ext3_xattr();
++ if (!error)
++ error = init_ext3_xattr_user();
++ if (!error)
++ error = register_filesystem(&ext3_fs_type);
++ if (!error)
++ return 0;
++
++ exit_ext3_fs();
++ return error;
+ }
+
+ EXPORT_SYMBOL(ext3_bread);
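
xattr.c, added below, packs one block per attribute set: entry descriptors
grow down from the header while values grow up from the end, and
ext3_xattr_set() accepts a new attribute only if the gap between the two
regions still covers the padded name plus the padded value. A sketch of that
free-space arithmetic with made-up offsets (the 4-byte terminator after the
last entry matches the sizeof(__u32) subtraction in the patch):

        /*
         * min_offs     - lowest offset used by any value (values pack
         *                toward the end of the block)
         * entries_end  - offset just past the last entry descriptor
         */
        static int xattr_free_space(int min_offs, int entries_end)
        {
                return min_offs - entries_end - 4;  /* keep the terminator */
        }

        /*
         * Example for a 4096-byte block: the header plus two entries end at
         * offset 100 and the values have grown down to offset 3980, leaving
         * 3980 - 100 - 4 = 3876 bytes for further (padded) names and values.
         */
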
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.18-18-braam/fs/ext3/xattr.c Sat Apr 5 02:54:56 2003
+@@ -0,0 +1,1247 @@
++/*
++ * linux/fs/ext3/xattr.c
++ *
++ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
++ *
++ * Fix by Harrison Xing <harrison@mountainviewdata.com>.
++ * Ext3 code with a lot of help from Eric Jarman <ejarman@acm.org>.
++ * Extended attributes for symlinks and special files added per
++ * suggestion of Luka Renko <luka.renko@hermes.si>.
++ */
++
++/*
++ * Extended attributes are stored on disk blocks allocated outside of
++ * any inode. The i_file_acl field is then made to point to this allocated
++ * block. If all extended attributes of an inode are identical, these
++ * inodes may share the same extended attribute block. Such situations
++ * are automatically detected by keeping a cache of recent attribute block
++ * numbers and hashes over the block's contents in memory.
++ *
++ *
++ * Extended attribute block layout:
++ *
++ * +------------------+
++ * | header |
++ * | entry 1 | |
++ * | entry 2 | | growing downwards
++ * | entry 3 | v
++ * | four null bytes |
++ * | . . . |
++ * | value 1 | ^
++ * | value 3 | | growing upwards
++ * | value 2 | |
++ * +------------------+
++ *
++ * The block header is followed by multiple entry descriptors. These entry
++ * descriptors are variable in size, and aligned to EXT3_XATTR_PAD
++ * byte boundaries. The entry descriptors are sorted by attribute name,
++ * so that two extended attribute blocks can be compared efficiently.
++ *
++ * Attribute values are aligned to the end of the block, stored in
++ * no specific order. They are also padded to EXT3_XATTR_PAD byte
++ * boundaries. No additional gaps are left between them.
++ *
++ * Locking strategy
++ * ----------------
++ * The VFS already holds the BKL and the inode->i_sem semaphore when any of
++ * the xattr inode operations are called, so we are guaranteed that only one
++ * process accesses extended attributes of an inode at any time.
++ *
++ * For writing we also grab the ext3_xattr_sem semaphore. This ensures that
++ * only a single process is modifying an extended attribute block, even
++ * if the block is shared among inodes.
++ *
++ * Note for porting to 2.5
++ * -----------------------
++ * The BKL will no longer be held in the xattr inode operations.
++ */
++
++#include <linux/fs.h>
++#include <linux/locks.h>
++#include <linux/slab.h>
++#include <linux/ext3_jbd.h>
++#include <linux/ext3_fs.h>
++#include <linux/ext3_xattr.h>
++#ifdef CONFIG_EXT3_FS_XATTR_SHARING
++#include <linux/mbcache.h>
++#endif
++#include <linux/quotaops.h>
++#include <asm/semaphore.h>
++#include <linux/compatmac.h>
++#include <linux/module.h>
++
++/* These symbols may be needed by a module. */
++EXPORT_SYMBOL(ext3_xattr_register);
++EXPORT_SYMBOL(ext3_xattr_unregister);
++EXPORT_SYMBOL(ext3_xattr_get);
++EXPORT_SYMBOL(ext3_xattr_list);
++EXPORT_SYMBOL(ext3_xattr_set);
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
++# define mark_buffer_dirty(bh) mark_buffer_dirty(bh, 1)
++#endif
++
++#define HDR(bh) ((struct ext3_xattr_header *)((bh)->b_data))
++#define ENTRY(ptr) ((struct ext3_xattr_entry *)(ptr))
++#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
++#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
++
++#ifdef EXT3_XATTR_DEBUG
++# define ea_idebug(inode, f...) do { \
++ printk(KERN_DEBUG "inode %s:%ld: ", \
++ kdevname(inode->i_dev), inode->i_ino); \
++ printk(f); \
++ printk("\n"); \
++ } while (0)
++# define ea_bdebug(bh, f...) do { \
++ printk(KERN_DEBUG "block %s:%ld: ", \
++ kdevname(bh->b_dev), bh->b_blocknr); \
++ printk(f); \
++ printk("\n"); \
++ } while (0)
++#else
++# define ea_idebug(f...)
++# define ea_bdebug(f...)
++#endif
++
++static int ext3_xattr_set2(handle_t *, struct inode *, struct buffer_head *,
++ struct ext3_xattr_header *);
++
++#ifdef CONFIG_EXT3_FS_XATTR_SHARING
++
++static int ext3_xattr_cache_insert(struct buffer_head *);
++static struct buffer_head *ext3_xattr_cache_find(struct inode *,
++ struct ext3_xattr_header *);
++static void ext3_xattr_cache_remove(struct buffer_head *);
++static void ext3_xattr_rehash(struct ext3_xattr_header *,
++ struct ext3_xattr_entry *);
++
++static struct mb_cache *ext3_xattr_cache;
++
++#else
++# define ext3_xattr_cache_insert(bh) 0
++# define ext3_xattr_cache_find(inode, header) NULL
++# define ext3_xattr_cache_remove(bh) do {} while(0)
++# define ext3_xattr_rehash(header, entry) do {} while(0)
++#endif
++
++/*
++ * If a file system does not share extended attributes among inodes,
++ * we should not need the ext3_xattr_sem semaphore. However, the
++ * filesystem may still contain shared blocks, so we always take
++ * the lock.
++ */
++
++DECLARE_MUTEX(ext3_xattr_sem);
++
++static inline void
++ext3_xattr_lock(void)
++{
++ down(&ext3_xattr_sem);
++}
++
++static inline void
++ext3_xattr_unlock(void)
++{
++ up(&ext3_xattr_sem);
++}
++
++static inline int
++ext3_xattr_new_block(handle_t *handle, struct inode *inode,
++ int * errp, int force)
++{
++ struct super_block *sb = inode->i_sb;
++ int goal = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
++ EXT3_I(inode)->i_block_group * EXT3_BLOCKS_PER_GROUP(sb);
++
++ /* How can we enforce the allocation? */
++ int block = ext3_new_block(handle, inode, goal, 0, 0, errp);
++#ifdef OLD_QUOTAS
++ if (!*errp)
++ inode->i_blocks += inode->i_sb->s_blocksize >> 9;
++#endif
++ return block;
++}
++
++static inline int
++ext3_xattr_quota_alloc(struct inode *inode, int force)
++{
++ /* How can we enforce the allocation? */
++#ifdef OLD_QUOTAS
++ int error = DQUOT_ALLOC_BLOCK(inode->i_sb, inode, 1);
++ if (!error)
++ inode->i_blocks += inode->i_sb->s_blocksize >> 9;
++#else
++ int error = DQUOT_ALLOC_BLOCK(inode, 1);
++#endif
++ return error;
++}
++
++#ifdef OLD_QUOTAS
++
++static inline void
++ext3_xattr_quota_free(struct inode *inode)
++{
++ DQUOT_FREE_BLOCK(inode->i_sb, inode, 1);
++ inode->i_blocks -= inode->i_sb->s_blocksize >> 9;
++}
++
++static inline void
++ext3_xattr_free_block(handle_t *handle, struct inode * inode,
++ unsigned long block)
++{
++ ext3_free_blocks(handle, inode, block, 1);
++ inode->i_blocks -= inode->i_sb->s_blocksize >> 9;
++}
++
++#else
++# define ext3_xattr_quota_free(inode) \
++ DQUOT_FREE_BLOCK(inode, 1)
++# define ext3_xattr_free_block(handle, inode, block) \
++ ext3_free_blocks(handle, inode, block, 1)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
++
++static inline struct buffer_head *
++sb_bread(struct super_block *sb, int block)
++{
++ return bread(sb->s_dev, block, sb->s_blocksize);
++}
++
++static inline struct buffer_head *
++sb_getblk(struct super_block *sb, int block)
++{
++ return getblk(sb->s_dev, block, sb->s_blocksize);
++}
++
++#endif
++
++struct ext3_xattr_handler *ext3_xattr_handlers[EXT3_XATTR_INDEX_MAX];
++rwlock_t ext3_handler_lock = RW_LOCK_UNLOCKED;
++
++int
++ext3_xattr_register(int name_index, struct ext3_xattr_handler *handler)
++{
++ int error = -EINVAL;
++
++ if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
++ write_lock(&ext3_handler_lock);
++ if (!ext3_xattr_handlers[name_index-1]) {
++ ext3_xattr_handlers[name_index-1] = handler;
++ error = 0;
++ }
++ write_unlock(&ext3_handler_lock);
++ }
++ return error;
++}
++
++void
++ext3_xattr_unregister(int name_index, struct ext3_xattr_handler *handler)
++{
++ if (name_index > 0 || name_index <= EXT3_XATTR_INDEX_MAX) {
++ write_lock(&ext3_handler_lock);
++ ext3_xattr_handlers[name_index-1] = NULL;
++ write_unlock(&ext3_handler_lock);
++ }
++}
++
++static inline const char *
++strcmp_prefix(const char *a, const char *a_prefix)
++{
++ while (*a_prefix && *a == *a_prefix) {
++ a++;
++ a_prefix++;
++ }
++ return *a_prefix ? NULL : a;
++}
++
++/*
++ * Decode the extended attribute name, and translate it into
++ * the name_index and name suffix.
++ */
++static inline struct ext3_xattr_handler *
++ext3_xattr_resolve_name(const char **name)
++{
++ struct ext3_xattr_handler *handler = NULL;
++ int i;
++
++ if (!*name)
++ return NULL;
++ read_lock(&ext3_handler_lock);
++ for (i=0; i<EXT3_XATTR_INDEX_MAX; i++) {
++ if (ext3_xattr_handlers[i]) {
++ const char *n = strcmp_prefix(*name,
++ ext3_xattr_handlers[i]->prefix);
++ if (n) {
++ handler = ext3_xattr_handlers[i];
++ *name = n;
++ break;
++ }
++ }
++ }
++ read_unlock(&ext3_handler_lock);
++ return handler;
++}
++
++static inline struct ext3_xattr_handler *
++ext3_xattr_handler(int name_index)
++{
++ struct ext3_xattr_handler *handler = NULL;
++ if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
++ read_lock(&ext3_handler_lock);
++ handler = ext3_xattr_handlers[name_index-1];
++ read_unlock(&ext3_handler_lock);
++ }
++ return handler;
++}
++
++/*
++ * Inode operation getxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++ssize_t
++ext3_getxattr(struct dentry *dentry, const char *name,
++ void *buffer, size_t size)
++{
++ struct ext3_xattr_handler *handler;
++ struct inode *inode = dentry->d_inode;
++
++ handler = ext3_xattr_resolve_name(&name);
++ if (!handler)
++ return -ENOTSUP;
++ return handler->get(inode, name, buffer, size);
++}
++
++/*
++ * Inode operation listxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++ssize_t
++ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
++{
++ return ext3_xattr_list(dentry->d_inode, buffer, size);
++}
++
++/*
++ * Inode operation setxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++int
++ext3_setxattr(struct dentry *dentry, const char *name,
++ void *value, size_t size, int flags)
++{
++ struct ext3_xattr_handler *handler;
++ struct inode *inode = dentry->d_inode;
++
++ if (size == 0)
++ value = ""; /* empty EA, do not remove */
++ handler = ext3_xattr_resolve_name(&name);
++ if (!handler)
++ return -ENOTSUP;
++ return handler->set(inode, name, value, size, flags);
++}
++
++/*
++ * Inode operation removexattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++int
++ext3_removexattr(struct dentry *dentry, const char *name)
++{
++ struct ext3_xattr_handler *handler;
++ struct inode *inode = dentry->d_inode;
++
++ handler = ext3_xattr_resolve_name(&name);
++ if (!handler)
++ return -ENOTSUP;
++ return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
++}
++
++/*
++ * ext3_xattr_get()
++ *
++ * Copy an extended attribute into the buffer
++ * provided, or compute the buffer size required.
++ * Buffer is NULL to compute the size of the buffer required.
++ *
++ * Returns a negative error number on failure, or the number of bytes
++ * used / required on success.
++ */
++int
++ext3_xattr_get(struct inode *inode, int name_index, const char *name,
++ void *buffer, size_t buffer_size)
++{
++ struct buffer_head *bh = NULL;
++ struct ext3_xattr_entry *entry;
++ unsigned int block, size;
++ char *end;
++ int name_len, error;
++
++ ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
++ name_index, name, buffer, (long)buffer_size);
++
++ if (name == NULL)
++ return -EINVAL;
++ if (!EXT3_I(inode)->i_file_acl)
++ return -ENOATTR;
++ block = EXT3_I(inode)->i_file_acl;
++ ea_idebug(inode, "reading block %d", block);
++ bh = sb_bread(inode->i_sb, block);
++ if (!bh)
++ return -EIO;
++ ea_bdebug(bh, "b_count=%d, refcount=%d",
++ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
++ end = bh->b_data + bh->b_size;
++ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
++ HDR(bh)->h_blocks != cpu_to_le32(1)) {
++bad_block: ext3_error(inode->i_sb, "ext3_xattr_get",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ /* find named attribute */
++ name_len = strlen(name);
++
++ error = -ERANGE;
++ if (name_len > 255)
++ goto cleanup;
++ entry = FIRST_ENTRY(bh);
++ while (!IS_LAST_ENTRY(entry)) {
++ struct ext3_xattr_entry *next =
++ EXT3_XATTR_NEXT(entry);
++ if ((char *)next >= end)
++ goto bad_block;
++ if (name_index == entry->e_name_index &&
++ name_len == entry->e_name_len &&
++ memcmp(name, entry->e_name, name_len) == 0)
++ goto found;
++ entry = next;
++ }
++ /* Check the remaining name entries */
++ while (!IS_LAST_ENTRY(entry)) {
++ struct ext3_xattr_entry *next =
++ EXT3_XATTR_NEXT(entry);
++ if ((char *)next >= end)
++ goto bad_block;
++ entry = next;
++ }
++ if (ext3_xattr_cache_insert(bh))
++ ea_idebug(inode, "cache insert failed");
++ error = -ENOATTR;
++ goto cleanup;
++found:
++ /* check the buffer size */
++ if (entry->e_value_block != 0)
++ goto bad_block;
++ size = le32_to_cpu(entry->e_value_size);
++ if (size > inode->i_sb->s_blocksize ||
++ le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
++ goto bad_block;
++
++ if (ext3_xattr_cache_insert(bh))
++ ea_idebug(inode, "cache insert failed");
++ if (buffer) {
++ error = -ERANGE;
++ if (size > buffer_size)
++ goto cleanup;
++ /* return value of attribute */
++ memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
++ size);
++ }
++ error = size;
++
++cleanup:
++ brelse(bh);
++
++ return error;
++}
++
++/*
++ * ext3_xattr_list()
++ *
++ * Copy a list of attribute names into the buffer
++ * provided, or compute the buffer size required.
++ * Buffer is NULL to compute the size of the buffer required.
++ *
++ * Returns a negative error number on failure, or the number of bytes
++ * used / required on success.
++ */
++int
++ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
++{
++ struct buffer_head *bh = NULL;
++ struct ext3_xattr_entry *entry;
++ unsigned int block, size = 0;
++ char *buf, *end;
++ int error;
++
++ ea_idebug(inode, "buffer=%p, buffer_size=%ld",
++ buffer, (long)buffer_size);
++
++ if (!EXT3_I(inode)->i_file_acl)
++ return 0;
++ block = EXT3_I(inode)->i_file_acl;
++ ea_idebug(inode, "reading block %d", block);
++ bh = sb_bread(inode->i_sb, block);
++ if (!bh)
++ return -EIO;
++ ea_bdebug(bh, "b_count=%d, refcount=%d",
++ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
++ end = bh->b_data + bh->b_size;
++ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
++ HDR(bh)->h_blocks != cpu_to_le32(1)) {
++bad_block: ext3_error(inode->i_sb, "ext3_xattr_list",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ /* compute the size required for the list of attribute names */
++ for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
++ entry = EXT3_XATTR_NEXT(entry)) {
++ struct ext3_xattr_handler *handler;
++ struct ext3_xattr_entry *next =
++ EXT3_XATTR_NEXT(entry);
++ if ((char *)next >= end)
++ goto bad_block;
++
++ handler = ext3_xattr_handler(entry->e_name_index);
++ if (handler) {
++ size += handler->list(NULL, inode, entry->e_name,
++ entry->e_name_len) + 1;
++ }
++ }
++
++ if (ext3_xattr_cache_insert(bh))
++ ea_idebug(inode, "cache insert failed");
++ if (!buffer) {
++ error = size;
++ goto cleanup;
++ } else {
++ error = -ERANGE;
++ if (size > buffer_size)
++ goto cleanup;
++ }
++
++ /* list the attribute names */
++ buf = buffer;
++ for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
++ entry = EXT3_XATTR_NEXT(entry)) {
++ struct ext3_xattr_handler *handler;
++
++ handler = ext3_xattr_handler(entry->e_name_index);
++ if (handler) {
++ buf += handler->list(buf, inode, entry->e_name,
++ entry->e_name_len);
++ *buf++ = '\0';
++ }
++ }
++ error = size;
++
++cleanup:
++ brelse(bh);
++
++ return error;
++}
++
++/*
++ * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is
++ * not set, set it.
++ */
++static void ext3_xattr_update_super_block(handle_t *handle,
++ struct super_block *sb)
++{
++ if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
++ return;
++
++ lock_super(sb);
++ ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
++ EXT3_SB(sb)->s_feature_compat |= EXT3_FEATURE_COMPAT_EXT_ATTR;
++#endif
++ EXT3_SB(sb)->s_es->s_feature_compat |=
++ cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR);
++ sb->s_dirt = 1;
++ ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
++ unlock_super(sb);
++}
++
++/*
++ * ext3_xattr_set()
++ *
++ * Create, replace or remove an extended attribute for this inode. Buffer
++ * is NULL to remove an existing extended attribute, and non-NULL to
++ * either replace an existing extended attribute, or create a new extended
++ * attribute. The flags XATTR_REPLACE and XATTR_CREATE
++ * specify that an extended attribute must exist and must not exist
++ * previous to the call, respectively.
++ *
++ * Returns 0, or a negative error number on failure.
++ */
++int
++ext3_xattr_set(handle_t *handle, struct inode *inode, int name_index,
++ const char *name, void *value, size_t value_len, int flags)
++{
++ struct super_block *sb = inode->i_sb;
++ struct buffer_head *bh = NULL;
++ struct ext3_xattr_header *header = NULL;
++ struct ext3_xattr_entry *here, *last;
++ unsigned int name_len;
++ int min_offs = sb->s_blocksize, not_found = 1, free, error;
++ char *end;
++
++ /*
++ * header -- Points either into bh, or to a temporarily
++ * allocated buffer.
++ * here -- The named entry found, or the place for inserting, within
++ * the block pointed to by header.
++ * last -- Points right after the last named entry within the block
++ * pointed to by header.
++ * min_offs -- The offset of the first value (values are aligned
++ * towards the end of the block).
++ * end -- Points right after the block pointed to by header.
++ */
++
++ ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
++ name_index, name, value, (long)value_len);
++
++ if (IS_RDONLY(inode))
++ return -EROFS;
++ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
++ return -EPERM;
++ if (value == NULL)
++ value_len = 0;
++ if (name == NULL)
++ return -EINVAL;
++ name_len = strlen(name);
++ if (name_len > 255 || value_len > sb->s_blocksize)
++ return -ERANGE;
++ ext3_xattr_lock();
++
++ if (EXT3_I(inode)->i_file_acl) {
++ /* The inode already has an extended attribute block. */
++ int block = EXT3_I(inode)->i_file_acl;
++
++ bh = sb_bread(sb, block);
++ error = -EIO;
++ if (!bh)
++ goto cleanup;
++ ea_bdebug(bh, "b_count=%d, refcount=%d",
++ atomic_read(&(bh->b_count)),
++ le32_to_cpu(HDR(bh)->h_refcount));
++ header = HDR(bh);
++ end = bh->b_data + bh->b_size;
++ if (header->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
++ header->h_blocks != cpu_to_le32(1)) {
++bad_block: ext3_error(sb, "ext3_xattr_set",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ /* Find the named attribute. */
++ here = FIRST_ENTRY(bh);
++ while (!IS_LAST_ENTRY(here)) {
++ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(here);
++ if ((char *)next >= end)
++ goto bad_block;
++ if (!here->e_value_block && here->e_value_size) {
++ int offs = le16_to_cpu(here->e_value_offs);
++ if (offs < min_offs)
++ min_offs = offs;
++ }
++ not_found = name_index - here->e_name_index;
++ if (!not_found)
++ not_found = name_len - here->e_name_len;
++ if (!not_found)
++ not_found = memcmp(name, here->e_name,name_len);
++ if (not_found <= 0)
++ break;
++ here = next;
++ }
++ last = here;
++ /* We still need to compute min_offs and last. */
++ while (!IS_LAST_ENTRY(last)) {
++ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
++ if ((char *)next >= end)
++ goto bad_block;
++ if (!last->e_value_block && last->e_value_size) {
++ int offs = le16_to_cpu(last->e_value_offs);
++ if (offs < min_offs)
++ min_offs = offs;
++ }
++ last = next;
++ }
++
++ /* Check whether we have enough space left. */
++ free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
++ } else {
++ /* We will use a new extended attribute block. */
++ free = sb->s_blocksize -
++ sizeof(struct ext3_xattr_header) - sizeof(__u32);
++ here = last = NULL; /* avoid gcc uninitialized warning. */
++ }
++
++ if (not_found) {
++ /* Request to remove a nonexistent attribute? */
++ error = -ENOATTR;
++ if (flags & XATTR_REPLACE)
++ goto cleanup;
++ error = 0;
++ if (value == NULL)
++ goto cleanup;
++ else
++ free -= EXT3_XATTR_LEN(name_len);
++ } else {
++ /* Request to create an existing attribute? */
++ error = -EEXIST;
++ if (flags & XATTR_CREATE)
++ goto cleanup;
++ if (!here->e_value_block && here->e_value_size) {
++ unsigned int size = le32_to_cpu(here->e_value_size);
++
++ if (le16_to_cpu(here->e_value_offs) + size >
++ sb->s_blocksize || size > sb->s_blocksize)
++ goto bad_block;
++ free += EXT3_XATTR_SIZE(size);
++ }
++ }
++ free -= EXT3_XATTR_SIZE(value_len);
++ error = -ENOSPC;
++ if (free < 0)
++ goto cleanup;
++
++ /* Here we know that we can set the new attribute. */
++
++ if (header) {
++ if (header->h_refcount == cpu_to_le32(1)) {
++ ea_bdebug(bh, "modifying in-place");
++ ext3_xattr_cache_remove(bh);
++ error = ext3_journal_get_write_access(handle, bh);
++ if (error)
++ goto cleanup;
++ } else {
++ int offset;
++
++ ea_bdebug(bh, "cloning");
++ header = kmalloc(bh->b_size, GFP_KERNEL);
++ error = -ENOMEM;
++ if (header == NULL)
++ goto cleanup;
++ memcpy(header, HDR(bh), bh->b_size);
++ header->h_refcount = cpu_to_le32(1);
++ offset = (char *)header - bh->b_data;
++ here = ENTRY((char *)here + offset);
++ last = ENTRY((char *)last + offset);
++ }
++ } else {
++ /* Allocate a buffer where we construct the new block. */
++ header = kmalloc(sb->s_blocksize, GFP_KERNEL);
++ error = -ENOMEM;
++ if (header == NULL)
++ goto cleanup;
++ memset(header, 0, sb->s_blocksize);
++ end = (char *)header + sb->s_blocksize;
++ header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
++ header->h_blocks = header->h_refcount = cpu_to_le32(1);
++ last = here = ENTRY(header+1);
++ }
++
++ if (not_found) {
++ /* Insert the new name. */
++ int size = EXT3_XATTR_LEN(name_len);
++ int rest = (char *)last - (char *)here;
++ memmove((char *)here + size, here, rest);
++ memset(here, 0, size);
++ here->e_name_index = name_index;
++ here->e_name_len = name_len;
++ memcpy(here->e_name, name, name_len);
++ } else {
++ /* Remove the old value. */
++ if (!here->e_value_block && here->e_value_size) {
++ char *first_val = (char *)header + min_offs;
++ int offs = le16_to_cpu(here->e_value_offs);
++ char *val = (char *)header + offs;
++ size_t size = EXT3_XATTR_SIZE(
++ le32_to_cpu(here->e_value_size));
++ memmove(first_val + size, first_val, val - first_val);
++ memset(first_val, 0, size);
++ here->e_value_offs = 0;
++ min_offs += size;
++
++ /* Adjust all value offsets. */
++ last = ENTRY(header+1);
++ while (!IS_LAST_ENTRY(last)) {
++ int o = le16_to_cpu(last->e_value_offs);
++ if (!last->e_value_block && o < offs)
++ last->e_value_offs =
++ cpu_to_le16(o + size);
++ last = EXT3_XATTR_NEXT(last);
++ }
++ }
++ if (value == NULL) {
++ /* Remove this attribute. */
++ if (EXT3_XATTR_NEXT(ENTRY(header+1)) == last) {
++ /* This block is now empty. */
++ error = ext3_xattr_set2(handle, inode, bh,NULL);
++ goto cleanup;
++ } else {
++ /* Remove the old name. */
++ int size = EXT3_XATTR_LEN(name_len);
++ last = ENTRY((char *)last - size);
++ memmove(here, (char*)here + size,
++ (char*)last - (char*)here);
++ memset(last, 0, size);
++ }
++ }
++ }
++
++ if (value != NULL) {
++ /* Insert the new value. */
++ here->e_value_size = cpu_to_le32(value_len);
++ if (value_len) {
++ size_t size = EXT3_XATTR_SIZE(value_len);
++ char *val = (char *)header + min_offs - size;
++ here->e_value_offs =
++ cpu_to_le16((char *)val - (char *)header);
++ memset(val + size - EXT3_XATTR_PAD, 0,
++ EXT3_XATTR_PAD); /* Clear the pad bytes. */
++ memcpy(val, value, value_len);
++ }
++ }
++ ext3_xattr_rehash(header, here);
++
++ error = ext3_xattr_set2(handle, inode, bh, header);
++
++cleanup:
++ brelse(bh);
++ if (!(bh && header == HDR(bh)))
++ kfree(header);
++ ext3_xattr_unlock();
++
++ return error;
++}
++
++/*
++ * Second half of ext3_xattr_set(): Update the file system.
++ */
++static int
++ext3_xattr_set2(handle_t *handle, struct inode *inode,
++ struct buffer_head *old_bh, struct ext3_xattr_header *header)
++{
++ struct super_block *sb = inode->i_sb;
++ struct buffer_head *new_bh = NULL;
++ int error;
++
++ if (header) {
++ new_bh = ext3_xattr_cache_find(inode, header);
++ if (new_bh) {
++ /*
++ * We found an identical block in the cache.
++ * The old block will be released after updating
++ * the inode.
++ */
++ ea_bdebug(old_bh, "reusing block %ld",
++ new_bh->b_blocknr);
++
++ error = -EDQUOT;
++ if (ext3_xattr_quota_alloc(inode, 1))
++ goto cleanup;
++
++ error = ext3_journal_get_write_access(handle, new_bh);
++ if (error)
++ goto cleanup;
++ HDR(new_bh)->h_refcount = cpu_to_le32(
++ le32_to_cpu(HDR(new_bh)->h_refcount) + 1);
++ ea_bdebug(new_bh, "refcount now=%d",
++ le32_to_cpu(HDR(new_bh)->h_refcount));
++ } else if (old_bh && header == HDR(old_bh)) {
++ /* Keep this block. */
++ new_bh = old_bh;
++ (void)ext3_xattr_cache_insert(new_bh);
++ } else {
++ /* We need to allocate a new block */
++ int force = EXT3_I(inode)->i_file_acl != 0;
++ int block = ext3_xattr_new_block(handle, inode,
++ &error, force);
++ if (error)
++ goto cleanup;
++ ea_idebug(inode, "creating block %d", block);
++
++ new_bh = sb_getblk(sb, block);
++ if (!new_bh) {
++getblk_failed: ext3_xattr_free_block(handle, inode, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ lock_buffer(new_bh);
++ error = ext3_journal_get_create_access(handle, new_bh);
++ if (error) {
++ unlock_buffer(new_bh);
++ goto getblk_failed;
++ }
++ memcpy(new_bh->b_data, header, new_bh->b_size);
++ mark_buffer_uptodate(new_bh, 1);
++ unlock_buffer(new_bh);
++ (void)ext3_xattr_cache_insert(new_bh);
++ ext3_xattr_update_super_block(handle, sb);
++ }
++ error = ext3_journal_dirty_metadata(handle, new_bh);
++ if (error)
++ goto cleanup;
++ }
++
++ /* Update the inode. */
++ EXT3_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
++ inode->i_ctime = CURRENT_TIME;
++ ext3_mark_inode_dirty(handle, inode);
++ if (IS_SYNC(inode))
++ handle->h_sync = 1;
++
++ error = 0;
++ if (old_bh && old_bh != new_bh) {
++ /*
++ * If there was an old block, and we are not still using it,
++ * we now release the old block.
++ */
++ unsigned int refcount = le32_to_cpu(HDR(old_bh)->h_refcount);
++
++ error = ext3_journal_get_write_access(handle, old_bh);
++ if (error)
++ goto cleanup;
++ if (refcount == 1) {
++ /* Free the old block. */
++ ea_bdebug(old_bh, "freeing");
++ ext3_xattr_free_block(handle, inode, old_bh->b_blocknr);
++
++ /* ext3_forget() calls bforget() for us, but we
++ let our caller release old_bh, so we need to
++			   duplicate the buffer before. */
++ get_bh(old_bh);
++ ext3_forget(handle, 1, inode, old_bh,old_bh->b_blocknr);
++ } else {
++ /* Decrement the refcount only. */
++ refcount--;
++ HDR(old_bh)->h_refcount = cpu_to_le32(refcount);
++ ext3_xattr_quota_free(inode);
++ ext3_journal_dirty_metadata(handle, old_bh);
++ ea_bdebug(old_bh, "refcount now=%d", refcount);
++ }
++ }
++
++cleanup:
++ if (old_bh != new_bh)
++ brelse(new_bh);
++
++ return error;
++}
++
++/*
++ * ext3_xattr_drop_inode()
++ *
++ * Free extended attribute resources associated with this inode. This
++ * is called immediately before an inode is freed.
++ */
++void
++ext3_xattr_drop_inode(handle_t *handle, struct inode *inode)
++{
++ struct buffer_head *bh;
++ unsigned int block = EXT3_I(inode)->i_file_acl;
++
++ if (!block)
++ return;
++ ext3_xattr_lock();
++
++ bh = sb_bread(inode->i_sb, block);
++ if (!bh) {
++ ext3_error(inode->i_sb, "ext3_xattr_drop_inode",
++ "inode %ld: block %d read error", inode->i_ino, block);
++ goto cleanup;
++ }
++ ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
++ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
++ HDR(bh)->h_blocks != cpu_to_le32(1)) {
++ ext3_error(inode->i_sb, "ext3_xattr_drop_inode",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ goto cleanup;
++ }
++ ext3_journal_get_write_access(handle, bh);
++ ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1);
++ if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
++ ext3_xattr_cache_remove(bh);
++ ext3_xattr_free_block(handle, inode, block);
++ ext3_forget(handle, 1, inode, bh, block);
++ bh = NULL;
++ } else {
++ HDR(bh)->h_refcount = cpu_to_le32(
++ le32_to_cpu(HDR(bh)->h_refcount) - 1);
++ ext3_journal_dirty_metadata(handle, bh);
++ if (IS_SYNC(inode))
++ handle->h_sync = 1;
++ ext3_xattr_quota_free(inode);
++ }
++ EXT3_I(inode)->i_file_acl = 0;
++
++cleanup:
++ brelse(bh);
++ ext3_xattr_unlock();
++}
++
++/*
++ * ext3_xattr_put_super()
++ *
++ * This is called when a file system is unmounted.
++ */
++void
++ext3_xattr_put_super(struct super_block *sb)
++{
++#ifdef CONFIG_EXT3_FS_XATTR_SHARING
++ mb_cache_shrink(ext3_xattr_cache, sb->s_dev);
++#endif
++}
++
++#ifdef CONFIG_EXT3_FS_XATTR_SHARING
++
++/*
++ * ext3_xattr_cache_insert()
++ *
++ * Create a new entry in the extended attribute cache, and insert
++ * it unless such an entry is already in the cache.
++ *
++ * Returns 0, or a negative error number on failure.
++ */
++static int
++ext3_xattr_cache_insert(struct buffer_head *bh)
++{
++ __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
++ struct mb_cache_entry *ce;
++ int error;
++
++ ce = mb_cache_entry_alloc(ext3_xattr_cache);
++ if (!ce)
++ return -ENOMEM;
++ error = mb_cache_entry_insert(ce, bh->b_dev, bh->b_blocknr, &hash);
++ if (error) {
++ mb_cache_entry_free(ce);
++ if (error == -EBUSY) {
++ ea_bdebug(bh, "already in cache (%d cache entries)",
++ atomic_read(&ext3_xattr_cache->c_entry_count));
++ error = 0;
++ }
++ } else {
++ ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
++ atomic_read(&ext3_xattr_cache->c_entry_count));
++ mb_cache_entry_release(ce);
++ }
++ return error;
++}
++
++/*
++ * ext3_xattr_cmp()
++ *
++ * Compare two extended attribute blocks for equality.
++ *
++ * Returns 0 if the blocks are equal, 1 if they differ, and
++ * a negative error number on errors.
++ */
++static int
++ext3_xattr_cmp(struct ext3_xattr_header *header1,
++ struct ext3_xattr_header *header2)
++{
++ struct ext3_xattr_entry *entry1, *entry2;
++
++ entry1 = ENTRY(header1+1);
++ entry2 = ENTRY(header2+1);
++ while (!IS_LAST_ENTRY(entry1)) {
++ if (IS_LAST_ENTRY(entry2))
++ return 1;
++ if (entry1->e_hash != entry2->e_hash ||
++ entry1->e_name_len != entry2->e_name_len ||
++ entry1->e_value_size != entry2->e_value_size ||
++ memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
++ return 1;
++ if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
++ return -EIO;
++ if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
++ (char *)header2 + le16_to_cpu(entry2->e_value_offs),
++ le32_to_cpu(entry1->e_value_size)))
++ return 1;
++
++ entry1 = EXT3_XATTR_NEXT(entry1);
++ entry2 = EXT3_XATTR_NEXT(entry2);
++ }
++ if (!IS_LAST_ENTRY(entry2))
++ return 1;
++ return 0;
++}
++
++/*
++ * ext3_xattr_cache_find()
++ *
++ * Find an identical extended attribute block.
++ *
++ * Returns a pointer to the block found, or NULL if such a block was
++ * not found or an error occurred.
++ */
++static struct buffer_head *
++ext3_xattr_cache_find(struct inode *inode, struct ext3_xattr_header *header)
++{
++ __u32 hash = le32_to_cpu(header->h_hash);
++ struct mb_cache_entry *ce;
++
++ if (!header->h_hash)
++ return NULL; /* never share */
++ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
++ ce = mb_cache_entry_find_first(ext3_xattr_cache, 0, inode->i_dev, hash);
++ while (ce) {
++ struct buffer_head *bh = sb_bread(inode->i_sb, ce->e_block);
++
++ if (!bh) {
++ ext3_error(inode->i_sb, "ext3_xattr_cache_find",
++ "inode %ld: block %ld read error",
++ inode->i_ino, ce->e_block);
++ } else if (le32_to_cpu(HDR(bh)->h_refcount) >
++ EXT3_XATTR_REFCOUNT_MAX) {
++ ea_idebug(inode, "block %ld refcount %d>%d",ce->e_block,
++ le32_to_cpu(HDR(bh)->h_refcount),
++ EXT3_XATTR_REFCOUNT_MAX);
++ } else if (!ext3_xattr_cmp(header, HDR(bh))) {
++ ea_bdebug(bh, "b_count=%d",atomic_read(&(bh->b_count)));
++ mb_cache_entry_release(ce);
++ return bh;
++ }
++ brelse(bh);
++ ce = mb_cache_entry_find_next(ce, 0, inode->i_dev, hash);
++ }
++ return NULL;
++}
++
++/*
++ * ext3_xattr_cache_remove()
++ *
++ * Remove the cache entry of a block from the cache. Called when a
++ * block becomes invalid.
++ */
++static void
++ext3_xattr_cache_remove(struct buffer_head *bh)
++{
++ struct mb_cache_entry *ce;
++
++ ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_dev, bh->b_blocknr);
++ if (ce) {
++ ea_bdebug(bh, "removing (%d cache entries remaining)",
++ atomic_read(&ext3_xattr_cache->c_entry_count)-1);
++ mb_cache_entry_free(ce);
++ } else
++ ea_bdebug(bh, "no cache entry");
++}
++
++#define NAME_HASH_SHIFT 5
++#define VALUE_HASH_SHIFT 16
++
++/*
++ * ext3_xattr_hash_entry()
++ *
++ * Compute the hash of an extended attribute.
++ */
++static inline void ext3_xattr_hash_entry(struct ext3_xattr_header *header,
++ struct ext3_xattr_entry *entry)
++{
++ __u32 hash = 0;
++ char *name = entry->e_name;
++ int n;
++
++ for (n=0; n < entry->e_name_len; n++) {
++ hash = (hash << NAME_HASH_SHIFT) ^
++ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
++ *name++;
++ }
++
++ if (entry->e_value_block == 0 && entry->e_value_size != 0) {
++ __u32 *value = (__u32 *)((char *)header +
++ le16_to_cpu(entry->e_value_offs));
++ for (n = (le32_to_cpu(entry->e_value_size) +
++ EXT3_XATTR_ROUND) >> EXT3_XATTR_PAD_BITS; n; n--) {
++ hash = (hash << VALUE_HASH_SHIFT) ^
++ (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
++ le32_to_cpu(*value++);
++ }
++ }
++ entry->e_hash = cpu_to_le32(hash);
++}
++
++#undef NAME_HASH_SHIFT
++#undef VALUE_HASH_SHIFT
++
++#define BLOCK_HASH_SHIFT 16
++
++/*
++ * ext3_xattr_rehash()
++ *
++ * Re-compute the extended attribute hash value after an entry has changed.
++ */
++static void ext3_xattr_rehash(struct ext3_xattr_header *header,
++ struct ext3_xattr_entry *entry)
++{
++ struct ext3_xattr_entry *here;
++ __u32 hash = 0;
++
++ ext3_xattr_hash_entry(header, entry);
++ here = ENTRY(header+1);
++ while (!IS_LAST_ENTRY(here)) {
++ if (!here->e_hash) {
++ /* Block is not shared if an entry's hash value == 0 */
++ hash = 0;
++ break;
++ }
++ hash = (hash << BLOCK_HASH_SHIFT) ^
++ (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
++ le32_to_cpu(here->e_hash);
++ here = EXT3_XATTR_NEXT(here);
++ }
++ header->h_hash = cpu_to_le32(hash);
++}
++
++#undef BLOCK_HASH_SHIFT
++
++int __init
++init_ext3_xattr(void)
++{
++ ext3_xattr_cache = mb_cache_create("ext3_xattr", NULL,
++ sizeof(struct mb_cache_entry) +
++ sizeof(struct mb_cache_entry_index), 1, 61);
++ if (!ext3_xattr_cache)
++ return -ENOMEM;
++
++ return 0;
++}
++
++void
++exit_ext3_xattr(void)
++{
++ if (ext3_xattr_cache)
++ mb_cache_destroy(ext3_xattr_cache);
++ ext3_xattr_cache = NULL;
++}
++
++#else /* CONFIG_EXT3_FS_XATTR_SHARING */
++
++int __init
++init_ext3_xattr(void)
++{
++ return 0;
++}
++
++void
++exit_ext3_xattr(void)
++{
++}
++
++#endif /* CONFIG_EXT3_FS_XATTR_SHARING */
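The hash helpers above mix the attribute name and value into per-entry hashes with a rotate-and-xor scheme (NAME_HASH_SHIFT is 5, so each byte rotates a 32-bit word by 5 bits), and ext3_xattr_rehash() then folds the entry hashes into the block hash used for the mbcache lookup. As a rough illustration, the standalone userspace sketch below restates only the name-hash loop; the function name and the sample attribute are made up for the example and the snippet is not part of the patch.

/* Sketch only: mirrors the name-hash loop of ext3_xattr_hash_entry()
 * (NAME_HASH_SHIFT == 5, 32 - 5 == 27). Not part of the patch. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t xattr_name_hash(const char *name, size_t len)
{
	uint32_t hash = 0;
	size_t n;

	for (n = 0; n < len; n++)
		hash = (hash << 5) ^ (hash >> 27) ^ name[n];
	return hash;
}

int main(void)
{
	const char *sample = "user.mime_type";	/* hypothetical attribute name */

	printf("name hash of \"%s\" = 0x%08x\n",
	       sample, (unsigned)xattr_name_hash(sample, strlen(sample)));
	return 0;
}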
+--- linux-2.4.18-18/include/linux/ext3_fs.h~linux-2.4.18ea-0.8.26 Sat Apr 5 02:54:56 2003
++++ linux-2.4.18-18-braam/include/linux/ext3_fs.h Sat Apr 5 03:03:06 2003
+@@ -58,8 +58,6 @@
+ */
+ #define EXT3_BAD_INO 1 /* Bad blocks inode */
+ #define EXT3_ROOT_INO 2 /* Root inode */
+-#define EXT3_ACL_IDX_INO 3 /* ACL inode */
+-#define EXT3_ACL_DATA_INO 4 /* ACL inode */
+ #define EXT3_BOOT_LOADER_INO 5 /* Boot loader inode */
+ #define EXT3_UNDEL_DIR_INO 6 /* Undelete directory inode */
+ #define EXT3_RESIZE_INO 7 /* Reserved group descriptors inode */
+@@ -89,7 +87,6 @@
+ #else
+ # define EXT3_BLOCK_SIZE(s) (EXT3_MIN_BLOCK_SIZE << (s)->s_log_block_size)
+ #endif
+-#define EXT3_ACLE_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_acl_entry))
+ #define EXT3_ADDR_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (__u32))
+ #ifdef __KERNEL__
+ # define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
+@@ -124,28 +121,6 @@
+ #endif
+
+ /*
+- * ACL structures
+- */
+-struct ext3_acl_header /* Header of Access Control Lists */
+-{
+- __u32 aclh_size;
+- __u32 aclh_file_count;
+- __u32 aclh_acle_count;
+- __u32 aclh_first_acle;
+-};
+-
+-struct ext3_acl_entry /* Access Control List Entry */
+-{
+- __u32 acle_size;
+- __u16 acle_perms; /* Access permissions */
+- __u16 acle_type; /* Type of entry */
+- __u16 acle_tag; /* User or group identity */
+- __u16 acle_pad1;
+- __u32 acle_next; /* Pointer on next entry for the */
+- /* same inode or on next free entry */
+-};
+-
+-/*
+ * Structure of a blocks group descriptor
+ */
+ struct ext3_group_desc
+@@ -513,7 +488,7 @@ struct ext3_super_block {
+ #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
+ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
+
+-#define EXT3_FEATURE_COMPAT_SUPP 0
++#define EXT3_FEATURE_COMPAT_SUPP EXT3_FEATURE_COMPAT_EXT_ATTR
+ #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
+ EXT3_FEATURE_INCOMPAT_RECOVER)
+ #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+@@ -606,6 +581,24 @@ struct ext3_iloc
+ unsigned long block_group;
+ };
+
++/* Defined for extended attributes */
++#define CONFIG_EXT3_FS_XATTR y
++#ifndef ENOATTR
++#define ENOATTR ENODATA /* No such attribute */
++#endif
++#ifndef ENOTSUP
++#define ENOTSUP EOPNOTSUPP /* Operation not supported */
++#endif
++#ifndef XATTR_NAME_MAX
++#define XATTR_NAME_MAX 255 /* # chars in an extended attribute name */
++#define XATTR_SIZE_MAX 65536 /* size of an extended attribute value (64k) */
++#define XATTR_LIST_MAX 65536 /* size of extended attribute namelist (64k) */
++#endif
++#ifndef XATTR_CREATE
++#define XATTR_CREATE 1 /* set value, fail if attr already exists */
++#define XATTR_REPLACE 2 /* set value, fail if attr does not exist */
++#endif
++
+ /*
+ * Function prototypes
+ */
+@@ -647,6 +640,7 @@ extern void ext3_check_inodes_bitmap (st
+ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
+
+ /* inode.c */
++extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
+ extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
+ extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+
+--- linux-2.4.18-18/include/linux/ext3_jbd.h~linux-2.4.18ea-0.8.26 Sat Apr 5 02:54:56 2003
++++ linux-2.4.18-18-braam/include/linux/ext3_jbd.h Sat Apr 5 03:03:06 2003
+@@ -30,13 +30,19 @@
+
+ #define EXT3_SINGLEDATA_TRANS_BLOCKS 8
+
++/* Extended attributes may touch two data buffers, two bitmap buffers,
++ * and two group summaries. */
++
++#define EXT3_XATTR_TRANS_BLOCKS 8
++
+ /* Define the minimum size for a transaction which modifies data. This
+ * needs to take into account the fact that we may end up modifying two
+ * quota files too (one for the group, one for the user quota). The
+ * superblock only gets updated once, of course, so don't bother
+ * counting that again for the quota updates. */
+
+-#define EXT3_DATA_TRANS_BLOCKS (3 * EXT3_SINGLEDATA_TRANS_BLOCKS - 2)
++#define EXT3_DATA_TRANS_BLOCKS (3 * EXT3_SINGLEDATA_TRANS_BLOCKS + \
++ EXT3_XATTR_TRANS_BLOCKS - 2)
+
+ extern int ext3_writepage_trans_blocks(struct inode *inode);
+
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.18-18-braam/include/linux/ext3_xattr.h Sat Apr 5 02:54:56 2003
+@@ -0,0 +1,155 @@
++/*
++ File: linux/ext3_xattr.h
++
++ On-disk format of extended attributes for the ext3 filesystem.
++
++ (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
++*/
++
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/xattr.h>
++
++/* Magic value in attribute blocks */
++#define EXT3_XATTR_MAGIC 0xEA020000
++
++/* Maximum number of references to one attribute block */
++#define EXT3_XATTR_REFCOUNT_MAX 1024
++
++/* Name indexes */
++#define EXT3_XATTR_INDEX_MAX 10
++#define EXT3_XATTR_INDEX_USER 1
++
++struct ext3_xattr_header {
++ __u32 h_magic; /* magic number for identification */
++ __u32 h_refcount; /* reference count */
++ __u32 h_blocks; /* number of disk blocks used */
++ __u32 h_hash; /* hash value of all attributes */
++ __u32 h_reserved[4]; /* zero right now */
++};
++
++struct ext3_xattr_entry {
++ __u8 e_name_len; /* length of name */
++ __u8 e_name_index; /* attribute name index */
++ __u16 e_value_offs; /* offset in disk block of value */
++ __u32 e_value_block; /* disk block attribute is stored on (n/i) */
++ __u32 e_value_size; /* size of attribute value */
++ __u32 e_hash; /* hash value of name and value */
++ char e_name[0]; /* attribute name */
++};
++
++#define EXT3_XATTR_PAD_BITS 2
++#define EXT3_XATTR_PAD (1<<EXT3_XATTR_PAD_BITS)
++#define EXT3_XATTR_ROUND (EXT3_XATTR_PAD-1)
++#define EXT3_XATTR_LEN(name_len) \
++ (((name_len) + EXT3_XATTR_ROUND + \
++ sizeof(struct ext3_xattr_entry)) & ~EXT3_XATTR_ROUND)
++#define EXT3_XATTR_NEXT(entry) \
++ ( (struct ext3_xattr_entry *)( \
++ (char *)(entry) + EXT3_XATTR_LEN((entry)->e_name_len)) )
++#define EXT3_XATTR_SIZE(size) \
++ (((size) + EXT3_XATTR_ROUND) & ~EXT3_XATTR_ROUND)
++
++#ifdef __KERNEL__
++
++# ifdef CONFIG_EXT3_FS_XATTR
++
++struct ext3_xattr_handler {
++ char *prefix;
++ size_t (*list)(char *list, struct inode *inode, const char *name,
++ int name_len);
++ int (*get)(struct inode *inode, const char *name, void *buffer,
++ size_t size);
++ int (*set)(struct inode *inode, const char *name, void *buffer,
++ size_t size, int flags);
++};
++
++extern int ext3_xattr_register(int, struct ext3_xattr_handler *);
++extern void ext3_xattr_unregister(int, struct ext3_xattr_handler *);
++
++extern int ext3_setxattr(struct dentry *, const char *, void *, size_t, int);
++extern ssize_t ext3_getxattr(struct dentry *, const char *, void *, size_t);
++extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
++extern int ext3_removexattr(struct dentry *, const char *);
++
++extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
++extern int ext3_xattr_list(struct inode *, char *, size_t);
++extern int ext3_xattr_set(handle_t *handle, struct inode *, int, const char *, void *, size_t, int);
++
++extern void ext3_xattr_drop_inode(handle_t *, struct inode *);
++extern void ext3_xattr_put_super(struct super_block *);
++
++extern int init_ext3_xattr(void) __init;
++extern void exit_ext3_xattr(void);
++
++# else /* CONFIG_EXT3_FS_XATTR */
++# define ext3_setxattr NULL
++# define ext3_getxattr NULL
++# define ext3_listxattr NULL
++# define ext3_removexattr NULL
++
++static inline int
++ext3_xattr_get(struct inode *inode, int name_index, const char *name,
++	       void *buffer, size_t size)
++{
++ return -ENOTSUP;
++}
++
++static inline int
++ext3_xattr_list(struct inode *inode, char *buffer, size_t size)
++{
++ return -ENOTSUP;
++}
++
++static inline int
++ext3_xattr_set(handle_t *handle, struct inode *inode, int name_index,
++ const char *name, void *value, size_t size, int flags)
++{
++ return -ENOTSUP;
++}
++
++static inline void
++ext3_xattr_drop_inode(handle_t *handle, struct inode *inode)
++{
++}
++
++static inline void
++ext3_xattr_put_super(struct super_block *sb)
++{
++}
++
++static inline int
++init_ext3_xattr(void)
++{
++ return 0;
++}
++
++static inline void
++exit_ext3_xattr(void)
++{
++}
++
++# endif /* CONFIG_EXT3_FS_XATTR */
++
++# ifdef CONFIG_EXT3_FS_XATTR_USER
++
++extern int init_ext3_xattr_user(void) __init;
++extern void exit_ext3_xattr_user(void);
++
++# else /* CONFIG_EXT3_FS_XATTR_USER */
++
++static inline int
++init_ext3_xattr_user(void)
++{
++ return 0;
++}
++
++static inline void
++exit_ext3_xattr_user(void)
++{
++}
++
++#endif /* CONFIG_EXT3_FS_XATTR_USER */
++
++#endif /* __KERNEL__ */
++
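The EXT3_XATTR_LEN() and EXT3_XATTR_SIZE() macros above round entry descriptors and values up to EXT3_XATTR_PAD (4-byte) boundaries, which is what keeps entries comparable and densely packable within one block. A small userspace sketch of that rounding, reusing the structure layout from the header; the sample lengths are arbitrary and the snippet is not part of the patch.

/* Sketch only: padding arithmetic of the on-disk entry macros. */
#include <stdio.h>
#include <stdint.h>

struct ext3_xattr_entry {
	uint8_t		e_name_len;
	uint8_t		e_name_index;
	uint16_t	e_value_offs;
	uint32_t	e_value_block;
	uint32_t	e_value_size;
	uint32_t	e_hash;
	char		e_name[0];	/* zero-length array, as in the patch */
};

#define EXT3_XATTR_PAD_BITS	2
#define EXT3_XATTR_PAD		(1 << EXT3_XATTR_PAD_BITS)
#define EXT3_XATTR_ROUND	(EXT3_XATTR_PAD - 1)
#define EXT3_XATTR_LEN(name_len) \
	(((name_len) + EXT3_XATTR_ROUND + \
	  sizeof(struct ext3_xattr_entry)) & ~EXT3_XATTR_ROUND)
#define EXT3_XATTR_SIZE(size) \
	(((size) + EXT3_XATTR_ROUND) & ~EXT3_XATTR_ROUND)

int main(void)
{
	/* a 7-byte name needs 16 + 7 = 23 bytes, padded to 24 */
	printf("EXT3_XATTR_LEN(7)   = %lu\n",
	       (unsigned long)EXT3_XATTR_LEN(7));
	/* a 10-byte value is padded to 12 bytes */
	printf("EXT3_XATTR_SIZE(10) = %lu\n",
	       (unsigned long)EXT3_XATTR_SIZE(10));
	return 0;
}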
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.18-18-braam/include/linux/xattr.h Sat Apr 5 02:54:56 2003
+@@ -0,0 +1,15 @@
++/*
++ File: linux/xattr.h
++
++ Extended attributes handling.
++
++ Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
++ Copyright (C) 2001 SGI - Silicon Graphics, Inc <linux-xfs@oss.sgi.com>
++*/
++#ifndef _LINUX_XATTR_H
++#define _LINUX_XATTR_H
++
++#define XATTR_CREATE 1 /* set value, fail if attr already exists */
++#define XATTR_REPLACE 2 /* set value, fail if attr does not exist */
++
++#endif /* _LINUX_XATTR_H */
+
+_
--- /dev/null
+ Documentation/Configure.help | 66 ++
+ arch/alpha/defconfig | 7
+ arch/alpha/kernel/entry.S | 12
+ arch/arm/defconfig | 7
+ arch/arm/kernel/calls.S | 24
+ arch/i386/defconfig | 7
+ arch/ia64/defconfig | 7
+ arch/ia64/kernel/entry.S | 24
+ arch/m68k/defconfig | 7
+ arch/mips/defconfig | 7
+ arch/mips64/defconfig | 7
+ arch/ppc/defconfig | 7
+ arch/ppc64/kernel/misc.S | 2
+ arch/s390/defconfig | 7
+ arch/s390/kernel/entry.S | 24
+ arch/s390x/defconfig | 7
+ arch/s390x/kernel/entry.S | 24
+ arch/s390x/kernel/wrapper32.S | 92 +++
+ arch/sparc/defconfig | 7
+ arch/sparc/kernel/systbls.S | 10
+ arch/sparc64/defconfig | 7
+ arch/sparc64/kernel/systbls.S | 20
+ fs/Config.in | 14
+ fs/Makefile | 3
+ fs/ext2/Makefile | 4
+ fs/ext2/file.c | 5
+ fs/ext2/ialloc.c | 2
+ fs/ext2/inode.c | 34 -
+ fs/ext2/namei.c | 14
+ fs/ext2/super.c | 29
+ fs/ext2/symlink.c | 14
+ fs/ext2/xattr.c | 1212 +++++++++++++++++++++++++++++++++++++++++
+ fs/ext2/xattr_user.c | 103 +++
+ fs/ext3/Makefile | 10
+ fs/ext3/file.c | 5
+ fs/ext3/ialloc.c | 2
+ fs/ext3/inode.c | 35 -
+ fs/ext3/namei.c | 21
+ fs/ext3/super.c | 33 +
+ fs/ext3/symlink.c | 14
+ fs/ext3/xattr.c | 1224 ++++++++++++++++++++++++++++++++++++++++++
+ fs/ext3/xattr_user.c | 111 +++
+ fs/jfs/jfs_xattr.h | 6
+ fs/jfs/xattr.c | 6
+ fs/mbcache.c | 648 ++++++++++++++++++++++
+ include/asm-arm/unistd.h | 2
+ include/asm-ia64/unistd.h | 13
+ include/asm-ppc64/unistd.h | 2
+ include/asm-s390/unistd.h | 15
+ include/asm-s390x/unistd.h | 15
+ include/asm-sparc/unistd.h | 26
+ include/asm-sparc64/unistd.h | 24
+ include/linux/cache_def.h | 15
+ include/linux/errno.h | 4
+ include/linux/ext2_fs.h | 31 -
+ include/linux/ext2_xattr.h | 157 +++++
+ include/linux/ext3_fs.h | 31 -
+ include/linux/ext3_jbd.h | 8
+ include/linux/ext3_xattr.h | 157 +++++
+ include/linux/fs.h | 2
+ include/linux/mbcache.h | 69 ++
+ kernel/ksyms.c | 4
+ mm/vmscan.c | 35 +
+ 63 files changed, 4348 insertions(+), 193 deletions(-)
+
+--- linux-2.4.20/Documentation/Configure.help~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/Documentation/Configure.help Sat Apr 5 03:57:18 2003
+@@ -15242,6 +15242,39 @@ CONFIG_EXT2_FS
+ be compiled as a module, and so this could be dangerous. Most
+ everyone wants to say Y here.
+
++Ext2 extended attributes
++CONFIG_EXT2_FS_XATTR
++ Extended attributes are name:value pairs associated with inodes by
++ the kernel or by users (see the attr(5) manual page, or visit
++ <http://acl.bestbits.at/> for details).
++
++ If unsure, say N.
++
++Ext2 extended attribute block sharing
++CONFIG_EXT2_FS_XATTR_SHARING
++  This option enables code for sharing identical extended attribute
++ blocks among multiple inodes.
++
++ Usually, say Y.
++
++Ext2 extended user attributes
++CONFIG_EXT2_FS_XATTR_USER
++ This option enables extended user attributes on ext2. Processes can
++ associate extended user attributes with inodes to store additional
++ information such as the character encoding of files, etc. (see the
++ attr(5) manual page, or visit <http://acl.bestbits.at/> for details).
++
++ If unsure, say N.
++
++Ext2 trusted extended attributes
++CONFIG_EXT2_FS_XATTR_TRUSTED
++ This option enables extended attributes on ext2 that are accessible
++ (and visible) only to users capable of CAP_SYS_ADMIN. Usually this
++ is only the super user. Trusted extended attributes are meant for
++ implementing system/security services.
++
++ If unsure, say N.
++
+ Ext3 journalling file system support (EXPERIMENTAL)
+ CONFIG_EXT3_FS
+ This is the journalling version of the Second extended file system
+@@ -15274,6 +15307,39 @@ CONFIG_EXT3_FS
+ of your root partition (the one containing the directory /) cannot
+ be compiled as a module, and so this may be dangerous.
+
++Ext3 extended attributes
++CONFIG_EXT3_FS_XATTR
++ Extended attributes are name:value pairs associated with inodes by
++ the kernel or by users (see the attr(5) manual page, or visit
++ <http://acl.bestbits.at/> for details).
++
++ If unsure, say N.
++
++Ext3 extended attribute block sharing
++CONFIG_EXT3_FS_XATTR_SHARING
++  This option enables code for sharing identical extended attribute
++ blocks among multiple inodes.
++
++ Usually, say Y.
++
++Ext3 extended user attributes
++CONFIG_EXT3_FS_XATTR_USER
++ This option enables extended user attributes on ext3. Processes can
++ associate extended user attributes with inodes to store additional
++ information such as the character encoding of files, etc. (see the
++ attr(5) manual page, or visit <http://acl.bestbits.at/> for details).
++
++ If unsure, say N.
++
++Ext3 trusted extended attributes
++CONFIG_EXT3_FS_XATTR_TRUSTED
++ This option enables extended attributes on ext3 that are accessible
++ (and visible) only to users capable of CAP_SYS_ADMIN. Usually this
++ is only the super user. Trusted extended attributes are meant for
++ implementing system/security services.
++
++ If unsure, say N.
++
+ Journal Block Device support (JBD for ext3) (EXPERIMENTAL)
+ CONFIG_JBD
+ This is a generic journalling layer for block devices. It is
+--- linux-2.4.20/arch/alpha/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/alpha/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+ CONFIG_ALPHA=y
+ # CONFIG_UID16 is not set
+ # CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+--- linux-2.4.20/arch/alpha/kernel/entry.S~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/alpha/kernel/entry.S Sat Apr 5 03:57:18 2003
+@@ -1154,6 +1154,18 @@ sys_call_table:
+ .quad sys_readahead
+ .quad sys_ni_syscall /* 380, sys_security */
+ .quad sys_tkill
++ .quad sys_setxattr
++ .quad sys_lsetxattr
++ .quad sys_fsetxattr
++ .quad sys_getxattr /* 385 */
++ .quad sys_lgetxattr
++ .quad sys_fgetxattr
++ .quad sys_listxattr
++ .quad sys_llistxattr
++ .quad sys_flistxattr /* 390 */
++ .quad sys_removexattr
++ .quad sys_lremovexattr
++ .quad sys_fremovexattr
+
+ /* Remember to update everything, kids. */
+ .ifne (. - sys_call_table) - (NR_SYSCALLS * 8)
+--- linux-2.4.20/arch/arm/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/arm/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+ CONFIG_ARM=y
+ # CONFIG_EISA is not set
+ # CONFIG_SBUS is not set
+--- linux-2.4.20/arch/arm/kernel/calls.S~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/arm/kernel/calls.S Sat Apr 5 03:57:18 2003
+@@ -240,18 +240,18 @@ __syscall_start:
+ .long SYMBOL_NAME(sys_ni_syscall) /* Security */
+ .long SYMBOL_NAME(sys_gettid)
+ /* 225 */ .long SYMBOL_NAME(sys_readahead)
+- .long SYMBOL_NAME(sys_ni_syscall) /* sys_setxattr */
+- .long SYMBOL_NAME(sys_ni_syscall) /* sys_lsetxattr */
+- .long SYMBOL_NAME(sys_ni_syscall) /* sys_fsetxattr */
+- .long SYMBOL_NAME(sys_ni_syscall) /* sys_getxattr */
+-/* 230 */ .long SYMBOL_NAME(sys_ni_syscall) /* sys_lgetxattr */
+- .long SYMBOL_NAME(sys_ni_syscall) /* sys_fgetxattr */
+- .long SYMBOL_NAME(sys_ni_syscall) /* sys_listxattr */
+- .long SYMBOL_NAME(sys_ni_syscall) /* sys_llistxattr */
+- .long SYMBOL_NAME(sys_ni_syscall) /* sys_flistxattr */
+-/* 235 */ .long SYMBOL_NAME(sys_ni_syscall) /* sys_removexattr */
+- .long SYMBOL_NAME(sys_ni_syscall) /* sys_lremovexattr */
+- .long SYMBOL_NAME(sys_ni_syscall) /* sys_fremovexattr */
++ .long SYMBOL_NAME(sys_setxattr)
++ .long SYMBOL_NAME(sys_lsetxattr)
++ .long SYMBOL_NAME(sys_fsetxattr)
++ .long SYMBOL_NAME(sys_getxattr)
++/* 230 */ .long SYMBOL_NAME(sys_lgetxattr)
++ .long SYMBOL_NAME(sys_fgetxattr)
++ .long SYMBOL_NAME(sys_listxattr)
++ .long SYMBOL_NAME(sys_llistxattr)
++ .long SYMBOL_NAME(sys_flistxattr)
++/* 235 */ .long SYMBOL_NAME(sys_removexattr)
++ .long SYMBOL_NAME(sys_lremovexattr)
++ .long SYMBOL_NAME(sys_fremovexattr)
+ .long SYMBOL_NAME(sys_tkill)
+ /*
+ * Please check 2.5 _before_ adding calls here,
+--- linux-2.4.20/arch/i386/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/i386/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+ CONFIG_X86=y
+ CONFIG_ISA=y
+ # CONFIG_SBUS is not set
+--- linux-2.4.20/arch/ia64/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/ia64/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+
+ #
+ # Code maturity level options
+--- linux-2.4.20/arch/ia64/kernel/entry.S~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/ia64/kernel/entry.S Sat Apr 5 03:57:18 2003
+@@ -1170,18 +1170,18 @@ sys_call_table:
+ data8 sys_getdents64
+ data8 sys_getunwind // 1215
+ data8 sys_readahead
+- data8 ia64_ni_syscall
+- data8 ia64_ni_syscall
+- data8 ia64_ni_syscall
+- data8 ia64_ni_syscall // 1220
+- data8 ia64_ni_syscall
+- data8 ia64_ni_syscall
+- data8 ia64_ni_syscall
+- data8 ia64_ni_syscall
+- data8 ia64_ni_syscall // 1225
+- data8 ia64_ni_syscall
+- data8 ia64_ni_syscall
+- data8 ia64_ni_syscall
++ data8 sys_setxattr
++ data8 sys_lsetxattr
++ data8 sys_fsetxattr
++ data8 sys_getxattr // 1220
++ data8 sys_lgetxattr
++ data8 sys_fgetxattr
++ data8 sys_listxattr
++ data8 sys_llistxattr
++ data8 sys_flistxattr // 1225
++ data8 sys_removexattr
++ data8 sys_lremovexattr
++ data8 sys_fremovexattr
+ data8 sys_tkill
+ data8 ia64_ni_syscall // 1230
+ data8 ia64_ni_syscall
+--- linux-2.4.20/arch/m68k/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/m68k/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+ CONFIG_UID16=y
+
+ #
+--- linux-2.4.20/arch/mips/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/mips/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+ CONFIG_MIPS=y
+ CONFIG_MIPS32=y
+ # CONFIG_MIPS64 is not set
+--- linux-2.4.20/arch/mips64/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/mips64/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+ CONFIG_MIPS=y
+ # CONFIG_MIPS32 is not set
+ CONFIG_MIPS64=y
+--- linux-2.4.20/arch/ppc/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/ppc/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+ # CONFIG_UID16 is not set
+ # CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+--- linux-2.4.20/arch/ppc64/kernel/misc.S~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/ppc64/kernel/misc.S Sat Apr 5 03:57:18 2003
+@@ -731,6 +731,7 @@ _GLOBAL(sys_call_table32)
+ .llong .sys_gettid /* 207 */
+ #if 0 /* Reserved syscalls */
+ .llong .sys_tkill /* 208 */
++#endif
+ .llong .sys_setxattr
+ .llong .sys_lsetxattr /* 210 */
+ .llong .sys_fsetxattr
+@@ -743,6 +744,7 @@ _GLOBAL(sys_call_table32)
+ .llong .sys_removexattr
+ .llong .sys_lremovexattr
+ .llong .sys_fremovexattr /* 220 */
++#if 0 /* Reserved syscalls */
+ .llong .sys_futex
+ #endif
+ .llong .sys_perfmonctl /* Put this here for now ... */
+--- linux-2.4.20/arch/s390/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/s390/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+ # CONFIG_ISA is not set
+ # CONFIG_EISA is not set
+ # CONFIG_MCA is not set
+--- linux-2.4.20/arch/s390/kernel/entry.S~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/s390/kernel/entry.S Sat Apr 5 03:57:18 2003
+@@ -558,18 +558,18 @@ sys_call_table:
+ .long sys_fcntl64
+ .long sys_ni_syscall
+ .long sys_ni_syscall
+- .long sys_ni_syscall /* 224 - reserved for setxattr */
+- .long sys_ni_syscall /* 225 - reserved for lsetxattr */
+- .long sys_ni_syscall /* 226 - reserved for fsetxattr */
+- .long sys_ni_syscall /* 227 - reserved for getxattr */
+- .long sys_ni_syscall /* 228 - reserved for lgetxattr */
+- .long sys_ni_syscall /* 229 - reserved for fgetxattr */
+- .long sys_ni_syscall /* 230 - reserved for listxattr */
+- .long sys_ni_syscall /* 231 - reserved for llistxattr */
+- .long sys_ni_syscall /* 232 - reserved for flistxattr */
+- .long sys_ni_syscall /* 233 - reserved for removexattr */
+- .long sys_ni_syscall /* 234 - reserved for lremovexattr */
+- .long sys_ni_syscall /* 235 - reserved for fremovexattr */
++ .long sys_setxattr
++ .long sys_lsetxattr /* 225 */
++ .long sys_fsetxattr
++ .long sys_getxattr
++ .long sys_lgetxattr
++ .long sys_fgetxattr
++ .long sys_listxattr /* 230 */
++ .long sys_llistxattr
++ .long sys_flistxattr
++ .long sys_removexattr
++ .long sys_lremovexattr
++ .long sys_fremovexattr /* 235 */
+ .long sys_gettid
+ .long sys_tkill
+ .rept 255-237
+--- linux-2.4.20/arch/s390x/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/s390x/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+ # CONFIG_ISA is not set
+ # CONFIG_EISA is not set
+ # CONFIG_MCA is not set
+--- linux-2.4.20/arch/s390x/kernel/entry.S~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/s390x/kernel/entry.S Sat Apr 5 03:57:18 2003
+@@ -591,18 +591,18 @@ sys_call_table:
+ .long SYSCALL(sys_ni_syscall,sys32_fcntl64_wrapper)
+ .long SYSCALL(sys_ni_syscall,sys_ni_syscall)
+ .long SYSCALL(sys_ni_syscall,sys_ni_syscall)
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 224 - reserved for setxattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 225 - reserved for lsetxattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 226 - reserved for fsetxattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 227 - reserved for getxattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 228 - reserved for lgetxattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 229 - reserved for fgetxattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 230 - reserved for listxattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 231 - reserved for llistxattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 232 - reserved for flistxattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 233 - reserved for removexattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 234 - reserved for lremovexattr */
+- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 235 - reserved for fremovexattr */
++ .long SYSCALL(sys_setxattr,sys32_setxattr_wrapper)
++ .long SYSCALL(sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */
++ .long SYSCALL(sys_fsetxattr,sys32_fsetxattr_wrapper)
++ .long SYSCALL(sys_getxattr,sys32_getxattr_wrapper)
++ .long SYSCALL(sys_lgetxattr,sys32_lgetxattr_wrapper)
++ .long SYSCALL(sys_fgetxattr,sys32_fgetxattr_wrapper)
++ .long SYSCALL(sys_listxattr,sys32_listxattr_wrapper) /* 230 */
++ .long SYSCALL(sys_llistxattr,sys32_llistxattr_wrapper)
++ .long SYSCALL(sys_flistxattr,sys32_flistxattr_wrapper)
++ .long SYSCALL(sys_removexattr,sys32_removexattr_wrapper)
++ .long SYSCALL(sys_lremovexattr,sys32_lremovexattr_wrapper)
++ .long SYSCALL(sys_fremovexattr,sys32_fremovexattr_wrapper)/* 235 */
+ .long SYSCALL(sys_gettid,sys_gettid)
+ .long SYSCALL(sys_tkill,sys_tkill)
+ .rept 255-237
+--- linux-2.4.20/arch/s390x/kernel/wrapper32.S~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/s390x/kernel/wrapper32.S Sat Apr 5 03:57:18 2003
+@@ -1091,3 +1091,95 @@ sys32_fstat64_wrapper:
+ llgtr %r3,%r3 # struct stat64 *
+ llgfr %r4,%r4 # long
+ jg sys32_fstat64 # branch to system call
++
++ .globl sys32_setxattr_wrapper
++sys32_setxattr_wrapper:
++ llgtr %r2,%r2 # char *
++ llgtr %r3,%r3 # char *
++ llgtr %r4,%r4 # void *
++ llgfr %r5,%r5 # size_t
++ lgfr %r6,%r6 # int
++ jg sys_setxattr
++
++ .globl sys32_lsetxattr_wrapper
++sys32_lsetxattr_wrapper:
++ llgtr %r2,%r2 # char *
++ llgtr %r3,%r3 # char *
++ llgtr %r4,%r4 # void *
++ llgfr %r5,%r5 # size_t
++ lgfr %r6,%r6 # int
++ jg sys_lsetxattr
++
++ .globl sys32_fsetxattr_wrapper
++sys32_fsetxattr_wrapper:
++ lgfr %r2,%r2 # int
++ llgtr %r3,%r3 # char *
++ llgtr %r4,%r4 # void *
++ llgfr %r5,%r5 # size_t
++ lgfr %r6,%r6 # int
++ jg sys_fsetxattr
++
++ .globl sys32_getxattr_wrapper
++sys32_getxattr_wrapper:
++ llgtr %r2,%r2 # char *
++ llgtr %r3,%r3 # char *
++ llgtr %r4,%r4 # void *
++ llgfr %r5,%r5 # size_t
++ jg sys_getxattr
++
++ .globl sys32_lgetxattr_wrapper
++sys32_lgetxattr_wrapper:
++ llgtr %r2,%r2 # char *
++ llgtr %r3,%r3 # char *
++ llgtr %r4,%r4 # void *
++ llgfr %r5,%r5 # size_t
++ jg sys_lgetxattr
++
++ .globl sys32_fgetxattr_wrapper
++sys32_fgetxattr_wrapper:
++ lgfr %r2,%r2 # int
++ llgtr %r3,%r3 # char *
++ llgtr %r4,%r4 # void *
++ llgfr %r5,%r5 # size_t
++ jg sys_fgetxattr
++
++ .globl sys32_listxattr_wrapper
++sys32_listxattr_wrapper:
++ llgtr %r2,%r2 # char *
++ llgtr %r3,%r3 # char *
++ llgfr %r4,%r4 # size_t
++ jg sys_listxattr
++
++ .globl sys32_llistxattr_wrapper
++sys32_llistxattr_wrapper:
++ llgtr %r2,%r2 # char *
++ llgtr %r3,%r3 # char *
++ llgfr %r4,%r4 # size_t
++ jg sys_llistxattr
++
++ .globl sys32_flistxattr_wrapper
++sys32_flistxattr_wrapper:
++ lgfr %r2,%r2 # int
++ llgtr %r3,%r3 # char *
++ llgfr %r4,%r4 # size_t
++ jg sys_flistxattr
++
++ .globl sys32_removexattr_wrapper
++sys32_removexattr_wrapper:
++ llgtr %r2,%r2 # char *
++ llgtr %r3,%r3 # char *
++ jg sys_removexattr
++
++ .globl sys32_lremovexattr_wrapper
++sys32_lremovexattr_wrapper:
++ llgtr %r2,%r2 # char *
++ llgtr %r3,%r3 # char *
++ jg sys_lremovexattr
++
++ .globl sys32_fremovexattr_wrapper
++sys32_fremovexattr_wrapper:
++ lgfr %r2,%r2 # int
++ llgtr %r3,%r3 # char *
++ jg sys_fremovexattr
++
++
+--- linux-2.4.20/arch/sparc/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/sparc/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+ CONFIG_UID16=y
+ CONFIG_HIGHMEM=y
+
+--- linux-2.4.20/arch/sparc/kernel/systbls.S~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/sparc/kernel/systbls.S Sat Apr 5 03:57:18 2003
+@@ -51,11 +51,11 @@ sys_call_table:
+ /*150*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
+ /*155*/ .long sys_fcntl64, sys_nis_syscall, sys_statfs, sys_fstatfs, sys_oldumount
+ /*160*/ .long sys_nis_syscall, sys_nis_syscall, sys_getdomainname, sys_setdomainname, sys_nis_syscall
+-/*165*/ .long sys_quotactl, sys_nis_syscall, sys_mount, sys_ustat, sys_nis_syscall
+-/*170*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getdents
+-/*175*/ .long sys_setsid, sys_fchdir, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+-/*180*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_sigpending, sys_query_module
+-/*185*/ .long sys_setpgid, sys_nis_syscall, sys_tkill, sys_nis_syscall, sys_newuname
++/*165*/ .long sys_quotactl, sys_nis_syscall, sys_mount, sys_ustat, sys_setxattr
++/*170*/ .long sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
++/*175*/ .long sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
++/*180*/ .long sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_sigpending, sys_query_module
++/*185*/ .long sys_setpgid, sys_fremovexattr, sys_tkill, sys_nis_syscall, sys_newuname
+ /*190*/ .long sys_init_module, sys_personality, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ /*195*/ .long sys_nis_syscall, sys_nis_syscall, sys_getppid, sparc_sigaction, sys_sgetmask
+ /*200*/ .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, old_readdir
+--- linux-2.4.20/arch/sparc64/defconfig~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/sparc64/defconfig Sat Apr 5 03:57:18 2003
+@@ -1,6 +1,13 @@
+ #
+ # Automatically generated make config: don't edit
+ #
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT3_FS_XATTR_SHARING is not set
++# CONFIG_EXT3_FS_XATTR_USER is not set
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XATTR_SHARING is not set
++# CONFIG_EXT2_FS_XATTR_USER is not set
++# CONFIG_FS_MBCACHE is not set
+
+ #
+ # Code maturity level options
+--- linux-2.4.20/arch/sparc64/kernel/systbls.S~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/arch/sparc64/kernel/systbls.S Sat Apr 5 03:57:18 2003
+@@ -52,11 +52,11 @@ sys_call_table32:
+ /*150*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
+ .word sys32_fcntl64, sys_nis_syscall, sys32_statfs, sys32_fstatfs, sys_oldumount
+ /*160*/ .word sys_nis_syscall, sys_nis_syscall, sys_getdomainname, sys_setdomainname, sys_nis_syscall
+- .word sys32_quotactl, sys_nis_syscall, sys32_mount, sys_ustat, sys_nis_syscall
+-/*170*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_getdents
+- .word sys_setsid, sys_fchdir, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+-/*180*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_sigpending, sys32_query_module
+- .word sys_setpgid, sys_nis_syscall, sys_tkill, sys_nis_syscall, sparc64_newuname
++ .word sys32_quotactl, sys_nis_syscall, sys32_mount, sys_ustat, sys_setxattr
++/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys32_getdents
++ .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
++/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys32_sigpending, sys32_query_module
++ .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_nis_syscall, sparc64_newuname
+ /*190*/ .word sys32_init_module, sparc64_personality, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .word sys_nis_syscall, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
+ /*200*/ .word sys_ssetmask, sys_sigsuspend, sys32_newlstat, sys_uselib, old32_readdir
+@@ -111,11 +111,11 @@ sys_call_table:
+ /*150*/ .word sys_getsockname, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
+ .word sys_nis_syscall, sys_nis_syscall, sys_statfs, sys_fstatfs, sys_oldumount
+ /*160*/ .word sys_nis_syscall, sys_nis_syscall, sys_getdomainname, sys_setdomainname, sys_utrap_install
+- .word sys_quotactl, sys_nis_syscall, sys_mount, sys_ustat, sys_nis_syscall
+-/*170*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getdents
+- .word sys_setsid, sys_fchdir, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+-/*180*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_query_module
+- .word sys_setpgid, sys_nis_syscall, sys_tkill, sys_nis_syscall, sparc64_newuname
++ .word sys_quotactl, sys_nis_syscall, sys_mount, sys_ustat, sys_setxattr
++/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
++ .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
++/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_query_module
++ .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_nis_syscall, sparc64_newuname
+ /*190*/ .word sys_init_module, sparc64_personality, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .word sys_nis_syscall, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask
+ /*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
+--- linux-2.4.20/fs/Config.in~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/Config.in Sat Apr 5 03:57:18 2003
+@@ -25,6 +25,11 @@ dep_mbool ' Debug Befs' CONFIG_BEFS_DEB
+ dep_tristate 'BFS file system support (EXPERIMENTAL)' CONFIG_BFS_FS $CONFIG_EXPERIMENTAL
+
+ tristate 'Ext3 journalling file system support' CONFIG_EXT3_FS
++dep_mbool ' Ext3 extended attributes' CONFIG_EXT3_FS_XATTR $CONFIG_EXT3_FS
++dep_bool ' Ext3 extended attribute block sharing' \
++ CONFIG_EXT3_FS_XATTR_SHARING $CONFIG_EXT3_FS_XATTR
++dep_bool ' Ext3 extended user attributes' \
++ CONFIG_EXT3_FS_XATTR_USER $CONFIG_EXT3_FS_XATTR
+ # CONFIG_JBD could be its own option (even modular), but until there are
+ # other users than ext3, we will simply make it be the same as CONFIG_EXT3_FS
+ # dep_tristate ' Journal Block Device support (JBD for ext3)' CONFIG_JBD $CONFIG_EXT3_FS
+@@ -84,6 +89,11 @@ dep_mbool ' QNX4FS write support (DANGE
+ tristate 'ROM file system support' CONFIG_ROMFS_FS
+
+ tristate 'Second extended fs support' CONFIG_EXT2_FS
++dep_mbool ' Ext2 extended attributes' CONFIG_EXT2_FS_XATTR $CONFIG_EXT2_FS
++dep_bool ' Ext2 extended attribute block sharing' \
++ CONFIG_EXT2_FS_XATTR_SHARING $CONFIG_EXT2_FS_XATTR
++dep_bool ' Ext2 extended user attributes' \
++ CONFIG_EXT2_FS_XATTR_USER $CONFIG_EXT2_FS_XATTR
+
+ tristate 'System V/Xenix/V7/Coherent file system support' CONFIG_SYSV_FS
+
+@@ -155,6 +165,10 @@ else
+ define_tristate CONFIG_ZISOFS_FS n
+ fi
+
++# Meta block cache for Extended Attributes (ext2/ext3)
++#tristate 'Meta block cache' CONFIG_FS_MBCACHE
++define_tristate CONFIG_FS_MBCACHE y
++
+ mainmenu_option next_comment
+ comment 'Partition Types'
+ source fs/partitions/Config.in
+--- linux-2.4.20/fs/Makefile~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/Makefile Sat Apr 5 03:57:18 2003
+@@ -79,6 +79,9 @@ obj-y += binfmt_script.o
+
+ obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o
+
++export-objs += mbcache.o
++obj-$(CONFIG_FS_MBCACHE) += mbcache.o
++
+ # persistent filesystems
+ obj-y += $(join $(subdir-y),$(subdir-y:%=/%.o))
+
+--- linux-2.4.20/fs/ext2/Makefile~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext2/Makefile Sat Apr 5 03:57:18 2003
+@@ -13,4 +13,8 @@ obj-y := balloc.o bitmap.o dir.o file
+ ioctl.o namei.o super.o symlink.o
+ obj-m := $(O_TARGET)
+
++export-objs += xattr.o
++obj-$(CONFIG_EXT2_FS_XATTR) += xattr.o
++obj-$(CONFIG_EXT2_FS_XATTR_USER) += xattr_user.o
++
+ include $(TOPDIR)/Rules.make
+--- linux-2.4.20/fs/ext2/file.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext2/file.c Sat Apr 5 03:57:18 2003
+@@ -20,6 +20,7 @@
+
+ #include <linux/fs.h>
+ #include <linux/ext2_fs.h>
++#include <linux/ext2_xattr.h>
+ #include <linux/sched.h>
+
+ /*
+@@ -51,4 +52,8 @@ struct file_operations ext2_file_operati
+
+ struct inode_operations ext2_file_inode_operations = {
+ truncate: ext2_truncate,
++ setxattr: ext2_setxattr,
++ getxattr: ext2_getxattr,
++ listxattr: ext2_listxattr,
++ removexattr: ext2_removexattr,
+ };
+--- linux-2.4.20/fs/ext2/ialloc.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext2/ialloc.c Sat Apr 5 03:57:18 2003
+@@ -15,6 +15,7 @@
+ #include <linux/config.h>
+ #include <linux/fs.h>
+ #include <linux/ext2_fs.h>
++#include <linux/ext2_xattr.h>
+ #include <linux/locks.h>
+ #include <linux/quotaops.h>
+
+@@ -167,6 +168,7 @@ void ext2_free_inode (struct inode * ino
+ */
+ if (!is_bad_inode(inode)) {
+ /* Quota is already initialized in iput() */
++ ext2_xattr_delete_inode(inode);
+ DQUOT_FREE_INODE(inode);
+ DQUOT_DROP(inode);
+ }
+--- linux-2.4.20/fs/ext2/inode.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext2/inode.c Sat Apr 5 03:57:18 2003
+@@ -39,6 +39,18 @@ MODULE_LICENSE("GPL");
+ static int ext2_update_inode(struct inode * inode, int do_sync);
+
+ /*
++ * Test whether an inode is a fast symlink.
++ */
++static inline int ext2_inode_is_fast_symlink(struct inode *inode)
++{
++ int ea_blocks = inode->u.ext2_i.i_file_acl ?
++ (inode->i_sb->s_blocksize >> 9) : 0;
++
++ return (S_ISLNK(inode->i_mode) &&
++ inode->i_blocks - ea_blocks == 0);
++}
++
++/*
+ * Called at each iput()
+ */
+ void ext2_put_inode (struct inode * inode)
+@@ -53,9 +65,7 @@ void ext2_delete_inode (struct inode * i
+ {
+ lock_kernel();
+
+- if (is_bad_inode(inode) ||
+- inode->i_ino == EXT2_ACL_IDX_INO ||
+- inode->i_ino == EXT2_ACL_DATA_INO)
++ if (is_bad_inode(inode))
+ goto no_delete;
+ inode->u.ext2_i.i_dtime = CURRENT_TIME;
+ mark_inode_dirty(inode);
+@@ -801,6 +811,8 @@ void ext2_truncate (struct inode * inode
+ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
+ return;
++ if (ext2_inode_is_fast_symlink(inode))
++ return;
+ if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ return;
+
+@@ -888,8 +900,7 @@ void ext2_read_inode (struct inode * ino
+ unsigned long offset;
+ struct ext2_group_desc * gdp;
+
+- if ((inode->i_ino != EXT2_ROOT_INO && inode->i_ino != EXT2_ACL_IDX_INO &&
+- inode->i_ino != EXT2_ACL_DATA_INO &&
++ if ((inode->i_ino != EXT2_ROOT_INO &&
+ inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) ||
+ inode->i_ino > le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_inodes_count)) {
+ ext2_error (inode->i_sb, "ext2_read_inode",
+@@ -974,10 +985,7 @@ void ext2_read_inode (struct inode * ino
+ for (block = 0; block < EXT2_N_BLOCKS; block++)
+ inode->u.ext2_i.i_data[block] = raw_inode->i_block[block];
+
+- if (inode->i_ino == EXT2_ACL_IDX_INO ||
+- inode->i_ino == EXT2_ACL_DATA_INO)
+- /* Nothing to do */ ;
+- else if (S_ISREG(inode->i_mode)) {
++ if (S_ISREG(inode->i_mode)) {
+ inode->i_op = &ext2_file_inode_operations;
+ inode->i_fop = &ext2_file_operations;
+ inode->i_mapping->a_ops = &ext2_aops;
+@@ -986,15 +994,17 @@ void ext2_read_inode (struct inode * ino
+ inode->i_fop = &ext2_dir_operations;
+ inode->i_mapping->a_ops = &ext2_aops;
+ } else if (S_ISLNK(inode->i_mode)) {
+- if (!inode->i_blocks)
++ if (ext2_inode_is_fast_symlink(inode))
+ inode->i_op = &ext2_fast_symlink_inode_operations;
+ else {
+- inode->i_op = &page_symlink_inode_operations;
++ inode->i_op = &ext2_symlink_inode_operations;
+ inode->i_mapping->a_ops = &ext2_aops;
+ }
+- } else
++ } else {
++ inode->i_op = &ext2_special_inode_operations;
+ init_special_inode(inode, inode->i_mode,
+ le32_to_cpu(raw_inode->i_block[0]));
++ }
+ brelse (bh);
+ inode->i_attr_flags = 0;
+ if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL) {
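The ext2_inode_is_fast_symlink() helper added above replaces the old !inode->i_blocks test: a fast symlink that also owns an extended attribute block has a non-zero i_blocks, so the helper first subtracts the sectors accounted to that block. A standalone restatement of the arithmetic; the block size and the helper name here are assumptions chosen for the example, and the snippet is not part of the patch.

/* Sketch only: with a 1 KiB block size an EA block adds 1024 >> 9 == 2
 * sectors to i_blocks, so a fast symlink carrying xattrs has i_blocks == 2. */
#include <stdio.h>

static int is_fast_symlink(unsigned long i_blocks, unsigned long block_size,
			   int has_ea_block)
{
	unsigned long ea_blocks = has_ea_block ? (block_size >> 9) : 0;

	return i_blocks - ea_blocks == 0;
}

int main(void)
{
	printf("plain fast symlink:        %d\n", is_fast_symlink(0, 1024, 0));
	printf("fast symlink with xattrs:  %d\n", is_fast_symlink(2, 1024, 1));
	printf("block-backed (slow) link:  %d\n", is_fast_symlink(2, 1024, 0));
	return 0;
}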
+--- linux-2.4.20/fs/ext2/namei.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext2/namei.c Sat Apr 5 03:57:18 2003
+@@ -31,6 +31,7 @@
+
+ #include <linux/fs.h>
+ #include <linux/ext2_fs.h>
++#include <linux/ext2_xattr.h>
+ #include <linux/pagemap.h>
+
+ /*
+@@ -136,7 +137,7 @@ static int ext2_symlink (struct inode *
+
+ if (l > sizeof (inode->u.ext2_i.i_data)) {
+ /* slow symlink */
+- inode->i_op = &page_symlink_inode_operations;
++ inode->i_op = &ext2_symlink_inode_operations;
+ inode->i_mapping->a_ops = &ext2_aops;
+ err = block_symlink(inode, symname, l);
+ if (err)
+@@ -345,4 +346,15 @@ struct inode_operations ext2_dir_inode_o
+ rmdir: ext2_rmdir,
+ mknod: ext2_mknod,
+ rename: ext2_rename,
++ setxattr: ext2_setxattr,
++ getxattr: ext2_getxattr,
++ listxattr: ext2_listxattr,
++ removexattr: ext2_removexattr,
++};
++
++struct inode_operations ext2_special_inode_operations = {
++ setxattr: ext2_setxattr,
++ getxattr: ext2_getxattr,
++ listxattr: ext2_listxattr,
++ removexattr: ext2_removexattr,
+ };
+--- linux-2.4.20/fs/ext2/super.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext2/super.c Sat Apr 5 03:57:18 2003
+@@ -21,6 +21,7 @@
+ #include <linux/string.h>
+ #include <linux/fs.h>
+ #include <linux/ext2_fs.h>
++#include <linux/ext2_xattr.h>
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/locks.h>
+@@ -125,6 +126,7 @@ void ext2_put_super (struct super_block
+ int db_count;
+ int i;
+
++ ext2_xattr_put_super(sb);
+ if (!(sb->s_flags & MS_RDONLY)) {
+ struct ext2_super_block *es = EXT2_SB(sb)->s_es;
+
+@@ -175,6 +177,13 @@ static int parse_options (char * options
+ this_char = strtok (NULL, ",")) {
+ if ((value = strchr (this_char, '=')) != NULL)
+ *value++ = 0;
++#ifdef CONFIG_EXT2_FS_XATTR_USER
++ if (!strcmp (this_char, "user_xattr"))
++ set_opt (*mount_options, XATTR_USER);
++ else if (!strcmp (this_char, "nouser_xattr"))
++ clear_opt (*mount_options, XATTR_USER);
++ else
++#endif
+ if (!strcmp (this_char, "bsddf"))
+ clear_opt (*mount_options, MINIX_DF);
+ else if (!strcmp (this_char, "nouid32")) {
+@@ -424,6 +433,9 @@ struct super_block * ext2_read_super (st
+ blocksize = BLOCK_SIZE;
+
+ sb->u.ext2_sb.s_mount_opt = 0;
++#ifdef CONFIG_EXT2_FS_XATTR_USER
++ /* set_opt (sb->u.ext2_sb.s_mount_opt, XATTR_USER); */
++#endif
+ if (!parse_options ((char *) data, &sb_block, &resuid, &resgid,
+ &sb->u.ext2_sb.s_mount_opt)) {
+ return NULL;
+@@ -813,12 +825,27 @@ static DECLARE_FSTYPE_DEV(ext2_fs_type,
+
+ static int __init init_ext2_fs(void)
+ {
+- return register_filesystem(&ext2_fs_type);
++ int error = init_ext2_xattr();
++ if (error)
++ return error;
++ error = init_ext2_xattr_user();
++ if (error)
++ goto fail;
++ error = register_filesystem(&ext2_fs_type);
++ if (!error)
++ return 0;
++
++ exit_ext2_xattr_user();
++fail:
++ exit_ext2_xattr();
++ return error;
+ }
+
+ static void __exit exit_ext2_fs(void)
+ {
+ unregister_filesystem(&ext2_fs_type);
++ exit_ext2_xattr_user();
++ exit_ext2_xattr();
+ }
+
+ EXPORT_NO_SYMBOLS;
+--- linux-2.4.20/fs/ext2/symlink.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext2/symlink.c Sat Apr 5 03:57:18 2003
+@@ -19,6 +19,7 @@
+
+ #include <linux/fs.h>
+ #include <linux/ext2_fs.h>
++#include <linux/ext2_xattr.h>
+
+ static int ext2_readlink(struct dentry *dentry, char *buffer, int buflen)
+ {
+@@ -32,7 +33,20 @@ static int ext2_follow_link(struct dentr
+ return vfs_follow_link(nd, s);
+ }
+
++struct inode_operations ext2_symlink_inode_operations = {
++ readlink: page_readlink,
++ follow_link: page_follow_link,
++ setxattr: ext2_setxattr,
++ getxattr: ext2_getxattr,
++ listxattr: ext2_listxattr,
++ removexattr: ext2_removexattr,
++};
++
+ struct inode_operations ext2_fast_symlink_inode_operations = {
+ readlink: ext2_readlink,
+ follow_link: ext2_follow_link,
++ setxattr: ext2_setxattr,
++ getxattr: ext2_getxattr,
++ listxattr: ext2_listxattr,
++ removexattr: ext2_removexattr,
+ };
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.20-braam/fs/ext2/xattr.c Sat Apr 5 03:57:18 2003
+@@ -0,0 +1,1212 @@
++/*
++ * linux/fs/ext2/xattr.c
++ *
++ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
++ *
++ * Fix by Harrison Xing <harrison@mountainviewdata.com>.
++ * Extended attributes for symlinks and special files added per
++ * suggestion of Luka Renko <luka.renko@hermes.si>.
++ */
++
++/*
++ * Extended attributes are stored on disk blocks allocated outside of
++ * any inode. The i_file_acl field is then made to point to this allocated
++ * block. If several inodes have identical extended attributes, these
++ * inodes may share the same extended attribute block. Such situations
++ * are automatically detected by keeping a cache of recent attribute block
++ * numbers and hashes over the block's contents in memory.
++ *
++ *
++ * Extended attribute block layout:
++ *
++ * +------------------+
++ * | header |
++ * | entry 1 | |
++ * | entry 2 | | growing downwards
++ * | entry 3 | v
++ * | four null bytes |
++ * | . . . |
++ * | value 1 | ^
++ * | value 3 | | growing upwards
++ * | value 2 | |
++ * +------------------+
++ *
++ * The block header is followed by multiple entry descriptors. These entry
++ * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
++ * byte boundaries. The entry descriptors are sorted by attribute name,
++ * so that two extended attribute blocks can be compared efficiently.
++ *
++ * Attribute values are aligned to the end of the block, stored in
++ * no specific order. They are also padded to EXT2_XATTR_PAD byte
++ * boundaries. No additional gaps are left between them.
++ *
++ * Locking strategy
++ * ----------------
++ * The VFS already holds the BKL and the inode->i_sem semaphore when any of
++ * the xattr inode operations are called, so we are guaranteed that only one
++ * process accesses extended attributes of an inode at any time.
++ *
++ * For writing we also grab the ext2_xattr_sem semaphore. This ensures that
++ * only a single process is modifying an extended attribute block, even
++ * if the block is shared among inodes.
++ *
++ * Note for porting to 2.5
++ * -----------------------
++ * The BKL will no longer be held in the xattr inode operations.
++ */
++
++#include <linux/module.h>
++#include <linux/locks.h>
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/ext2_fs.h>
++#include <linux/ext2_xattr.h>
++#include <linux/mbcache.h>
++#include <linux/quotaops.h>
++#include <asm/semaphore.h>
++#include <linux/compatmac.h>
++
++/* These symbols may be needed by a module. */
++EXPORT_SYMBOL(ext2_xattr_register);
++EXPORT_SYMBOL(ext2_xattr_unregister);
++EXPORT_SYMBOL(ext2_xattr_get);
++EXPORT_SYMBOL(ext2_xattr_list);
++EXPORT_SYMBOL(ext2_xattr_set);
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
++# define mark_buffer_dirty(bh) mark_buffer_dirty(bh, 1)
++#endif
++
++#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
++#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
++#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
++#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
++
++#ifdef EXT2_XATTR_DEBUG
++# define ea_idebug(inode, f...) do { \
++ printk(KERN_DEBUG "inode %s:%ld: ", \
++ kdevname(inode->i_dev), inode->i_ino); \
++ printk(f); \
++ printk("\n"); \
++ } while (0)
++# define ea_bdebug(bh, f...) do { \
++ printk(KERN_DEBUG "block %s:%ld: ", \
++ kdevname(bh->b_dev), bh->b_blocknr); \
++ printk(f); \
++ printk("\n"); \
++ } while (0)
++#else
++# define ea_idebug(f...)
++# define ea_bdebug(f...)
++#endif
++
++static int ext2_xattr_set2(struct inode *, struct buffer_head *,
++ struct ext2_xattr_header *);
++
++#ifdef CONFIG_EXT2_FS_XATTR_SHARING
++
++static int ext2_xattr_cache_insert(struct buffer_head *);
++static struct buffer_head *ext2_xattr_cache_find(struct inode *,
++ struct ext2_xattr_header *);
++static void ext2_xattr_cache_remove(struct buffer_head *);
++static void ext2_xattr_rehash(struct ext2_xattr_header *,
++ struct ext2_xattr_entry *);
++
++static struct mb_cache *ext2_xattr_cache;
++
++#else
++# define ext2_xattr_cache_insert(bh) 0
++# define ext2_xattr_cache_find(inode, header) NULL
++# define ext2_xattr_cache_remove(bh) do { } while(0)
++# define ext2_xattr_rehash(header, entry) do { } while(0)
++#endif
++
++/*
++ * If a file system does not share extended attributes among inodes,
++ * we should not need the ext2_xattr_sem semaphore. However, the
++ * filesystem may still contain shared blocks, so we always take
++ * the lock.
++ */
++
++DECLARE_MUTEX(ext2_xattr_sem);
++
++static inline int
++ext2_xattr_new_block(struct inode *inode, int * errp, int force)
++{
++ struct super_block *sb = inode->i_sb;
++ int goal = le32_to_cpu(EXT2_SB(sb)->s_es->s_first_data_block) +
++ EXT2_I(inode)->i_block_group * EXT2_BLOCKS_PER_GROUP(sb);
++
++ /* How can we enforce the allocation? */
++ int block = ext2_new_block(inode, goal, 0, 0, errp);
++#ifdef OLD_QUOTAS
++ if (!*errp)
++ inode->i_blocks += inode->i_sb->s_blocksize >> 9;
++#endif
++ return block;
++}
++
++static inline int
++ext2_xattr_quota_alloc(struct inode *inode, int force)
++{
++ /* How can we enforce the allocation? */
++#ifdef OLD_QUOTAS
++ int error = DQUOT_ALLOC_BLOCK(inode->i_sb, inode, 1);
++ if (!error)
++ inode->i_blocks += inode->i_sb->s_blocksize >> 9;
++#else
++ int error = DQUOT_ALLOC_BLOCK(inode, 1);
++#endif
++ return error;
++}
++
++#ifdef OLD_QUOTAS
++
++static inline void
++ext2_xattr_quota_free(struct inode *inode)
++{
++ DQUOT_FREE_BLOCK(inode->i_sb, inode, 1);
++ inode->i_blocks -= inode->i_sb->s_blocksize >> 9;
++}
++
++static inline void
++ext2_xattr_free_block(struct inode * inode, unsigned long block)
++{
++ ext2_free_blocks(inode, block, 1);
++ inode->i_blocks -= inode->i_sb->s_blocksize >> 9;
++}
++
++#else
++# define ext2_xattr_quota_free(inode) \
++ DQUOT_FREE_BLOCK(inode, 1)
++# define ext2_xattr_free_block(inode, block) \
++ ext2_free_blocks(inode, block, 1)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
++
++static inline struct buffer_head *
++sb_bread(struct super_block *sb, int block)
++{
++ return bread(sb->s_dev, block, sb->s_blocksize);
++}
++
++static inline struct buffer_head *
++sb_getblk(struct super_block *sb, int block)
++{
++ return getblk(sb->s_dev, block, sb->s_blocksize);
++}
++
++#endif
++
++struct ext2_xattr_handler *ext2_xattr_handlers[EXT2_XATTR_INDEX_MAX];
++rwlock_t ext2_handler_lock = RW_LOCK_UNLOCKED;
++
++int
++ext2_xattr_register(int name_index, struct ext2_xattr_handler *handler)
++{
++ int error = -EINVAL;
++
++ if (name_index > 0 && name_index <= EXT2_XATTR_INDEX_MAX) {
++ write_lock(&ext2_handler_lock);
++ if (!ext2_xattr_handlers[name_index-1]) {
++ ext2_xattr_handlers[name_index-1] = handler;
++ error = 0;
++ }
++ write_unlock(&ext2_handler_lock);
++ }
++ return error;
++}
++
++void
++ext2_xattr_unregister(int name_index, struct ext2_xattr_handler *handler)
++{
++ if (name_index > 0 && name_index <= EXT2_XATTR_INDEX_MAX) {
++ write_lock(&ext2_handler_lock);
++ ext2_xattr_handlers[name_index-1] = NULL;
++ write_unlock(&ext2_handler_lock);
++ }
++}
++
++static inline const char *
++strcmp_prefix(const char *a, const char *a_prefix)
++{
++ while (*a_prefix && *a == *a_prefix) {
++ a++;
++ a_prefix++;
++ }
++ return *a_prefix ? NULL : a;
++}
++
++/*
++ * Decode the extended attribute name, and translate it into
++ * the name_index and name suffix.
++ */
++static struct ext2_xattr_handler *
++ext2_xattr_resolve_name(const char **name)
++{
++ struct ext2_xattr_handler *handler = NULL;
++ int i;
++
++ if (!*name)
++ return NULL;
++ read_lock(&ext2_handler_lock);
++ for (i=0; i<EXT2_XATTR_INDEX_MAX; i++) {
++ if (ext2_xattr_handlers[i]) {
++ const char *n = strcmp_prefix(*name,
++ ext2_xattr_handlers[i]->prefix);
++ if (n) {
++ handler = ext2_xattr_handlers[i];
++ *name = n;
++ break;
++ }
++ }
++ }
++ read_unlock(&ext2_handler_lock);
++ return handler;
++}
++
++static inline struct ext2_xattr_handler *
++ext2_xattr_handler(int name_index)
++{
++ struct ext2_xattr_handler *handler = NULL;
++ if (name_index > 0 && name_index <= EXT2_XATTR_INDEX_MAX) {
++ read_lock(&ext2_handler_lock);
++ handler = ext2_xattr_handlers[name_index-1];
++ read_unlock(&ext2_handler_lock);
++ }
++ return handler;
++}
++
++/*
++ * Inode operation getxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++ssize_t
++ext2_getxattr(struct dentry *dentry, const char *name,
++ void *buffer, size_t size)
++{
++ struct ext2_xattr_handler *handler;
++ struct inode *inode = dentry->d_inode;
++
++ handler = ext2_xattr_resolve_name(&name);
++ if (!handler)
++ return -ENOTSUP;
++ return handler->get(inode, name, buffer, size);
++}
++
++/*
++ * Inode operation listxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++ssize_t
++ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
++{
++ return ext2_xattr_list(dentry->d_inode, buffer, size);
++}
++
++/*
++ * Inode operation setxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++int
++ext2_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags)
++{
++ struct ext2_xattr_handler *handler;
++ struct inode *inode = dentry->d_inode;
++
++ if (size == 0)
++ value = ""; /* empty EA, do not remove */
++ handler = ext2_xattr_resolve_name(&name);
++ if (!handler)
++ return -ENOTSUP;
++ return handler->set(inode, name, value, size, flags);
++}
++
++/*
++ * Inode operation removexattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++int
++ext2_removexattr(struct dentry *dentry, const char *name)
++{
++ struct ext2_xattr_handler *handler;
++ struct inode *inode = dentry->d_inode;
++
++ handler = ext2_xattr_resolve_name(&name);
++ if (!handler)
++ return -ENOTSUP;
++ return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
++}
++
++/*
++ * ext2_xattr_get()
++ *
++ * Copy an extended attribute into the buffer
++ * provided, or compute the buffer size required.
++ * Buffer is NULL to compute the size of the buffer required.
++ *
++ * Returns a negative error number on failure, or the number of bytes
++ * used / required on success.
++ */
++int
++ext2_xattr_get(struct inode *inode, int name_index, const char *name,
++ void *buffer, size_t buffer_size)
++{
++ struct buffer_head *bh = NULL;
++ struct ext2_xattr_entry *entry;
++ unsigned int block, size;
++ char *end;
++ int name_len, error;
++
++ ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
++ name_index, name, buffer, (long)buffer_size);
++
++ if (name == NULL)
++ return -EINVAL;
++ if (!EXT2_I(inode)->i_file_acl)
++ return -ENOATTR;
++ block = EXT2_I(inode)->i_file_acl;
++ ea_idebug(inode, "reading block %d", block);
++ bh = sb_bread(inode->i_sb, block);
++ if (!bh)
++ return -EIO;
++ ea_bdebug(bh, "b_count=%d, refcount=%d",
++ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
++ end = bh->b_data + bh->b_size;
++ if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
++ HDR(bh)->h_blocks != cpu_to_le32(1)) {
++bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ /* find named attribute */
++ name_len = strlen(name);
++
++ error = -ERANGE;
++ if (name_len > 255)
++ goto cleanup;
++ entry = FIRST_ENTRY(bh);
++ while (!IS_LAST_ENTRY(entry)) {
++ struct ext2_xattr_entry *next =
++ EXT2_XATTR_NEXT(entry);
++ if ((char *)next >= end)
++ goto bad_block;
++ if (name_index == entry->e_name_index &&
++ name_len == entry->e_name_len &&
++ memcmp(name, entry->e_name, name_len) == 0)
++ goto found;
++ entry = next;
++ }
++ /* Check the remaining name entries */
++ while (!IS_LAST_ENTRY(entry)) {
++ struct ext2_xattr_entry *next =
++ EXT2_XATTR_NEXT(entry);
++ if ((char *)next >= end)
++ goto bad_block;
++ entry = next;
++ }
++ if (ext2_xattr_cache_insert(bh))
++ ea_idebug(inode, "cache insert failed");
++ error = -ENOATTR;
++ goto cleanup;
++found:
++ /* check the buffer size */
++ if (entry->e_value_block != 0)
++ goto bad_block;
++ size = le32_to_cpu(entry->e_value_size);
++ if (size > inode->i_sb->s_blocksize ||
++ le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
++ goto bad_block;
++
++ if (ext2_xattr_cache_insert(bh))
++ ea_idebug(inode, "cache insert failed");
++ if (buffer) {
++ error = -ERANGE;
++ if (size > buffer_size)
++ goto cleanup;
++ /* return value of attribute */
++ memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
++ size);
++ }
++ error = size;
++
++cleanup:
++ brelse(bh);
++
++ return error;
++}
++
++/*
++ * ext2_xattr_list()
++ *
++ * Copy a list of attribute names into the buffer
++ * provided, or compute the buffer size required.
++ * Buffer is NULL to compute the size of the buffer required.
++ *
++ * Returns a negative error number on failure, or the number of bytes
++ * used / required on success.
++ */
++int
++ext2_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
++{
++ struct buffer_head *bh = NULL;
++ struct ext2_xattr_entry *entry;
++ unsigned int block, size = 0;
++ char *buf, *end;
++ int error;
++
++ ea_idebug(inode, "buffer=%p, buffer_size=%ld",
++ buffer, (long)buffer_size);
++
++ if (!EXT2_I(inode)->i_file_acl)
++ return 0;
++ block = EXT2_I(inode)->i_file_acl;
++ ea_idebug(inode, "reading block %d", block);
++ bh = sb_bread(inode->i_sb, block);
++ if (!bh)
++ return -EIO;
++ ea_bdebug(bh, "b_count=%d, refcount=%d",
++ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
++ end = bh->b_data + bh->b_size;
++ if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
++ HDR(bh)->h_blocks != cpu_to_le32(1)) {
++bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ /* compute the size required for the list of attribute names */
++ for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
++ entry = EXT2_XATTR_NEXT(entry)) {
++ struct ext2_xattr_handler *handler;
++ struct ext2_xattr_entry *next =
++ EXT2_XATTR_NEXT(entry);
++ if ((char *)next >= end)
++ goto bad_block;
++
++ handler = ext2_xattr_handler(entry->e_name_index);
++ if (handler)
++ size += handler->list(NULL, inode, entry->e_name,
++ entry->e_name_len);
++ }
++
++ if (ext2_xattr_cache_insert(bh))
++ ea_idebug(inode, "cache insert failed");
++ if (!buffer) {
++ error = size;
++ goto cleanup;
++ } else {
++ error = -ERANGE;
++ if (size > buffer_size)
++ goto cleanup;
++ }
++
++ /* list the attribute names */
++ buf = buffer;
++ for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
++ entry = EXT2_XATTR_NEXT(entry)) {
++ struct ext2_xattr_handler *handler;
++
++ handler = ext2_xattr_handler(entry->e_name_index);
++ if (handler)
++ buf += handler->list(buf, inode, entry->e_name,
++ entry->e_name_len);
++ }
++ error = size;
++
++cleanup:
++ brelse(bh);
++
++ return error;
++}
++
++/*
++ * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
++ * not set, set it.
++ */
++static void ext2_xattr_update_super_block(struct super_block *sb)
++{
++ if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
++ return;
++
++ lock_super(sb);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
++ EXT2_SB(sb)->s_feature_compat |= EXT2_FEATURE_COMPAT_EXT_ATTR;
++#endif
++ EXT2_SB(sb)->s_es->s_feature_compat |=
++ cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR);
++ sb->s_dirt = 1;
++ mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
++ unlock_super(sb);
++}
++
++/*
++ * ext2_xattr_set()
++ *
++ * Create, replace or remove an extended attribute for this inode. Buffer
++ * is NULL to remove an existing extended attribute, and non-NULL to
++ * either replace an existing extended attribute, or create a new extended
++ * attribute. The flags XATTR_REPLACE and XATTR_CREATE
++ * specify that an extended attribute must exist and must not exist
++ * prior to the call, respectively.
++ *
++ * Returns 0, or a negative error number on failure.
++ */
++int
++ext2_xattr_set(struct inode *inode, int name_index, const char *name,
++ const void *value, size_t value_len, int flags)
++{
++ struct super_block *sb = inode->i_sb;
++ struct buffer_head *bh = NULL;
++ struct ext2_xattr_header *header = NULL;
++ struct ext2_xattr_entry *here, *last;
++ unsigned int name_len;
++ int block = EXT2_I(inode)->i_file_acl;
++ int min_offs = sb->s_blocksize, not_found = 1, free, error;
++ char *end;
++
++ /*
++ * header -- Points either into bh, or to a temporarily
++ * allocated buffer.
++ * here -- The named entry found, or the place for inserting, within
++ * the block pointed to by header.
++ * last -- Points right after the last named entry within the block
++ * pointed to by header.
++ * min_offs -- The offset of the first value (values are aligned
++ * towards the end of the block).
++ * end -- Points right after the block pointed to by header.
++ */
++
++ ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
++ name_index, name, value, (long)value_len);
++
++ if (IS_RDONLY(inode))
++ return -EROFS;
++ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
++ return -EPERM;
++ if (value == NULL)
++ value_len = 0;
++ if (name == NULL)
++ return -EINVAL;
++ name_len = strlen(name);
++ if (name_len > 255 || value_len > sb->s_blocksize)
++ return -ERANGE;
++ down(&ext2_xattr_sem);
++
++ if (block) {
++ /* The inode already has an extended attribute block. */
++
++ bh = sb_bread(sb, block);
++ error = -EIO;
++ if (!bh)
++ goto cleanup;
++ ea_bdebug(bh, "b_count=%d, refcount=%d",
++ atomic_read(&(bh->b_count)),
++ le32_to_cpu(HDR(bh)->h_refcount));
++ header = HDR(bh);
++ end = bh->b_data + bh->b_size;
++ if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
++ header->h_blocks != cpu_to_le32(1)) {
++bad_block: ext2_error(sb, "ext2_xattr_set",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ /* Find the named attribute. */
++ here = FIRST_ENTRY(bh);
++ while (!IS_LAST_ENTRY(here)) {
++ struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
++ if ((char *)next >= end)
++ goto bad_block;
++ if (!here->e_value_block && here->e_value_size) {
++ int offs = le16_to_cpu(here->e_value_offs);
++ if (offs < min_offs)
++ min_offs = offs;
++ }
++ not_found = name_index - here->e_name_index;
++ if (!not_found)
++ not_found = name_len - here->e_name_len;
++ if (!not_found)
++ not_found = memcmp(name, here->e_name,name_len);
++ if (not_found <= 0)
++ break;
++ here = next;
++ }
++ last = here;
++ /* We still need to compute min_offs and last. */
++ while (!IS_LAST_ENTRY(last)) {
++ struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
++ if ((char *)next >= end)
++ goto bad_block;
++ if (!last->e_value_block && last->e_value_size) {
++ int offs = le16_to_cpu(last->e_value_offs);
++ if (offs < min_offs)
++ min_offs = offs;
++ }
++ last = next;
++ }
++
++ /* Check whether we have enough space left. */
++ free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
++ } else {
++ /* We will use a new extended attribute block. */
++ free = sb->s_blocksize -
++ sizeof(struct ext2_xattr_header) - sizeof(__u32);
++ here = last = NULL; /* avoid gcc uninitialized warning. */
++ }
++
++ if (not_found) {
++ /* Request to remove a nonexistent attribute? */
++ error = -ENOATTR;
++ if (flags & XATTR_REPLACE)
++ goto cleanup;
++ error = 0;
++ if (value == NULL)
++ goto cleanup;
++ else
++ free -= EXT2_XATTR_LEN(name_len);
++ } else {
++ /* Request to create an existing attribute? */
++ error = -EEXIST;
++ if (flags & XATTR_CREATE)
++ goto cleanup;
++ if (!here->e_value_block && here->e_value_size) {
++ unsigned int size = le32_to_cpu(here->e_value_size);
++
++ if (le16_to_cpu(here->e_value_offs) + size >
++ sb->s_blocksize || size > sb->s_blocksize)
++ goto bad_block;
++ free += EXT2_XATTR_SIZE(size);
++ }
++ }
++ free -= EXT2_XATTR_SIZE(value_len);
++ error = -ENOSPC;
++ if (free < 0)
++ goto cleanup;
++
++ /* Here we know that we can set the new attribute. */
++
++ if (header) {
++ if (header->h_refcount == cpu_to_le32(1)) {
++ ea_bdebug(bh, "modifying in-place");
++ ext2_xattr_cache_remove(bh);
++ } else {
++ int offset;
++
++ ea_bdebug(bh, "cloning");
++ header = kmalloc(bh->b_size, GFP_KERNEL);
++ error = -ENOMEM;
++ if (header == NULL)
++ goto cleanup;
++ memcpy(header, HDR(bh), bh->b_size);
++ header->h_refcount = cpu_to_le32(1);
++ offset = (char *)header - bh->b_data;
++ here = ENTRY((char *)here + offset);
++ last = ENTRY((char *)last + offset);
++ }
++ } else {
++ /* Allocate a buffer where we construct the new block. */
++ header = kmalloc(sb->s_blocksize, GFP_KERNEL);
++ error = -ENOMEM;
++ if (header == NULL)
++ goto cleanup;
++ memset(header, 0, sb->s_blocksize);
++ end = (char *)header + sb->s_blocksize;
++ header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
++ header->h_blocks = header->h_refcount = cpu_to_le32(1);
++ last = here = ENTRY(header+1);
++ }
++
++ if (not_found) {
++ /* Insert the new name. */
++ int size = EXT2_XATTR_LEN(name_len);
++ int rest = (char *)last - (char *)here;
++ memmove((char *)here + size, here, rest);
++ memset(here, 0, size);
++ here->e_name_index = name_index;
++ here->e_name_len = name_len;
++ memcpy(here->e_name, name, name_len);
++ } else {
++ /* Remove the old value. */
++ if (!here->e_value_block && here->e_value_size) {
++ char *first_val = (char *)header + min_offs;
++ int offs = le16_to_cpu(here->e_value_offs);
++ char *val = (char *)header + offs;
++ size_t size = EXT2_XATTR_SIZE(
++ le32_to_cpu(here->e_value_size));
++ memmove(first_val + size, first_val, val - first_val);
++ memset(first_val, 0, size);
++ here->e_value_offs = 0;
++ min_offs += size;
++
++ /* Adjust all value offsets. */
++ last = ENTRY(header+1);
++ while (!IS_LAST_ENTRY(last)) {
++ int o = le16_to_cpu(last->e_value_offs);
++ if (!last->e_value_block && o < offs)
++ last->e_value_offs =
++ cpu_to_le16(o + size);
++ last = EXT2_XATTR_NEXT(last);
++ }
++ }
++ if (value == NULL) {
++ /* Remove this attribute. */
++ if (EXT2_XATTR_NEXT(ENTRY(header+1)) == last) {
++ /* This block is now empty. */
++ error = ext2_xattr_set2(inode, bh, NULL);
++ goto cleanup;
++ } else {
++ /* Remove the old name. */
++ int size = EXT2_XATTR_LEN(name_len);
++ last = ENTRY((char *)last - size);
++ memmove(here, (char*)here + size,
++ (char*)last - (char*)here);
++ memset(last, 0, size);
++ }
++ }
++ }
++
++ if (value != NULL) {
++ /* Insert the new value. */
++ here->e_value_size = cpu_to_le32(value_len);
++ if (value_len) {
++ size_t size = EXT2_XATTR_SIZE(value_len);
++ char *val = (char *)header + min_offs - size;
++ here->e_value_offs =
++ cpu_to_le16((char *)val - (char *)header);
++ memset(val + size - EXT2_XATTR_PAD, 0,
++ EXT2_XATTR_PAD); /* Clear the pad bytes. */
++ memcpy(val, value, value_len);
++ }
++ }
++ ext2_xattr_rehash(header, here);
++
++ error = ext2_xattr_set2(inode, bh, header);
++
++cleanup:
++ brelse(bh);
++ if (!(bh && header == HDR(bh)))
++ kfree(header);
++ up(&ext2_xattr_sem);
++
++ return error;
++}
++
++/*
++ * Second half of ext2_xattr_set(): Update the file system.
++ */
++static int
++ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
++ struct ext2_xattr_header *header)
++{
++ struct super_block *sb = inode->i_sb;
++ struct buffer_head *new_bh = NULL;
++ int error;
++
++ if (header) {
++ new_bh = ext2_xattr_cache_find(inode, header);
++ if (new_bh) {
++ /*
++ * We found an identical block in the cache.
++ * The old block will be released after updating
++ * the inode.
++ */
++ ea_bdebug(old_bh, "reusing block %ld",
++ new_bh->b_blocknr);
++
++ error = -EDQUOT;
++ if (ext2_xattr_quota_alloc(inode, 1))
++ goto cleanup;
++
++ HDR(new_bh)->h_refcount = cpu_to_le32(
++ le32_to_cpu(HDR(new_bh)->h_refcount) + 1);
++ ea_bdebug(new_bh, "refcount now=%d",
++ le32_to_cpu(HDR(new_bh)->h_refcount));
++ } else if (old_bh && header == HDR(old_bh)) {
++ /* Keep this block. */
++ new_bh = old_bh;
++ ext2_xattr_cache_insert(new_bh);
++ } else {
++ /* We need to allocate a new block */
++ int force = EXT2_I(inode)->i_file_acl != 0;
++ int block = ext2_xattr_new_block(inode, &error, force);
++ if (error)
++ goto cleanup;
++ ea_idebug(inode, "creating block %d", block);
++
++ new_bh = sb_getblk(sb, block);
++ if (!new_bh) {
++ ext2_xattr_free_block(inode, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ lock_buffer(new_bh);
++ memcpy(new_bh->b_data, header, new_bh->b_size);
++ mark_buffer_uptodate(new_bh, 1);
++ unlock_buffer(new_bh);
++ ext2_xattr_cache_insert(new_bh);
++
++ ext2_xattr_update_super_block(sb);
++ }
++ mark_buffer_dirty(new_bh);
++ if (IS_SYNC(inode)) {
++ ll_rw_block(WRITE, 1, &new_bh);
++ wait_on_buffer(new_bh);
++ error = -EIO;
++ if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
++ goto cleanup;
++ }
++ }
++
++ /* Update the inode. */
++ EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
++ inode->i_ctime = CURRENT_TIME;
++ if (IS_SYNC(inode)) {
++ error = ext2_sync_inode (inode);
++ if (error)
++ goto cleanup;
++ } else
++ mark_inode_dirty(inode);
++
++ error = 0;
++ if (old_bh && old_bh != new_bh) {
++ /*
++ * If there was an old block, and we are not still using it,
++ * we now release the old block.
++ */
++ unsigned int refcount = le32_to_cpu(HDR(old_bh)->h_refcount);
++
++ if (refcount == 1) {
++ /* Free the old block. */
++ ea_bdebug(old_bh, "freeing");
++ ext2_xattr_free_block(inode, old_bh->b_blocknr);
++ mark_buffer_clean(old_bh);
++ } else {
++ /* Decrement the refcount only. */
++ refcount--;
++ HDR(old_bh)->h_refcount = cpu_to_le32(refcount);
++ ext2_xattr_quota_free(inode);
++ mark_buffer_dirty(old_bh);
++ ea_bdebug(old_bh, "refcount now=%d", refcount);
++ }
++ }
++
++cleanup:
++ if (old_bh != new_bh)
++ brelse(new_bh);
++
++ return error;
++}
++
++/*
++ * ext2_xattr_delete_inode()
++ *
++ * Free extended attribute resources associated with this inode. This
++ * is called immediately before an inode is freed.
++ */
++void
++ext2_xattr_delete_inode(struct inode *inode)
++{
++ struct buffer_head *bh;
++ unsigned int block = EXT2_I(inode)->i_file_acl;
++
++ if (!block)
++ return;
++ down(&ext2_xattr_sem);
++
++ bh = sb_bread(inode->i_sb, block);
++ if (!bh) {
++ ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
++ "inode %ld: block %d read error", inode->i_ino, block);
++ goto cleanup;
++ }
++ ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
++ if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
++ HDR(bh)->h_blocks != cpu_to_le32(1)) {
++ ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ goto cleanup;
++ }
++ ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1);
++ if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
++ ext2_xattr_cache_remove(bh);
++ ext2_xattr_free_block(inode, block);
++ bforget(bh);
++ bh = NULL;
++ } else {
++ HDR(bh)->h_refcount = cpu_to_le32(
++ le32_to_cpu(HDR(bh)->h_refcount) - 1);
++ mark_buffer_dirty(bh);
++ if (IS_SYNC(inode)) {
++ ll_rw_block(WRITE, 1, &bh);
++ wait_on_buffer(bh);
++ }
++ ext2_xattr_quota_free(inode);
++ }
++ EXT2_I(inode)->i_file_acl = 0;
++
++cleanup:
++ brelse(bh);
++ up(&ext2_xattr_sem);
++}
++
++/*
++ * ext2_xattr_put_super()
++ *
++ * This is called when a file system is unmounted.
++ */
++void
++ext2_xattr_put_super(struct super_block *sb)
++{
++#ifdef CONFIG_EXT2_FS_XATTR_SHARING
++ mb_cache_shrink(ext2_xattr_cache, sb->s_dev);
++#endif
++}
++
++#ifdef CONFIG_EXT2_FS_XATTR_SHARING
++
++/*
++ * ext2_xattr_cache_insert()
++ *
++ * Create a new entry in the extended attribute cache, and insert
++ * it unless such an entry is already in the cache.
++ *
++ * Returns 0, or a negative error number on failure.
++ */
++static int
++ext2_xattr_cache_insert(struct buffer_head *bh)
++{
++ __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
++ struct mb_cache_entry *ce;
++ int error;
++
++ ce = mb_cache_entry_alloc(ext2_xattr_cache);
++ if (!ce)
++ return -ENOMEM;
++ error = mb_cache_entry_insert(ce, bh->b_dev, bh->b_blocknr, &hash);
++ if (error) {
++ mb_cache_entry_free(ce);
++ if (error == -EBUSY) {
++ ea_bdebug(bh, "already in cache (%d cache entries)",
++ atomic_read(&ext2_xattr_cache->c_entry_count));
++ error = 0;
++ }
++ } else {
++ ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
++ atomic_read(&ext2_xattr_cache->c_entry_count));
++ mb_cache_entry_release(ce);
++ }
++ return error;
++}
++
++/*
++ * ext2_xattr_cmp()
++ *
++ * Compare two extended attribute blocks for equality.
++ *
++ * Returns 0 if the blocks are equal, 1 if they differ, and
++ * a negative error number on errors.
++ */
++static int
++ext2_xattr_cmp(struct ext2_xattr_header *header1,
++ struct ext2_xattr_header *header2)
++{
++ struct ext2_xattr_entry *entry1, *entry2;
++
++ entry1 = ENTRY(header1+1);
++ entry2 = ENTRY(header2+1);
++ while (!IS_LAST_ENTRY(entry1)) {
++ if (IS_LAST_ENTRY(entry2))
++ return 1;
++ if (entry1->e_hash != entry2->e_hash ||
++ entry1->e_name_len != entry2->e_name_len ||
++ entry1->e_value_size != entry2->e_value_size ||
++ memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
++ return 1;
++ if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
++ return -EIO;
++ if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
++ (char *)header2 + le16_to_cpu(entry2->e_value_offs),
++ le32_to_cpu(entry1->e_value_size)))
++ return 1;
++
++ entry1 = EXT2_XATTR_NEXT(entry1);
++ entry2 = EXT2_XATTR_NEXT(entry2);
++ }
++ if (!IS_LAST_ENTRY(entry2))
++ return 1;
++ return 0;
++}
++
++/*
++ * ext2_xattr_cache_find()
++ *
++ * Find an identical extended attribute block.
++ *
++ * Returns a pointer to the block found, or NULL if such a block was
++ * not found or an error occurred.
++ */
++static struct buffer_head *
++ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
++{
++ __u32 hash = le32_to_cpu(header->h_hash);
++ struct mb_cache_entry *ce;
++
++ if (!header->h_hash)
++ return NULL; /* never share */
++ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
++ ce = mb_cache_entry_find_first(ext2_xattr_cache, 0, inode->i_dev, hash);
++ while (ce) {
++ struct buffer_head *bh = sb_bread(inode->i_sb, ce->e_block);
++
++ if (!bh) {
++ ext2_error(inode->i_sb, "ext2_xattr_cache_find",
++ "inode %ld: block %ld read error",
++ inode->i_ino, ce->e_block);
++ } else if (le32_to_cpu(HDR(bh)->h_refcount) >
++ EXT2_XATTR_REFCOUNT_MAX) {
++ ea_idebug(inode, "block %ld refcount %d>%d",ce->e_block,
++ le32_to_cpu(HDR(bh)->h_refcount),
++ EXT2_XATTR_REFCOUNT_MAX);
++ } else if (!ext2_xattr_cmp(header, HDR(bh))) {
++ ea_bdebug(bh, "b_count=%d",atomic_read(&(bh->b_count)));
++ mb_cache_entry_release(ce);
++ return bh;
++ }
++ brelse(bh);
++ ce = mb_cache_entry_find_next(ce, 0, inode->i_dev, hash);
++ }
++ return NULL;
++}
++
++/*
++ * ext2_xattr_cache_remove()
++ *
++ * Remove the cache entry of a block from the cache. Called when a
++ * block becomes invalid.
++ */
++static void
++ext2_xattr_cache_remove(struct buffer_head *bh)
++{
++ struct mb_cache_entry *ce;
++
++ ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_dev, bh->b_blocknr);
++ if (ce) {
++ ea_bdebug(bh, "removing (%d cache entries remaining)",
++ atomic_read(&ext2_xattr_cache->c_entry_count)-1);
++ mb_cache_entry_free(ce);
++ } else
++ ea_bdebug(bh, "no cache entry");
++}
++
++#define NAME_HASH_SHIFT 5
++#define VALUE_HASH_SHIFT 16
++
++/*
++ * ext2_xattr_hash_entry()
++ *
++ * Compute the hash of an extended attribute.
++ */
++static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
++ struct ext2_xattr_entry *entry)
++{
++ __u32 hash = 0;
++ char *name = entry->e_name;
++ int n;
++
++ for (n=0; n < entry->e_name_len; n++) {
++ hash = (hash << NAME_HASH_SHIFT) ^
++ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
++ *name++;
++ }
++
++ if (entry->e_value_block == 0 && entry->e_value_size != 0) {
++ __u32 *value = (__u32 *)((char *)header +
++ le16_to_cpu(entry->e_value_offs));
++ for (n = (le32_to_cpu(entry->e_value_size) +
++ EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
++ hash = (hash << VALUE_HASH_SHIFT) ^
++ (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
++ le32_to_cpu(*value++);
++ }
++ }
++ entry->e_hash = cpu_to_le32(hash);
++}
++
++#undef NAME_HASH_SHIFT
++#undef VALUE_HASH_SHIFT
++
++#define BLOCK_HASH_SHIFT 16
++
++/*
++ * ext2_xattr_rehash()
++ *
++ * Re-compute the extended attribute hash value after an entry has changed.
++ */
++static void ext2_xattr_rehash(struct ext2_xattr_header *header,
++ struct ext2_xattr_entry *entry)
++{
++ struct ext2_xattr_entry *here;
++ __u32 hash = 0;
++
++ ext2_xattr_hash_entry(header, entry);
++ here = ENTRY(header+1);
++ while (!IS_LAST_ENTRY(here)) {
++ if (!here->e_hash) {
++ /* Block is not shared if an entry's hash value == 0 */
++ hash = 0;
++ break;
++ }
++ hash = (hash << BLOCK_HASH_SHIFT) ^
++ (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
++ le32_to_cpu(here->e_hash);
++ here = EXT2_XATTR_NEXT(here);
++ }
++ header->h_hash = cpu_to_le32(hash);
++}
++
++#undef BLOCK_HASH_SHIFT
++
++int __init
++init_ext2_xattr(void)
++{
++ ext2_xattr_cache = mb_cache_create("ext2_xattr", NULL,
++ sizeof(struct mb_cache_entry) +
++ sizeof(struct mb_cache_entry_index), 1, 61);
++ if (!ext2_xattr_cache)
++ return -ENOMEM;
++
++ return 0;
++}
++
++void
++exit_ext2_xattr(void)
++{
++ mb_cache_destroy(ext2_xattr_cache);
++}
++
++#else /* CONFIG_EXT2_FS_XATTR_SHARING */
++
++int __init
++init_ext2_xattr(void)
++{
++ return 0;
++}
++
++void
++exit_ext2_xattr(void)
++{
++}
++
++#endif /* CONFIG_EXT2_FS_XATTR_SHARING */
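The layout comment at the top of fs/ext2/xattr.c above describes entry descriptors growing down from the header while values grow up from the end of the block, and ext2_xattr_set() turns that picture into a single free-space check before it inserts anything. The sketch below is illustrative only and not part of the patch; the 32-byte header, 16-byte fixed entry size and 4-byte padding are assumptions that mirror the EXT2_XATTR_* macros in ext2_xattr.h, which is not shown here.

/*
 * Illustrative sketch, not part of the patch: models the free-space check
 * ext2_xattr_set() performs before adding an attribute.  Entry descriptors
 * grow down from the header, values grow up from the end of the block, and
 * the request is refused with -ENOSPC once the two regions would meet.
 * The 32-byte header, 16-byte fixed entry part and 4-byte padding are
 * assumed values standing in for the real EXT2_XATTR_* macros.
 */
#include <stdio.h>

#define EA_PAD          4
#define EA_ALIGN(x)     (((x) + EA_PAD - 1) & ~(EA_PAD - 1))
#define EA_HEADER       32              /* assumed header size */
#define EA_ENTRY_FIXED  16              /* assumed fixed part of an entry */

/* Space between the end of the (null-terminated) entry table and the
 * lowest value offset, as in "free = min_offs - (last - header) - 4". */
static long ea_free_space(long entries_end, long min_value_offs)
{
        return min_value_offs - entries_end - 4;
}

int main(void)
{
        long block = 1024;                              /* 1 KB EA block */
        long entries_end = EA_HEADER + EA_ALIGN(EA_ENTRY_FIXED + 9);
        long min_offs = block - EA_ALIGN(200);          /* one 200-byte value */
        long want_name = EA_ALIGN(EA_ENTRY_FIXED + 12); /* new name entry */
        long want_value = EA_ALIGN(64);                 /* new value */

        long free = ea_free_space(entries_end, min_offs);
        printf("free=%ld, needed=%ld -> %s\n", free, want_name + want_value,
               free >= want_name + want_value ? "fits" : "-ENOSPC");
        return 0;
}
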
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.20-braam/fs/ext2/xattr_user.c Sat Apr 5 03:57:18 2003
+@@ -0,0 +1,103 @@
++/*
++ * linux/fs/ext2/xattr_user.c
++ * Handler for extended user attributes.
++ *
++ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
++ */
++
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/fs.h>
++#include <linux/ext2_fs.h>
++#include <linux/ext2_xattr.h>
++
++#ifdef CONFIG_EXT2_FS_POSIX_ACL
++# include <linux/ext2_acl.h>
++#endif
++
++#define XATTR_USER_PREFIX "user."
++
++static size_t
++ext2_xattr_user_list(char *list, struct inode *inode,
++ const char *name, int name_len)
++{
++ const int prefix_len = sizeof(XATTR_USER_PREFIX)-1;
++
++ if (!test_opt(inode->i_sb, XATTR_USER))
++ return 0;
++
++ if (list) {
++ memcpy(list, XATTR_USER_PREFIX, prefix_len);
++ memcpy(list+prefix_len, name, name_len);
++ list[prefix_len + name_len] = '\0';
++ }
++ return prefix_len + name_len + 1;
++}
++
++static int
++ext2_xattr_user_get(struct inode *inode, const char *name,
++ void *buffer, size_t size)
++{
++ int error;
++
++ if (strcmp(name, "") == 0)
++ return -EINVAL;
++ if (!test_opt(inode->i_sb, XATTR_USER))
++ return -ENOTSUP;
++#ifdef CONFIG_EXT2_FS_POSIX_ACL
++ error = ext2_permission_locked(inode, MAY_READ);
++#else
++ error = permission(inode, MAY_READ);
++#endif
++ if (error)
++ return error;
++
++ return ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, name,
++ buffer, size);
++}
++
++static int
++ext2_xattr_user_set(struct inode *inode, const char *name,
++ const void *value, size_t size, int flags)
++{
++ int error;
++
++ if (strcmp(name, "") == 0)
++ return -EINVAL;
++ if (!test_opt(inode->i_sb, XATTR_USER))
++ return -ENOTSUP;
++ if ( !S_ISREG(inode->i_mode) &&
++ (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
++ return -EPERM;
++#ifdef CONFIG_EXT2_FS_POSIX_ACL
++ error = ext2_permission_locked(inode, MAY_WRITE);
++#else
++ error = permission(inode, MAY_WRITE);
++#endif
++ if (error)
++ return error;
++
++ return ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, name,
++ value, size, flags);
++}
++
++struct ext2_xattr_handler ext2_xattr_user_handler = {
++ prefix: XATTR_USER_PREFIX,
++ list: ext2_xattr_user_list,
++ get: ext2_xattr_user_get,
++ set: ext2_xattr_user_set,
++};
++
++int __init
++init_ext2_xattr_user(void)
++{
++ return ext2_xattr_register(EXT2_XATTR_INDEX_USER,
++ &ext2_xattr_user_handler);
++}
++
++void
++exit_ext2_xattr_user(void)
++{
++ ext2_xattr_unregister(EXT2_XATTR_INDEX_USER,
++ &ext2_xattr_user_handler);
++}
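The user.* handler above never sees its own prefix: ext2_xattr_resolve_name() walks the registered handlers, strips the matching prefix with strcmp_prefix(), and hands only the remainder ("mime_type" out of "user.mime_type") to the handler's get/set/list callbacks. The standalone sketch below is illustrative only; "user." is the one prefix this patch registers, and the other prefixes are placeholders.

/*
 * Illustrative sketch, not part of the patch: the prefix dispatch done by
 * ext2_xattr_resolve_name().  Only "user." is registered by this patch;
 * the other prefixes below are placeholders for illustration.
 */
#include <stdio.h>

/* Same logic as strcmp_prefix() in xattr.c: returns the suffix following
 * the prefix, or NULL if the name does not start with the prefix. */
static const char *strip_prefix(const char *name, const char *prefix)
{
        while (*prefix && *name == *prefix) {
                name++;
                prefix++;
        }
        return *prefix ? NULL : name;
}

int main(void)
{
        static const char *prefixes[] = { "user.", "system.", "trusted." };
        const char *name = "user.mime_type";
        unsigned int i;

        for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++) {
                const char *suffix = strip_prefix(name, prefixes[i]);
                if (suffix)
                        printf("handler \"%s\" gets suffix \"%s\"\n",
                               prefixes[i], suffix);
        }
        return 0;
}
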
+--- linux-2.4.20/fs/ext3/Makefile~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext3/Makefile Sat Apr 5 03:57:36 2003
+@@ -1,5 +1,5 @@
+ #
+-# Makefile for the linux ext2-filesystem routines.
++# Makefile for the linux ext3-filesystem routines.
+ #
+ # Note! Dependencies are done automagically by 'make dep', which also
+ # removes any old dependencies. DON'T put your own dependencies here
+@@ -9,10 +9,14 @@
+
+ O_TARGET := ext3.o
+
+-export-objs := super.o inode.o
++export-objs := super.o inode.o xattr.o
+
+ obj-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+- ioctl.o namei.o super.o symlink.o hash.o
++ ioctl.o namei.o super.o symlink.o hash.o xattr.o
+ obj-m := $(O_TARGET)
+
++export-objs += xattr.o
++obj-$(CONFIG_EXT3_FS_XATTR) += xattr.o
++obj-$(CONFIG_EXT3_FS_XATTR_USER) += xattr_user.o
++
+ include $(TOPDIR)/Rules.make
+--- linux-2.4.20/fs/ext3/file.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext3/file.c Sat Apr 5 03:57:18 2003
+@@ -23,6 +23,7 @@
+ #include <linux/locks.h>
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
++#include <linux/ext3_xattr.h>
+ #include <linux/ext3_jbd.h>
+ #include <linux/smp_lock.h>
+
+@@ -126,5 +127,9 @@ struct file_operations ext3_file_operati
+ struct inode_operations ext3_file_inode_operations = {
+ truncate: ext3_truncate, /* BKL held */
+ setattr: ext3_setattr, /* BKL held */
++ setxattr: ext3_setxattr, /* BKL held */
++ getxattr: ext3_getxattr, /* BKL held */
++ listxattr: ext3_listxattr, /* BKL held */
++ removexattr: ext3_removexattr, /* BKL held */
+ };
+
+--- linux-2.4.20/fs/ext3/ialloc.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext3/ialloc.c Sat Apr 5 03:57:18 2003
+@@ -17,6 +17,7 @@
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
+ #include <linux/ext3_jbd.h>
++#include <linux/ext3_xattr.h>
+ #include <linux/stat.h>
+ #include <linux/string.h>
+ #include <linux/locks.h>
+@@ -216,6 +217,7 @@ void ext3_free_inode (handle_t *handle,
+ * as writing the quota to disk may need the lock as well.
+ */
+ DQUOT_INIT(inode);
++ ext3_xattr_delete_inode(handle, inode);
+ DQUOT_FREE_INODE(inode);
+ DQUOT_DROP(inode);
+
+--- linux-2.4.20/fs/ext3/inode.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext3/inode.c Sat Apr 5 03:57:18 2003
+@@ -39,6 +39,18 @@
+ */
+ #undef SEARCH_FROM_ZERO
+
++/*
++ * Test whether an inode is a fast symlink.
++ */
++static inline int ext3_inode_is_fast_symlink(struct inode *inode)
++{
++ int ea_blocks = inode->u.ext3_i.i_file_acl ?
++ (inode->i_sb->s_blocksize >> 9) : 0;
++
++ return (S_ISLNK(inode->i_mode) &&
++ inode->i_blocks - ea_blocks == 0);
++}
++
+ /* The ext3 forget function must perform a revoke if we are freeing data
+ * which has been journaled. Metadata (eg. indirect blocks) must be
+ * revoked in all cases.
+@@ -48,7 +60,7 @@
+ * still needs to be revoked.
+ */
+
+-static int ext3_forget(handle_t *handle, int is_metadata,
++int ext3_forget(handle_t *handle, int is_metadata,
+ struct inode *inode, struct buffer_head *bh,
+ int blocknr)
+ {
+@@ -164,9 +176,7 @@ void ext3_delete_inode (struct inode * i
+ {
+ handle_t *handle;
+
+- if (is_bad_inode(inode) ||
+- inode->i_ino == EXT3_ACL_IDX_INO ||
+- inode->i_ino == EXT3_ACL_DATA_INO)
++ if (is_bad_inode(inode))
+ goto no_delete;
+
+ lock_kernel();
+@@ -1855,6 +1865,8 @@ void ext3_truncate(struct inode * inode)
+ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
+ return;
++ if (ext3_inode_is_fast_symlink(inode))
++ return;
+ if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ return;
+
+@@ -2002,8 +2014,6 @@ int ext3_get_inode_loc (struct inode *in
+ struct ext3_group_desc * gdp;
+
+ if ((inode->i_ino != EXT3_ROOT_INO &&
+- inode->i_ino != EXT3_ACL_IDX_INO &&
+- inode->i_ino != EXT3_ACL_DATA_INO &&
+ inode->i_ino != EXT3_JOURNAL_INO &&
+ inode->i_ino < EXT3_FIRST_INO(inode->i_sb)) ||
+ inode->i_ino > le32_to_cpu(
+@@ -2130,10 +2140,7 @@ void ext3_read_inode(struct inode * inod
+
+ brelse (iloc.bh);
+
+- if (inode->i_ino == EXT3_ACL_IDX_INO ||
+- inode->i_ino == EXT3_ACL_DATA_INO)
+- /* Nothing to do */ ;
+- else if (S_ISREG(inode->i_mode)) {
++ if (S_ISREG(inode->i_mode)) {
+ inode->i_op = &ext3_file_inode_operations;
+ inode->i_fop = &ext3_file_operations;
+ inode->i_mapping->a_ops = &ext3_aops;
+@@ -2141,15 +2148,17 @@ void ext3_read_inode(struct inode * inod
+ inode->i_op = &ext3_dir_inode_operations;
+ inode->i_fop = &ext3_dir_operations;
+ } else if (S_ISLNK(inode->i_mode)) {
+- if (!inode->i_blocks)
++ if (ext3_inode_is_fast_symlink(inode))
+ inode->i_op = &ext3_fast_symlink_inode_operations;
+ else {
+- inode->i_op = &page_symlink_inode_operations;
++ inode->i_op = &ext3_symlink_inode_operations;
+ inode->i_mapping->a_ops = &ext3_aops;
+ }
+- } else
++ } else {
++ inode->i_op = &ext3_special_inode_operations;
+ init_special_inode(inode, inode->i_mode,
+ le32_to_cpu(iloc.raw_inode->i_block[0]));
++ }
+ /* inode->i_attr_flags = 0; unused */
+ if (inode->u.ext3_i.i_flags & EXT3_SYNC_FL) {
+ /* inode->i_attr_flags |= ATTR_FLAG_SYNCRONOUS; unused */
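The new ext3_inode_is_fast_symlink() helper above replaces the old !inode->i_blocks test in ext3_read_inode() and adds an early return in ext3_truncate(). Once a symlink can own an extended attribute block, its i_blocks is no longer zero even when the target still lives inside the inode, so the EA block's sectors have to be subtracted first. A small sketch of the arithmetic follows, with made-up example values.

/*
 * Illustrative sketch, not part of the patch: why "i_blocks == 0" no
 * longer identifies a fast (in-inode) symlink once it owns an EA block.
 * The values below are examples, not taken from a real inode.
 */
#include <stdio.h>

int main(void)
{
        unsigned long blocksize  = 4096;
        unsigned long i_file_acl = 1234;            /* EA block allocated */
        unsigned long i_blocks   = blocksize >> 9;  /* 8 sectors: EA block only */

        unsigned long ea_blocks = i_file_acl ? (blocksize >> 9) : 0;

        printf("old test, i_blocks == 0:             %s\n",
               i_blocks == 0 ? "fast symlink" : "page symlink");
        printf("new test, i_blocks - ea_blocks == 0: %s\n",
               i_blocks - ea_blocks == 0 ? "fast symlink" : "page symlink");
        return 0;
}
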
+--- linux-2.4.20/fs/ext3/namei.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext3/namei.c Sat Apr 5 03:57:18 2003
+@@ -29,6 +29,7 @@
+ #include <linux/sched.h>
+ #include <linux/ext3_fs.h>
+ #include <linux/ext3_jbd.h>
++#include <linux/ext3_xattr.h>
+ #include <linux/fcntl.h>
+ #include <linux/stat.h>
+ #include <linux/string.h>
+@@ -1611,7 +1612,7 @@ static int ext3_mkdir(struct inode * dir
+ if (IS_SYNC(dir))
+ handle->h_sync = 1;
+
+- inode = ext3_new_inode (handle, dir, S_IFDIR);
++ inode = ext3_new_inode (handle, dir, S_IFDIR | mode);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_stop;
+@@ -1619,7 +1620,6 @@ static int ext3_mkdir(struct inode * dir
+ inode->i_op = &ext3_dir_inode_operations;
+ inode->i_fop = &ext3_dir_operations;
+ inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+- inode->i_blocks = 0;
+ dir_block = ext3_bread (handle, inode, 0, 1, &err);
+ if (!dir_block) {
+ inode->i_nlink--; /* is this nlink == 0? */
+@@ -1646,9 +1646,6 @@ static int ext3_mkdir(struct inode * dir
+ BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
+ ext3_journal_dirty_metadata(handle, dir_block);
+ brelse (dir_block);
+- inode->i_mode = S_IFDIR | mode;
+- if (dir->i_mode & S_ISGID)
+- inode->i_mode |= S_ISGID;
+ ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_entry (handle, dentry, inode);
+ if (err) {
+@@ -2017,7 +2014,7 @@ static int ext3_symlink (struct inode *
+ goto out_stop;
+
+ if (l > sizeof (EXT3_I(inode)->i_data)) {
+- inode->i_op = &page_symlink_inode_operations;
++ inode->i_op = &ext3_symlink_inode_operations;
+ inode->i_mapping->a_ops = &ext3_aops;
+ /*
+ * block_symlink() calls back into ext3_prepare/commit_write.
+@@ -2244,4 +2241,16 @@ struct inode_operations ext3_dir_inode_o
+ rmdir: ext3_rmdir, /* BKL held */
+ mknod: ext3_mknod, /* BKL held */
+ rename: ext3_rename, /* BKL held */
++ setxattr: ext3_setxattr, /* BKL held */
++ getxattr: ext3_getxattr, /* BKL held */
++ listxattr: ext3_listxattr, /* BKL held */
++ removexattr: ext3_removexattr, /* BKL held */
+ };
++
++struct inode_operations ext3_special_inode_operations = {
++ setxattr: ext3_setxattr, /* BKL held */
++ getxattr: ext3_getxattr, /* BKL held */
++ listxattr: ext3_listxattr, /* BKL held */
++ removexattr: ext3_removexattr, /* BKL held */
++};
++
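ext3_dir_inode_operations picks up the four xattr entry points, and the new ext3_special_inode_operations table exists only so that device nodes, FIFOs and sockets expose them as well; the inode.c hunk above installs it before calling init_special_inode(). Note that the user.* handler (the ext2 version appears earlier in this patch, and the ext3 one is presumably analogous) still refuses special files, so only other attribute namespaces can succeed there. The userspace check below is a sketch only: it assumes the filesystem is mounted with -o user_xattr (the patch leaves the option off by default) and a libc that provides the xattr calls.

/*
 * Illustrative sketch, not part of the patch: exercising setxattr() on a
 * directory and on a FIFO.  The header location varies by libc (modern
 * glibc: <sys/xattr.h>; the 2003-era attr package: <attr/xattr.h>).
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/xattr.h>

static void try_set(const char *path, const char *name, const char *value)
{
        if (setxattr(path, name, value, strlen(value), 0) == 0)
                printf("%-4s: set %s\n", path, name);
        else
                printf("%-4s: setxattr(%s) failed: %s\n",
                       path, name, strerror(errno));
}

int main(void)
{
        mkdir("dir", 0755);
        mkfifo("fifo", 0644);

        try_set("dir", "user.comment", "allowed on directories");
        /* Expected to fail: the user.* handler refuses special files,
         * even though the FIFO's inode now has xattr operations. */
        try_set("fifo", "user.comment", "refused on special files");
        return 0;
}
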
+--- linux-2.4.20/fs/ext3/super.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext3/super.c Sat Apr 5 03:57:18 2003
+@@ -24,6 +24,7 @@
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
+ #include <linux/ext3_jbd.h>
++#include <linux/ext3_xattr.h>
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/locks.h>
+@@ -404,6 +405,7 @@ void ext3_put_super (struct super_block
+ kdev_t j_dev = sbi->s_journal->j_dev;
+ int i;
+
++ ext3_xattr_put_super(sb);
+ journal_destroy(sbi->s_journal);
+ if (!(sb->s_flags & MS_RDONLY)) {
+ EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
+@@ -499,6 +501,7 @@ static int parse_options (char * options
+ int is_remount)
+ {
+ unsigned long *mount_options = &sbi->s_mount_opt;
++
+ uid_t *resuid = &sbi->s_resuid;
+ gid_t *resgid = &sbi->s_resgid;
+ char * this_char;
+@@ -511,6 +514,13 @@ static int parse_options (char * options
+ this_char = strtok (NULL, ",")) {
+ if ((value = strchr (this_char, '=')) != NULL)
+ *value++ = 0;
++#ifdef CONFIG_EXT3_FS_XATTR_USER
++ if (!strcmp (this_char, "user_xattr"))
++ set_opt (*mount_options, XATTR_USER);
++ else if (!strcmp (this_char, "nouser_xattr"))
++ clear_opt (*mount_options, XATTR_USER);
++ else
++#endif
+ if (!strcmp (this_char, "bsddf"))
+ clear_opt (*mount_options, MINIX_DF);
+ else if (!strcmp (this_char, "nouid32")) {
+@@ -928,6 +938,12 @@ struct super_block * ext3_read_super (st
+ sbi->s_mount_opt = 0;
+ sbi->s_resuid = EXT3_DEF_RESUID;
+ sbi->s_resgid = EXT3_DEF_RESGID;
++
++ /* Default extended attribute flags */
++#ifdef CONFIG_EXT3_FS_XATTR_USER
++ /* set_opt(sbi->s_mount_opt, XATTR_USER); */
++#endif
++
+ if (!parse_options ((char *) data, &sb_block, sbi, &journal_inum, 0)) {
+ sb->s_dev = 0;
+ goto out_fail;
+@@ -1767,12 +1783,27 @@ static DECLARE_FSTYPE_DEV(ext3_fs_type,
+
+ static int __init init_ext3_fs(void)
+ {
+- return register_filesystem(&ext3_fs_type);
++ int error = init_ext3_xattr();
++ if (error)
++ return error;
++ error = init_ext3_xattr_user();
++ if (error)
++ goto fail;
++ error = register_filesystem(&ext3_fs_type);
++ if (!error)
++ return 0;
++
++ exit_ext3_xattr_user();
++fail:
++ exit_ext3_xattr();
++ return error;
+ }
+
+ static void __exit exit_ext3_fs(void)
+ {
+ unregister_filesystem(&ext3_fs_type);
++ exit_ext3_xattr_user();
++ exit_ext3_xattr();
+ }
+
+ EXPORT_SYMBOL(ext3_force_commit);
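parse_options() now understands user_xattr and nouser_xattr, and the commented-out set_opt() call keeps user. attributes disabled by default; xattr_user.c gates on the same bit through test_opt(sb, XATTR_USER). The set_opt()/clear_opt()/test_opt() macros live in ext3_fs.h and are not part of this hunk, so the sketch below only assumes the usual pattern of flipping one bit in s_mount_opt; the bit value and the opt_enabled() helper are placeholders.

/*
 * Illustrative sketch, not part of the patch: how user_xattr/nouser_xattr
 * are assumed to toggle one bit in s_mount_opt.  EXT3_MOUNT_XATTR_USER's
 * value is a placeholder here; the kernel's test_opt() takes the
 * superblock rather than the flag word directly.
 */
#include <stdio.h>

#define EXT3_MOUNT_XATTR_USER   0x4000          /* placeholder bit */
#define set_opt(opts, bit)      ((opts) |= EXT3_MOUNT_##bit)
#define clear_opt(opts, bit)    ((opts) &= ~EXT3_MOUNT_##bit)
#define opt_enabled(opts, bit)  (((opts) & EXT3_MOUNT_##bit) != 0)

int main(void)
{
        unsigned long s_mount_opt = 0;          /* patch default: off */

        set_opt(s_mount_opt, XATTR_USER);       /* "-o user_xattr"   */
        printf("user_xattr: %d\n", opt_enabled(s_mount_opt, XATTR_USER));

        clear_opt(s_mount_opt, XATTR_USER);     /* "-o nouser_xattr" */
        printf("user_xattr: %d\n", opt_enabled(s_mount_opt, XATTR_USER));
        return 0;
}
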
+--- linux-2.4.20/fs/ext3/symlink.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/ext3/symlink.c Sat Apr 5 03:57:18 2003
+@@ -20,6 +20,7 @@
+ #include <linux/fs.h>
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
++#include <linux/ext3_xattr.h>
+
+ static int ext3_readlink(struct dentry *dentry, char *buffer, int buflen)
+ {
+@@ -33,7 +34,20 @@ static int ext3_follow_link(struct dentr
+ return vfs_follow_link(nd, s);
+ }
+
++struct inode_operations ext3_symlink_inode_operations = {
++ readlink: page_readlink, /* BKL not held. Don't need */
++ follow_link: page_follow_link, /* BKL not held. Don't need */
++ setxattr: ext3_setxattr, /* BKL held */
++ getxattr: ext3_getxattr, /* BKL held */
++ listxattr: ext3_listxattr, /* BKL held */
++ removexattr: ext3_removexattr, /* BKL held */
++};
++
+ struct inode_operations ext3_fast_symlink_inode_operations = {
+ readlink: ext3_readlink, /* BKL not held. Don't need */
+ follow_link: ext3_follow_link, /* BKL not held. Don't need */
++ setxattr: ext3_setxattr, /* BKL held */
++ getxattr: ext3_getxattr, /* BKL held */
++ listxattr: ext3_listxattr, /* BKL held */
++ removexattr: ext3_removexattr, /* BKL held */
+ };
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.20-braam/fs/ext3/xattr.c Sat Apr 5 03:57:18 2003
+@@ -0,0 +1,1224 @@
++/*
++ * linux/fs/ext3/xattr.c
++ *
++ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
++ *
++ * Fix by Harrison Xing <harrison@mountainviewdata.com>.
++ * Ext3 code with a lot of help from Eric Jarman <ejarman@acm.org>.
++ * Extended attributes for symlinks and special files added per
++ * suggestion of Luka Renko <luka.renko@hermes.si>.
++ */
++
++/*
++ * Extended attributes are stored on disk blocks allocated outside of
++ * any inode. The i_file_acl field is then made to point to this allocated
++ * block. If several inodes have identical extended attributes, these
++ * inodes may share the same extended attribute block. Such situations
++ * are automatically detected by keeping a cache of recent attribute block
++ * numbers and hashes over the block's contents in memory.
++ *
++ *
++ * Extended attribute block layout:
++ *
++ * +------------------+
++ * | header |
++ * | entry 1 | |
++ * | entry 2 | | growing downwards
++ * | entry 3 | v
++ * | four null bytes |
++ * | . . . |
++ * | value 1 | ^
++ * | value 3 | | growing upwards
++ * | value 2 | |
++ * +------------------+
++ *
++ * The block header is followed by multiple entry descriptors. These entry
++ * descriptors are variable in size, and aligned to EXT3_XATTR_PAD
++ * byte boundaries. The entry descriptors are sorted by attribute name,
++ * so that two extended attribute blocks can be compared efficiently.
++ *
++ * Attribute values are aligned to the end of the block, stored in
++ * no specific order. They are also padded to EXT3_XATTR_PAD byte
++ * boundaries. No additional gaps are left between them.
++ *
++ * Locking strategy
++ * ----------------
++ * The VFS already holds the BKL and the inode->i_sem semaphore when any of
++ * the xattr inode operations are called, so we are guaranteed that only one
++ * process accesses extended attributes of an inode at any time.
++ *
++ * For writing we also grab the ext3_xattr_sem semaphore. This ensures that
++ * only a single process is modifying an extended attribute block, even
++ * if the block is shared among inodes.
++ *
++ * Note for porting to 2.5
++ * -----------------------
++ * The BKL will no longer be held in the xattr inode operations.
++ */
++
++#include <linux/fs.h>
++#include <linux/locks.h>
++#include <linux/slab.h>
++#include <linux/ext3_jbd.h>
++#include <linux/ext3_fs.h>
++#include <linux/ext3_xattr.h>
++#include <linux/mbcache.h>
++#include <linux/quotaops.h>
++#include <asm/semaphore.h>
++#include <linux/compatmac.h>
++
++#define EXT3_EA_USER "user."
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
++# define mark_buffer_dirty(bh) mark_buffer_dirty(bh, 1)
++#endif
++
++#define HDR(bh) ((struct ext3_xattr_header *)((bh)->b_data))
++#define ENTRY(ptr) ((struct ext3_xattr_entry *)(ptr))
++#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
++#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
++
++#ifdef EXT3_XATTR_DEBUG
++# define ea_idebug(inode, f...) do { \
++ printk(KERN_DEBUG "inode %s:%ld: ", \
++ kdevname(inode->i_dev), inode->i_ino); \
++ printk(f); \
++ printk("\n"); \
++ } while (0)
++# define ea_bdebug(bh, f...) do { \
++ printk(KERN_DEBUG "block %s:%ld: ", \
++ kdevname(bh->b_dev), bh->b_blocknr); \
++ printk(f); \
++ printk("\n"); \
++ } while (0)
++#else
++# define ea_idebug(f...)
++# define ea_bdebug(f...)
++#endif
++
++static int ext3_xattr_set2(handle_t *, struct inode *, struct buffer_head *,
++ struct ext3_xattr_header *);
++
++#ifdef CONFIG_EXT3_FS_XATTR_SHARING
++
++static int ext3_xattr_cache_insert(struct buffer_head *);
++static struct buffer_head *ext3_xattr_cache_find(struct inode *,
++ struct ext3_xattr_header *);
++static void ext3_xattr_cache_remove(struct buffer_head *);
++static void ext3_xattr_rehash(struct ext3_xattr_header *,
++ struct ext3_xattr_entry *);
++
++static struct mb_cache *ext3_xattr_cache;
++
++#else
++# define ext3_xattr_cache_insert(bh) 0
++# define ext3_xattr_cache_find(inode, header) NULL
++# define ext3_xattr_cache_remove(bh) do { } while(0)
++# define ext3_xattr_rehash(header, entry) do { } while(0)
++#endif
++
++/*
++ * If a file system does not share extended attributes among inodes,
++ * we should not need the ext3_xattr_sem semaphore. However, the
++ * filesystem may still contain shared blocks, so we always take
++ * the lock.
++ */
++
++DECLARE_MUTEX(ext3_xattr_sem);
++
++static inline int
++ext3_xattr_new_block(handle_t *handle, struct inode *inode,
++ int * errp, int force)
++{
++ struct super_block *sb = inode->i_sb;
++ int goal = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
++ EXT3_I(inode)->i_block_group * EXT3_BLOCKS_PER_GROUP(sb);
++
++ /* How can we enforce the allocation? */
++ int block = ext3_new_block(handle, inode, goal, 0, 0, errp);
++#ifdef OLD_QUOTAS
++ if (!*errp)
++ inode->i_blocks += inode->i_sb->s_blocksize >> 9;
++#endif
++ return block;
++}
++
++static inline int
++ext3_xattr_quota_alloc(struct inode *inode, int force)
++{
++ /* How can we enforce the allocation? */
++#ifdef OLD_QUOTAS
++ int error = DQUOT_ALLOC_BLOCK(inode->i_sb, inode, 1);
++ if (!error)
++ inode->i_blocks += inode->i_sb->s_blocksize >> 9;
++#else
++ int error = DQUOT_ALLOC_BLOCK(inode, 1);
++#endif
++ return error;
++}
++
++#ifdef OLD_QUOTAS
++
++static inline void
++ext3_xattr_quota_free(struct inode *inode)
++{
++ DQUOT_FREE_BLOCK(inode->i_sb, inode, 1);
++ inode->i_blocks -= inode->i_sb->s_blocksize >> 9;
++}
++
++static inline void
++ext3_xattr_free_block(handle_t *handle, struct inode * inode,
++ unsigned long block)
++{
++ ext3_free_blocks(handle, inode, block, 1);
++ inode->i_blocks -= inode->i_sb->s_blocksize >> 9;
++}
++
++#else
++# define ext3_xattr_quota_free(inode) \
++ DQUOT_FREE_BLOCK(inode, 1)
++# define ext3_xattr_free_block(handle, inode, block) \
++ ext3_free_blocks(handle, inode, block, 1)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
++
++static inline struct buffer_head *
++sb_bread(struct super_block *sb, int block)
++{
++ return bread(sb->s_dev, block, sb->s_blocksize);
++}
++
++static inline struct buffer_head *
++sb_getblk(struct super_block *sb, int block)
++{
++ return getblk(sb->s_dev, block, sb->s_blocksize);
++}
++
++#endif
++
++struct ext3_xattr_handler *ext3_xattr_handlers[EXT3_XATTR_INDEX_MAX];
++rwlock_t ext3_handler_lock = RW_LOCK_UNLOCKED;
++
++int
++ext3_xattr_register(int name_index, struct ext3_xattr_handler *handler)
++{
++ int error = -EINVAL;
++
++ if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
++ write_lock(&ext3_handler_lock);
++ if (!ext3_xattr_handlers[name_index-1]) {
++ ext3_xattr_handlers[name_index-1] = handler;
++ error = 0;
++ }
++ write_unlock(&ext3_handler_lock);
++ }
++ return error;
++}
++
++void
++ext3_xattr_unregister(int name_index, struct ext3_xattr_handler *handler)
++{
++ if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
++ write_lock(&ext3_handler_lock);
++ ext3_xattr_handlers[name_index-1] = NULL;
++ write_unlock(&ext3_handler_lock);
++ }
++}
++
++static inline const char *
++strcmp_prefix(const char *a, const char *a_prefix)
++{
++ while (*a_prefix && *a == *a_prefix) {
++ a++;
++ a_prefix++;
++ }
++ return *a_prefix ? NULL : a;
++}
++
++/*
++ * Decode the extended attribute name, and translate it into
++ * the name_index and name suffix.
++ */
++static inline struct ext3_xattr_handler *
++ext3_xattr_resolve_name(const char **name)
++{
++ struct ext3_xattr_handler *handler = NULL;
++ int i;
++
++ if (!*name)
++ return NULL;
++ read_lock(&ext3_handler_lock);
++ for (i=0; i<EXT3_XATTR_INDEX_MAX; i++) {
++ if (ext3_xattr_handlers[i]) {
++ const char *n = strcmp_prefix(*name,
++ ext3_xattr_handlers[i]->prefix);
++ if (n) {
++ handler = ext3_xattr_handlers[i];
++ *name = n;
++ break;
++ }
++ }
++ }
++ read_unlock(&ext3_handler_lock);
++ return handler;
++}
++
++static inline struct ext3_xattr_handler *
++ext3_xattr_handler(int name_index)
++{
++ struct ext3_xattr_handler *handler = NULL;
++ if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
++ read_lock(&ext3_handler_lock);
++ handler = ext3_xattr_handlers[name_index-1];
++ read_unlock(&ext3_handler_lock);
++ }
++ return handler;
++}
++
++/*
++ * Inode operation getxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++ssize_t
++ext3_getxattr(struct dentry *dentry, const char *name,
++ void *buffer, size_t size)
++{
++ struct ext3_xattr_handler *handler;
++ struct inode *inode = dentry->d_inode;
++
++ handler = ext3_xattr_resolve_name(&name);
++ if (!handler)
++ return -ENOTSUP;
++ return handler->get(inode, name, buffer, size);
++}
++
++/*
++ * Inode operation listxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++ssize_t
++ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
++{
++ return ext3_xattr_list(dentry->d_inode, buffer, size);
++}
++
++/*
++ * Inode operation setxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++int
++ext3_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags)
++{
++ struct ext3_xattr_handler *handler;
++ struct inode *inode = dentry->d_inode;
++
++ if (size == 0)
++ value = ""; /* empty EA, do not remove */
++ handler = ext3_xattr_resolve_name(&name);
++ if (!handler)
++ return -ENOTSUP;
++ return handler->set(inode, name, value, size, flags);
++}
++
++/*
++ * Inode operation removexattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++int
++ext3_removexattr(struct dentry *dentry, const char *name)
++{
++ struct ext3_xattr_handler *handler;
++ struct inode *inode = dentry->d_inode;
++
++ handler = ext3_xattr_resolve_name(&name);
++ if (!handler)
++ return -ENOTSUP;
++ return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
++}
++
++/*
++ * ext3_xattr_get()
++ *
++ * Copy an extended attribute into the buffer
++ * provided, or compute the buffer size required.
++ * Buffer is NULL to compute the size of the buffer required.
++ *
++ * Returns a negative error number on failure, or the number of bytes
++ * used / required on success.
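++ * A typical caller passes a NULL buffer first to learn the required
++ * size, allocates that much memory, and then calls again with the real
++ * buffer.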
++ */
++int
++ext3_xattr_get(struct inode *inode, int name_index, const char *name,
++ void *buffer, size_t buffer_size)
++{
++ struct buffer_head *bh = NULL;
++ struct ext3_xattr_entry *entry;
++ unsigned int block, size;
++ char *end;
++ int name_len, error;
++
++ ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
++ name_index, name, buffer, (long)buffer_size);
++
++ if (name == NULL)
++ return -EINVAL;
++ if (!EXT3_I(inode)->i_file_acl)
++ return -ENOATTR;
++ block = EXT3_I(inode)->i_file_acl;
++ ea_idebug(inode, "reading block %d", block);
++ bh = sb_bread(inode->i_sb, block);
++ if (!bh)
++ return -EIO;
++ ea_bdebug(bh, "b_count=%d, refcount=%d",
++ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
++ end = bh->b_data + bh->b_size;
++ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
++ HDR(bh)->h_blocks != cpu_to_le32(1)) {
++bad_block: ext3_error(inode->i_sb, "ext3_xattr_get",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ /* find named attribute */
++ name_len = strlen(name);
++
++ error = -ERANGE;
++ if (name_len > 255)
++ goto cleanup;
++ entry = FIRST_ENTRY(bh);
++ while (!IS_LAST_ENTRY(entry)) {
++ struct ext3_xattr_entry *next =
++ EXT3_XATTR_NEXT(entry);
++ if ((char *)next >= end)
++ goto bad_block;
++ if (name_index == entry->e_name_index &&
++ name_len == entry->e_name_len &&
++ memcmp(name, entry->e_name, name_len) == 0)
++ goto found;
++ entry = next;
++ }
++ /* Check the remaining name entries */
++ while (!IS_LAST_ENTRY(entry)) {
++ struct ext3_xattr_entry *next =
++ EXT3_XATTR_NEXT(entry);
++ if ((char *)next >= end)
++ goto bad_block;
++ entry = next;
++ }
++ if (ext3_xattr_cache_insert(bh))
++ ea_idebug(inode, "cache insert failed");
++ error = -ENOATTR;
++ goto cleanup;
++found:
++ /* check the buffer size */
++ if (entry->e_value_block != 0)
++ goto bad_block;
++ size = le32_to_cpu(entry->e_value_size);
++ if (size > inode->i_sb->s_blocksize ||
++ le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
++ goto bad_block;
++
++ if (ext3_xattr_cache_insert(bh))
++ ea_idebug(inode, "cache insert failed");
++ if (buffer) {
++ error = -ERANGE;
++ if (size > buffer_size)
++ goto cleanup;
++ /* return value of attribute */
++ memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
++ size);
++ }
++ error = size;
++
++cleanup:
++ brelse(bh);
++
++ return error;
++}
++
++/*
++ * ext3_xattr_list()
++ *
++ * Copy a list of attribute names into the buffer
++ * provided, or compute the buffer size required.
++ * Buffer is NULL to compute the size of the buffer required.
++ *
++ * Returns a negative error number on failure, or the number of bytes
++ * used / required on success.
++ */
++int
++ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
++{
++ struct buffer_head *bh = NULL;
++ struct ext3_xattr_entry *entry;
++ unsigned int block, size = 0;
++ char *buf, *end;
++ int error;
++
++ ea_idebug(inode, "buffer=%p, buffer_size=%ld",
++ buffer, (long)buffer_size);
++
++ if (!EXT3_I(inode)->i_file_acl)
++ return 0;
++ block = EXT3_I(inode)->i_file_acl;
++ ea_idebug(inode, "reading block %d", block);
++ bh = sb_bread(inode->i_sb, block);
++ if (!bh)
++ return -EIO;
++ ea_bdebug(bh, "b_count=%d, refcount=%d",
++ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
++ end = bh->b_data + bh->b_size;
++ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
++ HDR(bh)->h_blocks != cpu_to_le32(1)) {
++bad_block: ext3_error(inode->i_sb, "ext3_xattr_list",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ /* compute the size required for the list of attribute names */
++ for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
++ entry = EXT3_XATTR_NEXT(entry)) {
++ struct ext3_xattr_handler *handler;
++ struct ext3_xattr_entry *next =
++ EXT3_XATTR_NEXT(entry);
++ if ((char *)next >= end)
++ goto bad_block;
++
++ handler = ext3_xattr_handler(entry->e_name_index);
++ if (handler)
++ size += handler->list(NULL, inode, entry->e_name,
++ entry->e_name_len);
++ }
++
++ if (ext3_xattr_cache_insert(bh))
++ ea_idebug(inode, "cache insert failed");
++ if (!buffer) {
++ error = size;
++ goto cleanup;
++ } else {
++ error = -ERANGE;
++ if (size > buffer_size)
++ goto cleanup;
++ }
++
++ /* list the attribute names */
++ buf = buffer;
++ for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
++ entry = EXT3_XATTR_NEXT(entry)) {
++ struct ext3_xattr_handler *handler;
++
++ handler = ext3_xattr_handler(entry->e_name_index);
++ if (handler)
++ buf += handler->list(buf, inode, entry->e_name,
++ entry->e_name_len);
++ }
++ error = size;
++
++cleanup:
++ brelse(bh);
++
++ return error;
++}
++
++/*
++ * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is
++ * not set, set it.
++ */
++static void ext3_xattr_update_super_block(handle_t *handle,
++ struct super_block *sb)
++{
++ if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
++ return;
++
++ lock_super(sb);
++ ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
++ EXT3_SB(sb)->s_feature_compat |= EXT3_FEATURE_COMPAT_EXT_ATTR;
++#endif
++ EXT3_SB(sb)->s_es->s_feature_compat |=
++ cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR);
++ sb->s_dirt = 1;
++ ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
++ unlock_super(sb);
++}
++
++/*
++ * ext3_xattr_set()
++ *
++ * Create, replace or remove an extended attribute for this inode. Buffer
++ * is NULL to remove an existing extended attribute, and non-NULL to
++ * either replace an existing extended attribute, or create a new extended
++ * attribute. The flags XATTR_REPLACE and XATTR_CREATE
++ * specify that an extended attribute must exist and must not exist
++ * previous to the call, respectively.
++ *
++ * Returns 0, or a negative error number on failure.
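++ * For example, XATTR_CREATE fails with -EEXIST if the named attribute
++ * already exists, and XATTR_REPLACE fails with -ENOATTR if it does not.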
++ */
++int
++ext3_xattr_set(handle_t *handle, struct inode *inode, int name_index,
++ const char *name, const void *value, size_t value_len, int flags)
++{
++ struct super_block *sb = inode->i_sb;
++ struct buffer_head *bh = NULL;
++ struct ext3_xattr_header *header = NULL;
++ struct ext3_xattr_entry *here, *last;
++ unsigned int name_len;
++ int block = EXT3_I(inode)->i_file_acl;
++ int min_offs = sb->s_blocksize, not_found = 1, free, error;
++ char *end;
++
++ /*
++ * header -- Points either into bh, or to a temporarily
++ * allocated buffer.
++ * here -- The named entry found, or the place for inserting, within
++ * the block pointed to by header.
++ * last -- Points right after the last named entry within the block
++ * pointed to by header.
++ * min_offs -- The offset of the first value (values are aligned
++ * towards the end of the block).
++ * end -- Points right after the block pointed to by header.
++ */
++
++ ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
++ name_index, name, value, (long)value_len);
++
++ if (IS_RDONLY(inode))
++ return -EROFS;
++ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
++ return -EPERM;
++ if (value == NULL)
++ value_len = 0;
++ if (name == NULL)
++ return -EINVAL;
++ name_len = strlen(name);
++ if (name_len > 255 || value_len > sb->s_blocksize)
++ return -ERANGE;
++ down(&ext3_xattr_sem);
++
++ if (block) {
++ /* The inode already has an extended attribute block. */
++ bh = sb_bread(sb, block);
++ error = -EIO;
++ if (!bh)
++ goto cleanup;
++ ea_bdebug(bh, "b_count=%d, refcount=%d",
++ atomic_read(&(bh->b_count)),
++ le32_to_cpu(HDR(bh)->h_refcount));
++ header = HDR(bh);
++ end = bh->b_data + bh->b_size;
++ if (header->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
++ header->h_blocks != cpu_to_le32(1)) {
++bad_block: ext3_error(sb, "ext3_xattr_set",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ /* Find the named attribute. */
++ here = FIRST_ENTRY(bh);
++ while (!IS_LAST_ENTRY(here)) {
++ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(here);
++ if ((char *)next >= end)
++ goto bad_block;
++ if (!here->e_value_block && here->e_value_size) {
++ int offs = le16_to_cpu(here->e_value_offs);
++ if (offs < min_offs)
++ min_offs = offs;
++ }
++ not_found = name_index - here->e_name_index;
++ if (!not_found)
++ not_found = name_len - here->e_name_len;
++ if (!not_found)
++ not_found = memcmp(name, here->e_name,name_len);
++ if (not_found <= 0)
++ break;
++ here = next;
++ }
++ last = here;
++ /* We still need to compute min_offs and last. */
++ while (!IS_LAST_ENTRY(last)) {
++ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
++ if ((char *)next >= end)
++ goto bad_block;
++ if (!last->e_value_block && last->e_value_size) {
++ int offs = le16_to_cpu(last->e_value_offs);
++ if (offs < min_offs)
++ min_offs = offs;
++ }
++ last = next;
++ }
++
++ /* Check whether we have enough space left. */
++ free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
++ } else {
++ /* We will use a new extended attribute block. */
++ free = sb->s_blocksize -
++ sizeof(struct ext3_xattr_header) - sizeof(__u32);
++ here = last = NULL; /* avoid gcc uninitialized warning. */
++ }
++
++ if (not_found) {
++ /* Request to remove a nonexistent attribute? */
++ error = -ENOATTR;
++ if (flags & XATTR_REPLACE)
++ goto cleanup;
++ error = 0;
++ if (value == NULL)
++ goto cleanup;
++ else
++ free -= EXT3_XATTR_LEN(name_len);
++ } else {
++ /* Request to create an existing attribute? */
++ error = -EEXIST;
++ if (flags & XATTR_CREATE)
++ goto cleanup;
++ if (!here->e_value_block && here->e_value_size) {
++ unsigned int size = le32_to_cpu(here->e_value_size);
++
++ if (le16_to_cpu(here->e_value_offs) + size >
++ sb->s_blocksize || size > sb->s_blocksize)
++ goto bad_block;
++ free += EXT3_XATTR_SIZE(size);
++ }
++ }
++ free -= EXT3_XATTR_SIZE(value_len);
++ error = -ENOSPC;
++ if (free < 0)
++ goto cleanup;
++
++ /* Here we know that we can set the new attribute. */
++
++ if (header) {
++ if (header->h_refcount == cpu_to_le32(1)) {
++ ea_bdebug(bh, "modifying in-place");
++ ext3_xattr_cache_remove(bh);
++ error = ext3_journal_get_write_access(handle, bh);
++ if (error)
++ goto cleanup;
++ } else {
++ int offset;
++
++ ea_bdebug(bh, "cloning");
++ header = kmalloc(bh->b_size, GFP_KERNEL);
++ error = -ENOMEM;
++ if (header == NULL)
++ goto cleanup;
++ memcpy(header, HDR(bh), bh->b_size);
++ header->h_refcount = cpu_to_le32(1);
++ offset = (char *)header - bh->b_data;
++ here = ENTRY((char *)here + offset);
++ last = ENTRY((char *)last + offset);
++ }
++ } else {
++ /* Allocate a buffer where we construct the new block. */
++ header = kmalloc(sb->s_blocksize, GFP_KERNEL);
++ error = -ENOMEM;
++ if (header == NULL)
++ goto cleanup;
++ memset(header, 0, sb->s_blocksize);
++ end = (char *)header + sb->s_blocksize;
++ header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
++ header->h_blocks = header->h_refcount = cpu_to_le32(1);
++ last = here = ENTRY(header+1);
++ }
++
++ if (not_found) {
++ /* Insert the new name. */
++ int size = EXT3_XATTR_LEN(name_len);
++ int rest = (char *)last - (char *)here;
++ memmove((char *)here + size, here, rest);
++ memset(here, 0, size);
++ here->e_name_index = name_index;
++ here->e_name_len = name_len;
++ memcpy(here->e_name, name, name_len);
++ } else {
++ /* Remove the old value. */
++ if (!here->e_value_block && here->e_value_size) {
++ char *first_val = (char *)header + min_offs;
++ int offs = le16_to_cpu(here->e_value_offs);
++ char *val = (char *)header + offs;
++ size_t size = EXT3_XATTR_SIZE(
++ le32_to_cpu(here->e_value_size));
++ memmove(first_val + size, first_val, val - first_val);
++ memset(first_val, 0, size);
++ here->e_value_offs = 0;
++ min_offs += size;
++
++ /* Adjust all value offsets. */
++ last = ENTRY(header+1);
++ while (!IS_LAST_ENTRY(last)) {
++ int o = le16_to_cpu(last->e_value_offs);
++ if (!last->e_value_block && o < offs)
++ last->e_value_offs =
++ cpu_to_le16(o + size);
++ last = EXT3_XATTR_NEXT(last);
++ }
++ }
++ if (value == NULL) {
++ /* Remove this attribute. */
++ if (EXT3_XATTR_NEXT(ENTRY(header+1)) == last) {
++ /* This block is now empty. */
++ error = ext3_xattr_set2(handle, inode, bh,NULL);
++ goto cleanup;
++ } else {
++ /* Remove the old name. */
++ int size = EXT3_XATTR_LEN(name_len);
++ last = ENTRY((char *)last - size);
++ memmove(here, (char*)here + size,
++ (char*)last - (char*)here);
++ memset(last, 0, size);
++ }
++ }
++ }
++
++ if (value != NULL) {
++ /* Insert the new value. */
++ here->e_value_size = cpu_to_le32(value_len);
++ if (value_len) {
++ size_t size = EXT3_XATTR_SIZE(value_len);
++ char *val = (char *)header + min_offs - size;
++ here->e_value_offs =
++ cpu_to_le16((char *)val - (char *)header);
++ memset(val + size - EXT3_XATTR_PAD, 0,
++ EXT3_XATTR_PAD); /* Clear the pad bytes. */
++ memcpy(val, value, value_len);
++ }
++ }
++ ext3_xattr_rehash(header, here);
++
++ error = ext3_xattr_set2(handle, inode, bh, header);
++
++cleanup:
++ brelse(bh);
++ if (!(bh && header == HDR(bh)))
++ kfree(header);
++ up(&ext3_xattr_sem);
++
++ return error;
++}
++
++/*
++ * Second half of ext3_xattr_set(): Update the file system.
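++ *
++ * Three cases are handled: an identical block found in the cache is
++ * reused (its refcount is incremented), the existing block is kept and
++ * rewritten in place, or a new block is allocated and filled from the
++ * in-memory header.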
++ */
++static int
++ext3_xattr_set2(handle_t *handle, struct inode *inode,
++ struct buffer_head *old_bh, struct ext3_xattr_header *header)
++{
++ struct super_block *sb = inode->i_sb;
++ struct buffer_head *new_bh = NULL;
++ int error;
++
++ if (header) {
++ new_bh = ext3_xattr_cache_find(inode, header);
++ if (new_bh) {
++ /*
++ * We found an identical block in the cache.
++ * The old block will be released after updating
++ * the inode.
++ */
++ ea_bdebug(old_bh, "reusing block %ld",
++ new_bh->b_blocknr);
++
++ error = -EDQUOT;
++ if (ext3_xattr_quota_alloc(inode, 1))
++ goto cleanup;
++
++ error = ext3_journal_get_write_access(handle, new_bh);
++ if (error)
++ goto cleanup;
++ HDR(new_bh)->h_refcount = cpu_to_le32(
++ le32_to_cpu(HDR(new_bh)->h_refcount) + 1);
++ ea_bdebug(new_bh, "refcount now=%d",
++ le32_to_cpu(HDR(new_bh)->h_refcount));
++ } else if (old_bh && header == HDR(old_bh)) {
++ /* Keep this block. */
++ new_bh = old_bh;
++ ext3_xattr_cache_insert(new_bh);
++ } else {
++ /* We need to allocate a new block */
++ int force = EXT3_I(inode)->i_file_acl != 0;
++ int block = ext3_xattr_new_block(handle, inode,
++ &error, force);
++ if (error)
++ goto cleanup;
++ ea_idebug(inode, "creating block %d", block);
++
++ new_bh = sb_getblk(sb, block);
++ if (!new_bh) {
++getblk_failed: ext3_xattr_free_block(handle, inode, block);
++ error = -EIO;
++ goto cleanup;
++ }
++ lock_buffer(new_bh);
++ error = ext3_journal_get_create_access(handle, new_bh);
++ if (error) {
++ unlock_buffer(new_bh);
++ goto getblk_failed;
++ }
++ memcpy(new_bh->b_data, header, new_bh->b_size);
++ mark_buffer_uptodate(new_bh, 1);
++ unlock_buffer(new_bh);
++ ext3_xattr_cache_insert(new_bh);
++
++ ext3_xattr_update_super_block(handle, sb);
++ }
++ error = ext3_journal_dirty_metadata(handle, new_bh);
++ if (error)
++ goto cleanup;
++ }
++
++ /* Update the inode. */
++ EXT3_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
++ inode->i_ctime = CURRENT_TIME;
++ ext3_mark_inode_dirty(handle, inode);
++ if (IS_SYNC(inode))
++ handle->h_sync = 1;
++
++ error = 0;
++ if (old_bh && old_bh != new_bh) {
++ /*
++ * If there was an old block, and we are not still using it,
++ * we now release the old block.
++ */
++ unsigned int refcount = le32_to_cpu(HDR(old_bh)->h_refcount);
++
++ error = ext3_journal_get_write_access(handle, old_bh);
++ if (error)
++ goto cleanup;
++ if (refcount == 1) {
++ /* Free the old block. */
++ ea_bdebug(old_bh, "freeing");
++ ext3_xattr_free_block(handle, inode, old_bh->b_blocknr);
++
++ /* ext3_forget() calls bforget() for us, but we
++ let our caller release old_bh, so we need to
++ duplicate the handle before. */
++ get_bh(old_bh);
++ ext3_forget(handle, 1, inode, old_bh,old_bh->b_blocknr);
++ } else {
++ /* Decrement the refcount only. */
++ refcount--;
++ HDR(old_bh)->h_refcount = cpu_to_le32(refcount);
++ ext3_xattr_quota_free(inode);
++ ext3_journal_dirty_metadata(handle, old_bh);
++ ea_bdebug(old_bh, "refcount now=%d", refcount);
++ }
++ }
++
++cleanup:
++ if (old_bh != new_bh)
++ brelse(new_bh);
++
++ return error;
++}
++
++/*
++ * ext3_xattr_delete_inode()
++ *
++ * Free extended attribute resources associated with this inode. This
++ * is called immediately before an inode is freed.
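++ * If the attribute block is shared with other inodes, only its refcount
++ * is decremented; otherwise the block itself is freed.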
++ */
++void
++ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
++{
++ struct buffer_head *bh;
++ unsigned int block = EXT3_I(inode)->i_file_acl;
++
++ if (!block)
++ return;
++ down(&ext3_xattr_sem);
++
++ bh = sb_bread(inode->i_sb, block);
++ if (!bh) {
++ ext3_error(inode->i_sb, "ext3_xattr_delete_inode",
++ "inode %ld: block %d read error", inode->i_ino, block);
++ goto cleanup;
++ }
++ ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
++ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
++ HDR(bh)->h_blocks != cpu_to_le32(1)) {
++ ext3_error(inode->i_sb, "ext3_xattr_delete_inode",
++ "inode %ld: bad block %d", inode->i_ino, block);
++ goto cleanup;
++ }
++ ext3_journal_get_write_access(handle, bh);
++ ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1);
++ if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
++ ext3_xattr_cache_remove(bh);
++ ext3_xattr_free_block(handle, inode, block);
++ ext3_forget(handle, 1, inode, bh, block);
++ bh = NULL;
++ } else {
++ HDR(bh)->h_refcount = cpu_to_le32(
++ le32_to_cpu(HDR(bh)->h_refcount) - 1);
++ ext3_journal_dirty_metadata(handle, bh);
++ if (IS_SYNC(inode))
++ handle->h_sync = 1;
++ ext3_xattr_quota_free(inode);
++ }
++ EXT3_I(inode)->i_file_acl = 0;
++
++cleanup:
++ brelse(bh);
++ up(&ext3_xattr_sem);
++}
++
++/*
++ * ext3_xattr_put_super()
++ *
++ * This is called when a file system is unmounted.
++ */
++void
++ext3_xattr_put_super(struct super_block *sb)
++{
++#ifdef CONFIG_EXT3_FS_XATTR_SHARING
++ mb_cache_shrink(ext3_xattr_cache, sb->s_dev);
++#endif
++}
++
++#ifdef CONFIG_EXT3_FS_XATTR_SHARING
++
++/*
++ * ext3_xattr_cache_insert()
++ *
++ * Create a new entry in the extended attribute cache, and insert
++ * it unless such an entry is already in the cache.
++ *
++ * Returns 0, or a negative error number on failure.
++ */
++static int
++ext3_xattr_cache_insert(struct buffer_head *bh)
++{
++ __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
++ struct mb_cache_entry *ce;
++ int error;
++
++ ce = mb_cache_entry_alloc(ext3_xattr_cache);
++ if (!ce)
++ return -ENOMEM;
++ error = mb_cache_entry_insert(ce, bh->b_dev, bh->b_blocknr, &hash);
++ if (error) {
++ mb_cache_entry_free(ce);
++ if (error == -EBUSY) {
++ ea_bdebug(bh, "already in cache (%d cache entries)",
++ atomic_read(&ext3_xattr_cache->c_entry_count));
++ error = 0;
++ }
++ } else {
++ ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
++ atomic_read(&ext3_xattr_cache->c_entry_count));
++ mb_cache_entry_release(ce);
++ }
++ return error;
++}
++
++/*
++ * ext3_xattr_cmp()
++ *
++ * Compare two extended attribute blocks for equality.
++ *
++ * Returns 0 if the blocks are equal, 1 if they differ, and
++ * a negative error number on errors.
++ */
++static int
++ext3_xattr_cmp(struct ext3_xattr_header *header1,
++ struct ext3_xattr_header *header2)
++{
++ struct ext3_xattr_entry *entry1, *entry2;
++
++ entry1 = ENTRY(header1+1);
++ entry2 = ENTRY(header2+1);
++ while (!IS_LAST_ENTRY(entry1)) {
++ if (IS_LAST_ENTRY(entry2))
++ return 1;
++ if (entry1->e_hash != entry2->e_hash ||
++ entry1->e_name_len != entry2->e_name_len ||
++ entry1->e_value_size != entry2->e_value_size ||
++ memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
++ return 1;
++ if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
++ return -EIO;
++ if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
++ (char *)header2 + le16_to_cpu(entry2->e_value_offs),
++ le32_to_cpu(entry1->e_value_size)))
++ return 1;
++
++ entry1 = EXT3_XATTR_NEXT(entry1);
++ entry2 = EXT3_XATTR_NEXT(entry2);
++ }
++ if (!IS_LAST_ENTRY(entry2))
++ return 1;
++ return 0;
++}
++
++/*
++ * ext3_xattr_cache_find()
++ *
++ * Find an identical extended attribute block.
++ *
++ * Returns a pointer to the block found, or NULL if such a block was
++ * not found or an error occurred.
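++ * Blocks whose refcount has already reached EXT3_XATTR_REFCOUNT_MAX are
++ * skipped, so they are not shared any further.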
++ */
++static struct buffer_head *
++ext3_xattr_cache_find(struct inode *inode, struct ext3_xattr_header *header)
++{
++ __u32 hash = le32_to_cpu(header->h_hash);
++ struct mb_cache_entry *ce;
++
++ if (!header->h_hash)
++ return NULL; /* never share */
++ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
++ ce = mb_cache_entry_find_first(ext3_xattr_cache, 0, inode->i_dev, hash);
++ while (ce) {
++ struct buffer_head *bh = sb_bread(inode->i_sb, ce->e_block);
++
++ if (!bh) {
++ ext3_error(inode->i_sb, "ext3_xattr_cache_find",
++ "inode %ld: block %ld read error",
++ inode->i_ino, ce->e_block);
++ } else if (le32_to_cpu(HDR(bh)->h_refcount) >
++ EXT3_XATTR_REFCOUNT_MAX) {
++ ea_idebug(inode, "block %ld refcount %d>%d",ce->e_block,
++ le32_to_cpu(HDR(bh)->h_refcount),
++ EXT3_XATTR_REFCOUNT_MAX);
++ } else if (!ext3_xattr_cmp(header, HDR(bh))) {
++ ea_bdebug(bh, "b_count=%d",atomic_read(&(bh->b_count)));
++ mb_cache_entry_release(ce);
++ return bh;
++ }
++ brelse(bh);
++ ce = mb_cache_entry_find_next(ce, 0, inode->i_dev, hash);
++ }
++ return NULL;
++}
++
++/*
++ * ext3_xattr_cache_remove()
++ *
++ * Remove the cache entry of a block from the cache. Called when a
++ * block becomes invalid.
++ */
++static void
++ext3_xattr_cache_remove(struct buffer_head *bh)
++{
++ struct mb_cache_entry *ce;
++
++ ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_dev, bh->b_blocknr);
++ if (ce) {
++ ea_bdebug(bh, "removing (%d cache entries remaining)",
++ atomic_read(&ext3_xattr_cache->c_entry_count)-1);
++ mb_cache_entry_free(ce);
++ } else
++ ea_bdebug(bh, "no cache entry");
++}
++
++#define NAME_HASH_SHIFT 5
++#define VALUE_HASH_SHIFT 16
++
++/*
++ * ext3_xattr_hash_entry()
++ *
++ * Compute the hash of an extended attribute.
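++ * The hash covers the entry name and, for values stored within the
++ * block, the (padded) attribute value.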
++ */
++static inline void ext3_xattr_hash_entry(struct ext3_xattr_header *header,
++ struct ext3_xattr_entry *entry)
++{
++ __u32 hash = 0;
++ char *name = entry->e_name;
++ int n;
++
++ for (n=0; n < entry->e_name_len; n++) {
++ hash = (hash << NAME_HASH_SHIFT) ^
++ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
++ *name++;
++ }
++
++ if (entry->e_value_block == 0 && entry->e_value_size != 0) {
++ __u32 *value = (__u32 *)((char *)header +
++ le16_to_cpu(entry->e_value_offs));
++ for (n = (le32_to_cpu(entry->e_value_size) +
++ EXT3_XATTR_ROUND) >> EXT3_XATTR_PAD_BITS; n; n--) {
++ hash = (hash << VALUE_HASH_SHIFT) ^
++ (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
++ le32_to_cpu(*value++);
++ }
++ }
++ entry->e_hash = cpu_to_le32(hash);
++}
++
++#undef NAME_HASH_SHIFT
++#undef VALUE_HASH_SHIFT
++
++#define BLOCK_HASH_SHIFT 16
++
++/*
++ * ext3_xattr_rehash()
++ *
++ * Re-compute the extended attribute hash value after an entry has changed.
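++ * The block hash in the header is folded together from the individual
++ * entry hashes; a zero entry hash forces the block hash to zero, which
++ * marks the block as not shareable.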
++ */
++static void ext3_xattr_rehash(struct ext3_xattr_header *header,
++ struct ext3_xattr_entry *entry)
++{
++ struct ext3_xattr_entry *here;
++ __u32 hash = 0;
++
++ ext3_xattr_hash_entry(header, entry);
++ here = ENTRY(header+1);
++ while (!IS_LAST_ENTRY(here)) {
++ if (!here->e_hash) {
++ /* Block is not shared if an entry's hash value == 0 */
++ hash = 0;
++ break;
++ }
++ hash = (hash << BLOCK_HASH_SHIFT) ^
++ (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
++ le32_to_cpu(here->e_hash);
++ here = EXT3_XATTR_NEXT(here);
++ }
++ header->h_hash = cpu_to_le32(hash);
++}
++
++#undef BLOCK_HASH_SHIFT
++
++int __init
++init_ext3_xattr(void)
++{
++ ext3_xattr_cache = mb_cache_create("ext3_xattr", NULL,
++ sizeof(struct mb_cache_entry) +
++ sizeof(struct mb_cache_entry_index), 1, 61);
++ if (!ext3_xattr_cache)
++ return -ENOMEM;
++
++ return 0;
++}
++
++void
++exit_ext3_xattr(void)
++{
++ if (ext3_xattr_cache)
++ mb_cache_destroy(ext3_xattr_cache);
++ ext3_xattr_cache = NULL;
++}
++
++#else /* CONFIG_EXT3_FS_XATTR_SHARING */
++
++int __init
++init_ext3_xattr(void)
++{
++ return 0;
++}
++
++void
++exit_ext3_xattr(void)
++{
++}
++
++#endif /* CONFIG_EXT3_FS_XATTR_SHARING */
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.20-braam/fs/ext3/xattr_user.c Sat Apr 5 03:57:18 2003
+@@ -0,0 +1,111 @@
++/*
++ * linux/fs/ext3/xattr_user.c
++ * Handler for extended user attributes.
++ *
++ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
++ */
++
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/fs.h>
++#include <linux/ext3_jbd.h>
++#include <linux/ext3_fs.h>
++#include <linux/ext3_xattr.h>
++
++#ifdef CONFIG_EXT3_FS_POSIX_ACL
++# include <linux/ext3_acl.h>
++#endif
++
++#define XATTR_USER_PREFIX "user."
++
++static size_t
++ext3_xattr_user_list(char *list, struct inode *inode,
++ const char *name, int name_len)
++{
++ const int prefix_len = sizeof(XATTR_USER_PREFIX)-1;
++
++ if (!test_opt(inode->i_sb, XATTR_USER))
++ return 0;
++
++ if (list) {
++ memcpy(list, XATTR_USER_PREFIX, prefix_len);
++ memcpy(list+prefix_len, name, name_len);
++ list[prefix_len + name_len] = '\0';
++ }
++ return prefix_len + name_len + 1;
++}
++
++static int
++ext3_xattr_user_get(struct inode *inode, const char *name,
++ void *buffer, size_t size)
++{
++ int error;
++
++ if (strcmp(name, "") == 0)
++ return -EINVAL;
++ if (!test_opt(inode->i_sb, XATTR_USER))
++ return -ENOTSUP;
++#ifdef CONFIG_EXT3_FS_POSIX_ACL
++ error = ext3_permission_locked(inode, MAY_READ);
++#else
++ error = permission(inode, MAY_READ);
++#endif
++ if (error)
++ return error;
++
++ return ext3_xattr_get(inode, EXT3_XATTR_INDEX_USER, name,
++ buffer, size);
++}
++
++static int
++ext3_xattr_user_set(struct inode *inode, const char *name,
++ const void *value, size_t size, int flags)
++{
++ handle_t *handle;
++ int error;
++
++ if (strcmp(name, "") == 0)
++ return -EINVAL;
++ if (!test_opt(inode->i_sb, XATTR_USER))
++ return -ENOTSUP;
++ if ( !S_ISREG(inode->i_mode) &&
++ (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
++ return -EPERM;
++#ifdef CONFIG_EXT3_FS_POSIX_ACL
++ error = ext3_permission_locked(inode, MAY_WRITE);
++#else
++ error = permission(inode, MAY_WRITE);
++#endif
++ if (error)
++ return error;
++
++ handle = ext3_journal_start(inode, EXT3_XATTR_TRANS_BLOCKS);
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++ error = ext3_xattr_set(handle, inode, EXT3_XATTR_INDEX_USER, name,
++ value, size, flags);
++ ext3_journal_stop(handle, inode);
++
++ return error;
++}
++
++struct ext3_xattr_handler ext3_xattr_user_handler = {
++ prefix: XATTR_USER_PREFIX,
++ list: ext3_xattr_user_list,
++ get: ext3_xattr_user_get,
++ set: ext3_xattr_user_set,
++};
++
++int __init
++init_ext3_xattr_user(void)
++{
++ return ext3_xattr_register(EXT3_XATTR_INDEX_USER,
++ &ext3_xattr_user_handler);
++}
++
++void
++exit_ext3_xattr_user(void)
++{
++ ext3_xattr_unregister(EXT3_XATTR_INDEX_USER,
++ &ext3_xattr_user_handler);
++}
+--- linux-2.4.20/fs/jfs/jfs_xattr.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/jfs/jfs_xattr.h Sat Apr 5 03:57:18 2003
+@@ -52,8 +52,10 @@ struct jfs_ea_list {
+ #define END_EALIST(ealist) \
+ ((struct jfs_ea *) (((char *) (ealist)) + EALIST_SIZE(ealist)))
+
+-extern int __jfs_setxattr(struct inode *, const char *, void *, size_t, int);
+-extern int jfs_setxattr(struct dentry *, const char *, void *, size_t, int);
++extern int __jfs_setxattr(struct inode *, const char *, const void *, size_t,
++ int);
++extern int jfs_setxattr(struct dentry *, const char *, const void *, size_t,
++ int);
+ extern ssize_t __jfs_getxattr(struct inode *, const char *, void *, size_t);
+ extern ssize_t jfs_getxattr(struct dentry *, const char *, void *, size_t);
+ extern ssize_t jfs_listxattr(struct dentry *, char *, size_t);
+--- linux-2.4.20/fs/jfs/xattr.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/fs/jfs/xattr.c Sat Apr 5 03:57:18 2003
+@@ -641,7 +641,7 @@ static int ea_put(struct inode *inode, s
+ }
+
+ static int can_set_xattr(struct inode *inode, const char *name,
+- void *value, size_t value_len)
++ const void *value, size_t value_len)
+ {
+ if (IS_RDONLY(inode))
+ return -EROFS;
+@@ -660,7 +660,7 @@ static int can_set_xattr(struct inode *i
+ return permission(inode, MAY_WRITE);
+ }
+
+-int __jfs_setxattr(struct inode *inode, const char *name, void *value,
++int __jfs_setxattr(struct inode *inode, const char *name, const void *value,
+ size_t value_len, int flags)
+ {
+ struct jfs_ea_list *ealist;
+@@ -799,7 +799,7 @@ int __jfs_setxattr(struct inode *inode,
+ return rc;
+ }
+
+-int jfs_setxattr(struct dentry *dentry, const char *name, void *value,
++int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t value_len, int flags)
+ {
+ if (value == NULL) { /* empty EA, do not remove */
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.20-braam/fs/mbcache.c Sat Apr 5 03:57:18 2003
+@@ -0,0 +1,648 @@
++/*
++ * linux/fs/mbcache.c
++ * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
++ */
++
++/*
++ * Filesystem Meta Information Block Cache (mbcache)
++ *
++ * The mbcache caches blocks of block devices that need to be located
++ * by their device/block number, as well as by other criteria (such
++ * as the block's contents).
++ *
++ * There can only be one cache entry in a cache per device and block number.
++ * Additional indexes need not be unique in this sense. The number of
++ * additional indexes (=other criteria) can be hardwired at compile time
++ * or specified at cache create time.
++ *
++ * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
++ * in the cache. A valid entry is in the main hash tables of the cache,
++ * and may also be in the lru list. An invalid entry is not in any hashes
++ * or lists.
++ *
++ * A valid cache entry is only in the lru list if no handles refer to it.
++ * Invalid cache entries will be freed when the last handle to the cache
++ * entry is released. Entries that cannot be freed immediately are put
++ * back on the lru list.
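++ *
++ * ext3 (above) uses a single additional index, keyed by the hash of an
++ * attribute block's contents, so that identical attribute blocks can be
++ * shared between inodes.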
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/cache_def.h>
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/mbcache.h>
++
++
++#ifdef MB_CACHE_DEBUG
++# define mb_debug(f...) do { \
++ printk(KERN_DEBUG f); \
++ printk("\n"); \
++ } while (0)
++#define mb_assert(c) do { if (!(c)) \
++ printk(KERN_ERR "assertion " #c " failed\n"); \
++ } while(0)
++#else
++# define mb_debug(f...) do { } while(0)
++# define mb_assert(c) do { } while(0)
++#endif
++#define mb_error(f...) do { \
++ printk(KERN_ERR f); \
++ printk("\n"); \
++ } while(0)
++
++MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
++MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
++MODULE_LICENSE("GPL");
++#endif
++
++EXPORT_SYMBOL(mb_cache_create);
++EXPORT_SYMBOL(mb_cache_shrink);
++EXPORT_SYMBOL(mb_cache_destroy);
++EXPORT_SYMBOL(mb_cache_entry_alloc);
++EXPORT_SYMBOL(mb_cache_entry_insert);
++EXPORT_SYMBOL(mb_cache_entry_release);
++EXPORT_SYMBOL(mb_cache_entry_takeout);
++EXPORT_SYMBOL(mb_cache_entry_free);
++EXPORT_SYMBOL(mb_cache_entry_dup);
++EXPORT_SYMBOL(mb_cache_entry_get);
++#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
++EXPORT_SYMBOL(mb_cache_entry_find_first);
++EXPORT_SYMBOL(mb_cache_entry_find_next);
++#endif
++
++
++/*
++ * Global data: list of all mbcache's, lru list, and a spinlock for
++ * accessing cache data structures on SMP machines. The lru list is
++ * global across all mbcaches.
++ */
++
++static LIST_HEAD(mb_cache_list);
++static LIST_HEAD(mb_cache_lru_list);
++static spinlock_t mb_cache_spinlock = SPIN_LOCK_UNLOCKED;
++
++static inline int
++mb_cache_indexes(struct mb_cache *cache)
++{
++#ifdef MB_CACHE_INDEXES_COUNT
++ return MB_CACHE_INDEXES_COUNT;
++#else
++ return cache->c_indexes_count;
++#endif
++}
++
++/*
++ * What the mbcache registers as to get shrunk dynamically.
++ */
++
++static void
++mb_cache_memory_pressure(int priority, unsigned int gfp_mask);
++
++static struct cache_definition mb_cache_definition = {
++ "mb_cache",
++ mb_cache_memory_pressure
++};
++
++
++static inline int
++__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
++{
++ return !list_empty(&ce->e_block_list);
++}
++
++
++static inline void
++__mb_cache_entry_unhash(struct mb_cache_entry *ce)
++{
++ int n;
++
++ if (__mb_cache_entry_is_hashed(ce)) {
++ list_del_init(&ce->e_block_list);
++ for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
++ list_del(&ce->e_indexes[n].o_list);
++ }
++}
++
++
++static inline void
++__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
++{
++ struct mb_cache *cache = ce->e_cache;
++
++ mb_assert(atomic_read(&ce->e_used) == 0);
++ if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
++ /* free failed -- put back on the lru list
++ for freeing later. */
++ spin_lock(&mb_cache_spinlock);
++ list_add(&ce->e_lru_list, &mb_cache_lru_list);
++ spin_unlock(&mb_cache_spinlock);
++ } else {
++ kmem_cache_free(cache->c_entry_cache, ce);
++ atomic_dec(&cache->c_entry_count);
++ }
++}
++
++
++static inline void
++__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
++{
++ if (atomic_dec_and_test(&ce->e_used)) {
++ if (__mb_cache_entry_is_hashed(ce))
++ list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
++ else {
++ spin_unlock(&mb_cache_spinlock);
++ __mb_cache_entry_forget(ce, GFP_KERNEL);
++ return;
++ }
++ }
++ spin_unlock(&mb_cache_spinlock);
++}
++
++
++/*
++ * mb_cache_memory_pressure() memory pressure callback
++ *
++ * This function is called by the kernel memory management when memory
++ * gets low.
++ *
++ * @priority: Amount by which to shrink the cache (0 = highest priority)
++ * @gfp_mask: (ignored)
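++ *
++ * Roughly 1/priority of the cache entries are taken off the lru list
++ * and freed.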
++ */
++static void
++mb_cache_memory_pressure(int priority, unsigned int gfp_mask)
++{
++ LIST_HEAD(free_list);
++ struct list_head *l, *ltmp;
++ int count = 0;
++
++ spin_lock(&mb_cache_spinlock);
++ list_for_each(l, &mb_cache_list) {
++ struct mb_cache *cache =
++ list_entry(l, struct mb_cache, c_cache_list);
++ mb_debug("cache %s (%d)", cache->c_name,
++ atomic_read(&cache->c_entry_count));
++ count += atomic_read(&cache->c_entry_count);
++ }
++ mb_debug("trying to free %d of %d entries",
++ count / (priority ? priority : 1), count);
++ if (priority)
++ count /= priority;
++ while (count-- && !list_empty(&mb_cache_lru_list)) {
++ struct mb_cache_entry *ce =
++ list_entry(mb_cache_lru_list.next,
++ struct mb_cache_entry, e_lru_list);
++ list_del(&ce->e_lru_list);
++ __mb_cache_entry_unhash(ce);
++ list_add_tail(&ce->e_lru_list, &free_list);
++ }
++ spin_unlock(&mb_cache_spinlock);
++ list_for_each_safe(l, ltmp, &free_list) {
++ __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
++ e_lru_list), gfp_mask);
++ }
++}
++
++
++/*
++ * mb_cache_create() create a new cache
++ *
++ * All entries in one cache are equal size. Cache entries may be from
++ * multiple devices. Entry objects are allocated from a per-cache slab
++ * cache. Returns NULL if no more memory was available.
++ *
++ * @name: name of the cache (informal)
++ * @cache_op: contains the callback called when freeing a cache entry
++ * @entry_size: The size of a cache entry, including
++ * struct mb_cache_entry
++ * @indexes_count: number of additional indexes in the cache. Must equal
++ * MB_CACHE_INDEXES_COUNT if the number of indexes is
++ * hardwired.
++ * @bucket_count: number of hash buckets
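++ *
++ * ext3, for example, creates its attribute block cache with one
++ * additional index and 61 hash buckets (see init_ext3_xattr() above).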
++ */
++struct mb_cache *
++mb_cache_create(const char *name, struct mb_cache_op *cache_op,
++ size_t entry_size, int indexes_count, int bucket_count)
++{
++ int m=0, n;
++ struct mb_cache *cache = NULL;
++
++ if(entry_size < sizeof(struct mb_cache_entry) +
++ indexes_count * sizeof(struct mb_cache_entry_index))
++ return NULL;
++
++ MOD_INC_USE_COUNT;
++ cache = kmalloc(sizeof(struct mb_cache) +
++ indexes_count * sizeof(struct list_head), GFP_KERNEL);
++ if (!cache)
++ goto fail;
++ cache->c_name = name;
++ cache->c_op.free = NULL;
++ if (cache_op)
++ cache->c_op.free = cache_op->free;
++ atomic_set(&cache->c_entry_count, 0);
++ cache->c_bucket_count = bucket_count;
++#ifdef MB_CACHE_INDEXES_COUNT
++ mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
++#else
++ cache->c_indexes_count = indexes_count;
++#endif
++ cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
++ GFP_KERNEL);
++ if (!cache->c_block_hash)
++ goto fail;
++ for (n=0; n<bucket_count; n++)
++ INIT_LIST_HEAD(&cache->c_block_hash[n]);
++ for (m=0; m<indexes_count; m++) {
++ cache->c_indexes_hash[m] = kmalloc(bucket_count *
++ sizeof(struct list_head),
++ GFP_KERNEL);
++ if (!cache->c_indexes_hash[m])
++ goto fail;
++ for (n=0; n<bucket_count; n++)
++ INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
++ }
++ cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
++ 0 /*SLAB_POISON | SLAB_RED_ZONE*/, NULL, NULL);
++ if (!cache->c_entry_cache)
++ goto fail;
++
++ spin_lock(&mb_cache_spinlock);
++ list_add(&cache->c_cache_list, &mb_cache_list);
++ spin_unlock(&mb_cache_spinlock);
++ return cache;
++
++fail:
++ if (cache) {
++ while (--m >= 0)
++ kfree(cache->c_indexes_hash[m]);
++ if (cache->c_block_hash)
++ kfree(cache->c_block_hash);
++ kfree(cache);
++ }
++ MOD_DEC_USE_COUNT;
++ return NULL;
++}
++
++
++/*
++ * mb_cache_shrink()
++ *
++ * Removes all cache entries of a device from the cache. All cache entries
++ * currently in use cannot be freed, and thus remain in the cache.
++ *
++ * @cache: which cache to shrink
++ * @dev: which device's cache entries to shrink
++ */
++void
++mb_cache_shrink(struct mb_cache *cache, kdev_t dev)
++{
++ LIST_HEAD(free_list);
++ struct list_head *l, *ltmp;
++
++ spin_lock(&mb_cache_spinlock);
++ list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
++ struct mb_cache_entry *ce =
++ list_entry(l, struct mb_cache_entry, e_lru_list);
++ if (ce->e_dev == dev) {
++ list_del(&ce->e_lru_list);
++ list_add_tail(&ce->e_lru_list, &free_list);
++ __mb_cache_entry_unhash(ce);
++ }
++ }
++ spin_unlock(&mb_cache_spinlock);
++ list_for_each_safe(l, ltmp, &free_list) {
++ __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
++ e_lru_list), GFP_KERNEL);
++ }
++}
++
++
++/*
++ * mb_cache_destroy()
++ *
++ * Shrinks the cache to its minimum possible size (hopefully 0 entries),
++ * and then destroys it. If this was the last mbcache, un-registers the
++ * mbcache from kernel memory management.
++ */
++void
++mb_cache_destroy(struct mb_cache *cache)
++{
++ LIST_HEAD(free_list);
++ struct list_head *l, *ltmp;
++ int n;
++
++ spin_lock(&mb_cache_spinlock);
++ list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
++ struct mb_cache_entry *ce =
++ list_entry(l, struct mb_cache_entry, e_lru_list);
++ if (ce->e_cache == cache) {
++ list_del(&ce->e_lru_list);
++ list_add_tail(&ce->e_lru_list, &free_list);
++ __mb_cache_entry_unhash(ce);
++ }
++ }
++ list_del(&cache->c_cache_list);
++ spin_unlock(&mb_cache_spinlock);
++ list_for_each_safe(l, ltmp, &free_list) {
++ __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
++ e_lru_list), GFP_KERNEL);
++ }
++
++ if (atomic_read(&cache->c_entry_count) > 0) {
++ mb_error("cache %s: %d orphaned entries",
++ cache->c_name,
++ atomic_read(&cache->c_entry_count));
++ }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0))
++ /* We don't have kmem_cache_destroy() in 2.2.x */
++ kmem_cache_shrink(cache->c_entry_cache);
++#else
++ kmem_cache_destroy(cache->c_entry_cache);
++#endif
++ for (n=0; n < mb_cache_indexes(cache); n++)
++ kfree(cache->c_indexes_hash[n]);
++ kfree(cache->c_block_hash);
++ kfree(cache);
++
++ MOD_DEC_USE_COUNT;
++}
++
++
++/*
++ * mb_cache_entry_alloc()
++ *
++ * Allocates a new cache entry. The new entry will not be valid initially,
++ * and thus cannot be looked up yet. It should be filled with data, and
++ * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
++ * if no more memory was available.
++ */
++struct mb_cache_entry *
++mb_cache_entry_alloc(struct mb_cache *cache)
++{
++ struct mb_cache_entry *ce;
++
++ ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
++ if (ce) {
++ atomic_inc(&cache->c_entry_count);
++ INIT_LIST_HEAD(&ce->e_lru_list);
++ INIT_LIST_HEAD(&ce->e_block_list);
++ ce->e_cache = cache;
++ atomic_set(&ce->e_used, 1);
++ }
++ return ce;
++}
++
++
++/*
++ * mb_cache_entry_insert()
++ *
++ * Inserts an entry that was allocated using mb_cache_entry_alloc() into
++ * the cache. After this, the cache entry can be looked up, but is not yet
++ * in the lru list as the caller still holds a handle to it. Returns 0 on
++ * success, or -EBUSY if a cache entry for that device + inode exists
++ * already (this may happen after a failed lookup, if another process has
++ * inserted the same cache entry in the meantime).
++ *
++ * @dev: device the cache entry belongs to
++ * @block: block number
++ * @keys: array of additional keys. There must be indexes_count entries
++ * in the array (as specified when creating the cache).
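++ *
++ * A typical insertion (as done in ext3_xattr_cache_insert() above) is:
++ *
++ * ce = mb_cache_entry_alloc(cache);
++ * error = mb_cache_entry_insert(ce, dev, block, keys);
++ * if (error)
++ * mb_cache_entry_free(ce);
++ * else
++ * mb_cache_entry_release(ce);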
++ */
++int
++mb_cache_entry_insert(struct mb_cache_entry *ce, kdev_t dev,
++ unsigned long block, unsigned int keys[])
++{
++ struct mb_cache *cache = ce->e_cache;
++ unsigned int bucket = (HASHDEV(dev) + block) % cache->c_bucket_count;
++ struct list_head *l;
++ int error = -EBUSY, n;
++
++ spin_lock(&mb_cache_spinlock);
++ list_for_each(l, &cache->c_block_hash[bucket]) {
++ struct mb_cache_entry *ce =
++ list_entry(l, struct mb_cache_entry, e_block_list);
++ if (ce->e_dev == dev && ce->e_block == block)
++ goto out;
++ }
++ __mb_cache_entry_unhash(ce);
++ ce->e_dev = dev;
++ ce->e_block = block;
++ list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
++ for (n=0; n<mb_cache_indexes(cache); n++) {
++ ce->e_indexes[n].o_key = keys[n];
++ bucket = keys[n] % cache->c_bucket_count;
++ list_add(&ce->e_indexes[n].o_list,
++ &cache->c_indexes_hash[n][bucket]);
++ }
++ error = 0;
++out:
++ spin_unlock(&mb_cache_spinlock);
++ return error;
++}
++
++
++/*
++ * mb_cache_entry_release()
++ *
++ * Release a handle to a cache entry. When the last handle to a cache entry
++ * is released it is either freed (if it is invalid) or otherwise inserted
++ * in to the lru list.
++ */
++void
++mb_cache_entry_release(struct mb_cache_entry *ce)
++{
++ spin_lock(&mb_cache_spinlock);
++ __mb_cache_entry_release_unlock(ce);
++}
++
++
++/*
++ * mb_cache_entry_takeout()
++ *
++ * Take a cache entry out of the cache, making it invalid. The entry can later
++ * be re-inserted using mb_cache_entry_insert(), or released using
++ * mb_cache_entry_release().
++ */
++void
++mb_cache_entry_takeout(struct mb_cache_entry *ce)
++{
++ spin_lock(&mb_cache_spinlock);
++ mb_assert(list_empty(&ce->e_lru_list));
++ __mb_cache_entry_unhash(ce);
++ spin_unlock(&mb_cache_spinlock);
++}
++
++
++/*
++ * mb_cache_entry_free()
++ *
++ * This is equivalent to the sequence mb_cache_entry_takeout() --
++ * mb_cache_entry_release().
++ */
++void
++mb_cache_entry_free(struct mb_cache_entry *ce)
++{
++ spin_lock(&mb_cache_spinlock);
++ mb_assert(list_empty(&ce->e_lru_list));
++ __mb_cache_entry_unhash(ce);
++ __mb_cache_entry_release_unlock(ce);
++}
++
++
++/*
++ * mb_cache_entry_dup()
++ *
++ * Duplicate a handle to a cache entry (does not duplicate the cache entry
++ * itself). After the call, both the old and the new handle must be released.
++ */
++struct mb_cache_entry *
++mb_cache_entry_dup(struct mb_cache_entry *ce)
++{
++ atomic_inc(&ce->e_used);
++ return ce;
++}
++
++
++/*
++ * mb_cache_entry_get()
++ *
++ * Get a cache entry by device / block number. (There can only be one entry
++ * in the cache per device and block.) Returns NULL if no such cache entry
++ * exists.
++ */
++struct mb_cache_entry *
++mb_cache_entry_get(struct mb_cache *cache, kdev_t dev, unsigned long block)
++{
++ unsigned int bucket = (HASHDEV(dev) + block) % cache->c_bucket_count;
++ struct list_head *l;
++ struct mb_cache_entry *ce;
++
++ spin_lock(&mb_cache_spinlock);
++ list_for_each(l, &cache->c_block_hash[bucket]) {
++ ce = list_entry(l, struct mb_cache_entry, e_block_list);
++ if (ce->e_dev == dev && ce->e_block == block) {
++ if (!list_empty(&ce->e_lru_list))
++ list_del_init(&ce->e_lru_list);
++ atomic_inc(&ce->e_used);
++ goto cleanup;
++ }
++ }
++ ce = NULL;
++
++cleanup:
++ spin_unlock(&mb_cache_spinlock);
++ return ce;
++}
++
++#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
++
++static struct mb_cache_entry *
++__mb_cache_entry_find(struct list_head *l, struct list_head *head,
++ int index, kdev_t dev, unsigned int key)
++{
++ while (l != head) {
++ struct mb_cache_entry *ce =
++ list_entry(l, struct mb_cache_entry,
++ e_indexes[index].o_list);
++ if (ce->e_dev == dev && ce->e_indexes[index].o_key == key) {
++ if (!list_empty(&ce->e_lru_list))
++ list_del_init(&ce->e_lru_list);
++ atomic_inc(&ce->e_used);
++ return ce;
++ }
++ l = l->next;
++ }
++ return NULL;
++}
++
++
++/*
++ * mb_cache_entry_find_first()
++ *
++ * Find the first cache entry on a given device with a certain key in
++ * an additional index. Additional matches can be found with
++ * mb_cache_entry_find_next(). Returns NULL if no match was found.
++ *
++ * @cache: the cache to search
++ * @index: the number of the additional index to search (0<=index<indexes_count)
++ * @dev: the device the cache entry should belong to
++ * @key: the key in the index
++ */
++struct mb_cache_entry *
++mb_cache_entry_find_first(struct mb_cache *cache, int index, kdev_t dev,
++ unsigned int key)
++{
++ unsigned int bucket = key % cache->c_bucket_count;
++ struct list_head *l;
++ struct mb_cache_entry *ce;
++
++ mb_assert(index < mb_cache_indexes(cache));
++ spin_lock(&mb_cache_spinlock);
++ l = cache->c_indexes_hash[index][bucket].next;
++ ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
++ index, dev, key);
++ spin_unlock(&mb_cache_spinlock);
++ return ce;
++}
++
++
++/*
++ * mb_cache_entry_find_next()
++ *
++ * Find the next cache entry on a given device with a certain key in an
++ * additional index. Returns NULL if no match could be found. The previous
++ * entry is automatically released, so that mb_cache_entry_find_next() can
++ * be called like this:
++ *
++ * entry = mb_cache_entry_find_first();
++ * while (entry) {
++ * ...
++ * entry = mb_cache_entry_find_next(entry, ...);
++ * }
++ *
++ * @prev: The previous match
++ * @index: the number of the additional index to search (0<=index<indexes_count)
++ * @dev: the device the cache entry should belong to
++ * @key: the key in the index
++ */
++struct mb_cache_entry *
++mb_cache_entry_find_next(struct mb_cache_entry *prev, int index, kdev_t dev,
++ unsigned int key)
++{
++ struct mb_cache *cache = prev->e_cache;
++ unsigned int bucket = key % cache->c_bucket_count;
++ struct list_head *l;
++ struct mb_cache_entry *ce;
++
++ mb_assert(index < mb_cache_indexes(cache));
++ spin_lock(&mb_cache_spinlock);
++ l = prev->e_indexes[index].o_list.next;
++ ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
++ index, dev, key);
++ __mb_cache_entry_release_unlock(prev);
++ return ce;
++}
++
++#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */
++
++static int __init init_mbcache(void)
++{
++ register_cache(&mb_cache_definition);
++ return 0;
++}
++
++static void __exit exit_mbcache(void)
++{
++ unregister_cache(&mb_cache_definition);
++}
++
++module_init(init_mbcache)
++module_exit(exit_mbcache)
++
+--- linux-2.4.20/include/asm-arm/unistd.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/asm-arm/unistd.h Sat Apr 5 03:57:18 2003
+@@ -244,7 +244,6 @@
+ #define __NR_security (__NR_SYSCALL_BASE+223)
+ #define __NR_gettid (__NR_SYSCALL_BASE+224)
+ #define __NR_readahead (__NR_SYSCALL_BASE+225)
+-#if 0 /* allocated in 2.5 */
+ #define __NR_setxattr (__NR_SYSCALL_BASE+226)
+ #define __NR_lsetxattr (__NR_SYSCALL_BASE+227)
+ #define __NR_fsetxattr (__NR_SYSCALL_BASE+228)
+@@ -257,7 +256,6 @@
+ #define __NR_removexattr (__NR_SYSCALL_BASE+235)
+ #define __NR_lremovexattr (__NR_SYSCALL_BASE+236)
+ #define __NR_fremovexattr (__NR_SYSCALL_BASE+237)
+-#endif
+ #define __NR_tkill (__NR_SYSCALL_BASE+238)
+ /*
+ * Please check 2.5 _before_ adding calls here,
+--- linux-2.4.20/include/asm-ia64/unistd.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/asm-ia64/unistd.h Sat Apr 5 03:57:18 2003
+@@ -206,8 +206,19 @@
+ #define __NR_getdents64 1214
+ #define __NR_getunwind 1215
+ #define __NR_readahead 1216
++#define __NR_setxattr 1217
++#define __NR_lsetxattr 1218
++#define __NR_fsetxattr 1219
++#define __NR_getxattr 1220
++#define __NR_lgetxattr 1221
++#define __NR_fgetxattr 1222
++#define __NR_listxattr 1223
++#define __NR_llistxattr 1224
++#define __NR_flistxattr 1225
++#define __NR_removexattr 1226
++#define __NR_lremovexattr 1227
++#define __NR_fremovexattr 1228
+ /*
+- * 1217-1228: reserved for xattr
+ * 1230-1232: reserved for futex and sched_[sg]etaffinity.
+ */
+ #define __NR_tkill 1229
+--- linux-2.4.20/include/asm-ppc64/unistd.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/asm-ppc64/unistd.h Sat Apr 5 03:57:18 2003
+@@ -218,6 +218,7 @@
+ #define __NR_gettid 207
+ #if 0 /* Reserved syscalls */
+ #define __NR_tkill 208
++#endif
+ #define __NR_setxattr 209
+ #define __NR_lsetxattr 210
+ #define __NR_fsetxattr 211
+@@ -230,6 +231,7 @@
+ #define __NR_removexattr 218
+ #define __NR_lremovexattr 219
+ #define __NR_fremovexattr 220
++#if 0 /* Reserved syscalls */
+ #define __NR_futex 221
+ #endif
+
+--- linux-2.4.20/include/asm-s390/unistd.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/asm-s390/unistd.h Sat Apr 5 03:57:18 2003
+@@ -212,9 +212,18 @@
+ #define __NR_madvise 219
+ #define __NR_getdents64 220
+ #define __NR_fcntl64 221
+-/*
+- * Numbers 224-235 are reserved for posix acl
+- */
++#define __NR_setxattr 224
++#define __NR_lsetxattr 225
++#define __NR_fsetxattr 226
++#define __NR_getxattr 227
++#define __NR_lgetxattr 228
++#define __NR_fgetxattr 229
++#define __NR_listxattr 230
++#define __NR_llistxattr 231
++#define __NR_flistxattr 232
++#define __NR_removexattr 233
++#define __NR_lremovexattr 234
++#define __NR_fremovexattr 235
+ #define __NR_gettid 236
+ #define __NR_tkill 237
+
+--- linux-2.4.20/include/asm-s390x/unistd.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/asm-s390x/unistd.h Sat Apr 5 03:57:18 2003
+@@ -180,9 +180,18 @@
+ #define __NR_pivot_root 217
+ #define __NR_mincore 218
+ #define __NR_madvise 219
+-/*
+- * Numbers 224-235 are reserved for posix acl
+- */
++#define __NR_setxattr 224
++#define __NR_lsetxattr 225
++#define __NR_fsetxattr 226
++#define __NR_getxattr 227
++#define __NR_lgetxattr 228
++#define __NR_fgetxattr 229
++#define __NR_listxattr 230
++#define __NR_llistxattr 231
++#define __NR_flistxattr 232
++#define __NR_removexattr 233
++#define __NR_lremovexattr 234
++#define __NR_fremovexattr 235
+ #define __NR_gettid 236
+ #define __NR_tkill 237
+
+--- linux-2.4.20/include/asm-sparc/unistd.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/asm-sparc/unistd.h Sat Apr 5 03:57:18 2003
+@@ -1,4 +1,4 @@
+-/* $Id: linux-2.4.20-xattr-0.8.54.patch,v 1.1.2.1 2003/04/05 11:14:59 braam Exp $ */
++/* $Id: linux-2.4.20-xattr-0.8.54.patch,v 1.1.2.1 2003/04/05 11:14:59 braam Exp $ */
+ #ifndef _SPARC_UNISTD_H
+ #define _SPARC_UNISTD_H
+
+@@ -184,24 +184,24 @@
+ /* #define __NR_exportfs 166 SunOS Specific */
+ #define __NR_mount 167 /* Common */
+ #define __NR_ustat 168 /* Common */
+-/* #define __NR_semsys 169 SunOS Specific */
+-/* #define __NR_msgsys 170 SunOS Specific */
+-/* #define __NR_shmsys 171 SunOS Specific */
+-/* #define __NR_auditsys 172 SunOS Specific */
+-/* #define __NR_rfssys 173 SunOS Specific */
++#define __NR_setxattr 169 /* SunOS: semsys */
++#define __NR_lsetxattr 170 /* SunOS: msgsys */
++#define __NR_fsetxattr 171 /* SunOS: shmsys */
++#define __NR_getxattr 172 /* SunOS: auditsys */
++#define __NR_lgetxattr 173 /* SunOS: rfssys */
+ #define __NR_getdents 174 /* Common */
+ #define __NR_setsid 175 /* Common */
+ #define __NR_fchdir 176 /* Common */
+-/* #define __NR_fchroot 177 SunOS Specific */
+-/* #define __NR_vpixsys 178 SunOS Specific */
+-/* #define __NR_aioread 179 SunOS Specific */
+-/* #define __NR_aiowrite 180 SunOS Specific */
+-/* #define __NR_aiowait 181 SunOS Specific */
+-/* #define __NR_aiocancel 182 SunOS Specific */
++#define __NR_fgetxattr 177 /* SunOS: fchroot */
++#define __NR_listxattr 178 /* SunOS: vpixsys */
++#define __NR_llistxattr 179 /* SunOS: aioread */
++#define __NR_flistxattr 180 /* SunOS: aiowrite */
++#define __NR_removexattr 181 /* SunOS: aiowait */
++#define __NR_lremovexattr 182 /* SunOS: aiocancel */
+ #define __NR_sigpending 183 /* Common */
+ #define __NR_query_module 184 /* Linux Specific */
+ #define __NR_setpgid 185 /* Common */
+-/* #define __NR_pathconf 186 SunOS Specific */
++#define __NR_fremovexattr 186 /* SunOS: pathconf */
+ #define __NR_tkill 187 /* SunOS: fpathconf */
+ /* #define __NR_sysconf 188 SunOS Specific */
+ #define __NR_uname 189 /* Linux Specific */
+--- linux-2.4.20/include/asm-sparc64/unistd.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/asm-sparc64/unistd.h Sat Apr 5 03:57:18 2003
+@@ -184,24 +184,24 @@
+ /* #define __NR_exportfs 166 SunOS Specific */
+ #define __NR_mount 167 /* Common */
+ #define __NR_ustat 168 /* Common */
+-/* #define __NR_semsys 169 SunOS Specific */
+-/* #define __NR_msgsys 170 SunOS Specific */
+-/* #define __NR_shmsys 171 SunOS Specific */
+-/* #define __NR_auditsys 172 SunOS Specific */
+-/* #define __NR_rfssys 173 SunOS Specific */
++#define __NR_setxattr 169 /* SunOS: semsys */
++#define __NR_lsetxattr 170 /* SunOS: msgsys */
++#define __NR_fsetxattr 171 /* SunOS: shmsys */
++#define __NR_getxattr 172 /* SunOS: auditsys */
++#define __NR_lgetxattr 173 /* SunOS: rfssys */
+ #define __NR_getdents 174 /* Common */
+ #define __NR_setsid 175 /* Common */
+ #define __NR_fchdir 176 /* Common */
+-/* #define __NR_fchroot 177 SunOS Specific */
+-/* #define __NR_vpixsys 178 SunOS Specific */
+-/* #define __NR_aioread 179 SunOS Specific */
+-/* #define __NR_aiowrite 180 SunOS Specific */
+-/* #define __NR_aiowait 181 SunOS Specific */
+-/* #define __NR_aiocancel 182 SunOS Specific */
++#define __NR_fgetxattr 177 /* SunOS: fchroot */
++#define __NR_listxattr 178 /* SunOS: vpixsys */
++#define __NR_llistxattr 179 /* SunOS: aioread */
++#define __NR_flistxattr 180 /* SunOS: aiowrite */
++#define __NR_removexattr 181 /* SunOS: aiowait */
++#define __NR_lremovexattr 182 /* SunOS: aiocancel */
+ #define __NR_sigpending 183 /* Common */
+ #define __NR_query_module 184 /* Linux Specific */
+ #define __NR_setpgid 185 /* Common */
+-/* #define __NR_pathconf 186 SunOS Specific */
++#define __NR_fremovexattr 186 /* SunOS: pathconf */
+ #define __NR_tkill 187 /* SunOS: fpathconf */
+ /* #define __NR_sysconf 188 SunOS Specific */
+ #define __NR_uname 189 /* Linux Specific */
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.20-braam/include/linux/cache_def.h Sat Apr 5 03:57:18 2003
+@@ -0,0 +1,15 @@
++/*
++ * linux/cache_def.h
++ * Handling of caches defined in drivers, filesystems, ...
++ *
++ * Copyright (C) 2002 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
++ */
++
++struct cache_definition {
++ const char *name;
++ void (*shrink)(int, unsigned int);
++ struct list_head link;
++};
++
++extern void register_cache(struct cache_definition *);
++extern void unregister_cache(struct cache_definition *);
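++
++/* fs/mbcache.c (above) registers its cache_definition with this interface. */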
+--- linux-2.4.20/include/linux/errno.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/linux/errno.h Sat Apr 5 03:57:18 2003
+@@ -23,4 +23,8 @@
+
+ #endif
+
++/* Defined for extended attributes */
++#define ENOATTR ENODATA /* No such attribute */
++#define ENOTSUP EOPNOTSUPP /* Operation not supported */
++
+ #endif
+--- linux-2.4.20/include/linux/ext2_fs.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/linux/ext2_fs.h Sat Apr 5 03:57:18 2003
+@@ -57,8 +57,6 @@
+ */
+ #define EXT2_BAD_INO 1 /* Bad blocks inode */
+ #define EXT2_ROOT_INO 2 /* Root inode */
+-#define EXT2_ACL_IDX_INO 3 /* ACL inode */
+-#define EXT2_ACL_DATA_INO 4 /* ACL inode */
+ #define EXT2_BOOT_LOADER_INO 5 /* Boot loader inode */
+ #define EXT2_UNDEL_DIR_INO 6 /* Undelete directory inode */
+
+@@ -86,7 +84,6 @@
+ #else
+ # define EXT2_BLOCK_SIZE(s) (EXT2_MIN_BLOCK_SIZE << (s)->s_log_block_size)
+ #endif
+-#define EXT2_ACLE_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (struct ext2_acl_entry))
+ #define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
+ #ifdef __KERNEL__
+ # define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
+@@ -121,28 +118,6 @@
+ #endif
+
+ /*
+- * ACL structures
+- */
+-struct ext2_acl_header /* Header of Access Control Lists */
+-{
+- __u32 aclh_size;
+- __u32 aclh_file_count;
+- __u32 aclh_acle_count;
+- __u32 aclh_first_acle;
+-};
+-
+-struct ext2_acl_entry /* Access Control List Entry */
+-{
+- __u32 acle_size;
+- __u16 acle_perms; /* Access permissions */
+- __u16 acle_type; /* Type of entry */
+- __u16 acle_tag; /* User or group identity */
+- __u16 acle_pad1;
+- __u32 acle_next; /* Pointer on next entry for the */
+- /* same inode or on next free entry */
+-};
+-
+-/*
+ * Structure of a blocks group descriptor
+ */
+ struct ext2_group_desc
+@@ -314,6 +289,7 @@ struct ext2_inode {
+ #define EXT2_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */
+ #define EXT2_MOUNT_MINIX_DF 0x0080 /* Mimics the Minix statfs */
+ #define EXT2_MOUNT_NO_UID32 0x0200 /* Disable 32-bit UIDs */
++#define EXT2_MOUNT_XATTR_USER 0x4000 /* Extended user attributes */
+
+ #define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
+ #define set_opt(o, opt) o |= EXT2_MOUNT_##opt
+@@ -397,6 +373,7 @@ struct ext2_super_block {
+
+ #ifdef __KERNEL__
+ #define EXT2_SB(sb) (&((sb)->u.ext2_sb))
++#define EXT2_I(inode) (&((inode)->u.ext2_i))
+ #else
+ /* Assume that user mode programs are passing in an ext2fs superblock, not
+ * a kernel struct super_block. This will allow us to call the feature-test
+@@ -466,7 +443,7 @@ struct ext2_super_block {
+ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008
+ #define EXT2_FEATURE_INCOMPAT_ANY 0xffffffff
+
+-#define EXT2_FEATURE_COMPAT_SUPP 0
++#define EXT2_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
+ #define EXT2_FEATURE_INCOMPAT_SUPP EXT2_FEATURE_INCOMPAT_FILETYPE
+ #define EXT2_FEATURE_RO_COMPAT_SUPP (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+ EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
+@@ -623,8 +600,10 @@ extern struct address_space_operations e
+
+ /* namei.c */
+ extern struct inode_operations ext2_dir_inode_operations;
++extern struct inode_operations ext2_special_inode_operations;
+
+ /* symlink.c */
++extern struct inode_operations ext2_symlink_inode_operations;
+ extern struct inode_operations ext2_fast_symlink_inode_operations;
+
+ #endif /* __KERNEL__ */
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.20-braam/include/linux/ext2_xattr.h Sat Apr 5 03:57:18 2003
+@@ -0,0 +1,157 @@
++/*
++ File: linux/ext2_xattr.h
++
++ On-disk format of extended attributes for the ext2 filesystem.
++
++ (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
++*/
++
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/xattr.h>
++
++/* Magic value in attribute blocks */
++#define EXT2_XATTR_MAGIC 0xEA020000
++
++/* Maximum number of references to one attribute block */
++#define EXT2_XATTR_REFCOUNT_MAX 1024
++
++/* Name indexes */
++#define EXT2_XATTR_INDEX_MAX 10
++#define EXT2_XATTR_INDEX_USER 1
++#define EXT2_XATTR_INDEX_POSIX_ACL_ACCESS 2
++#define EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT 3
++
++struct ext2_xattr_header {
++ __u32 h_magic; /* magic number for identification */
++ __u32 h_refcount; /* reference count */
++ __u32 h_blocks; /* number of disk blocks used */
++ __u32 h_hash; /* hash value of all attributes */
++ __u32 h_reserved[4]; /* zero right now */
++};
++
++struct ext2_xattr_entry {
++ __u8 e_name_len; /* length of name */
++ __u8 e_name_index; /* attribute name index */
++ __u16 e_value_offs; /* offset in disk block of value */
++ __u32 e_value_block; /* disk block attribute is stored on (n/i) */
++ __u32 e_value_size; /* size of attribute value */
++ __u32 e_hash; /* hash value of name and value */
++ char e_name[0]; /* attribute name */
++};
++
++#define EXT2_XATTR_PAD_BITS 2
++#define EXT2_XATTR_PAD (1<<EXT2_XATTR_PAD_BITS)
++#define EXT2_XATTR_ROUND (EXT2_XATTR_PAD-1)
++#define EXT2_XATTR_LEN(name_len) \
++ (((name_len) + EXT2_XATTR_ROUND + \
++ sizeof(struct ext2_xattr_entry)) & ~EXT2_XATTR_ROUND)
++#define EXT2_XATTR_NEXT(entry) \
++ ( (struct ext2_xattr_entry *)( \
++ (char *)(entry) + EXT2_XATTR_LEN((entry)->e_name_len)) )
++#define EXT2_XATTR_SIZE(size) \
++ (((size) + EXT2_XATTR_ROUND) & ~EXT2_XATTR_ROUND)
++
++#ifdef __KERNEL__
++
++# ifdef CONFIG_EXT2_FS_XATTR
++
++struct ext2_xattr_handler {
++ char *prefix;
++ size_t (*list)(char *list, struct inode *inode, const char *name,
++ int name_len);
++ int (*get)(struct inode *inode, const char *name, void *buffer,
++ size_t size);
++ int (*set)(struct inode *inode, const char *name, const void *buffer,
++ size_t size, int flags);
++};
++
++extern int ext2_xattr_register(int, struct ext2_xattr_handler *);
++extern void ext2_xattr_unregister(int, struct ext2_xattr_handler *);
++
++extern int ext2_setxattr(struct dentry *, const char *, const void *, size_t, int);
++extern ssize_t ext2_getxattr(struct dentry *, const char *, void *, size_t);
++extern ssize_t ext2_listxattr(struct dentry *, char *, size_t);
++extern int ext2_removexattr(struct dentry *, const char *);
++
++extern int ext2_xattr_get(struct inode *, int, const char *, void *, size_t);
++extern int ext2_xattr_list(struct inode *, char *, size_t);
++extern int ext2_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
++
++extern void ext2_xattr_delete_inode(struct inode *);
++extern void ext2_xattr_put_super(struct super_block *);
++
++extern int init_ext2_xattr(void) __init;
++extern void exit_ext2_xattr(void);
++
++# else /* CONFIG_EXT2_FS_XATTR */
++# define ext2_setxattr NULL
++# define ext2_getxattr NULL
++# define ext2_listxattr NULL
++# define ext2_removexattr NULL
++
++static inline int
++ext2_xattr_get(struct inode *inode, int name_index,
++ const char *name, void *buffer, size_t size)
++{
++ return -ENOTSUP;
++}
++
++static inline int
++ext2_xattr_list(struct inode *inode, char *buffer, size_t size)
++{
++ return -ENOTSUP;
++}
++
++static inline int
++ext2_xattr_set(struct inode *inode, int name_index, const char *name,
++ const void *value, size_t size, int flags)
++{
++ return -ENOTSUP;
++}
++
++static inline void
++ext2_xattr_delete_inode(struct inode *inode)
++{
++}
++
++static inline void
++ext2_xattr_put_super(struct super_block *sb)
++{
++}
++
++static inline int
++init_ext2_xattr(void)
++{
++ return 0;
++}
++
++static inline void
++exit_ext2_xattr(void)
++{
++}
++
++# endif /* CONFIG_EXT2_FS_XATTR */
++
++# ifdef CONFIG_EXT2_FS_XATTR_USER
++
++extern int init_ext2_xattr_user(void) __init;
++extern void exit_ext2_xattr_user(void);
++
++# else /* CONFIG_EXT2_FS_XATTR_USER */
++
++static inline int
++init_ext2_xattr_user(void)
++{
++ return 0;
++}
++
++static inline void
++exit_ext2_xattr_user(void)
++{
++}
++
++# endif /* CONFIG_EXT2_FS_XATTR_USER */
++
++#endif /* __KERNEL__ */
++
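To make the layout macros concrete: an attribute block starts with struct ext2_xattr_header, followed immediately by a table of ext2_xattr_entry records, each padded to a 4-byte boundary by EXT2_XATTR_LEN(); the table ends at the first four zero bytes, and the attribute values live at e_value_offs from the start of the block. A minimal walk of that table might look like the fragment below (sketch only; first_entry() and is_last_entry() are illustrative helpers, not names from the patch):

static inline struct ext2_xattr_entry *
first_entry(struct ext2_xattr_header *hdr)
{
	/* the entry table starts right after the header */
	return (struct ext2_xattr_entry *)(hdr + 1);
}

static inline int is_last_entry(struct ext2_xattr_entry *entry)
{
	/* the table is terminated by four zero bytes */
	return *(__u32 *)entry == 0;
}

static void dump_xattr_block(struct ext2_xattr_header *hdr)
{
	struct ext2_xattr_entry *entry;

	if (hdr->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC))
		return;
	for (entry = first_entry(hdr); !is_last_entry(entry);
	     entry = EXT2_XATTR_NEXT(entry))
		printk(KERN_DEBUG "xattr: index %d, name %.*s, %d byte value\n",
		       entry->e_name_index, entry->e_name_len, entry->e_name,
		       (int)le32_to_cpu(entry->e_value_size));
}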
+--- linux-2.4.20/include/linux/ext3_fs.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/linux/ext3_fs.h Sat Apr 5 03:57:18 2003
+@@ -63,8 +63,6 @@
+ */
+ #define EXT3_BAD_INO 1 /* Bad blocks inode */
+ #define EXT3_ROOT_INO 2 /* Root inode */
+-#define EXT3_ACL_IDX_INO 3 /* ACL inode */
+-#define EXT3_ACL_DATA_INO 4 /* ACL inode */
+ #define EXT3_BOOT_LOADER_INO 5 /* Boot loader inode */
+ #define EXT3_UNDEL_DIR_INO 6 /* Undelete directory inode */
+ #define EXT3_RESIZE_INO 7 /* Reserved group descriptors inode */
+@@ -94,7 +92,6 @@
+ #else
+ # define EXT3_BLOCK_SIZE(s) (EXT3_MIN_BLOCK_SIZE << (s)->s_log_block_size)
+ #endif
+-#define EXT3_ACLE_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_acl_entry))
+ #define EXT3_ADDR_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (__u32))
+ #ifdef __KERNEL__
+ # define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
+@@ -129,28 +126,6 @@
+ #endif
+
+ /*
+- * ACL structures
+- */
+-struct ext3_acl_header /* Header of Access Control Lists */
+-{
+- __u32 aclh_size;
+- __u32 aclh_file_count;
+- __u32 aclh_acle_count;
+- __u32 aclh_first_acle;
+-};
+-
+-struct ext3_acl_entry /* Access Control List Entry */
+-{
+- __u32 acle_size;
+- __u16 acle_perms; /* Access permissions */
+- __u16 acle_type; /* Type of entry */
+- __u16 acle_tag; /* User or group identity */
+- __u16 acle_pad1;
+- __u32 acle_next; /* Pointer on next entry for the */
+- /* same inode or on next free entry */
+-};
+-
+-/*
+ * Structure of a blocks group descriptor
+ */
+ struct ext3_group_desc
+@@ -344,6 +319,7 @@ struct ext3_inode {
+ #define EXT3_MOUNT_WRITEBACK_DATA 0x0C00 /* No data ordering */
+ #define EXT3_MOUNT_UPDATE_JOURNAL 0x1000 /* Update the journal format */
+ #define EXT3_MOUNT_NO_UID32 0x2000 /* Disable 32-bit UIDs */
++#define EXT3_MOUNT_XATTR_USER 0x4000 /* Extended user attributes */
+
+ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
+ #ifndef _LINUX_EXT2_FS_H
+@@ -520,7 +496,7 @@ struct ext3_super_block {
+ #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
+ #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
+
+-#define EXT3_FEATURE_COMPAT_SUPP 0
++#define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
+ #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
+ EXT3_FEATURE_INCOMPAT_RECOVER)
+ #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+@@ -703,6 +679,7 @@ extern void ext3_check_inodes_bitmap (st
+ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
+
+ /* inode.c */
++extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
+ extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
+ extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+
+@@ -771,8 +748,10 @@ extern struct address_space_operations e
+
+ /* namei.c */
+ extern struct inode_operations ext3_dir_inode_operations;
++extern struct inode_operations ext3_special_inode_operations;
+
+ /* symlink.c */
++extern struct inode_operations ext3_symlink_inode_operations;
+ extern struct inode_operations ext3_fast_symlink_inode_operations;
+
+
+--- linux-2.4.20/include/linux/ext3_jbd.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/linux/ext3_jbd.h Sat Apr 5 03:57:18 2003
+@@ -30,13 +30,19 @@
+
+ #define EXT3_SINGLEDATA_TRANS_BLOCKS 8
+
++/* Extended attributes may touch two data buffers, two bitmap buffers,
++ * and two group and superblock summaries. */
++
++#define EXT3_XATTR_TRANS_BLOCKS 8
++
+ /* Define the minimum size for a transaction which modifies data. This
+ * needs to take into account the fact that we may end up modifying two
+ * quota files too (one for the group, one for the user quota). The
+ * superblock only gets updated once, of course, so don't bother
+ * counting that again for the quota updates. */
+
+-#define EXT3_DATA_TRANS_BLOCKS (3 * EXT3_SINGLEDATA_TRANS_BLOCKS - 2)
++#define EXT3_DATA_TRANS_BLOCKS (3 * EXT3_SINGLEDATA_TRANS_BLOCKS + \
++ EXT3_XATTR_TRANS_BLOCKS - 2)
+
+ extern int ext3_writepage_trans_blocks(struct inode *inode);
+
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.20-braam/include/linux/ext3_xattr.h Sat Apr 5 03:57:18 2003
+@@ -0,0 +1,157 @@
++/*
++ File: linux/ext3_xattr.h
++
++ On-disk format of extended attributes for the ext3 filesystem.
++
++ (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
++*/
++
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/xattr.h>
++
++/* Magic value in attribute blocks */
++#define EXT3_XATTR_MAGIC 0xEA020000
++
++/* Maximum number of references to one attribute block */
++#define EXT3_XATTR_REFCOUNT_MAX 1024
++
++/* Name indexes */
++#define EXT3_XATTR_INDEX_MAX 10
++#define EXT3_XATTR_INDEX_USER 1
++#define EXT3_XATTR_INDEX_POSIX_ACL_ACCESS 2
++#define EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT 3
++
++struct ext3_xattr_header {
++ __u32 h_magic; /* magic number for identification */
++ __u32 h_refcount; /* reference count */
++ __u32 h_blocks; /* number of disk blocks used */
++ __u32 h_hash; /* hash value of all attributes */
++ __u32 h_reserved[4]; /* zero right now */
++};
++
++struct ext3_xattr_entry {
++ __u8 e_name_len; /* length of name */
++ __u8 e_name_index; /* attribute name index */
++ __u16 e_value_offs; /* offset in disk block of value */
++ __u32 e_value_block; /* disk block attribute is stored on (n/i) */
++ __u32 e_value_size; /* size of attribute value */
++ __u32 e_hash; /* hash value of name and value */
++ char e_name[0]; /* attribute name */
++};
++
++#define EXT3_XATTR_PAD_BITS 2
++#define EXT3_XATTR_PAD (1<<EXT3_XATTR_PAD_BITS)
++#define EXT3_XATTR_ROUND (EXT3_XATTR_PAD-1)
++#define EXT3_XATTR_LEN(name_len) \
++ (((name_len) + EXT3_XATTR_ROUND + \
++ sizeof(struct ext3_xattr_entry)) & ~EXT3_XATTR_ROUND)
++#define EXT3_XATTR_NEXT(entry) \
++ ( (struct ext3_xattr_entry *)( \
++ (char *)(entry) + EXT3_XATTR_LEN((entry)->e_name_len)) )
++#define EXT3_XATTR_SIZE(size) \
++ (((size) + EXT3_XATTR_ROUND) & ~EXT3_XATTR_ROUND)
++
++#ifdef __KERNEL__
++
++# ifdef CONFIG_EXT3_FS_XATTR
++
++struct ext3_xattr_handler {
++ char *prefix;
++ size_t (*list)(char *list, struct inode *inode, const char *name,
++ int name_len);
++ int (*get)(struct inode *inode, const char *name, void *buffer,
++ size_t size);
++ int (*set)(struct inode *inode, const char *name, const void *buffer,
++ size_t size, int flags);
++};
++
++extern int ext3_xattr_register(int, struct ext3_xattr_handler *);
++extern void ext3_xattr_unregister(int, struct ext3_xattr_handler *);
++
++extern int ext3_setxattr(struct dentry *, const char *, const void *, size_t, int);
++extern ssize_t ext3_getxattr(struct dentry *, const char *, void *, size_t);
++extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
++extern int ext3_removexattr(struct dentry *, const char *);
++
++extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
++extern int ext3_xattr_list(struct inode *, char *, size_t);
++extern int ext3_xattr_set(handle_t *handle, struct inode *, int, const char *, const void *, size_t, int);
++
++extern void ext3_xattr_delete_inode(handle_t *, struct inode *);
++extern void ext3_xattr_put_super(struct super_block *);
++
++extern int init_ext3_xattr(void) __init;
++extern void exit_ext3_xattr(void);
++
++# else /* CONFIG_EXT3_FS_XATTR */
++# define ext3_setxattr NULL
++# define ext3_getxattr NULL
++# define ext3_listxattr NULL
++# define ext3_removexattr NULL
++
++static inline int
++ext3_xattr_get(struct inode *inode, int name_index, const char *name,
++ void *buffer, size_t size)
++{
++ return -ENOTSUP;
++}
++
++static inline int
++ext3_xattr_list(struct inode *inode, void *buffer, size_t size)
++{
++ return -ENOTSUP;
++}
++
++static inline int
++ext3_xattr_set(handle_t *handle, struct inode *inode, int name_index,
++ const char *name, const void *value, size_t size, int flags)
++{
++ return -ENOTSUP;
++}
++
++static inline void
++ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
++{
++}
++
++static inline void
++ext3_xattr_put_super(struct super_block *sb)
++{
++}
++
++static inline int
++init_ext3_xattr(void)
++{
++ return 0;
++}
++
++static inline void
++exit_ext3_xattr(void)
++{
++}
++
++# endif /* CONFIG_EXT3_FS_XATTR */
++
++# ifdef CONFIG_EXT3_FS_XATTR_USER
++
++extern int init_ext3_xattr_user(void) __init;
++extern void exit_ext3_xattr_user(void);
++
++# else /* CONFIG_EXT3_FS_XATTR_USER */
++
++static inline int
++init_ext3_xattr_user(void)
++{
++ return 0;
++}
++
++static inline void
++exit_ext3_xattr_user(void)
++{
++}
++
++#endif /* CONFIG_EXT3_FS_XATTR_USER */
++
++#endif /* __KERNEL__ */
++
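The ext3_xattr_handler table is how the per-namespace code (fs/ext3/xattr_user.c in this patch) plugs into the generic layer. A rough sketch of the shape of such a handler, with the journal credits taken from EXT3_XATTR_TRANS_BLOCKS; the demo_user_* names are hypothetical and all permission checking is omitted:

static int
demo_user_get(struct inode *inode, const char *name, void *buffer, size_t size)
{
	return ext3_xattr_get(inode, EXT3_XATTR_INDEX_USER, name, buffer, size);
}

static int
demo_user_set(struct inode *inode, const char *name, const void *value,
	      size_t size, int flags)
{
	handle_t *handle;
	int error;

	/* attribute updates are journalled, so reserve credits up front */
	handle = ext3_journal_start(inode, EXT3_XATTR_TRANS_BLOCKS);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	error = ext3_xattr_set(handle, inode, EXT3_XATTR_INDEX_USER,
			       name, value, size, flags);
	ext3_journal_stop(handle, inode);
	return error;
}

static struct ext3_xattr_handler demo_user_handler = {
	prefix:	"user.",
	get:	demo_user_get,
	set:	demo_user_set,
	/* the list callback is omitted here for brevity */
};

/* registered once at initialisation time:
 *	ext3_xattr_register(EXT3_XATTR_INDEX_USER, &demo_user_handler);
 */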
+--- linux-2.4.20/include/linux/fs.h~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/include/linux/fs.h Sat Apr 5 03:57:18 2003
+@@ -888,7 +888,7 @@ struct inode_operations {
+ int (*setattr) (struct dentry *, struct iattr *);
+ int (*setattr_raw) (struct inode *, struct iattr *);
+ int (*getattr) (struct dentry *, struct iattr *);
+- int (*setxattr) (struct dentry *, const char *, void *, size_t, int);
++ int (*setxattr) (struct dentry *, const char *, const void *, size_t, int);
+ ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+ ssize_t (*listxattr) (struct dentry *, char *, size_t);
+ int (*removexattr) (struct dentry *, const char *);
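The prototype change above (the setxattr value becomes const void *) matches the ext2/ext3 method declarations earlier in this patch. For orientation, the four methods get wired into an inode_operations table in the usual 2.4 initializer style, roughly like the sketch below (the table name is made up):

struct inode_operations demo_inode_operations = {
	setxattr:	ext3_setxattr,
	getxattr:	ext3_getxattr,
	listxattr:	ext3_listxattr,
	removexattr:	ext3_removexattr,
	/* ...plus the filesystem's usual lookup/create/etc. methods */
};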
+--- /dev/null Fri Aug 30 17:31:37 2002
++++ linux-2.4.20-braam/include/linux/mbcache.h Sat Apr 5 03:57:18 2003
+@@ -0,0 +1,69 @@
++/*
++ File: linux/mbcache.h
++
++ (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
++*/
++
++/* Hardwire the number of additional indexes */
++#define MB_CACHE_INDEXES_COUNT 1
++
++struct mb_cache_entry;
++
++struct mb_cache_op {
++ int (*free)(struct mb_cache_entry *, int);
++};
++
++struct mb_cache {
++ struct list_head c_cache_list;
++ const char *c_name;
++ struct mb_cache_op c_op;
++ atomic_t c_entry_count;
++ int c_bucket_count;
++#ifndef MB_CACHE_INDEXES_COUNT
++ int c_indexes_count;
++#endif
++ kmem_cache_t *c_entry_cache;
++ struct list_head *c_block_hash;
++ struct list_head *c_indexes_hash[0];
++};
++
++struct mb_cache_entry_index {
++ struct list_head o_list;
++ unsigned int o_key;
++};
++
++struct mb_cache_entry {
++ struct list_head e_lru_list;
++ struct mb_cache *e_cache;
++ atomic_t e_used;
++ kdev_t e_dev;
++ unsigned long e_block;
++ struct list_head e_block_list;
++ struct mb_cache_entry_index e_indexes[0];
++};
++
++/* Functions on caches */
++
++struct mb_cache * mb_cache_create(const char *, struct mb_cache_op *, size_t,
++ int, int);
++void mb_cache_shrink(struct mb_cache *, kdev_t);
++void mb_cache_destroy(struct mb_cache *);
++
++/* Functions on cache entries */
++
++struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *);
++int mb_cache_entry_insert(struct mb_cache_entry *, kdev_t, unsigned long,
++ unsigned int[]);
++void mb_cache_entry_rehash(struct mb_cache_entry *, unsigned int[]);
++void mb_cache_entry_release(struct mb_cache_entry *);
++void mb_cache_entry_takeout(struct mb_cache_entry *);
++void mb_cache_entry_free(struct mb_cache_entry *);
++struct mb_cache_entry *mb_cache_entry_dup(struct mb_cache_entry *);
++struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *, kdev_t,
++ unsigned long);
++#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
++struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, int,
++ kdev_t, unsigned int);
++struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *, int,
++ kdev_t, unsigned int);
++#endif
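mbcache is the shared-block cache the xattr code uses to find identical attribute blocks and reference-count them between inodes: entries are keyed by (device, block) plus up to MB_CACHE_INDEXES_COUNT additional keys, in practice a hash of the block contents. A rough usage sketch follows; the demo_* names, the bucket count and the reading of mb_cache_create()'s size/count arguments are assumptions, not taken from fs/mbcache.c:

static struct mb_cache *demo_cache;

static int demo_cache_init(void)
{
	/* one extra index; entries carry exactly one index slot */
	demo_cache = mb_cache_create("demo_cache", NULL,
				     sizeof(struct mb_cache_entry) +
				     sizeof(struct mb_cache_entry_index),
				     1, 61);
	return demo_cache ? 0 : -ENOMEM;
}

static void demo_cache_remember(kdev_t dev, unsigned long block,
				unsigned int hash)
{
	struct mb_cache_entry *ce;
	unsigned int keys[1] = { hash };

	ce = mb_cache_entry_alloc(demo_cache);
	if (!ce)
		return;
	if (mb_cache_entry_insert(ce, dev, block, keys) != 0)
		mb_cache_entry_free(ce);	/* e.g. already cached */
	else
		mb_cache_entry_release(ce);	/* inserted; drop our reference */
}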
+--- linux-2.4.20/kernel/ksyms.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/kernel/ksyms.c Sat Apr 5 03:57:18 2003
+@@ -11,6 +11,7 @@
+
+ #include <linux/config.h>
+ #include <linux/slab.h>
++#include <linux/cache_def.h>
+ #include <linux/module.h>
+ #include <linux/blkdev.h>
+ #include <linux/cdrom.h>
+@@ -89,6 +90,7 @@ EXPORT_SYMBOL(exit_mm);
+ EXPORT_SYMBOL(exit_files);
+ EXPORT_SYMBOL(exit_fs);
+ EXPORT_SYMBOL(exit_sighand);
++EXPORT_SYMBOL(copy_fs_struct);
+
+ /* internal kernel memory management */
+ EXPORT_SYMBOL(_alloc_pages);
+@@ -107,6 +109,8 @@ EXPORT_SYMBOL(kmem_cache_validate);
+ EXPORT_SYMBOL(kmem_cache_alloc);
+ EXPORT_SYMBOL(kmem_cache_free);
+ EXPORT_SYMBOL(kmem_cache_size);
++EXPORT_SYMBOL(register_cache);
++EXPORT_SYMBOL(unregister_cache);
+ EXPORT_SYMBOL(kmalloc);
+ EXPORT_SYMBOL(kfree);
+ EXPORT_SYMBOL(vfree);
+--- linux-2.4.20/mm/vmscan.c~linux-2.4.20-xattr-0.8.54 Sat Apr 5 03:57:18 2003
++++ linux-2.4.20-braam/mm/vmscan.c Sat Apr 5 03:57:18 2003
+@@ -18,6 +18,7 @@
+ #include <linux/kernel_stat.h>
+ #include <linux/swap.h>
+ #include <linux/swapctl.h>
++#include <linux/cache_def.h>
+ #include <linux/smp_lock.h>
+ #include <linux/pagemap.h>
+ #include <linux/init.h>
+@@ -34,6 +35,39 @@
+ */
+ #define DEF_PRIORITY (6)
+
++static DECLARE_MUTEX(other_caches_sem);
++static LIST_HEAD(cache_definitions);
++
++void register_cache(struct cache_definition *cache)
++{
++ down(&other_caches_sem);
++ list_add(&cache->link, &cache_definitions);
++ up(&other_caches_sem);
++}
++
++void unregister_cache(struct cache_definition *cache)
++{
++ down(&other_caches_sem);
++ list_del(&cache->link);
++ up(&other_caches_sem);
++}
++
++static void shrink_other_caches(unsigned int priority, int gfp_mask)
++{
++ struct list_head *p;
++
++ if (down_trylock(&other_caches_sem))
++ return;
++
++ list_for_each_prev(p, &cache_definitions) {
++ struct cache_definition *cache =
++ list_entry(p, struct cache_definition, link);
++
++ cache->shrink(priority, gfp_mask);
++ }
++ up(&other_caches_sem);
++}
++
+ /*
+ * The swap-out function returns 1 if it successfully
+ * scanned all the pages it was asked to (`count').
+@@ -577,6 +611,7 @@ static int shrink_caches(zone_t * classz
+
+ shrink_dcache_memory(priority, gfp_mask);
+ shrink_icache_memory(priority, gfp_mask);
++ shrink_other_caches(priority, gfp_mask);
+ #ifdef CONFIG_QUOTA
+ shrink_dqcache_memory(DEF_PRIORITY, gfp_mask);
+ #endif
+
+_
--- /dev/null
+diff -ru lum-2.4.18-um30/fs/ext3/balloc.c uml-2.4.18-12.5/fs/ext3/balloc.c
+--- lum-2.4.18-um30/fs/ext3/balloc.c Mon Feb 25 12:38:08 2002
++++ uml-2.4.18-12.5/fs/ext3/balloc.c Thu Sep 19 13:40:11 2002
+@@ -276,7 +276,8 @@
+ }
+ lock_super (sb);
+ es = sb->u.ext3_sb.s_es;
+- if (block < le32_to_cpu(es->s_first_data_block) ||
++ if (block < le32_to_cpu(es->s_first_data_block) ||
++ block + count < block ||
+ (block + count) > le32_to_cpu(es->s_blocks_count)) {
+ ext3_error (sb, "ext3_free_blocks",
+ "Freeing blocks not in datazone - "
+@@ -309,17 +310,6 @@
+ if (!gdp)
+ goto error_return;
+
+- if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
+- in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
+- in_range (block, le32_to_cpu(gdp->bg_inode_table),
+- sb->u.ext3_sb.s_itb_per_group) ||
+- in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
+- sb->u.ext3_sb.s_itb_per_group))
+- ext3_error (sb, "ext3_free_blocks",
+- "Freeing blocks in system zones - "
+- "Block = %lu, count = %lu",
+- block, count);
+-
+ /*
+ * We are about to start releasing blocks in the bitmap,
+ * so we need undo access.
+@@ -345,14 +335,24 @@
+ if (err)
+ goto error_return;
+
+- for (i = 0; i < count; i++) {
++ for (i = 0; i < count; i++, block++) {
++ if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
++ block == le32_to_cpu(gdp->bg_inode_bitmap) ||
++ in_range(block, le32_to_cpu(gdp->bg_inode_table),
++ sb->u.ext2_sb.s_itb_per_group)) {
++ ext3_error(sb, __FUNCTION__,
++ "Freeing block in system zone - block = %lu",
++ block);
++ continue;
++ }
++
+ /*
+ * An HJ special. This is expensive...
+ */
+ #ifdef CONFIG_JBD_DEBUG
+ {
+ struct buffer_head *debug_bh;
+- debug_bh = sb_get_hash_table(sb, block + i);
++ debug_bh = sb_get_hash_table(sb, block);
+ if (debug_bh) {
+ BUFFER_TRACE(debug_bh, "Deleted!");
+ if (!bh2jh(bitmap_bh)->b_committed_data)
+@@ -365,9 +365,8 @@
+ #endif
+ BUFFER_TRACE(bitmap_bh, "clear bit");
+ if (!ext3_clear_bit (bit + i, bitmap_bh->b_data)) {
+- ext3_error (sb, __FUNCTION__,
+- "bit already cleared for block %lu",
+- block + i);
++ ext3_error(sb, __FUNCTION__,
++ "bit already cleared for block %lu", block);
+ BUFFER_TRACE(bitmap_bh, "bit already cleared");
+ } else {
+ dquot_freed_blocks++;
+@@ -415,7 +417,6 @@
+ if (!err) err = ret;
+
+ if (overflow && !err) {
+- block += count;
+ count = overflow;
+ goto do_more;
+ }
+@@ -575,6 +577,7 @@
+
+ ext3_debug ("goal=%lu.\n", goal);
+
++repeat:
+ /*
+ * First, test whether the goal block is free.
+ */
+@@ -684,10 +686,21 @@
+ if (tmp == le32_to_cpu(gdp->bg_block_bitmap) ||
+ tmp == le32_to_cpu(gdp->bg_inode_bitmap) ||
+ in_range (tmp, le32_to_cpu(gdp->bg_inode_table),
+- sb->u.ext3_sb.s_itb_per_group))
+- ext3_error (sb, "ext3_new_block",
+- "Allocating block in system zone - "
+- "block = %u", tmp);
++ EXT3_SB(sb)->s_itb_per_group)) {
++ ext3_error(sb, __FUNCTION__,
++ "Allocating block in system zone - block = %u", tmp);
++
++ /* Note: This will potentially use up one of the handle's
++ * buffer credits. Normally we have way too many credits,
++ * so that is OK. In _very_ rare cases it might not be OK.
++ * We will trigger an assertion if we run out of credits,
++ * and we will have to do a full fsck of the filesystem -
++ * better than randomly corrupting filesystem metadata.
++ */
++ ext3_set_bit(j, bh->b_data);
++ goto repeat;
++ }
++
+
+ /* The superblock lock should guard against anybody else beating
+ * us to this point! */
+diff -ru lum-2.4.18-um30/fs/ext3/namei.c uml-2.4.18-12.5/fs/ext3/namei.c
+--- lum-2.4.18-um30/fs/ext3/namei.c Fri Nov 9 15:25:04 2001
++++ uml-2.4.18-12.5/fs/ext3/namei.c Thu Sep 19 13:40:11 2002
+@@ -354,8 +355,8 @@
+ */
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ dir->u.ext3_i.i_flags &= ~EXT3_INDEX_FL;
+- ext3_mark_inode_dirty(handle, dir);
+ dir->i_version = ++event;
++ ext3_mark_inode_dirty(handle, dir);
+ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+ ext3_journal_dirty_metadata(handle, bh);
+ brelse(bh);
+@@ -464,8 +465,8 @@
+ inode->i_op = &ext3_file_inode_operations;
+ inode->i_fop = &ext3_file_operations;
+ inode->i_mapping->a_ops = &ext3_aops;
+- ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_nondir(handle, dentry, inode);
++ ext3_mark_inode_dirty(handle, inode);
+ }
+ ext3_journal_stop(handle, dir);
+ return err;
+@@ -489,8 +490,8 @@
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ init_special_inode(inode, mode, rdev);
+- ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_nondir(handle, dentry, inode);
++ ext3_mark_inode_dirty(handle, inode);
+ }
+ ext3_journal_stop(handle, dir);
+ return err;
+@@ -933,8 +934,8 @@
+ inode->i_size = l-1;
+ }
+ inode->u.ext3_i.i_disksize = inode->i_size;
+- ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_nondir(handle, dentry, inode);
++ ext3_mark_inode_dirty(handle, inode);
+ out_stop:
+ ext3_journal_stop(handle, dir);
+ return err;
+@@ -970,8 +971,8 @@
+ ext3_inc_count(handle, inode);
+ atomic_inc(&inode->i_count);
+
+- ext3_mark_inode_dirty(handle, inode);
+ err = ext3_add_nondir(handle, dentry, inode);
++ ext3_mark_inode_dirty(handle, inode);
+ ext3_journal_stop(handle, dir);
+ return err;
+ }
--- /dev/null
+fs/ext3/Makefile
+fs/ext3/dir.c
+fs/ext3/file.c
+fs/ext3/hash.c
+fs/ext3/namei.c
+fs/ext3/super.c
+include/linux/ext3_fs.h
+include/linux/ext3_fs_sb.h
+include/linux/ext3_jbd.h
+include/linux/rbtree.h
+lib/rbtree.c
--- /dev/null
+fs/ext3/namei.c
--- /dev/null
+fs/ext3/dir.c
+fs/ext3/namei.c
+include/linux/ext3_fs.h
--- /dev/null
+fs/ext3/namei.c
--- /dev/null
+fs/ext3/ialloc.c
+fs/ext3/namei.c
+include/linux/ext3_fs.h
--- /dev/null
+fs/ext3/balloc.c
+fs/ext3/file.c
+fs/ext3/fsync.c
+fs/ext3/ialloc.c
+fs/ext3/inode.c
+fs/ext3/namei.c
+fs/ext3/super.c
--- /dev/null
+fs/ext3/balloc.c
+fs/ext3/dir.c
+fs/ext3/ialloc.c
+fs/ext3/inode.c
+fs/ext3/ioctl.c
+fs/ext3/namei.c
+fs/ext3/super.c
+fs/ext3/symlink.c
+include/linux/ext3_fs.h
+include/linux/ext3_jbd.h
--- /dev/null
+fs/ext3/ialloc.c
+fs/ext3/inode.c
+include/linux/ext3_fs.h
--- /dev/null
+fs/ext3/inode.c
--- /dev/null
+fs/ext3/namei.c
+fs/ext3/super.c
+include/linux/ext3_fs_sb.h
--- /dev/null
+fs/ext3/inode.c
--- /dev/null
+fs/ext3/super.c
--- /dev/null
+fs/ext3/namei.c
--- /dev/null
+fs/ext3/super.c
--- /dev/null
+include/linux/ext3_fs.h
--- /dev/null
+include/linux/ext3_fs.h
+include/linux/ext3_fs_sb.h
+fs/ext3/super.c
--- /dev/null
+fs/ext3/namei.c
+fs/ext3/inode.c
--- /dev/null
+fs/ext3/super.c
--- /dev/null
+fs/ext3/ialloc.c
+fs/ext3/inode.c
+include/linux/ext3_fs.h
--- /dev/null
+fs/ext3/inode.c
--- /dev/null
+fs/ext3/namei.c
+fs/ext3/ialloc.c
+fs/ext3/ioctl.c
+include/linux/ext3_fs.h
--- /dev/null
+fs/ext3/super.c
+fs/ext3/namei.c
+include/linux/ext3_fs.h
+include/linux/ext3_jbd.h
--- /dev/null
+fs/ext3/ialloc.c
+fs/ext3/inode.c
+fs/ext3/namei.c
+fs/ext3/super.c
+fs/ext3/xattr.c
+include/linux/ext3_fs.h
+include/linux/ext3_jbd.h
+include/linux/ext3_xattr.h
+include/linux/xattr.h
--- /dev/null
+Documentation/Configure.help
+arch/alpha/defconfig
+arch/alpha/kernel/entry.S
+arch/arm/defconfig
+arch/arm/kernel/calls.S
+arch/i386/defconfig
+arch/ia64/defconfig
+arch/ia64/kernel/entry.S
+arch/m68k/defconfig
+arch/mips/defconfig
+arch/mips64/defconfig
+arch/ppc/defconfig
+arch/ppc64/kernel/misc.S
+arch/s390/defconfig
+arch/s390/kernel/entry.S
+arch/s390x/defconfig
+arch/s390x/kernel/entry.S
+arch/s390x/kernel/wrapper32.S
+arch/sparc/defconfig
+arch/sparc/kernel/systbls.S
+arch/sparc64/defconfig
+arch/sparc64/kernel/systbls.S
+fs/Config.in
+fs/Makefile
+fs/ext2/Makefile
+fs/ext2/file.c
+fs/ext2/ialloc.c
+fs/ext2/inode.c
+fs/ext2/namei.c
+fs/ext2/super.c
+fs/ext2/symlink.c
+fs/ext2/xattr.c
+fs/ext2/xattr_user.c
+fs/ext3/Makefile
+fs/ext3/file.c
+fs/ext3/ialloc.c
+fs/ext3/inode.c
+fs/ext3/namei.c
+fs/ext3/super.c
+fs/ext3/symlink.c
+fs/ext3/xattr.c
+fs/ext3/xattr_user.c
+fs/jfs/jfs_xattr.h
+fs/jfs/xattr.c
+fs/mbcache.c
+include/asm-arm/unistd.h
+include/asm-ia64/unistd.h
+include/asm-ppc64/unistd.h
+include/asm-s390/unistd.h
+include/asm-s390x/unistd.h
+include/asm-sparc/unistd.h
+include/asm-sparc64/unistd.h
+include/linux/cache_def.h
+include/linux/errno.h
+include/linux/ext2_fs.h
+include/linux/ext2_xattr.h
+include/linux/ext3_fs.h
+include/linux/ext3_jbd.h
+include/linux/ext3_xattr.h
+include/linux/fs.h
+include/linux/mbcache.h
+kernel/ksyms.c
+mm/vmscan.c
iod-stock-24-exports.patch
uml_check_get_page.patch
uml_no_panic.patch
+ext-2.4-patch-1.patch
+ext-2.4-patch-2.patch
+ext-2.4-patch-3.patch
+ext-2.4-patch-4.patch
+linux-2.4.20-xattr-0.8.54.patch
+extN-san.patch