First cut at uml40 extN patch.
author    gord-fig <gord-fig>
          Wed, 25 Sep 2002 04:06:49 +0000 (04:06 +0000)
committer gord-fig <gord-fig>
          Wed, 25 Sep 2002 04:06:49 +0000 (04:06 +0000)
lustre/extN/Makefile.am
lustre/extN/extN.patch-2.4.18-40um [new file with mode: 0644]
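Note: the bulk of the new patch file below converts direct accesses to the 2.4 in-superblock and in-inode unions (sb->u.extN_sb.*, inode->u.extN_i.*) into the EXTN_SB()/EXTN_I() accessor macros. As an illustrative sketch only (not part of the patch), on 2.4-era extN/ext3 trees these accessors are typically thin wrappers around the union members, so the conversion changes call sites but not behaviour:

    /* sketch: accessor macros assumed by the converted call sites */
    #define EXTN_SB(sb)     (&((sb)->u.extN_sb))
    #define EXTN_I(inode)   (&((inode)->u.extN_i))

    /* before */  if (block_group >= sb->u.extN_sb.s_groups_count) { ... }
    /* after  */  if (block_group >= EXTN_SB(sb)->s_groups_count) { ... }

Going through the macro keeps every call site independent of where the filesystem-private data actually lives, which is what allows the same code to build against trees where that data is no longer embedded in the generic VFS structures.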

diff --git a/lustre/extN/Makefile.am b/lustre/extN/Makefile.am
index 1408be4..2fba3fb 100644
--- a/lustre/extN/Makefile.am
+++ b/lustre/extN/Makefile.am
@@ -39,7 +39,8 @@ include $(top_srcdir)/Rules
 # set works for nearly everybody.  This is mainly for damage control.
 diff:
        $(RM) extN.patchT
-       for f in $(EXTNC); do diff -u extN.orig/$$f $$f >> extN.patchT ; done
+       -for f in $(EXTNC); do (cd $(top_srcdir) && diff -u extN/extN.orig/$$f extN/$$f) >> extN.patchT; done
+       -for f in $(EXTNI); do (cd $(top_srcdir) && diff -u extN/extN.orig-include/$$f include/linux/$$f) >> extN.patchT; done
        mv -f extN.patchT $(srcdir)/extN.patch-$(RELEASE)
        f=extN.patch-$(RELEASE); if cvs update $$f 2>&1 | grep 'cvs add' >/dev/null; then \
          cvs add $$f; \
@@ -75,7 +76,7 @@ patch-stamp: sed-stamp $(EXTNP)
        test -e $(top_srcdir)/fs || ln -s . $(top_srcdir)/fs
        set -vx;                                                              \
        if [ -f $(srcdir)/extN.patch-$(RELEASE) ]; then                       \
-               (cd $(srcdir) && patch -p0) < $(srcdir)/extN.patch-$(RELEASE);                  \
+               (cd $(top_srcdir) && patch -p0) < $(srcdir)/extN.patch-$(RELEASE);                  \
        else                                                                  \
                echo "If first patch fails, read NOTE in extN/Makefile.am";   \
                list='$(EXTNP)'; for p in $$list; do                          \
diff --git a/lustre/extN/extN.patch-2.4.18-40um b/lustre/extN/extN.patch-2.4.18-40um
new file mode 100644
index 0000000..7542b8c
--- /dev/null
+++ b/lustre/extN/extN.patch-2.4.18-40um
@@ -0,0 +1,3042 @@
+--- extN/extN.orig/balloc.c    Tue Sep 24 15:41:40 2002
++++ extN/balloc.c      Tue Sep 24 22:00:43 2002
+@@ -46,18 +46,18 @@
+       unsigned long desc;
+       struct extN_group_desc * gdp;
+-      if (block_group >= sb->u.extN_sb.s_groups_count) {
++      if (block_group >= EXTN_SB(sb)->s_groups_count) {
+               extN_error (sb, "extN_get_group_desc",
+                           "block_group >= groups_count - "
+                           "block_group = %d, groups_count = %lu",
+-                          block_group, sb->u.extN_sb.s_groups_count);
++                          block_group, EXTN_SB(sb)->s_groups_count);
+               return NULL;
+       }
+       
+       group_desc = block_group / EXTN_DESC_PER_BLOCK(sb);
+       desc = block_group % EXTN_DESC_PER_BLOCK(sb);
+-      if (!sb->u.extN_sb.s_group_desc[group_desc]) {
++      if (!EXTN_SB(sb)->s_group_desc[group_desc]) {
+               extN_error (sb, "extN_get_group_desc",
+                           "Group descriptor not loaded - "
+                           "block_group = %d, group_desc = %lu, desc = %lu",
+@@ -66,9 +66,9 @@
+       }
+       
+       gdp = (struct extN_group_desc *) 
+-            sb->u.extN_sb.s_group_desc[group_desc]->b_data;
++            EXTN_SB(sb)->s_group_desc[group_desc]->b_data;
+       if (bh)
+-              *bh = sb->u.extN_sb.s_group_desc[group_desc];
++              *bh = EXTN_SB(sb)->s_group_desc[group_desc];
+       return gdp + desc;
+ }
+@@ -104,8 +104,8 @@
+        * this group.  The IO will be retried next time.
+        */
+ error_out:
+-      sb->u.extN_sb.s_block_bitmap_number[bitmap_nr] = block_group;
+-      sb->u.extN_sb.s_block_bitmap[bitmap_nr] = bh;
++      EXTN_SB(sb)->s_block_bitmap_number[bitmap_nr] = block_group;
++      EXTN_SB(sb)->s_block_bitmap[bitmap_nr] = bh;
+       return retval;
+ }
+@@ -128,16 +128,17 @@
+       int i, j, retval = 0;
+       unsigned long block_bitmap_number;
+       struct buffer_head * block_bitmap;
++      struct extN_sb_info *sbi = EXTN_SB(sb);
+-      if (block_group >= sb->u.extN_sb.s_groups_count)
++      if (block_group >= sbi->s_groups_count)
+               extN_panic (sb, "load_block_bitmap",
+                           "block_group >= groups_count - "
+                           "block_group = %d, groups_count = %lu",
+-                          block_group, sb->u.extN_sb.s_groups_count);
++                          block_group, EXTN_SB(sb)->s_groups_count);
+-      if (sb->u.extN_sb.s_groups_count <= EXTN_MAX_GROUP_LOADED) {
+-              if (sb->u.extN_sb.s_block_bitmap[block_group]) {
+-                      if (sb->u.extN_sb.s_block_bitmap_number[block_group] ==
++      if (sbi->s_groups_count <= EXTN_MAX_GROUP_LOADED) {
++              if (sbi->s_block_bitmap[block_group]) {
++                      if (sbi->s_block_bitmap_number[block_group] ==
+                           block_group)
+                               return block_group;
+                       extN_error (sb, "__load_block_bitmap",
+@@ -149,21 +150,20 @@
+               return block_group;
+       }
+-      for (i = 0; i < sb->u.extN_sb.s_loaded_block_bitmaps &&
+-                  sb->u.extN_sb.s_block_bitmap_number[i] != block_group; i++)
++      for (i = 0; i < sbi->s_loaded_block_bitmaps &&
++                  sbi->s_block_bitmap_number[i] != block_group; i++)
+               ;
+-      if (i < sb->u.extN_sb.s_loaded_block_bitmaps &&
+-          sb->u.extN_sb.s_block_bitmap_number[i] == block_group) {
+-              block_bitmap_number = sb->u.extN_sb.s_block_bitmap_number[i];
+-              block_bitmap = sb->u.extN_sb.s_block_bitmap[i];
++      if (i < sbi->s_loaded_block_bitmaps &&
++          sbi->s_block_bitmap_number[i] == block_group) {
++              block_bitmap_number = sbi->s_block_bitmap_number[i];
++              block_bitmap = sbi->s_block_bitmap[i];
+               for (j = i; j > 0; j--) {
+-                      sb->u.extN_sb.s_block_bitmap_number[j] =
+-                              sb->u.extN_sb.s_block_bitmap_number[j - 1];
+-                      sb->u.extN_sb.s_block_bitmap[j] =
+-                              sb->u.extN_sb.s_block_bitmap[j - 1];
++                      sbi->s_block_bitmap_number[j] =
++                              sbi->s_block_bitmap_number[j - 1];
++                      sbi->s_block_bitmap[j] = sbi->s_block_bitmap[j - 1];
+               }
+-              sb->u.extN_sb.s_block_bitmap_number[0] = block_bitmap_number;
+-              sb->u.extN_sb.s_block_bitmap[0] = block_bitmap;
++              sbi->s_block_bitmap_number[0] = block_bitmap_number;
++              sbi->s_block_bitmap[0] = block_bitmap;
+               /*
+                * There's still one special case here --- if block_bitmap == 0
+@@ -173,17 +173,14 @@
+               if (!block_bitmap)
+                       retval = read_block_bitmap (sb, block_group, 0);
+       } else {
+-              if (sb->u.extN_sb.s_loaded_block_bitmaps<EXTN_MAX_GROUP_LOADED)
+-                      sb->u.extN_sb.s_loaded_block_bitmaps++;
++              if (sbi->s_loaded_block_bitmaps<EXTN_MAX_GROUP_LOADED)
++                      sbi->s_loaded_block_bitmaps++;
+               else
+-                      brelse (sb->u.extN_sb.s_block_bitmap
+-                                      [EXTN_MAX_GROUP_LOADED - 1]);
+-              for (j = sb->u.extN_sb.s_loaded_block_bitmaps - 1;
+-                                      j > 0;  j--) {
+-                      sb->u.extN_sb.s_block_bitmap_number[j] =
+-                              sb->u.extN_sb.s_block_bitmap_number[j - 1];
+-                      sb->u.extN_sb.s_block_bitmap[j] =
+-                              sb->u.extN_sb.s_block_bitmap[j - 1];
++                      brelse(sbi->s_block_bitmap[EXTN_MAX_GROUP_LOADED - 1]);
++              for (j = sbi->s_loaded_block_bitmaps - 1; j > 0;  j--) {
++                      sbi->s_block_bitmap_number[j] =
++                              sbi->s_block_bitmap_number[j - 1];
++                      sbi->s_block_bitmap[j] = sbi->s_block_bitmap[j - 1];
+               }
+               retval = read_block_bitmap (sb, block_group, 0);
+       }
+@@ -206,24 +203,25 @@
+ static inline int load_block_bitmap (struct super_block * sb,
+                                    unsigned int block_group)
+ {
++      struct extN_sb_info *sbi = EXTN_SB(sb);
+       int slot;
+-      
++
+       /*
+        * Do the lookup for the slot.  First of all, check if we're asking
+        * for the same slot as last time, and did we succeed that last time?
+        */
+-      if (sb->u.extN_sb.s_loaded_block_bitmaps > 0 &&
+-          sb->u.extN_sb.s_block_bitmap_number[0] == block_group &&
+-          sb->u.extN_sb.s_block_bitmap[0]) {
++      if (sbi->s_loaded_block_bitmaps > 0 &&
++          sbi->s_block_bitmap_number[0] == block_group &&
++          sbi->s_block_bitmap[0]) {
+               return 0;
+       }
+       /*
+        * Or can we do a fast lookup based on a loaded group on a filesystem
+        * small enough to be mapped directly into the superblock?
+        */
+-      else if (sb->u.extN_sb.s_groups_count <= EXTN_MAX_GROUP_LOADED && 
+-               sb->u.extN_sb.s_block_bitmap_number[block_group]==block_group
+-                      && sb->u.extN_sb.s_block_bitmap[block_group]) {
++      else if (sbi->s_groups_count <= EXTN_MAX_GROUP_LOADED &&
++               sbi->s_block_bitmap_number[block_group] == block_group
++                      && sbi->s_block_bitmap[block_group]) {
+               slot = block_group;
+       }
+       /*
+@@ -243,7 +241,7 @@
+        * If it's a valid slot, we may still have cached a previous IO error,
+        * in which case the bh in the superblock cache will be zero.
+        */
+-      if (!sb->u.extN_sb.s_block_bitmap[slot])
++      if (!sbi->s_block_bitmap[slot])
+               return -EIO;
+       
+       /*
+@@ -304,7 +302,7 @@
+       if (bitmap_nr < 0)
+               goto error_return;
+       
+-      bitmap_bh = sb->u.extN_sb.s_block_bitmap[bitmap_nr];
++      bitmap_bh = EXTN_SB(sb)->s_block_bitmap[bitmap_nr];
+       gdp = extN_get_group_desc (sb, block_group, &gd_bh);
+       if (!gdp)
+               goto error_return;
+@@ -340,8 +338,8 @@
+       if (err)
+               goto error_return;
+-      BUFFER_TRACE(sb->u.extN_sb.s_sbh, "get_write_access");
+-      err = extN_journal_get_write_access(handle, sb->u.extN_sb.s_sbh);
++      BUFFER_TRACE(EXTN_SB(sb)->s_sbh, "get_write_access");
++      err = extN_journal_get_write_access(handle, EXTN_SB(sb)->s_sbh);
+       if (err)
+               goto error_return;
+@@ -410,8 +408,8 @@
+       if (!err) err = ret;
+       /* And the superblock */
+-      BUFFER_TRACE(sb->u.extN_sb.s_sbh, "dirtied superblock");
+-      ret = extN_journal_dirty_metadata(handle, sb->u.extN_sb.s_sbh);
++      BUFFER_TRACE(EXTN_SB(sb)->s_sbh, "dirtied superblock");
++      ret = extN_journal_dirty_metadata(handle, EXTN_SB(sb)->s_sbh);
+       if (!err) err = ret;
+       if (overflow && !err) {
+@@ -564,12 +562,12 @@
+       }
+       lock_super (sb);
+-      es = sb->u.extN_sb.s_es;
++      es = EXTN_SB(sb)->s_es;
+       if (le32_to_cpu(es->s_free_blocks_count) <=
+                       le32_to_cpu(es->s_r_blocks_count) &&
+-          ((sb->u.extN_sb.s_resuid != current->fsuid) &&
+-           (sb->u.extN_sb.s_resgid == 0 ||
+-            !in_group_p (sb->u.extN_sb.s_resgid)) && 
++          ((EXTN_SB(sb)->s_resuid != current->fsuid) &&
++           (EXTN_SB(sb)->s_resgid == 0 ||
++            !in_group_p (EXTN_SB(sb)->s_resgid)) &&
+            !capable(CAP_SYS_RESOURCE)))
+               goto out;
+@@ -598,7 +596,7 @@
+               if (bitmap_nr < 0)
+                       goto io_error;
+               
+-              bh = sb->u.extN_sb.s_block_bitmap[bitmap_nr];
++              bh = EXTN_SB(sb)->s_block_bitmap[bitmap_nr];
+               extN_debug ("goal is at %d:%d.\n", i, j);
+@@ -621,9 +619,9 @@
+        * Now search the rest of the groups.  We assume that 
+        * i and gdp correctly point to the last group visited.
+        */
+-      for (k = 0; k < sb->u.extN_sb.s_groups_count; k++) {
++      for (k = 0; k < EXTN_SB(sb)->s_groups_count; k++) {
+               i++;
+-              if (i >= sb->u.extN_sb.s_groups_count)
++              if (i >= EXTN_SB(sb)->s_groups_count)
+                       i = 0;
+               gdp = extN_get_group_desc (sb, i, &bh2);
+               if (!gdp) {
+@@ -635,7 +633,7 @@
+                       if (bitmap_nr < 0)
+                               goto io_error;
+       
+-                      bh = sb->u.extN_sb.s_block_bitmap[bitmap_nr];
++                      bh = EXTN_SB(sb)->s_block_bitmap[bitmap_nr];
+                       j = find_next_usable_block(-1, bh, 
+                                                  EXTN_BLOCKS_PER_GROUP(sb));
+                       if (j >= 0) 
+@@ -674,8 +672,8 @@
+       fatal = extN_journal_get_write_access(handle, bh2);
+       if (fatal) goto out;
+-      BUFFER_TRACE(sb->u.extN_sb.s_sbh, "get_write_access");
+-      fatal = extN_journal_get_write_access(handle, sb->u.extN_sb.s_sbh);
++      BUFFER_TRACE(EXTN_SB(sb)->s_sbh, "get_write_access");
++      fatal = extN_journal_get_write_access(handle, EXTN_SB(sb)->s_sbh);
+       if (fatal) goto out;
+       tmp = j + i * EXTN_BLOCKS_PER_GROUP(sb)
+@@ -796,7 +794,7 @@
+       if (!fatal) fatal = err;
+       
+       BUFFER_TRACE(bh, "journal_dirty_metadata for superblock");
+-      err = extN_journal_dirty_metadata(handle, sb->u.extN_sb.s_sbh);
++      err = extN_journal_dirty_metadata(handle, EXTN_SB(sb)->s_sbh);
+       if (!fatal) fatal = err;
+       sb->s_dirt = 1;
+@@ -829,11 +827,11 @@
+       int i;
+       
+       lock_super (sb);
+-      es = sb->u.extN_sb.s_es;
++      es = EXTN_SB(sb)->s_es;
+       desc_count = 0;
+       bitmap_count = 0;
+       gdp = NULL;
+-      for (i = 0; i < sb->u.extN_sb.s_groups_count; i++) {
++      for (i = 0; i < EXTN_SB(sb)->s_groups_count; i++) {
+               gdp = extN_get_group_desc (sb, i, NULL);
+               if (!gdp)
+                       continue;
+@@ -842,7 +840,7 @@
+               if (bitmap_nr < 0)
+                       continue;
+               
+-              x = extN_count_free (sb->u.extN_sb.s_block_bitmap[bitmap_nr],
++              x = extN_count_free (EXTN_SB(sb)->s_block_bitmap[bitmap_nr],
+                                    sb->s_blocksize);
+               printk ("group %d: stored = %d, counted = %lu\n",
+                       i, le16_to_cpu(gdp->bg_free_blocks_count), x);
+@@ -853,7 +851,7 @@
+       unlock_super (sb);
+       return bitmap_count;
+ #else
+-      return le32_to_cpu(sb->u.extN_sb.s_es->s_free_blocks_count);
++      return le32_to_cpu(EXTN_SB(sb)->s_es->s_free_blocks_count);
+ #endif
+ }
+@@ -862,7 +860,7 @@
+                               unsigned char * map)
+ {
+       return extN_test_bit ((block -
+-              le32_to_cpu(sb->u.extN_sb.s_es->s_first_data_block)) %
++              le32_to_cpu(EXTN_SB(sb)->s_es->s_first_data_block)) %
+                        EXTN_BLOCKS_PER_GROUP(sb), map);
+ }
+@@ -930,11 +928,11 @@
+       struct extN_group_desc * gdp;
+       int i;
+-      es = sb->u.extN_sb.s_es;
++      es = EXTN_SB(sb)->s_es;
+       desc_count = 0;
+       bitmap_count = 0;
+       gdp = NULL;
+-      for (i = 0; i < sb->u.extN_sb.s_groups_count; i++) {
++      for (i = 0; i < EXTN_SB(sb)->s_groups_count; i++) {
+               gdp = extN_get_group_desc (sb, i, NULL);
+               if (!gdp)
+                       continue;
+@@ -968,7 +966,7 @@
+                                   "Inode bitmap for group %d is marked free",
+                                   i);
+-              for (j = 0; j < sb->u.extN_sb.s_itb_per_group; j++)
++              for (j = 0; j < EXTN_SB(sb)->s_itb_per_group; j++)
+                       if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j,
+                                                       sb, bh->b_data))
+                               extN_error (sb, "extN_check_blocks_bitmap",
+--- extN/extN.orig/dir.c       Tue Sep 24 15:41:40 2002
++++ extN/dir.c Tue Sep 24 22:00:43 2002
+@@ -52,7 +52,7 @@
+       else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+               error_msg = "directory entry across blocks";
+       else if (le32_to_cpu(de->inode) >
+-                      le32_to_cpu(dir->i_sb->u.extN_sb.s_es->s_inodes_count))
++                      le32_to_cpu(EXTN_SB(dir->i_sb)->s_es->s_inodes_count))
+               error_msg = "inode out of bounds";
+       if (error_msg != NULL)
+--- extN/extN.orig/ialloc.c    Tue Sep 24 15:41:40 2002
++++ extN/ialloc.c      Tue Sep 24 22:00:43 2002
+@@ -17,6 +17,7 @@
+ #include <linux/jbd.h>
+ #include <linux/extN_fs.h>
+ #include <linux/extN_jbd.h>
++#include <linux/extN_xattr.h>
+ #include <linux/stat.h>
+ #include <linux/string.h>
+ #include <linux/locks.h>
+@@ -73,8 +74,8 @@
+        * this group.  The IO will be retried next time.
+        */
+ error_out:
+-      sb->u.extN_sb.s_inode_bitmap_number[bitmap_nr] = block_group;
+-      sb->u.extN_sb.s_inode_bitmap[bitmap_nr] = bh;
++      EXTN_SB(sb)->s_inode_bitmap_number[bitmap_nr] = block_group;
++      EXTN_SB(sb)->s_inode_bitmap[bitmap_nr] = bh;
+       return retval;
+ }
+@@ -216,6 +217,7 @@
+        * as writing the quota to disk may need the lock as well.
+        */
+       DQUOT_INIT(inode);
++      extN_xattr_drop_inode(handle, inode);
+       DQUOT_FREE_INODE(inode);
+       DQUOT_DROP(inode);
+@@ -225,7 +227,7 @@
+       clear_inode (inode);
+       lock_super (sb);
+-      es = sb->u.extN_sb.s_es;
++      es = EXTN_SB(sb)->s_es;
+       if (ino < EXTN_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
+               extN_error (sb, "extN_free_inode",
+                           "reserved or nonexistent inode %lu", ino);
+@@ -237,7 +239,7 @@
+       if (bitmap_nr < 0)
+               goto error_return;
+-      bh = sb->u.extN_sb.s_inode_bitmap[bitmap_nr];
++      bh = EXTN_SB(sb)->s_inode_bitmap[bitmap_nr];
+       BUFFER_TRACE(bh, "get_write_access");
+       fatal = extN_journal_get_write_access(handle, bh);
+@@ -255,8 +257,8 @@
+               fatal = extN_journal_get_write_access(handle, bh2);
+               if (fatal) goto error_return;
+-              BUFFER_TRACE(sb->u.extN_sb.s_sbh, "get write access");
+-              fatal = extN_journal_get_write_access(handle, sb->u.extN_sb.s_sbh);
++              BUFFER_TRACE(EXTN_SB(sb)->s_sbh, "get write access");
++              fatal = extN_journal_get_write_access(handle, EXTN_SB(sb)->s_sbh);
+               if (fatal) goto error_return;
+               if (gdp) {
+@@ -271,9 +273,9 @@
+               if (!fatal) fatal = err;
+               es->s_free_inodes_count =
+                       cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) + 1);
+-              BUFFER_TRACE(sb->u.extN_sb.s_sbh,
++              BUFFER_TRACE(EXTN_SB(sb)->s_sbh,
+                                       "call extN_journal_dirty_metadata");
+-              err = extN_journal_dirty_metadata(handle, sb->u.extN_sb.s_sbh);
++              err = extN_journal_dirty_metadata(handle, EXTN_SB(sb)->s_sbh);
+               if (!fatal) fatal = err;
+       }
+       BUFFER_TRACE(bh, "call extN_journal_dirty_metadata");
+@@ -305,6 +307,8 @@
+       int i, j, avefreei;
+       struct inode * inode;
+       int bitmap_nr;
++      struct extN_inode_info *ei;
++      struct extN_sb_info *sbi;
+       struct extN_group_desc * gdp;
+       struct extN_group_desc * tmp;
+       struct extN_super_block * es;
+@@ -318,19 +322,21 @@
+       inode = new_inode(sb);
+       if (!inode)
+               return ERR_PTR(-ENOMEM);
+-      init_rwsem(&inode->u.extN_i.truncate_sem);
++      sbi = EXTN_SB(sb);
++      ei = EXTN_I(inode);
++      init_rwsem(&ei->truncate_sem);
+       lock_super (sb);
+-      es = sb->u.extN_sb.s_es;
++      es = sbi->s_es;
+ repeat:
+       gdp = NULL;
+       i = 0;
+       if (S_ISDIR(mode)) {
+               avefreei = le32_to_cpu(es->s_free_inodes_count) /
+-                      sb->u.extN_sb.s_groups_count;
++                      sbi->s_groups_count;
+               if (!gdp) {
+-                      for (j = 0; j < sb->u.extN_sb.s_groups_count; j++) {
++                      for (j = 0; j < sbi->s_groups_count; j++) {
+                               struct buffer_head *temp_buffer;
+                               tmp = extN_get_group_desc (sb, j, &temp_buffer);
+                               if (tmp &&
+@@ -350,7 +356,7 @@
+               /*
+                * Try to place the inode in its parent directory
+                */
+-              i = dir->u.extN_i.i_block_group;
++              i = EXTN_I(dir)->i_block_group;
+               tmp = extN_get_group_desc (sb, i, &bh2);
+               if (tmp && le16_to_cpu(tmp->bg_free_inodes_count))
+                       gdp = tmp;
+@@ -360,10 +366,10 @@
+                        * Use a quadratic hash to find a group with a
+                        * free inode
+                        */
+-                      for (j = 1; j < sb->u.extN_sb.s_groups_count; j <<= 1) {
++                      for (j = 1; j < sbi->s_groups_count; j <<= 1) {
+                               i += j;
+-                              if (i >= sb->u.extN_sb.s_groups_count)
+-                                      i -= sb->u.extN_sb.s_groups_count;
++                              if (i >= sbi->s_groups_count)
++                                      i -= sbi->s_groups_count;
+                               tmp = extN_get_group_desc (sb, i, &bh2);
+                               if (tmp &&
+                                   le16_to_cpu(tmp->bg_free_inodes_count)) {
+@@ -376,9 +382,9 @@
+                       /*
+                        * That failed: try linear search for a free inode
+                        */
+-                      i = dir->u.extN_i.i_block_group + 1;
+-                      for (j = 2; j < sb->u.extN_sb.s_groups_count; j++) {
+-                              if (++i >= sb->u.extN_sb.s_groups_count)
++                      i = EXTN_I(dir)->i_block_group + 1;
++                      for (j = 2; j < sbi->s_groups_count; j++) {
++                              if (++i >= sbi->s_groups_count)
+                                       i = 0;
+                               tmp = extN_get_group_desc (sb, i, &bh2);
+                               if (tmp &&
+@@ -399,11 +405,11 @@
+       if (bitmap_nr < 0)
+               goto fail;
+-      bh = sb->u.extN_sb.s_inode_bitmap[bitmap_nr];
++      bh = sbi->s_inode_bitmap[bitmap_nr];
+       if ((j = extN_find_first_zero_bit ((unsigned long *) bh->b_data,
+-                                    EXTN_INODES_PER_GROUP(sb))) <
+-          EXTN_INODES_PER_GROUP(sb)) {
++                                    sbi->s_inodes_per_group)) <
++          sbi->s_inodes_per_group) {
+               BUFFER_TRACE(bh, "get_write_access");
+               err = extN_journal_get_write_access(handle, bh);
+               if (err) goto fail;
+@@ -436,8 +442,8 @@
+               }
+               goto repeat;
+       }
+-      j += i * EXTN_INODES_PER_GROUP(sb) + 1;
+-      if (j < EXTN_FIRST_INO(sb) || j > le32_to_cpu(es->s_inodes_count)) {
++      j += i * sbi->s_inodes_per_group + 1;
++      if (j < sbi->s_first_ino || j > le32_to_cpu(es->s_inodes_count)) {
+               extN_error (sb, "extN_new_inode",
+                           "reserved inode or inode > inodes count - "
+                           "block_group = %d,inode=%d", i, j);
+@@ -457,13 +463,13 @@
+       err = extN_journal_dirty_metadata(handle, bh2);
+       if (err) goto fail;
+       
+-      BUFFER_TRACE(sb->u.extN_sb.s_sbh, "get_write_access");
+-      err = extN_journal_get_write_access(handle, sb->u.extN_sb.s_sbh);
++      BUFFER_TRACE(sbi->s_sbh, "get_write_access");
++      err = extN_journal_get_write_access(handle, sbi->s_sbh);
+       if (err) goto fail;
+       es->s_free_inodes_count =
+               cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
+-      BUFFER_TRACE(sb->u.extN_sb.s_sbh, "call extN_journal_dirty_metadata");
+-      err = extN_journal_dirty_metadata(handle, sb->u.extN_sb.s_sbh);
++      BUFFER_TRACE(sbi->s_sbh, "call extN_journal_dirty_metadata");
++      err = extN_journal_dirty_metadata(handle, sbi->s_sbh);
+       sb->s_dirt = 1;
+       if (err) goto fail;
+@@ -483,31 +489,31 @@
+       inode->i_blksize = PAGE_SIZE;
+       inode->i_blocks = 0;
+       inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+-      inode->u.extN_i.i_flags = dir->u.extN_i.i_flags & ~EXTN_INDEX_FL;
++      ei->i_flags = EXTN_I(dir)->i_flags & ~EXTN_INDEX_FL;
+       if (S_ISLNK(mode))
+-              inode->u.extN_i.i_flags &= ~(EXTN_IMMUTABLE_FL|EXTN_APPEND_FL);
++              ei->i_flags &= ~(EXTN_IMMUTABLE_FL|EXTN_APPEND_FL);
+ #ifdef EXTN_FRAGMENTS
+-      inode->u.extN_i.i_faddr = 0;
+-      inode->u.extN_i.i_frag_no = 0;
+-      inode->u.extN_i.i_frag_size = 0;
++      ei->i_faddr = 0;
++      ei->i_frag_no = 0;
++      ei->i_frag_size = 0;
+ #endif
+-      inode->u.extN_i.i_file_acl = 0;
+-      inode->u.extN_i.i_dir_acl = 0;
+-      inode->u.extN_i.i_dtime = 0;
+-      INIT_LIST_HEAD(&inode->u.extN_i.i_orphan);
++      ei->i_file_acl = 0;
++      ei->i_dir_acl = 0;
++      ei->i_dtime = 0;
++      INIT_LIST_HEAD(&ei->i_orphan);
+ #ifdef EXTN_PREALLOCATE
+-      inode->u.extN_i.i_prealloc_count = 0;
++      ei->i_prealloc_count = 0;
+ #endif
+-      inode->u.extN_i.i_block_group = i;
++      ei->i_block_group = i;
+       
+-      if (inode->u.extN_i.i_flags & EXTN_SYNC_FL)
++      if (ei->i_flags & EXTN_SYNC_FL)
+               inode->i_flags |= S_SYNC;
+       if (IS_SYNC(inode))
+               handle->h_sync = 1;
+       insert_inode_hash(inode);
+-      inode->i_generation = sb->u.extN_sb.s_next_generation++;
++      inode->i_generation = sbi->s_next_generation++;
+-      inode->u.extN_i.i_state = EXTN_STATE_NEW;
++      ei->i_state = EXTN_STATE_NEW;
+       err = extN_mark_inode_dirty(handle, inode);
+       if (err) goto fail;
+       
+@@ -585,19 +591,19 @@
+ unsigned long extN_count_free_inodes (struct super_block * sb)
+ {
++      struct extN_sb_info *sbi = EXTN_SB(sb);
++      struct extN_super_block *es = sbi->s_es;
+ #ifdef EXTNFS_DEBUG
+-      struct extN_super_block * es;
+       unsigned long desc_count, bitmap_count, x;
+       int bitmap_nr;
+       struct extN_group_desc * gdp;
+       int i;
+       lock_super (sb);
+-      es = sb->u.extN_sb.s_es;
+       desc_count = 0;
+       bitmap_count = 0;
+       gdp = NULL;
+-      for (i = 0; i < sb->u.extN_sb.s_groups_count; i++) {
++      for (i = 0; i < sbi->s_groups_count; i++) {
+               gdp = extN_get_group_desc (sb, i, NULL);
+               if (!gdp)
+                       continue;
+@@ -606,8 +612,8 @@
+               if (bitmap_nr < 0)
+                       continue;
+-              x = extN_count_free (sb->u.extN_sb.s_inode_bitmap[bitmap_nr],
+-                                   EXTN_INODES_PER_GROUP(sb) / 8);
++              x = extN_count_free(sbi->s_inode_bitmap[bitmap_nr],
++                                  sbi->s_inodes_per_group / 8);
+               printk ("group %d: stored = %d, counted = %lu\n",
+                       i, le16_to_cpu(gdp->bg_free_inodes_count), x);
+               bitmap_count += x;
+@@ -617,7 +623,7 @@
+       unlock_super (sb);
+       return desc_count;
+ #else
+-      return le32_to_cpu(sb->u.extN_sb.s_es->s_free_inodes_count);
++      return le32_to_cpu(es->s_free_inodes_count);
+ #endif
+ }
+@@ -626,16 +632,18 @@
+ void extN_check_inodes_bitmap (struct super_block * sb)
+ {
+       struct extN_super_block * es;
++      struct extN_sb_info *sbi;
+       unsigned long desc_count, bitmap_count, x;
+       int bitmap_nr;
+       struct extN_group_desc * gdp;
+       int i;
+-      es = sb->u.extN_sb.s_es;
++      sbi = EXTN_SB(sb);
++      es = sbi->s_es;
+       desc_count = 0;
+       bitmap_count = 0;
+       gdp = NULL;
+-      for (i = 0; i < sb->u.extN_sb.s_groups_count; i++) {
++      for (i = 0; i < sbi->s_groups_count; i++) {
+               gdp = extN_get_group_desc (sb, i, NULL);
+               if (!gdp)
+                       continue;
+@@ -644,7 +652,7 @@
+               if (bitmap_nr < 0)
+                       continue;
+-              x = extN_count_free (sb->u.extN_sb.s_inode_bitmap[bitmap_nr],
++              x = extN_count_free (sbi->s_inode_bitmap[bitmap_nr],
+                                    EXTN_INODES_PER_GROUP(sb) / 8);
+               if (le16_to_cpu(gdp->bg_free_inodes_count) != x)
+                       extN_error (sb, "extN_check_inodes_bitmap",
+--- extN/extN.orig/inode.c     Tue Sep 24 15:41:40 2002
++++ extN/inode.c       Tue Sep 24 22:00:43 2002
+@@ -39,6 +39,18 @@
+  */
+ #undef SEARCH_FROM_ZERO
++/*
++ * Test whether an inode is a fast symlink.
++ */
++static inline int extN_inode_is_fast_symlink(struct inode *inode)
++{
++      int ea_blocks = EXTN_I(inode)->i_file_acl ?
++              (inode->i_sb->s_blocksize >> 9) : 0;
++
++      return (S_ISLNK(inode->i_mode) &&
++              inode->i_blocks - ea_blocks == 0);
++}
++
+ /* The extN forget function must perform a revoke if we are freeing data
+  * which has been journaled.  Metadata (eg. indirect blocks) must be
+  * revoked in all cases. 
+@@ -48,7 +60,7 @@
+  * still needs to be revoked.
+  */
+-static int extN_forget(handle_t *handle, int is_metadata,
++int extN_forget(handle_t *handle, int is_metadata,
+                      struct inode *inode, struct buffer_head *bh,
+                      int blocknr)
+ {
+@@ -164,9 +176,7 @@
+ {
+       handle_t *handle;
+       
+-      if (is_bad_inode(inode) ||
+-          inode->i_ino == EXTN_ACL_IDX_INO ||
+-          inode->i_ino == EXTN_ACL_DATA_INO)
++      if (is_bad_inode(inode))
+               goto no_delete;
+       lock_kernel();
+@@ -196,7 +206,7 @@
+        * (Well, we could do this if we need to, but heck - it works)
+        */
+       extN_orphan_del(handle, inode);
+-      inode->u.extN_i.i_dtime = CURRENT_TIME;
++      EXTN_I(inode)->i_dtime = CURRENT_TIME;
+       /* 
+        * One subtle ordering requirement: if anything has gone wrong
+@@ -220,13 +230,14 @@
+ void extN_discard_prealloc (struct inode * inode)
+ {
+ #ifdef EXTN_PREALLOCATE
++      struct extN_inode_info *ei = EXTN_I(inode);
+       lock_kernel();
+       /* Writer: ->i_prealloc* */
+-      if (inode->u.extN_i.i_prealloc_count) {
+-              unsigned short total = inode->u.extN_i.i_prealloc_count;
+-              unsigned long block = inode->u.extN_i.i_prealloc_block;
+-              inode->u.extN_i.i_prealloc_count = 0;
+-              inode->u.extN_i.i_prealloc_block = 0;
++      if (ei->i_prealloc_count) {
++              unsigned short total = ei->i_prealloc_count;
++              unsigned long block = ei->i_prealloc_block;
++              ei->i_prealloc_count = 0;
++              ei->i_prealloc_block = 0;
+               /* Writer: end */
+               extN_free_blocks (inode, block, total);
+       }
+@@ -243,13 +254,15 @@
+       unsigned long result;
+ #ifdef EXTN_PREALLOCATE
++      struct extN_inode_info *ei = EXTN_I(inode);
++
+       /* Writer: ->i_prealloc* */
+-      if (inode->u.extN_i.i_prealloc_count &&
+-          (goal == inode->u.extN_i.i_prealloc_block ||
+-           goal + 1 == inode->u.extN_i.i_prealloc_block))
++      if (ei->i_prealloc_count &&
++          (goal == ei->i_prealloc_block ||
++           goal + 1 == ei->i_prealloc_block))
+       {
+-              result = inode->u.extN_i.i_prealloc_block++;
+-              inode->u.extN_i.i_prealloc_count--;
++              result = ei->i_prealloc_block++;
++              ei->i_prealloc_count--;
+               /* Writer: end */
+               extN_debug ("preallocation hit (%lu/%lu).\n",
+                           ++alloc_hits, ++alloc_attempts);
+@@ -259,8 +272,8 @@
+                           alloc_hits, ++alloc_attempts);
+               if (S_ISREG(inode->i_mode))
+                       result = extN_new_block (inode, goal, 
+-                               &inode->u.extN_i.i_prealloc_count,
+-                               &inode->u.extN_i.i_prealloc_block, err);
++                               &ei->i_prealloc_count,
++                               &ei->i_prealloc_block, err);
+               else
+                       result = extN_new_block (inode, goal, 0, 0, err);
+               /*
+@@ -394,7 +407,7 @@
+       *err = 0;
+       /* i_data is not going away, no lock needed */
+-      add_chain (chain, NULL, inode->u.extN_i.i_data + *offsets);
++      add_chain (chain, NULL, EXTN_I(inode)->i_data + *offsets);
+       if (!p->key)
+               goto no_block;
+       while (--depth) {
+@@ -437,7 +450,8 @@
+ static inline unsigned long extN_find_near(struct inode *inode, Indirect *ind)
+ {
+-      u32 *start = ind->bh ? (u32*) ind->bh->b_data : inode->u.extN_i.i_data;
++      struct extN_inode_info *ei = EXTN_I(inode);
++      u32 *start = ind->bh ? (u32*) ind->bh->b_data : ei->i_data;
+       u32 *p;
+       /* Try to find previous block */
+@@ -453,9 +467,8 @@
+        * It is going to be refered from inode itself? OK, just put it into
+        * the same cylinder group then.
+        */
+-      return (inode->u.extN_i.i_block_group * 
+-              EXTN_BLOCKS_PER_GROUP(inode->i_sb)) +
+-             le32_to_cpu(inode->i_sb->u.extN_sb.s_es->s_first_data_block);
++      return (ei->i_block_group * EXTN_BLOCKS_PER_GROUP(inode->i_sb)) +
++             le32_to_cpu(EXTN_SB(inode->i_sb)->s_es->s_first_data_block);
+ }
+ /**
+@@ -474,14 +487,15 @@
+ static int extN_find_goal(struct inode *inode, long block, Indirect chain[4],
+                         Indirect *partial, unsigned long *goal)
+ {
++      struct extN_inode_info *ei = EXTN_I(inode);
+       /* Writer: ->i_next_alloc* */
+-      if (block == inode->u.extN_i.i_next_alloc_block + 1) {
+-              inode->u.extN_i.i_next_alloc_block++;
+-              inode->u.extN_i.i_next_alloc_goal++;
++      if (block == ei->i_next_alloc_block + 1) {
++              ei->i_next_alloc_block++;
++              ei->i_next_alloc_goal++;
+       }
+ #ifdef SEARCH_FROM_ZERO
+-      inode->u.extN_i.i_next_alloc_block = 0;
+-      inode->u.extN_i.i_next_alloc_goal = 0;
++      ei->i_next_alloc_block = 0;
++      ei->i_next_alloc_goal = 0;
+ #endif
+       /* Writer: end */
+       /* Reader: pointers, ->i_next_alloc* */
+@@ -490,8 +504,8 @@
+                * try the heuristic for sequential allocation,
+                * failing that at least try to get decent locality.
+                */
+-              if (block == inode->u.extN_i.i_next_alloc_block)
+-                      *goal = inode->u.extN_i.i_next_alloc_goal;
++              if (block == ei->i_next_alloc_block)
++                      *goal = ei->i_next_alloc_goal;
+               if (!*goal)
+                       *goal = extN_find_near(inode, partial);
+ #ifdef SEARCH_FROM_ZERO
+@@ -619,6 +633,7 @@
+ {
+       int i;
+       int err = 0;
++      struct extN_inode_info *ei = EXTN_I(inode);
+       /*
+        * If we're splicing into a [td]indirect block (as opposed to the
+@@ -641,11 +656,11 @@
+       /* That's it */
+       *where->p = where->key;
+-      inode->u.extN_i.i_next_alloc_block = block;
+-      inode->u.extN_i.i_next_alloc_goal = le32_to_cpu(where[num-1].key);
++      ei->i_next_alloc_block = block;
++      ei->i_next_alloc_goal = le32_to_cpu(where[num-1].key);
+ #ifdef SEARCH_FROM_ZERO
+-      inode->u.extN_i.i_next_alloc_block = 0;
+-      inode->u.extN_i.i_next_alloc_goal = 0;
++      ei->i_next_alloc_block = 0;
++      ei->i_next_alloc_goal = 0;
+ #endif
+       /* Writer: end */
+@@ -729,6 +744,7 @@
+       unsigned long goal;
+       int left;
+       int depth = extN_block_to_path(inode, iblock, offsets);
++      struct extN_inode_info *ei = EXTN_I(inode);
+       loff_t new_size;
+       J_ASSERT(handle != NULL || create == 0);
+@@ -782,7 +798,7 @@
+       /*
+        * Block out extN_truncate while we alter the tree
+        */
+-      down_read(&inode->u.extN_i.truncate_sem);
++      down_read(&ei->truncate_sem);
+       err = extN_alloc_branch(handle, inode, left, goal,
+                                       offsets+(partial-chain), partial);
+@@ -794,7 +810,7 @@
+       if (!err)
+               err = extN_splice_branch(handle, inode, iblock, chain,
+                                        partial, left);
+-      up_read(&inode->u.extN_i.truncate_sem);
++      up_read(&ei->truncate_sem);
+       if (err == -EAGAIN)
+               goto changed;
+       if (err)
+@@ -807,8 +823,8 @@
+        * truncate is in progress.  It is racy between multiple parallel
+        * instances of get_block, but we have the BKL.
+        */
+-      if (new_size > inode->u.extN_i.i_disksize)
+-              inode->u.extN_i.i_disksize = new_size;
++      if (new_size > ei->i_disksize)
++              ei->i_disksize = new_size;
+       bh_result->b_state |= (1UL << BH_New);
+       goto got_it;
+@@ -921,7 +937,7 @@
+               struct buffer_head *tmp_bh;
+               for (i = 1;
+-                   inode->u.extN_i.i_prealloc_count &&
++                   EXTN_I(inode)->i_prealloc_count &&
+                    i < EXTN_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
+                    i++) {
+                       /*
+@@ -1131,8 +1147,8 @@
+                       kunmap(page);
+               }
+       }
+-      if (inode->i_size > inode->u.extN_i.i_disksize) {
+-              inode->u.extN_i.i_disksize = inode->i_size;
++      if (inode->i_size > EXTN_I(inode)->i_disksize) {
++              EXTN_I(inode)->i_disksize = inode->i_size;
+               ret2 = extN_mark_inode_dirty(handle, inode);
+               if (!ret) 
+                       ret = ret2;
+@@ -1832,7 +1848,8 @@
+ void extN_truncate(struct inode * inode)
+ {
+       handle_t *handle;
+-      u32 *i_data = inode->u.extN_i.i_data;
++      struct extN_inode_info *ei = EXTN_I(inode);
++      u32 *i_data = EXTN_I(inode)->i_data;
+       int addr_per_block = EXTN_ADDR_PER_BLOCK(inode->i_sb);
+       int offsets[4];
+       Indirect chain[4];
+@@ -1845,6 +1862,8 @@
+       if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+           S_ISLNK(inode->i_mode)))
+               return;
++      if (extN_inode_is_fast_symlink(inode))
++              return;
+       if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+               return;
+@@ -1884,13 +1903,13 @@
+        * on-disk inode. We do this via i_disksize, which is the value which
+        * extN *really* writes onto the disk inode.
+        */
+-      inode->u.extN_i.i_disksize = inode->i_size;
++      ei->i_disksize = inode->i_size;
+       /*
+        * From here we block out all extN_get_block() callers who want to
+        * modify the block allocation tree.
+        */
+-      down_write(&inode->u.extN_i.truncate_sem);
++      down_write(&ei->truncate_sem);
+       if (n == 1) {           /* direct blocks */
+               extN_free_data(handle, inode, NULL, i_data+offsets[0],
+@@ -1954,7 +1973,7 @@
+               case EXTN_TIND_BLOCK:
+                       ;
+       }
+-      up_write(&inode->u.extN_i.truncate_sem);
++      up_write(&ei->truncate_sem);
+       inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       extN_mark_inode_dirty(handle, inode);
+@@ -1983,6 +2002,8 @@
+ int extN_get_inode_loc (struct inode *inode, struct extN_iloc *iloc)
+ {
++      struct super_block *sb = inode->i_sb;
++      struct extN_sb_info *sbi = EXTN_SB(sb);
+       struct buffer_head *bh = 0;
+       unsigned long block;
+       unsigned long block_group;
+@@ -1992,28 +2013,22 @@
+       struct extN_group_desc * gdp;
+               
+       if ((inode->i_ino != EXTN_ROOT_INO &&
+-              inode->i_ino != EXTN_ACL_IDX_INO &&
+-              inode->i_ino != EXTN_ACL_DATA_INO &&
+               inode->i_ino != EXTN_JOURNAL_INO &&
+-              inode->i_ino < EXTN_FIRST_INO(inode->i_sb)) ||
+-              inode->i_ino > le32_to_cpu(
+-                      inode->i_sb->u.extN_sb.s_es->s_inodes_count)) {
+-              extN_error (inode->i_sb, "extN_get_inode_loc",
+-                          "bad inode number: %lu", inode->i_ino);
++              inode->i_ino < EXTN_FIRST_INO(sb)) ||
++              inode->i_ino > le32_to_cpu(sbi->s_es->s_inodes_count)) {
++              extN_error (sb, __FUNCTION__, "bad inode #%lu", inode->i_ino);
+               goto bad_inode;
+       }
+-      block_group = (inode->i_ino - 1) / EXTN_INODES_PER_GROUP(inode->i_sb);
+-      if (block_group >= inode->i_sb->u.extN_sb.s_groups_count) {
+-              extN_error (inode->i_sb, "extN_get_inode_loc",
+-                          "group >= groups count");
++      block_group = (inode->i_ino - 1) / sbi->s_inodes_per_group;
++      if (block_group >= sbi->s_groups_count) {
++              extN_error(sb, __FUNCTION__, "group >= groups count");
+               goto bad_inode;
+       }
+-      group_desc = block_group >> EXTN_DESC_PER_BLOCK_BITS(inode->i_sb);
+-      desc = block_group & (EXTN_DESC_PER_BLOCK(inode->i_sb) - 1);
+-      bh = inode->i_sb->u.extN_sb.s_group_desc[group_desc];
++      group_desc = block_group >> sbi->s_desc_per_block_bits;
++      desc = block_group & (sbi->s_desc_per_block - 1);
++      bh = sbi->s_group_desc[group_desc];
+       if (!bh) {
+-              extN_error (inode->i_sb, "extN_get_inode_loc",
+-                          "Descriptor not loaded");
++              extN_error(sb, __FUNCTION__, "Descriptor not loaded");
+               goto bad_inode;
+       }
+@@ -2021,17 +2036,17 @@
+       /*
+        * Figure out the offset within the block group inode table
+        */
+-      offset = ((inode->i_ino - 1) % EXTN_INODES_PER_GROUP(inode->i_sb)) *
+-              EXTN_INODE_SIZE(inode->i_sb);
++      offset = ((inode->i_ino - 1) % sbi->s_inodes_per_group) *
++              sbi->s_inode_size;
+       block = le32_to_cpu(gdp[desc].bg_inode_table) +
+-              (offset >> EXTN_BLOCK_SIZE_BITS(inode->i_sb));
+-      if (!(bh = sb_bread(inode->i_sb, block))) {
+-              extN_error (inode->i_sb, "extN_get_inode_loc",
++              (offset >> EXTN_BLOCK_SIZE_BITS(sb));
++      if (!(bh = sb_bread(sb, block))) {
++              extN_error (sb, __FUNCTION__,
+                           "unable to read inode block - "
+                           "inode=%lu, block=%lu", inode->i_ino, block);
+               goto bad_inode;
+       }
+-      offset &= (EXTN_BLOCK_SIZE(inode->i_sb) - 1);
++      offset &= (EXTN_BLOCK_SIZE(sb) - 1);
+       iloc->bh = bh;
+       iloc->raw_inode = (struct extN_inode *) (bh->b_data + offset);
+@@ -2047,6 +2062,7 @@
+ {
+       struct extN_iloc iloc;
+       struct extN_inode *raw_inode;
++      struct extN_inode_info *ei = EXTN_I(inode);
+       struct buffer_head *bh;
+       int block;
+       
+@@ -2054,7 +2070,7 @@
+               goto bad_inode;
+       bh = iloc.bh;
+       raw_inode = iloc.raw_inode;
+-      init_rwsem(&inode->u.extN_i.truncate_sem);
++      init_rwsem(&ei->truncate_sem);
+       inode->i_mode = le16_to_cpu(raw_inode->i_mode);
+       inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+       inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+@@ -2067,7 +2083,7 @@
+       inode->i_atime = le32_to_cpu(raw_inode->i_atime);
+       inode->i_ctime = le32_to_cpu(raw_inode->i_ctime);
+       inode->i_mtime = le32_to_cpu(raw_inode->i_mtime);
+-      inode->u.extN_i.i_dtime = le32_to_cpu(raw_inode->i_dtime);
++      ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
+       /* We now have enough fields to check if the inode was active or not.
+        * This is needed because nfsd might try to access dead inodes
+        * the test is that same one that e2fsck uses
+@@ -2075,7 +2091,7 @@
+        */
+       if (inode->i_nlink == 0) {
+               if (inode->i_mode == 0 ||
+-                  !(inode->i_sb->u.extN_sb.s_mount_state & EXTN_ORPHAN_FS)) {
++                  !(EXTN_SB(inode->i_sb)->s_mount_state & EXTN_ORPHAN_FS)) {
+                       /* this inode is deleted */
+                       brelse (bh);
+                       goto bad_inode;
+@@ -2090,40 +2106,37 @@
+                                        * size */  
+       inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
+       inode->i_version = ++event;
+-      inode->u.extN_i.i_flags = le32_to_cpu(raw_inode->i_flags);
++      ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+ #ifdef EXTN_FRAGMENTS
+-      inode->u.extN_i.i_faddr = le32_to_cpu(raw_inode->i_faddr);
+-      inode->u.extN_i.i_frag_no = raw_inode->i_frag;
+-      inode->u.extN_i.i_frag_size = raw_inode->i_fsize;
++      ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
++      ei->i_frag_no = raw_inode->i_frag;
++      ei->i_frag_size = raw_inode->i_fsize;
+ #endif
+-      inode->u.extN_i.i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
++      ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
+       if (!S_ISREG(inode->i_mode)) {
+-              inode->u.extN_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
++              ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
+       } else {
+               inode->i_size |=
+                       ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
+       }
+-      inode->u.extN_i.i_disksize = inode->i_size;
++      ei->i_disksize = inode->i_size;
+       inode->i_generation = le32_to_cpu(raw_inode->i_generation);
+ #ifdef EXTN_PREALLOCATE
+-      inode->u.extN_i.i_prealloc_count = 0;
++      ei->i_prealloc_count = 0;
+ #endif
+-      inode->u.extN_i.i_block_group = iloc.block_group;
++      ei->i_block_group = iloc.block_group;
+       /*
+        * NOTE! The in-memory inode i_data array is in little-endian order
+        * even on big-endian machines: we do NOT byteswap the block numbers!
+        */
+       for (block = 0; block < EXTN_N_BLOCKS; block++)
+-              inode->u.extN_i.i_data[block] = iloc.raw_inode->i_block[block];
+-      INIT_LIST_HEAD(&inode->u.extN_i.i_orphan);
++              ei->i_data[block] = iloc.raw_inode->i_block[block];
++      INIT_LIST_HEAD(&ei->i_orphan);
+       brelse (iloc.bh);
+-      if (inode->i_ino == EXTN_ACL_IDX_INO ||
+-          inode->i_ino == EXTN_ACL_DATA_INO)
+-              /* Nothing to do */ ;
+-      else if (S_ISREG(inode->i_mode)) {
++      if (S_ISREG(inode->i_mode)) {
+               inode->i_op = &extN_file_inode_operations;
+               inode->i_fop = &extN_file_operations;
+               inode->i_mapping->a_ops = &extN_aops;
+@@ -2131,7 +2144,7 @@
+               inode->i_op = &extN_dir_inode_operations;
+               inode->i_fop = &extN_dir_operations;
+       } else if (S_ISLNK(inode->i_mode)) {
+-              if (!inode->i_blocks)
++              if (extN_inode_is_fast_symlink(inode))
+                       inode->i_op = &extN_fast_symlink_inode_operations;
+               else {
+                       inode->i_op = &page_symlink_inode_operations;
+@@ -2141,19 +2154,19 @@
+               init_special_inode(inode, inode->i_mode,
+                                  le32_to_cpu(iloc.raw_inode->i_block[0]));
+       /* inode->i_attr_flags = 0;                             unused */
+-      if (inode->u.extN_i.i_flags & EXTN_SYNC_FL) {
++      if (ei->i_flags & EXTN_SYNC_FL) {
+               /* inode->i_attr_flags |= ATTR_FLAG_SYNCRONOUS; unused */
+               inode->i_flags |= S_SYNC;
+       }
+-      if (inode->u.extN_i.i_flags & EXTN_APPEND_FL) {
++      if (ei->i_flags & EXTN_APPEND_FL) {
+               /* inode->i_attr_flags |= ATTR_FLAG_APPEND;     unused */
+               inode->i_flags |= S_APPEND;
+       }
+-      if (inode->u.extN_i.i_flags & EXTN_IMMUTABLE_FL) {
++      if (ei->i_flags & EXTN_IMMUTABLE_FL) {
+               /* inode->i_attr_flags |= ATTR_FLAG_IMMUTABLE;  unused */
+               inode->i_flags |= S_IMMUTABLE;
+       }
+-      if (inode->u.extN_i.i_flags & EXTN_NOATIME_FL) {
++      if (ei->i_flags & EXTN_NOATIME_FL) {
+               /* inode->i_attr_flags |= ATTR_FLAG_NOATIME;    unused */
+               inode->i_flags |= S_NOATIME;
+       }
+@@ -2175,6 +2188,7 @@
+                               struct extN_iloc *iloc)
+ {
+       struct extN_inode *raw_inode = iloc->raw_inode;
++      struct extN_inode_info *ei = EXTN_I(inode);
+       struct buffer_head *bh = iloc->bh;
+       int err = 0, rc, block;
+@@ -2192,7 +2206,7 @@
+  * Fix up interoperability with old kernels. Otherwise, old inodes get
+  * re-used with the upper 16 bits of the uid/gid intact
+  */
+-              if(!inode->u.extN_i.i_dtime) {
++              if(!ei->i_dtime) {
+                       raw_inode->i_uid_high =
+                               cpu_to_le16(high_16_bits(inode->i_uid));
+                       raw_inode->i_gid_high =
+@@ -2210,34 +2224,33 @@
+               raw_inode->i_gid_high = 0;
+       }
+       raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+-      raw_inode->i_size = cpu_to_le32(inode->u.extN_i.i_disksize);
++      raw_inode->i_size = cpu_to_le32(ei->i_disksize);
+       raw_inode->i_atime = cpu_to_le32(inode->i_atime);
+       raw_inode->i_ctime = cpu_to_le32(inode->i_ctime);
+       raw_inode->i_mtime = cpu_to_le32(inode->i_mtime);
+       raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
+-      raw_inode->i_dtime = cpu_to_le32(inode->u.extN_i.i_dtime);
+-      raw_inode->i_flags = cpu_to_le32(inode->u.extN_i.i_flags);
++      raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
++      raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+ #ifdef EXTN_FRAGMENTS
+-      raw_inode->i_faddr = cpu_to_le32(inode->u.extN_i.i_faddr);
+-      raw_inode->i_frag = inode->u.extN_i.i_frag_no;
+-      raw_inode->i_fsize = inode->u.extN_i.i_frag_size;
++      raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
++      raw_inode->i_frag = ei->i_frag_no;
++      raw_inode->i_fsize = ei->i_frag_size;
+ #else
+       /* If we are not tracking these fields in the in-memory inode,
+        * then preserve them on disk, but still initialise them to zero
+        * for new inodes. */
+-      if (EXTN_I(inode)->i_state & EXTN_STATE_NEW) {
++      if (ei->i_state & EXTN_STATE_NEW) {
+               raw_inode->i_faddr = 0;
+               raw_inode->i_frag = 0;
+               raw_inode->i_fsize = 0;
+       }
+ #endif
+-      raw_inode->i_file_acl = cpu_to_le32(inode->u.extN_i.i_file_acl);
++      raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
+       if (!S_ISREG(inode->i_mode)) {
+-              raw_inode->i_dir_acl = cpu_to_le32(inode->u.extN_i.i_dir_acl);
++              raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
+       } else {
+-              raw_inode->i_size_high =
+-                      cpu_to_le32(inode->u.extN_i.i_disksize >> 32);
+-              if (inode->u.extN_i.i_disksize > 0x7fffffffULL) {
++              raw_inode->i_size_high = cpu_to_le32(ei->i_disksize >> 32);
++              if (ei->i_disksize > MAX_NON_LFS) {
+                       struct super_block *sb = inode->i_sb;
+                       if (!EXTN_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXTN_FEATURE_RO_COMPAT_LARGE_FILE) ||
+@@ -2247,7 +2260,7 @@
+                               * created, add a flag to the superblock.
+                               */
+                               err = extN_journal_get_write_access(handle,
+-                                              sb->u.extN_sb.s_sbh);
++                                              EXTN_SB(sb)->s_sbh);
+                               if (err)
+                                       goto out_brelse;
+                               extN_update_dynamic_rev(sb);
+@@ -2256,7 +2269,7 @@
+                               sb->s_dirt = 1;
+                               handle->h_sync = 1;
+                               err = extN_journal_dirty_metadata(handle,
+-                                              sb->u.extN_sb.s_sbh);
++                                              EXTN_SB(sb)->s_sbh);
+                       }
+               }
+       }
+@@ -2265,13 +2278,13 @@
+               raw_inode->i_block[0] =
+                       cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
+       else for (block = 0; block < EXTN_N_BLOCKS; block++)
+-              raw_inode->i_block[block] = inode->u.extN_i.i_data[block];
++              raw_inode->i_block[block] = ei->i_data[block];
+       BUFFER_TRACE(bh, "call extN_journal_dirty_metadata");
+       rc = extN_journal_dirty_metadata(handle, bh);
+       if (!err)
+               err = rc;
+-      EXTN_I(inode)->i_state &= ~EXTN_STATE_NEW;
++      ei->i_state &= ~EXTN_STATE_NEW;
+ out_brelse:
+       brelse (bh);
+@@ -2379,7 +2392,7 @@
+               }
+               
+               error = extN_orphan_add(handle, inode);
+-              inode->u.extN_i.i_disksize = attr->ia_size;
++              EXTN_I(inode)->i_disksize = attr->ia_size;
+               rc = extN_mark_inode_dirty(handle, inode);
+               if (!error)
+                       error = rc;
+@@ -2622,9 +2635,9 @@
+        */
+       if (val)
+-              inode->u.extN_i.i_flags |= EXTN_JOURNAL_DATA_FL;
++              EXTN_I(inode)->i_flags |= EXTN_JOURNAL_DATA_FL;
+       else
+-              inode->u.extN_i.i_flags &= ~EXTN_JOURNAL_DATA_FL;
++              EXTN_I(inode)->i_flags &= ~EXTN_JOURNAL_DATA_FL;
+       journal_unlock_updates(journal);
+--- extN/extN.orig/ioctl.c     Tue Sep 24 15:41:40 2002
++++ extN/ioctl.c       Tue Sep 24 22:00:43 2002
+@@ -18,13 +18,14 @@
+ int extN_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
+               unsigned long arg)
+ {
++      struct extN_inode_info *ei = EXTN_I(inode);
+       unsigned int flags;
+       extN_debug ("cmd = %u, arg = %lu\n", cmd, arg);
+       switch (cmd) {
+       case EXTN_IOC_GETFLAGS:
+-              flags = inode->u.extN_i.i_flags & EXTN_FL_USER_VISIBLE;
++              flags = ei->i_flags & EXTN_FL_USER_VISIBLE;
+               return put_user(flags, (int *) arg);
+       case EXTN_IOC_SETFLAGS: {
+               handle_t *handle = NULL;
+@@ -42,7 +43,7 @@
+               if (get_user(flags, (int *) arg))
+                       return -EFAULT;
+-              oldflags = inode->u.extN_i.i_flags;
++              oldflags = ei->i_flags;
+               /* The JOURNAL_DATA flag is modifiable only by root */
+               jflag = flags & EXTN_JOURNAL_DATA_FL;
+@@ -79,7 +80,7 @@
+               
+               flags = flags & EXTN_FL_USER_MODIFIABLE;
+               flags |= oldflags & ~EXTN_FL_USER_MODIFIABLE;
+-              inode->u.extN_i.i_flags = flags;
++              ei->i_flags = flags;
+               if (flags & EXTN_SYNC_FL)
+                       inode->i_flags |= S_SYNC;
+@@ -155,12 +156,12 @@
+                       int ret = 0;
+                       set_current_state(TASK_INTERRUPTIBLE);
+-                      add_wait_queue(&sb->u.extN_sb.ro_wait_queue, &wait);
+-                      if (timer_pending(&sb->u.extN_sb.turn_ro_timer)) {
++                      add_wait_queue(&EXTN_SB(sb)->ro_wait_queue, &wait);
++                      if (timer_pending(&EXTN_SB(sb)->turn_ro_timer)) {
+                               schedule();
+                               ret = 1;
+                       }
+-                      remove_wait_queue(&sb->u.extN_sb.ro_wait_queue, &wait);
++                      remove_wait_queue(&EXTN_SB(sb)->ro_wait_queue, &wait);
+                       return ret;
+               }
+ #endif
+--- extN/extN.orig/namei.c     Tue Sep 24 15:41:40 2002
++++ extN/namei.c       Tue Sep 24 22:00:43 2002
+@@ -16,6 +16,10 @@
+  *        David S. Miller (davem@caip.rutgers.edu), 1995
+  *  Directory entry file type support and forward compatibility hooks
+  *    for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
++ *  Hash Tree Directory indexing (c)
++ *    Daniel Phillips, 2001
++ *  Hash Tree Directory indexing porting
++ *    Christopher Li, 2002
+  */
+ #include <linux/fs.h>
+@@ -23,12 +27,13 @@
+ #include <linux/sched.h>
+ #include <linux/extN_fs.h>
+ #include <linux/extN_jbd.h>
++#include <linux/extN_xattr.h>
+ #include <linux/fcntl.h>
+ #include <linux/stat.h>
+ #include <linux/string.h>
+ #include <linux/locks.h>
+ #include <linux/quotaops.h>
+-
++#include <linux/slab.h>
+ /*
+  * define how far ahead to read directories while searching them.
+@@ -38,6 +43,432 @@
+ #define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+ #define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
++static struct buffer_head *extN_append(handle_t *handle,
++                                      struct inode *inode,
++                                      u32 *block, int *err)
++{
++      struct buffer_head *bh;
++
++      *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
++
++      if ((bh = extN_bread(handle, inode, *block, 1, err))) {
++              inode->i_size += inode->i_sb->s_blocksize;
++              EXTN_I(inode)->i_disksize = inode->i_size;
++              extN_journal_get_write_access(handle,bh);
++      }
++      return bh;
++}
++
++#ifndef assert
++#define assert(test) J_ASSERT(test)
++#endif
++
++#ifndef swap
++#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
++#endif
++
++typedef struct { u32 v; } le_u32;
++typedef struct { u16 v; } le_u16;
++
++#define dxtrace_on(command) command
++#define dxtrace_off(command)
++#define dxtrace dxtrace_off
++
++struct fake_dirent
++{
++      /*le*/u32 inode;
++      /*le*/u16 rec_len;
++      u8 name_len;
++      u8 file_type;
++};
++
++struct dx_countlimit
++{
++      le_u16 limit;
++      le_u16 count;
++};
++
++struct dx_entry
++{
++      le_u32 hash;
++      le_u32 block;
++};
++
++/*
++ * dx_root_info is laid out so that if it should somehow get overlaid by a
++ * dirent the two low bits of the hash version will be zero.  Therefore, the
++ * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
++ */
++
++struct dx_root
++{
++      struct fake_dirent dot;
++      char dot_name[4];
++      struct fake_dirent dotdot;
++      char dotdot_name[4];
++      struct dx_root_info
++      {
++              le_u32 reserved_zero;
++              u8 hash_version; /* 0 now, 1 at release */
++              u8 info_length; /* 8 */
++              u8 indirect_levels;
++              u8 unused_flags;
++      }
++      info;
++      struct dx_entry entries[0];
++};
++
++struct dx_node
++{
++      struct fake_dirent fake;
++      struct dx_entry entries[0];
++};
++
++
++struct dx_frame
++{
++      struct buffer_head *bh;
++      struct dx_entry *entries;
++      struct dx_entry *at;
++};
++
++struct dx_map_entry
++{
++      u32 hash;
++      u32 offs;
++};
++
++typedef struct extN_dir_entry_2 extN_dirent;
++static inline unsigned dx_get_block (struct dx_entry *entry);
++static void dx_set_block (struct dx_entry *entry, unsigned value);
++static inline unsigned dx_get_hash (struct dx_entry *entry);
++static void dx_set_hash (struct dx_entry *entry, unsigned value);
++static unsigned dx_get_count (struct dx_entry *entries);
++static unsigned dx_get_limit (struct dx_entry *entries);
++static void dx_set_count (struct dx_entry *entries, unsigned value);
++static void dx_set_limit (struct dx_entry *entries, unsigned value);
++static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
++static unsigned dx_node_limit (struct inode *dir);
++static unsigned dx_hack_hash (const u8 *name, int len);
++static struct dx_frame *dx_probe (struct inode *dir, u32 hash, struct dx_frame *frame);
++static void dx_release (struct dx_frame *frames);
++static int dx_make_map (extN_dirent *de, int size, struct dx_map_entry map[]);
++static void dx_sort_map(struct dx_map_entry *map, unsigned count);
++static extN_dirent *dx_copy_dirents (char *from, char *to,
++     struct dx_map_entry *map, int count);
++static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
++
++
++#ifdef CONFIG_EXTN_INDEX
++/*
++ * Future: use high four bits of block for coalesce-on-delete flags
++ * Mask them off for now.
++ */
++
++static inline unsigned dx_get_block (struct dx_entry *entry)
++{
++      return le32_to_cpu(entry->block.v) & 0x00ffffff;
++}
++
++static inline void dx_set_block (struct dx_entry *entry, unsigned value)
++{
++      entry->block.v = cpu_to_le32(value);
++}
++
++static inline unsigned dx_get_hash (struct dx_entry *entry)
++{
++      return le32_to_cpu(entry->hash.v);
++}
++
++static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
++{
++      entry->hash.v = cpu_to_le32(value);
++}
++
++static inline unsigned dx_get_count (struct dx_entry *entries)
++{
++      return le16_to_cpu(((struct dx_countlimit *) entries)->count.v);
++}
++
++static inline unsigned dx_get_limit (struct dx_entry *entries)
++{
++      return le16_to_cpu(((struct dx_countlimit *) entries)->limit.v);
++}
++
++static inline void dx_set_count (struct dx_entry *entries, unsigned value)
++{
++      ((struct dx_countlimit *) entries)->count.v = cpu_to_le16(value);
++}
++
++static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
++{
++      ((struct dx_countlimit *) entries)->limit.v = cpu_to_le16(value);
++}
++
++static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
++{
++      unsigned entry_space = dir->i_sb->s_blocksize - EXTN_DIR_REC_LEN(1) -
++              EXTN_DIR_REC_LEN(2) - infosize;
++      return 0? 20: entry_space / sizeof(struct dx_entry);
++}
++
++static inline unsigned dx_node_limit (struct inode *dir)
++{
++      unsigned entry_space = dir->i_sb->s_blocksize - EXTN_DIR_REC_LEN(0);
++      return 0? 22: entry_space / sizeof(struct dx_entry);
++}
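
To get a feel for the fan-out these limits give, here is a worked example for a 4096-byte block, assuming the usual extN dirent record length (an 8-byte header plus the name, rounded up to a 4-byte boundary); the EXTN_DIR_REC_LEN macro itself is not part of this hunk, so the exact numbers are illustrative:

    /* assumed: EXTN_DIR_REC_LEN(len) == (8 + len + 3) & ~3
     *          EXTN_DIR_REC_LEN(0) = 8, (1) = 12, (2) = 12
     *
     * dx_root_limit(dir, 8) = (4096 - 12 - 12 - 8) / sizeof(struct dx_entry)
     *                       = 4064 / 8 = 508 entries in the root block
     * dx_node_limit(dir)    = (4096 - 8) / 8 = 511 entries per index node
     *
     * With at most one indirect level (enforced in dx_probe below), that is
     * roughly 508 * 511 leaf blocks of dirents reachable from the root. */
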
++
++/* Hash function - not bad, but still looking for an ideal default */
++
++static unsigned dx_hack_hash (const u8 *name, int len)
++{
++      u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
++      while (len--)
++      {
++              u32 hash = hash1 + (hash0 ^ (*name++ * 7152373));
++              if (hash & 0x80000000) hash -= 0x7fffffff;
++              hash1 = hash0;
++              hash0 = hash;
++      }
++      return hash0;
++}
++
++#define dx_hash(s,n) (dx_hack_hash(s,n) << 1)
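
The hash is easy to exercise outside the kernel; the sketch below re-states dx_hack_hash() and the dx_hash() shift in user-space C purely for experimentation, with the kernel u8/u32 types mapped to stdint and a hypothetical main() harness added:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static unsigned dx_hack_hash(const uint8_t *name, int len)
    {
            uint32_t hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
            while (len--) {
                    uint32_t hash = hash1 + (hash0 ^ (*name++ * 7152373u));
                    if (hash & 0x80000000)
                            hash -= 0x7fffffff;
                    hash1 = hash0;
                    hash0 = hash;
            }
            return hash0;
    }

    int main(void)
    {
            const char *name = "lost+found";
            /* dx_hash() shifts left by one, so the low bit is always clear;
             * that spare bit is used later as a "hash continues" marker */
            printf("dx_hash(%s) = %08x\n", name,
                   dx_hack_hash((const uint8_t *)name, (int)strlen(name)) << 1);
            return 0;
    }
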
++
++/*
++ * Debug
++ */
++static void dx_show_index (char * label, struct dx_entry *entries)
++{
++      int i, n = dx_get_count (entries);
++      printk("%s index ", label);
++      for (i = 0; i < n; i++)
++      {
++              printk("%x->%u ", i? dx_get_hash(entries + i): 0, dx_get_block(entries + i));
++      }
++      printk("\n");
++}
++
++struct stats
++{ 
++      unsigned names;
++      unsigned space;
++      unsigned bcount;
++};
++
++static struct stats dx_show_leaf (extN_dirent *de, int size, int show_names)
++{
++      unsigned names = 0, space = 0;
++      char *base = (char *) de;
++      printk("names: ");
++      while ((char *) de < base + size)
++      {
++              if (de->inode)
++              {
++                      if (show_names)
++                      {
++                              int len = de->name_len;
++                              char *name = de->name;
++                              while (len--) printk("%c", *name++);
++                              printk(":%x.%u ", dx_hash (de->name, de->name_len), ((char *) de - base));
++                      }
++                      space += EXTN_DIR_REC_LEN(de->name_len);
++                      names++;
++              }
++              de = (extN_dirent *) ((char *) de + le16_to_cpu(de->rec_len));
++      }
++      printk("(%i)\n", names);
++      return (struct stats) { names, space, 1 };
++}
++
++struct stats dx_show_entries (struct inode *dir, struct dx_entry *entries, int levels)
++{
++      unsigned blocksize = dir->i_sb->s_blocksize;
++      unsigned count = dx_get_count (entries), names = 0, space = 0, i;
++      unsigned bcount = 0;
++      struct buffer_head *bh;
++      int err;
++      printk("%i indexed blocks...\n", count);
++      for (i = 0; i < count; i++, entries++)
++      {
++              u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
++              u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
++              struct stats stats;
++              printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
++              if (!(bh = extN_bread (NULL,dir, block, 0,&err))) continue;
++              stats = levels?
++                 dx_show_entries (dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
++                 dx_show_leaf ((extN_dirent *) bh->b_data, blocksize, 0);
++              names += stats.names;
++              space += stats.space;
++              bcount += stats.bcount;
++              brelse (bh);
++      }
++      if (bcount)
++              printk("%snames %u, fullness %u (%u%%)\n", levels?"":"   ",
++                      names, space/bcount,(space/bcount)*100/blocksize);
++      return (struct stats) { names, space, bcount};
++}
++
++/*
++ * Probe for a directory leaf block to search
++ */
++
++static struct dx_frame *
++dx_probe(struct inode *dir, u32 hash, struct dx_frame *frame_in)
++{
++      unsigned count, indirect;
++      struct dx_entry *at, *entries, *p, *q, *m;
++      struct dx_root *root;
++      struct buffer_head *bh;
++      struct dx_frame *frame = frame_in;
++      int err;
++
++      frame->bh = NULL;
++      if (!(bh = extN_bread(NULL, dir, 0, 0, &err)))
++              goto fail;
++      root = (struct dx_root *) bh->b_data;
++      if (root->info.hash_version > 0 || root->info.unused_flags & 1) {
++              brelse(bh);
++              goto fail;
++      }
++      if ((indirect = root->info.indirect_levels) > 1) {
++              brelse(bh);
++              goto fail;
++      }
++      entries = (struct dx_entry *) (((char *) &root->info) + root->info.info_length);
++      assert (dx_get_limit(entries) == dx_root_limit(dir, root->info.info_length));
++      dxtrace (printk("Look up %x", hash));
++      while (1)
++      {
++              count = dx_get_count(entries);
++              assert (count && count <= dx_get_limit(entries));
++              p = entries + 1;
++              q = entries + count - 1;
++              while (p <= q)
++              {
++                      m = p + (q - p)/2;
++                      dxtrace(printk("."));
++                      if (dx_get_hash(m) > hash)
++                              q = m - 1;
++                      else
++                              p = m + 1;
++              }
++
++              if (0) // linear search cross check
++              {
++                      unsigned n = count - 1;
++                      at = entries;
++                      while (n--)
++                      {
++                              dxtrace(printk(","));
++                              if (dx_get_hash(++at) > hash)
++                              {
++                                      at--;
++                                      break;
++                              }
++                      }
++                      assert (at == p - 1);
++              }
++
++              at = p - 1;
++              dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
++              frame->bh = bh;
++              frame->entries = entries;
++              frame->at = at;
++              if (!indirect--) return frame;
++              if (!(bh = extN_bread (NULL,dir, dx_get_block(at), 0,&err)))
++                      goto fail2;
++              at = entries = ((struct dx_node *) bh->b_data)->entries;
++              assert (dx_get_limit(entries) == dx_node_limit (dir));
++              frame++;
++      }
++fail2:
++      while (frame >= frame_in) {
++              brelse(frame->bh);
++              frame--;
++      }
++fail:
++      return NULL;
++}
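
The heart of dx_probe() is the binary search in the loop above: index entries are kept sorted by hash, entry 0 doubles as the count/limit header and is treated as hash 0, and the probe wants the last entry whose hash is less than or equal to the key. A reduced sketch of just that selection step, with a hypothetical helper name and reusing the dx_entry helpers defined earlier in this hunk:

    /* sketch: pick the slot covering 'hash' from 'count' sorted entries;
     * entries[0] is the header slot with an implicit hash of 0 */
    static struct dx_entry *dx_find_slot(struct dx_entry *entries,
                                         unsigned count, u32 hash)
    {
            struct dx_entry *p = entries + 1;          /* first real entry */
            struct dx_entry *q = entries + count - 1;  /* last entry */

            while (p <= q) {
                    struct dx_entry *m = p + (q - p) / 2;
                    if (dx_get_hash(m) > hash)
                            q = m - 1;
                    else
                            p = m + 1;
            }
            return p - 1;  /* last entry with hash <= key, or the header slot */
    }
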
++
++static void dx_release (struct dx_frame *frames)
++{
++      if (frames[0].bh == NULL)
++              return;
++
++      if (((struct dx_root *)frames[0].bh->b_data)->info.indirect_levels)
++              brelse (frames[1].bh);
++      brelse (frames[0].bh);
++}
++
++/*
++ * Directory block splitting, compacting
++ */
++
++static int dx_make_map (extN_dirent *de, int size, struct dx_map_entry map[])
++{
++      int count = 0;
++      char *base = (char *) de;
++      while ((char *) de < base + size)
++      {
++              map[count].hash = dx_hash (de->name, de->name_len);
++              map[count].offs = (u32) ((char *) de - base);
++              de = (extN_dirent *) ((char *) de + le16_to_cpu(de->rec_len));
++              count++;
++      }
++      return count;
++}
++
++static void dx_sort_map (struct dx_map_entry *map, unsigned count)
++{
++        struct dx_map_entry *p, *q, *top = map + count - 1;
++        int more;
++        /* Combsort until bubble sort doesn't suck */
++        while (count > 2)
++      {
++                count = count*10/13;
++                if (count - 9 < 2) /* 9, 10 -> 11 */
++                        count = 11;
++                for (p = top, q = p - count; q >= map; p--, q--)
++                        if (p->hash < q->hash)
++                                swap(*p, *q);
++        }
++        /* Garden variety bubble sort */
++        do {
++                more = 0;
++                q = top;
++                while (q-- > map)
++              {
++                        if (q[1].hash >= q[0].hash)
++                              continue;
++                        swap(*(q+1), *q);
++                        more = 1;
++              }
++      } while(more);
++}
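
dx_sort_map() sorts only the 8-byte (hash, offset) map built by dx_make_map(), not the dirents themselves, so the expensive copying is deferred to dx_copy_dirents(). The sort is a comb sort with the usual 10/13 shrink factor plus the standard tweak of bumping gaps of 9 or 10 up to 11, finished by a bubble pass; gap values below follow from the integer arithmetic above:

    /* for count = 100 map entries the gap passes use
     *   76, 58, 44, 33, 25, 19, 14, 11 (10 bumped to 11), 8, 6, 4, 3, 2
     * and the trailing bubble-sort pass handles whatever is left. */
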
++
++static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
++{
++      struct dx_entry *entries = frame->entries;
++      struct dx_entry *old = frame->at, *new = old + 1;
++      int count = dx_get_count(entries);
++
++      assert(count < dx_get_limit(entries));
++      assert(old < entries + count);
++      memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
++      dx_set_hash(new, hash);
++      dx_set_block(new, block);
++      dx_set_count(entries, count + 1);
++}
++#endif
++
++static void extN_update_dx_flag(struct inode *inode)
++{
++      if (!test_opt(inode->i_sb, INDEX))
++              EXTN_I(inode)->i_flags &= ~EXTN_INDEX_FL;
++}
++
+ /*
+  * NOTE! unlike strncmp, extN_match returns 1 for success, 0 for failure.
+  *
+@@ -95,6 +526,15 @@
+ }
+ /*
++ * p is at least 6 bytes before the end of page
++ */
++static inline extN_dirent *extN_next_entry(extN_dirent *p)
++{
++      return (extN_dirent *)((char*)p + le16_to_cpu(p->rec_len));
++}
++
++
++/*
+  *    extN_find_entry()
+  *
+  * finds an entry in the specified directory with the wanted name. It
+@@ -105,6 +545,8 @@
+  * The returned buffer_head has ->b_count elevated.  The caller is expected
+  * to brelse() it when appropriate.
+  */
++
++      
+ static struct buffer_head * extN_find_entry (struct dentry *dentry,
+                                       struct extN_dir_entry_2 ** res_dir)
+ {
+@@ -119,12 +561,78 @@
+       int num = 0;
+       int nblocks, i, err;
+       struct inode *dir = dentry->d_parent->d_inode;
++      int namelen;
++      const u8 *name;
++      unsigned blocksize;
++      extN_dirent *de, *top;
+       *res_dir = NULL;
+       sb = dir->i_sb;
++      blocksize = sb->s_blocksize;
++      namelen = dentry->d_name.len;
++      name = dentry->d_name.name;
++      if (namelen > EXTN_NAME_LEN)
++              return NULL;
++      if (extN_dx && is_dx(dir)) {
++              u32 hash = dx_hash (name, namelen);
++              struct dx_frame frames[2], *frame;
++              if (!(frame = dx_probe (dir, hash, frames)))
++                      return NULL;
++dxnext:
++              block = dx_get_block(frame->at);
++              if (!(bh = extN_bread (NULL,dir, block, 0, &err)))
++                      goto dxfail;
++              de = (extN_dirent *) bh->b_data;
++              top = (extN_dirent *) ((char *) de + blocksize -
++                              EXTN_DIR_REC_LEN(0));
++              for (; de < top; de = extN_next_entry(de))
++                      if (extN_match (namelen, name, de)) {
++                              if (!extN_check_dir_entry("extN_find_entry",
++                                        dir, de, bh,
++                                        (block<<EXTN_BLOCK_SIZE_BITS(sb))
++                                         +((char *)de - bh->b_data))) {
++                                      brelse (bh);
++                                      goto dxfail;
++                              }
++                              *res_dir = de;
++                              goto dxfound;
++                      }
++              brelse (bh);
++              /* Same hash continues in next block?  Search on. */
++              if (++(frame->at) == frame->entries + dx_get_count(frame->entries))
++              {
++                      struct buffer_head *bh2;
++                      if (frame == frames)
++                              goto dxfail;
++                      if (++(frames->at) == frames->entries + dx_get_count(frames->entries))
++                              goto dxfail;
++                      /* should omit read if not continued */
++                      if (!(bh2 = extN_bread (NULL, dir,
++                                              dx_get_block(frames->at),
++                                              0, &err)))
++                              goto dxfail;
++                      brelse (frame->bh);
++                      frame->bh = bh2;
++                      frame->at = frame->entries = ((struct dx_node *) bh2->b_data)->entries;
++                      /* Subtle: the 0th entry has the count, find the hash in frame above */
++                      if ((dx_get_hash(frames->at) & -2) == hash)
++                              goto dxnext;
++                      goto dxfail;
++              }
++              if ((dx_get_hash(frame->at) & -2) == hash)
++                      goto dxnext;
++dxfail:
++              dxtrace(printk("%s not found\n", name));
++              dx_release (frames);
++              return NULL;
++dxfound:
++              dx_release (frames);
++              return bh;
++      }
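
The masking with -2 in the comparisons above relies on a property of the hash: dx_hash() always produces even values because of its final shift, so bit 0 of a stored index hash is free, and do_split() (further down) sets it when dirents with the same hash spill across a block boundary. That is why extN_find_entry() keeps walking leaves while the masked hash still matches. A one-line restatement under those assumptions, with a hypothetical helper name:

    /* sketch: does this index entry still cover the (even) hash we probed?
     * bit 0 of a stored hash only marks "same hash continues here", so
     * compare with it masked off */
    static int dx_hash_matches(struct dx_entry *at, u32 hash)
    {
            return (dx_get_hash(at) & ~1u) == hash;
    }
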
++      
+       nblocks = dir->i_size >> EXTN_BLOCK_SIZE_BITS(sb);
+-      start = dir->u.extN_i.i_dir_start_lookup;
++      start = EXTN_I(dir)->i_dir_start_lookup;
+       if (start >= nblocks)
+               start = 0;
+       block = start;
+@@ -165,7 +673,7 @@
+               i = search_dirblock(bh, dir, dentry,
+                           block << EXTN_BLOCK_SIZE_BITS(sb), res_dir);
+               if (i == 1) {
+-                      dir->u.extN_i.i_dir_start_lookup = block;
++                      EXTN_I(dir)->i_dir_start_lookup = block;
+                       ret = bh;
+                       goto cleanup_and_exit;
+               } else {
+@@ -237,6 +745,92 @@
+               de->file_type = extN_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
+ }
++static extN_dirent *
++dx_copy_dirents (char *from, char *to, struct dx_map_entry *map, int count)
++{
++      unsigned rec_len = 0;
++
++      while (count--) {
++              extN_dirent *de = (extN_dirent *) (from + map->offs);
++              rec_len = EXTN_DIR_REC_LEN(de->name_len);
++              memcpy (to, de, rec_len);
++              ((extN_dirent *) to)->rec_len = rec_len;
++              to += rec_len;
++              map++;
++      }
++      return (extN_dirent *) (to - rec_len);
++}
++
++#ifdef CONFIG_EXTN_INDEX
++static extN_dirent *do_split(handle_t *handle, struct inode *dir,
++                      struct buffer_head **bh,struct dx_frame *frame,
++                      u32 hash, int *error)
++{
++      unsigned blocksize = dir->i_sb->s_blocksize;
++      unsigned count, continued;
++      struct buffer_head *bh2;
++      u32 newblock;
++      unsigned MAX_DX_MAP = PAGE_CACHE_SIZE/EXTN_DIR_REC_LEN(1) + 1;
++      u32 hash2;
++      struct dx_map_entry *map;
++      char *data1 = (*bh)->b_data, *data2, *data3;
++      unsigned split;
++      extN_dirent *de, *de2;
++
++      bh2 = extN_append (handle, dir, &newblock, error);
++      if (!(bh2))
++      {
++              brelse(*bh);
++              *bh = NULL;
++              return (extN_dirent *)bh2;
++      }
++
++      BUFFER_TRACE(*bh, "get_write_access");
++      extN_journal_get_write_access(handle, *bh);
++      BUFFER_TRACE(frame->bh, "get_write_access");
++      extN_journal_get_write_access(handle, frame->bh);
++
++      data2 = bh2->b_data;
++
++      map = kmalloc(sizeof(*map) * MAX_DX_MAP, GFP_KERNEL);
++      if (!map)
++              panic("no memory for do_split\n");
++      count = dx_make_map ((extN_dirent *) data1, blocksize, map);
++      split = count/2; // need to adjust to actual middle
++      dx_sort_map (map, count);
++      hash2 = map[split].hash;
++      continued = hash2 == map[split - 1].hash;
++      dxtrace(printk("Split block %i at %x, %i/%i\n",
++              dx_get_block(frame->at), hash2, split, count-split));
++
++      /* Fancy dance to stay within two buffers */
++      de2 = dx_copy_dirents (data1, data2, map + split, count - split);
++      data3 = (char *) de2 + de2->rec_len;
++      de = dx_copy_dirents (data1, data3, map, split);
++      memcpy(data1, data3, (char *) de + de->rec_len - data3);
++      de = (extN_dirent *) ((char *) de - data3 + data1); // relocate de
++      de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
++      de2->rec_len = cpu_to_le16(data2 + blocksize - (char *) de2);
++      dxtrace(dx_show_leaf ((extN_dirent *) data1, blocksize, 1));
++      dxtrace(dx_show_leaf ((extN_dirent *) data2, blocksize, 1));
++
++      /* Which block gets the new entry? */
++      if (hash >= hash2)
++      {
++              swap(*bh, bh2);
++              de = de2;
++      }
++      dx_insert_block (frame, hash2 + continued, newblock);
++      extN_journal_dirty_metadata (handle, bh2);
++      brelse (bh2);
++      extN_journal_dirty_metadata (handle, frame->bh);
++      dxtrace(dx_show_index ("frame", frame->entries));
++      kfree(map);
++      return de;
++}
++#endif
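
do_split() above is the leaf-split half of an indexed insert: it maps each dirent to a (hash, offset) pair, sorts the map, copies the upper half of the names into a freshly appended block, fixes up the two rec_len fields, and posts the split key into the parent index with dx_insert_block(). A worked example of the split-key bookkeeping, with made-up (already even, already sorted) hash values:

    /* map[].hash = { 0x10, 0x20, 0x30, 0x30, 0x40, 0x50 }, count = 6
     *   split = count/2 = 3
     *   hash2 = map[3].hash = 0x30, map[2].hash = 0x30  ->  continued = 1
     *
     * The parent index entry for the new block is stored as 0x30 + 1 = 0x31:
     * entries hashing to 0x30 now straddle both leaves, and the lookup path
     * in extN_find_entry() sees (0x31 & -2) == 0x30 and searches on into the
     * second leaf instead of stopping at the first. */
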
++
++
+ /*
+  *    extN_add_entry()
+  *
+@@ -251,6 +845,7 @@
+ /*
+  * AKPM: the journalling code here looks wrong on the error paths
+  */
++
+ static int extN_add_entry (handle_t *handle, struct dentry *dentry,
+       struct inode *inode)
+ {
+@@ -258,117 +853,281 @@
+       const char *name = dentry->d_name.name;
+       int namelen = dentry->d_name.len;
+       unsigned long offset;
+-      unsigned short rec_len;
+       struct buffer_head * bh;
+-      struct extN_dir_entry_2 * de, * de1;
+-      struct super_block * sb;
++      extN_dirent *de;
++      struct super_block * sb = dir->i_sb;
+       int     retval;
++      unsigned short reclen = EXTN_DIR_REC_LEN(namelen);
+-      sb = dir->i_sb;
++      unsigned blocksize = sb->s_blocksize;
++      unsigned nlen, rlen;
++      u32 block, blocks;
++      char *top;
+       if (!namelen)
+               return -EINVAL;
+-      bh = extN_bread (handle, dir, 0, 0, &retval);
+-      if (!bh)
+-              return retval;
+-      rec_len = EXTN_DIR_REC_LEN(namelen);
+-      offset = 0;
+-      de = (struct extN_dir_entry_2 *) bh->b_data;
+-      while (1) {
+-              if ((char *)de >= sb->s_blocksize + bh->b_data) {
+-                      brelse (bh);
+-                      bh = NULL;
+-                      bh = extN_bread (handle, dir,
+-                              offset >> EXTN_BLOCK_SIZE_BITS(sb), 1, &retval);
+-                      if (!bh)
+-                              return retval;
+-                      if (dir->i_size <= offset) {
+-                              if (dir->i_size == 0) {
+-                                      brelse(bh);
+-                                      return -ENOENT;
++      if (extN_dx && is_dx(dir)) {
++              struct dx_frame frames[2], *frame;
++              struct dx_entry *entries, *at;
++              u32 hash;
++              char *data1;
++
++              hash = dx_hash(name, namelen);
++              /* FIXME: do something if dx_probe() fails here */
++              frame = dx_probe(dir, hash, frames);
++              entries = frame->entries;
++              at = frame->at;
++
++              if (!(bh = extN_bread(handle,dir, dx_get_block(at), 0,&retval)))
++                      goto dxfail1;
++
++              BUFFER_TRACE(bh, "get_write_access");
++              extN_journal_get_write_access(handle, bh);
++
++              data1 = bh->b_data;
++              de = (extN_dirent *) data1;
++              top = data1 + (0? 200: blocksize);
++              while ((char *) de < top)
++              {
++                      /* FIXME: check EEXIST and dir */
++                      nlen = EXTN_DIR_REC_LEN(de->name_len);
++                      rlen = le16_to_cpu(de->rec_len);
++                      if ((de->inode? rlen - nlen: rlen) >= reclen)
++                              goto dx_add;
++                      de = (extN_dirent *) ((char *) de + rlen);
++              }
++              /* Block full, should compress but for now just split */
++              dxtrace(printk("using %u of %u node entries\n",
++                      dx_get_count(entries), dx_get_limit(entries)));
++              /* Need to split index? */
++              if (dx_get_count(entries) == dx_get_limit(entries))
++              {
++                      u32 newblock;
++                      unsigned icount = dx_get_count(entries);
++                      int levels = frame - frames;
++                      struct dx_entry *entries2;
++                      struct dx_node *node2;
++                      struct buffer_head *bh2;
++                      if (levels && dx_get_count(frames->entries) == dx_get_limit(frames->entries))
++                              goto dxfull;
++                      bh2 = extN_append (handle, dir, &newblock, &retval);
++                      if (!(bh2))
++                              goto dxfail2;
++                      node2 = (struct dx_node *)(bh2->b_data);
++                      entries2 = node2->entries;
++                      node2->fake.rec_len = cpu_to_le16(blocksize);
++                      node2->fake.inode = 0;
++                      BUFFER_TRACE(frame->bh, "get_write_access");
++                      extN_journal_get_write_access(handle, frame->bh);
++                      if (levels)
++                      {
++                              unsigned icount1 = icount/2, icount2 = icount - icount1;
++                              unsigned hash2 = dx_get_hash(entries + icount1);
++                              dxtrace(printk("Split index %i/%i\n", icount1, icount2));
++                              
++                              BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
++                              extN_journal_get_write_access(handle, frames[0].bh);
++                              
++                              memcpy ((char *) entries2, (char *) (entries + icount1),
++                                      icount2 * sizeof(struct dx_entry));
++                              dx_set_count (entries, icount1);
++                              dx_set_count (entries2, icount2);
++                              dx_set_limit (entries2, dx_node_limit(dir));
++
++                              /* Which index block gets the new entry? */
++                              if (at - entries >= icount1) {
++                                      frame->at = at = at - entries - icount1 + entries2;
++                                      frame->entries = entries = entries2;
++                                      swap(frame->bh, bh2);
+                               }
+-
+-                              extN_debug ("creating next block\n");
+-
+-                              BUFFER_TRACE(bh, "get_write_access");
+-                              extN_journal_get_write_access(handle, bh);
+-                              de = (struct extN_dir_entry_2 *) bh->b_data;
+-                              de->inode = 0;
+-                              de->rec_len = le16_to_cpu(sb->s_blocksize);
+-                              dir->u.extN_i.i_disksize =
+-                                      dir->i_size = offset + sb->s_blocksize;
+-                              dir->u.extN_i.i_flags &= ~EXTN_INDEX_FL;
+-                              extN_mark_inode_dirty(handle, dir);
++                              dx_insert_block (frames + 0, hash2, newblock);
++                              dxtrace(dx_show_index ("node", frames[1].entries));
++                              dxtrace(dx_show_index ("node",
++                                      ((struct dx_node *) bh2->b_data)->entries));
++                              extN_journal_dirty_metadata(handle, bh2);
++                              brelse (bh2);
+                       } else {
+-
+-                              extN_debug ("skipping to next block\n");
+-
+-                              de = (struct extN_dir_entry_2 *) bh->b_data;
++                              dxtrace(printk("Creating second level index...\n"));
++                              memcpy((char *) entries2, (char *) entries,
++                                      icount * sizeof(struct dx_entry));
++                              dx_set_limit(entries2, dx_node_limit(dir));
++
++                              /* Set up root */
++                              dx_set_count(entries, 1);
++                              dx_set_block(entries + 0, newblock);
++                              ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
++
++                              /* Add new access path frame */
++                              frame = frames + 1;
++                              frame->at = at = at - entries + entries2;
++                              frame->entries = entries = entries2;
++                              frame->bh = bh2;
++                              extN_journal_get_write_access(handle, frame->bh);
+                       }
++                      extN_journal_dirty_metadata(handle, frames[0].bh);
+               }
+-              if (!extN_check_dir_entry ("extN_add_entry", dir, de, bh,
+-                                         offset)) {
+-                      brelse (bh);
+-                      return -ENOENT;
+-              }
+-              if (extN_match (namelen, name, de)) {
++              de = do_split(handle, dir, &bh, frame, hash, &retval);
++              dx_release (frames);
++              if (!(de))
++                      goto fail;
++              nlen = EXTN_DIR_REC_LEN(de->name_len);
++              rlen = le16_to_cpu(de->rec_len);
++              goto add;
++
++dx_add:
++              dx_release (frames);
++              goto add;
++
++dxfull:
++              extN_warning(sb, __FUNCTION__, "Directory index full!\n");
++              retval = -ENOSPC;
++dxfail2:
++              brelse(bh);
++dxfail1:
++              dx_release (frames);
++              goto fail1;
++      }
++
++      blocks = dir->i_size >> sb->s_blocksize_bits;
++      for (block = 0, offset = 0; block < blocks; block++) {
++              bh = extN_bread(handle, dir, block, 0, &retval);
++              if(!bh)
++                      return retval;
++              de = (extN_dirent *)bh->b_data;
++              top = bh->b_data + blocksize - reclen;
++              while ((char *) de <= top) {
++                      if (!extN_check_dir_entry("extN_add_entry", dir, de,
++                                                bh, offset)) {
++                              brelse (bh);
++                              return -EIO;
++                      }
++                      if (extN_match (namelen, name, de)) {
+                               brelse (bh);
+                               return -EEXIST;
+-              }
+-              if ((le32_to_cpu(de->inode) == 0 &&
+-                              le16_to_cpu(de->rec_len) >= rec_len) ||
+-                  (le16_to_cpu(de->rec_len) >=
+-                              EXTN_DIR_REC_LEN(de->name_len) + rec_len)) {
+-                      BUFFER_TRACE(bh, "get_write_access");
+-                      extN_journal_get_write_access(handle, bh);
+-                      /* By now the buffer is marked for journaling */
+-                      offset += le16_to_cpu(de->rec_len);
+-                      if (le32_to_cpu(de->inode)) {
+-                              de1 = (struct extN_dir_entry_2 *) ((char *) de +
+-                                      EXTN_DIR_REC_LEN(de->name_len));
+-                              de1->rec_len =
+-                                      cpu_to_le16(le16_to_cpu(de->rec_len) -
+-                                      EXTN_DIR_REC_LEN(de->name_len));
+-                              de->rec_len = cpu_to_le16(
+-                                              EXTN_DIR_REC_LEN(de->name_len));
+-                              de = de1;
+                       }
+-                      de->file_type = EXTN_FT_UNKNOWN;
+-                      if (inode) {
+-                              de->inode = cpu_to_le32(inode->i_ino);
+-                              extN_set_de_type(dir->i_sb, de, inode->i_mode);
+-                      } else
+-                              de->inode = 0;
+-                      de->name_len = namelen;
+-                      memcpy (de->name, name, namelen);
+-                      /*
+-                       * XXX shouldn't update any times until successful
+-                       * completion of syscall, but too many callers depend
+-                       * on this.
+-                       *
+-                       * XXX similarly, too many callers depend on
+-                       * extN_new_inode() setting the times, but error
+-                       * recovery deletes the inode, so the worst that can
+-                       * happen is that the times are slightly out of date
+-                       * and/or different from the directory change time.
+-                       */
+-                      dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+-                      dir->u.extN_i.i_flags &= ~EXTN_INDEX_FL;
+-                      extN_mark_inode_dirty(handle, dir);
+-                      dir->i_version = ++event;
+-                      BUFFER_TRACE(bh, "call extN_journal_dirty_metadata");
+-                      extN_journal_dirty_metadata(handle, bh);
++                      nlen = EXTN_DIR_REC_LEN(de->name_len);
++                      rlen = le16_to_cpu(de->rec_len);
++                      if ((de->inode? rlen - nlen: rlen) >= reclen)
++                              goto add;
++                      de = (extN_dirent *)((char *)de + rlen);
++                      offset += rlen;
++              }
++              if (extN_dx && blocks == 1 && test_opt(sb, INDEX))
++                      goto dx_make_index;
++              brelse(bh);
++      }
++      bh = extN_append(handle, dir, &block, &retval);
++      if (!bh)
++              return retval;
++      de = (extN_dirent *) bh->b_data;
++      de->inode = 0;
++      de->rec_len = cpu_to_le16(rlen = blocksize);
++      nlen = 0;
++      goto add;
++
++add:
++      BUFFER_TRACE(bh, "get_write_access");
++      extN_journal_get_write_access(handle, bh);
++      /* By now the buffer is marked for journaling */
++      if (de->inode) {
++              extN_dirent *de1 = (extN_dirent *)((char *)de + nlen);
++              de1->rec_len = cpu_to_le16(rlen - nlen);
++              de->rec_len = cpu_to_le16(nlen);
++              de = de1;
++      }
++      de->file_type = EXTN_FT_UNKNOWN;
++      if (inode) {
++              de->inode = cpu_to_le32(inode->i_ino);
++              extN_set_de_type(dir->i_sb, de, inode->i_mode);
++      } else
++              de->inode = 0;
++      de->name_len = namelen;
++      memcpy (de->name, name, namelen);
++      /*
++       * XXX shouldn't update any times until successful
++       * completion of syscall, but too many callers depend
++       * on this.
++       *
++       * XXX similarly, too many callers depend on
++       * extN_new_inode() setting the times, but error
++       * recovery deletes the inode, so the worst that can
++       * happen is that the times are slightly out of date
++       * and/or different from the directory change time.
++       */
++      dir->i_mtime = dir->i_ctime = CURRENT_TIME;
++      extN_update_dx_flag(dir);
++      dir->i_version = ++event;
++      extN_mark_inode_dirty(handle, dir);
++      BUFFER_TRACE(bh, "call extN_journal_dirty_metadata");
++      extN_journal_dirty_metadata(handle, bh);
++      brelse(bh);
++      return 0;
++
++dx_make_index:
++      {
++              struct buffer_head *bh2;
++              struct dx_root *root;
++              struct dx_frame frames[2], *frame;
++              struct dx_entry *entries;
++              extN_dirent *de2;
++              char *data1;
++              unsigned len;
++              u32 hash;
++              
++              dxtrace(printk("Creating index\n"));
++              extN_journal_get_write_access(handle, bh);
++              root = (struct dx_root *) bh->b_data;
++              
++              EXTN_I(dir)->i_flags |= EXTN_INDEX_FL;
++              bh2 = extN_append (handle, dir, &block, &retval);
++              if (!(bh2))
++              {
+                       brelse(bh);
+-                      return 0;
++                      return retval;
+               }
+-              offset += le16_to_cpu(de->rec_len);
+-              de = (struct extN_dir_entry_2 *)
+-                      ((char *) de + le16_to_cpu(de->rec_len));
++              data1 = bh2->b_data;
++
++              /* The 0th block becomes the root, move the dirents out */
++              de = (extN_dirent *) &root->info;
++              len = ((char *) root) + blocksize - (char *) de;
++              memcpy (data1, de, len);
++              de = (extN_dirent *) data1;
++              top = data1 + len;
++              while (((char *) de2=(char*)de+le16_to_cpu(de->rec_len)) < top)
++                      de = de2;
++              de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
++              /* Initialize the root; the dot dirents already exist */
++              de = (extN_dirent *) (&root->dotdot);
++              de->rec_len = cpu_to_le16(blocksize - EXTN_DIR_REC_LEN(2));
++              memset (&root->info, 0, sizeof(root->info));
++              root->info.info_length = sizeof(root->info);
++              entries = root->entries;
++              dx_set_block (entries, 1);
++              dx_set_count (entries, 1);
++              dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
++
++              /* Initialize as for dx_probe */
++              hash = dx_hash (name, namelen);
++              frame = frames;
++              frame->entries = entries;
++              frame->at = entries;
++              frame->bh = bh;
++              bh = bh2;
++              de = do_split(handle,dir, &bh, frame, hash, &retval);
++              dx_release (frames);
++              if (!(de))
++                      return retval;
++              nlen = EXTN_DIR_REC_LEN(de->name_len);
++              rlen = le16_to_cpu(de->rec_len);
++              goto add;
+       }
+-      brelse (bh);
+-      return -ENOSPC;
++fail1:
++      return retval;
++fail:
++      return -ENOENT;
+ }
++
+ /*
+  * extN_delete_entry deletes a directory entry by merging it with the
+  * previous entry
+@@ -451,7 +1210,8 @@
+       struct inode * inode;
+       int err;
+-      handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS + 3);
++      handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS +
++                                      EXTN_INDEX_EXTRA_TRANS_BLOCKS + 3);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+@@ -464,8 +1224,10 @@
+               inode->i_op = &extN_file_inode_operations;
+               inode->i_fop = &extN_file_operations;
+               inode->i_mapping->a_ops = &extN_aops;
+-              extN_mark_inode_dirty(handle, inode);
+               err = extN_add_nondir(handle, dentry, inode);
++              if (err)
++                      extN_xattr_drop_inode(handle, inode);
++              extN_mark_inode_dirty(handle, inode);
+       }
+       extN_journal_stop(handle, dir);
+       return err;
+@@ -478,7 +1240,8 @@
+       struct inode *inode;
+       int err;
+-      handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS + 3);
++      handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS +
++                                      EXTN_INDEX_EXTRA_TRANS_BLOCKS + 3);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+@@ -489,8 +1252,10 @@
+       err = PTR_ERR(inode);
+       if (!IS_ERR(inode)) {
+               init_special_inode(inode, mode, rdev);
+-              extN_mark_inode_dirty(handle, inode);
+               err = extN_add_nondir(handle, dentry, inode);
++              if (err)
++                      extN_xattr_drop_inode(handle, inode);
++              extN_mark_inode_dirty(handle, inode);
+       }
+       extN_journal_stop(handle, dir);
+       return err;
+@@ -507,22 +1272,22 @@
+       if (dir->i_nlink >= EXTN_LINK_MAX)
+               return -EMLINK;
+-      handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS + 3);
++      handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS +
++                                      EXTN_INDEX_EXTRA_TRANS_BLOCKS + 3);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       if (IS_SYNC(dir))
+               handle->h_sync = 1;
+-      inode = extN_new_inode (handle, dir, S_IFDIR);
++      inode = extN_new_inode (handle, dir, S_IFDIR | mode);
+       err = PTR_ERR(inode);
+       if (IS_ERR(inode))
+               goto out_stop;
+       inode->i_op = &extN_dir_inode_operations;
+       inode->i_fop = &extN_dir_operations;
+-      inode->i_size = inode->u.extN_i.i_disksize = inode->i_sb->s_blocksize;
+-      inode->i_blocks = 0;    
++      inode->i_size = EXTN_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+       dir_block = extN_bread (handle, inode, 0, 1, &err);
+       if (!dir_block) {
+               inode->i_nlink--; /* is this nlink == 0? */
+@@ -549,15 +1314,12 @@
+       BUFFER_TRACE(dir_block, "call extN_journal_dirty_metadata");
+       extN_journal_dirty_metadata(handle, dir_block);
+       brelse (dir_block);
+-      inode->i_mode = S_IFDIR | mode;
+-      if (dir->i_mode & S_ISGID)
+-              inode->i_mode |= S_ISGID;
+       extN_mark_inode_dirty(handle, inode);
+       err = extN_add_entry (handle, dentry, inode);
+       if (err)
+               goto out_no_entry;
+       dir->i_nlink++;
+-      dir->u.extN_i.i_flags &= ~EXTN_INDEX_FL;
++      extN_update_dx_flag(dir);
+       extN_mark_inode_dirty(handle, dir);
+       d_instantiate(dentry, inode);
+ out_stop:
+@@ -565,6 +1327,7 @@
+       return err;
+ out_no_entry:
++      extN_xattr_drop_inode(handle, inode);
+       inode->i_nlink = 0;
+       extN_mark_inode_dirty(handle, inode);
+       iput (inode);
+@@ -655,7 +1418,7 @@
+       int err = 0, rc;
+       
+       lock_super(sb);
+-      if (!list_empty(&inode->u.extN_i.i_orphan))
++      if (!list_empty(&EXTN_I(inode)->i_orphan))
+               goto out_unlock;
+       /* Orphan handling is only valid for files with data blocks
+@@ -670,8 +1433,8 @@
+       J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+               S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
+-      BUFFER_TRACE(sb->u.extN_sb.s_sbh, "get_write_access");
+-      err = extN_journal_get_write_access(handle, sb->u.extN_sb.s_sbh);
++      BUFFER_TRACE(EXTN_SB(sb)->s_sbh, "get_write_access");
++      err = extN_journal_get_write_access(handle, EXTN_SB(sb)->s_sbh);
+       if (err)
+               goto out_unlock;
+       
+@@ -682,7 +1445,7 @@
+       /* Insert this inode at the head of the on-disk orphan list... */
+       NEXT_ORPHAN(inode) = le32_to_cpu(EXTN_SB(sb)->s_es->s_last_orphan);
+       EXTN_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
+-      err = extN_journal_dirty_metadata(handle, sb->u.extN_sb.s_sbh);
++      err = extN_journal_dirty_metadata(handle, EXTN_SB(sb)->s_sbh);
+       rc = extN_mark_iloc_dirty(handle, inode, &iloc);
+       if (!err)
+               err = rc;
+@@ -696,7 +1459,7 @@
+        * This is safe: on error we're going to ignore the orphan list
+        * anyway on the next recovery. */
+       if (!err)
+-              list_add(&inode->u.extN_i.i_orphan, &EXTN_SB(sb)->s_orphan);
++              list_add(&EXTN_I(inode)->i_orphan, &EXTN_SB(sb)->s_orphan);
+       jbd_debug(4, "superblock will point to %ld\n", inode->i_ino);
+       jbd_debug(4, "orphan inode %ld will point to %d\n",
+@@ -714,25 +1477,25 @@
+ int extN_orphan_del(handle_t *handle, struct inode *inode)
+ {
+       struct list_head *prev;
++      struct extN_inode_info *ei = EXTN_I(inode);
+       struct extN_sb_info *sbi;
+       ino_t ino_next; 
+       struct extN_iloc iloc;
+       int err = 0;
+       
+       lock_super(inode->i_sb);
+-      if (list_empty(&inode->u.extN_i.i_orphan)) {
++      if (list_empty(&ei->i_orphan)) {
+               unlock_super(inode->i_sb);
+               return 0;
+       }
+       ino_next = NEXT_ORPHAN(inode);
+-      prev = inode->u.extN_i.i_orphan.prev;
++      prev = ei->i_orphan.prev;
+       sbi = EXTN_SB(inode->i_sb);
+       jbd_debug(4, "remove inode %ld from orphan list\n", inode->i_ino);
+-      list_del(&inode->u.extN_i.i_orphan);
+-      INIT_LIST_HEAD(&inode->u.extN_i.i_orphan);
++      list_del_init(&ei->i_orphan);
+       /* If we're on an error path, we may not have a valid
+        * transaction handle with which to update the orphan list on
+@@ -755,9 +1518,8 @@
+               err = extN_journal_dirty_metadata(handle, sbi->s_sbh);
+       } else {
+               struct extN_iloc iloc2;
+-              struct inode *i_prev =
+-                      list_entry(prev, struct inode, u.extN_i.i_orphan);
+-              
++              struct inode *i_prev = orphan_list_entry(prev);
++
+               jbd_debug(4, "orphan inode %ld will point to %ld\n",
+                         i_prev->i_ino, ino_next);
+               err = extN_reserve_inode_write(handle, i_prev, &iloc2);
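
The orphan-list hunks also adopt two small idioms: list_del() followed by INIT_LIST_HEAD() collapses into list_del_init(), and the open-coded list_entry(..., u.extN_i.i_orphan) becomes orphan_list_entry(), whose definition is dropped from super.c below and presumably moves to a shared header not shown in this section. The list change is behaviour-preserving:

    /* old form                          new form
     * list_del(&ei->i_orphan);          list_del_init(&ei->i_orphan);
     * INIT_LIST_HEAD(&ei->i_orphan);
     *
     * either way the node ends up as an empty list head, which is what the
     * list_empty() checks in extN_orphan_add()/extN_orphan_del() rely on. */
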
+@@ -832,7 +1594,7 @@
+       extN_mark_inode_dirty(handle, inode);
+       dir->i_nlink--;
+       inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+-      dir->u.extN_i.i_flags &= ~EXTN_INDEX_FL;
++      extN_update_dx_flag(dir);
+       extN_mark_inode_dirty(handle, dir);
+ end_rmdir:
+@@ -878,7 +1640,7 @@
+       if (retval)
+               goto end_unlink;
+       dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+-      dir->u.extN_i.i_flags &= ~EXTN_INDEX_FL;
++      extN_update_dx_flag(dir);
+       extN_mark_inode_dirty(handle, dir);
+       inode->i_nlink--;
+       if (!inode->i_nlink)
+@@ -904,7 +1666,8 @@
+       if (l > dir->i_sb->s_blocksize)
+               return -ENAMETOOLONG;
+-      handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS + 5);
++      handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS +
++                                      EXTN_INDEX_EXTRA_TRANS_BLOCKS + 5);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+@@ -916,7 +1679,7 @@
+       if (IS_ERR(inode))
+               goto out_stop;
+-      if (l > sizeof (inode->u.extN_i.i_data)) {
++      if (l > sizeof(EXTN_I(inode)->i_data)) {
+               inode->i_op = &page_symlink_inode_operations;
+               inode->i_mapping->a_ops = &extN_aops;
+               /*
+@@ -959,7 +1722,8 @@
+       if (inode->i_nlink >= EXTN_LINK_MAX)
+               return -EMLINK;
+-      handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS);
++      handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS +
++                                      EXTN_INDEX_EXTRA_TRANS_BLOCKS);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+@@ -995,7 +1759,8 @@
+       old_bh = new_bh = dir_bh = NULL;
+-      handle = extN_journal_start(old_dir, 2 * EXTN_DATA_TRANS_BLOCKS + 2);
++      handle = extN_journal_start(old_dir, 2 * EXTN_DATA_TRANS_BLOCKS +
++                                      EXTN_INDEX_EXTRA_TRANS_BLOCKS + 2);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+@@ -1077,7 +1842,7 @@
+               new_inode->i_ctime = CURRENT_TIME;
+       }
+       old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
+-      old_dir->u.extN_i.i_flags &= ~EXTN_INDEX_FL;
++      extN_update_dx_flag(old_dir);
+       if (dir_bh) {
+               BUFFER_TRACE(dir_bh, "get_write_access");
+               extN_journal_get_write_access(handle, dir_bh);
+@@ -1089,7 +1854,7 @@
+                       new_inode->i_nlink--;
+               } else {
+                       new_dir->i_nlink++;
+-                      new_dir->u.extN_i.i_flags &= ~EXTN_INDEX_FL;
++                      extN_update_dx_flag(new_dir);
+                       extN_mark_inode_dirty(handle, new_dir);
+               }
+       }
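
One more pattern running through the namei.c hunks above: every extN_journal_start() call gains EXTN_INDEX_EXTRA_TRANS_BLOCKS of credit, since an insert into an indexed directory may additionally split a leaf and dirty one or two index blocks plus the index root within the same transaction. The constant is not defined in this file (it presumably sits with the other transaction-credit definitions in the extN journalling header), so it is treated here as an opaque count:

    /* reservation pattern used above; the extra term covers index blocks */
    handle = extN_journal_start(dir, EXTN_DATA_TRANS_BLOCKS +
                                     EXTN_INDEX_EXTRA_TRANS_BLOCKS + 3);
    if (IS_ERR(handle))
            return PTR_ERR(handle);
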
+--- extN/extN.orig/super.c     Tue Sep 24 15:41:40 2002
++++ extN/super.c       Tue Sep 24 22:05:23 2002
+@@ -24,6 +24,7 @@
+ #include <linux/jbd.h>
+ #include <linux/extN_fs.h>
+ #include <linux/extN_jbd.h>
++#include <linux/extN_xattr.h>
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/locks.h>
+@@ -121,7 +122,7 @@
+       /* If no overrides were specified on the mount, then fall back
+        * to the default behaviour set in the filesystem's superblock
+        * on disk. */
+-      switch (le16_to_cpu(sb->u.extN_sb.s_es->s_errors)) {
++      switch (le16_to_cpu(EXTN_SB(sb)->s_es->s_errors)) {
+       case EXTN_ERRORS_PANIC:
+               return EXTN_ERRORS_PANIC;
+       case EXTN_ERRORS_RO:
+@@ -269,9 +270,9 @@
+               return;
+       
+       printk (KERN_CRIT "Remounting filesystem read-only\n");
+-      sb->u.extN_sb.s_mount_state |= EXTN_ERROR_FS;
++      EXTN_SB(sb)->s_mount_state |= EXTN_ERROR_FS;
+       sb->s_flags |= MS_RDONLY;
+-      sb->u.extN_sb.s_mount_opt |= EXTN_MOUNT_ABORT;
++      EXTN_SB(sb)->s_mount_opt |= EXTN_MOUNT_ABORT;
+       journal_abort(EXTN_SB(sb)->s_journal, -EIO);
+ }
+@@ -377,8 +378,6 @@
+       return ret;
+ }
+-#define orphan_list_entry(l) list_entry((l), struct inode, u.extN_i.i_orphan)
+-
+ static void dump_orphan_list(struct super_block *sb, struct extN_sb_info *sbi)
+ {
+       struct list_head *l;
+@@ -404,6 +403,7 @@
+       kdev_t j_dev = sbi->s_journal->j_dev;
+       int i;
++      extN_xattr_put_super(sb);
+       journal_destroy(sbi->s_journal);
+       if (!(sb->s_flags & MS_RDONLY)) {
+               EXTN_CLEAR_INCOMPAT_FEATURE(sb, EXTN_FEATURE_INCOMPAT_RECOVER);
+@@ -529,6 +529,12 @@
+                                      "EXTN Check option not supported\n");
+ #endif
+               }
++              else if (!strcmp (this_char, "index"))
++#ifdef CONFIG_EXTN_INDEX
++                      set_opt (*mount_options, INDEX);
++#else
++                      printk("EXTN index option not supported\n");
++#endif
+               else if (!strcmp (this_char, "debug"))
+                       set_opt (*mount_options, DEBUG);
+               else if (!strcmp (this_char, "errors")) {
+@@ -702,6 +708,12 @@
+       es->s_mtime = cpu_to_le32(CURRENT_TIME);
+       extN_update_dynamic_rev(sb);
+       EXTN_SET_INCOMPAT_FEATURE(sb, EXTN_FEATURE_INCOMPAT_RECOVER);
++
++      if (test_opt(sb, INDEX))
++              EXTN_SET_COMPAT_FEATURE(sb, EXTN_FEATURE_COMPAT_DIR_INDEX);
++      else if (EXTN_HAS_COMPAT_FEATURE(sb, EXTN_FEATURE_COMPAT_DIR_INDEX))
++              set_opt (EXTN_SB(sb)->s_mount_opt, INDEX);
++
+       extN_commit_super (sb, es, 1);
+       if (test_opt (sb, DEBUG))
+               printk (KERN_INFO
+@@ -818,7 +830,7 @@
+               sb->s_flags &= ~MS_RDONLY;
+       }
+-      if (sb->u.extN_sb.s_mount_state & EXTN_ERROR_FS) {
++      if (EXTN_SB(sb)->s_mount_state & EXTN_ERROR_FS) {
+               if (es->s_last_orphan)
+                       jbd_debug(1, "Errors on filesystem, "
+                                 "clearing orphan list.\n");
+@@ -1334,10 +1346,10 @@
+               printk(KERN_ERR "EXTN-fs: I/O error on journal device\n");
+               goto out_journal;
+       }
+-      if (ntohl(journal->j_superblock->s_nr_users) != 1) {
++      if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
+               printk(KERN_ERR "EXTN-fs: External journal has more than one "
+                                       "user (unsupported) - %d\n",
+-                      ntohl(journal->j_superblock->s_nr_users));
++                      be32_to_cpu(journal->j_superblock->s_nr_users));
+               goto out_journal;
+       }
+       EXTN_SB(sb)->journal_bdev = bdev;
+@@ -1463,12 +1475,14 @@
+                              struct extN_super_block * es,
+                              int sync)
+ {
++      struct buffer_head *sbh = EXTN_SB(sb)->s_sbh;
++
+       es->s_wtime = cpu_to_le32(CURRENT_TIME);
+-      BUFFER_TRACE(sb->u.extN_sb.s_sbh, "marking dirty");
+-      mark_buffer_dirty(sb->u.extN_sb.s_sbh);
++      BUFFER_TRACE(sbh, "marking dirty");
++      mark_buffer_dirty(sbh);
+       if (sync) {
+-              ll_rw_block(WRITE, 1, &sb->u.extN_sb.s_sbh);
+-              wait_on_buffer(sb->u.extN_sb.s_sbh);
++              ll_rw_block(WRITE, 1, &sbh);
++              wait_on_buffer(sbh);
+       }
+ }
+@@ -1519,7 +1533,7 @@
+               extN_warning(sb, __FUNCTION__, "Marking fs in need of "
+                            "filesystem check.");
+               
+-              sb->u.extN_sb.s_mount_state |= EXTN_ERROR_FS;
++              EXTN_SB(sb)->s_mount_state |= EXTN_ERROR_FS;
+               es->s_state |= cpu_to_le16(EXTN_ERROR_FS);
+               extN_commit_super (sb, es, 1);
+@@ -1734,14 +1748,25 @@
+ static DECLARE_FSTYPE_DEV(extN_fs_type, "extN", extN_read_super);
+-static int __init init_extN_fs(void)
++static void exit_extN_fs(void)
+ {
+-        return register_filesystem(&extN_fs_type);
++      unregister_filesystem(&extN_fs_type);
++      exit_extN_xattr_user();
++      exit_extN_xattr();
+ }
+-static void __exit exit_extN_fs(void)
++static int __init init_extN_fs(void)
+ {
+-      unregister_filesystem(&extN_fs_type);
++      int error = init_extN_xattr();
++      if (!error)
++              error = init_extN_xattr_user();
++      if (!error)
++              error = register_filesystem(&extN_fs_type);
++      if (!error)
++              return 0;
++
++      exit_extN_fs();
++      return error;
+ }
+ EXPORT_SYMBOL(extN_bread);
+--- extN/extN.orig/symlink.c   Tue Sep 24 15:41:40 2002
++++ extN/symlink.c     Tue Sep 24 22:00:43 2002
+@@ -23,14 +23,14 @@
+ static int extN_readlink(struct dentry *dentry, char *buffer, int buflen)
+ {
+-      char *s = (char *)dentry->d_inode->u.extN_i.i_data;
+-      return vfs_readlink(dentry, buffer, buflen, s);
++      struct extN_inode_info *ei = EXTN_I(dentry->d_inode);
++      return vfs_readlink(dentry, buffer, buflen, (char *)ei->i_data);
+ }
+ static int extN_follow_link(struct dentry *dentry, struct nameidata *nd)
+ {
+-      char *s = (char *)dentry->d_inode->u.extN_i.i_data;
+-      return vfs_follow_link(nd, s);
++      struct extN_inode_info *ei = EXTN_I(dentry->d_inode);
++      return vfs_follow_link(nd, (char*)ei->i_data);
+ }
+ struct inode_operations extN_fast_symlink_inode_operations = {
+--- extN/extN.orig-include/extN_fs.h   Tue Sep 24 15:41:40 2002
++++ include/linux/extN_fs.h    Tue Sep 24 22:01:11 2002
+@@ -58,8 +58,6 @@
+  */
+ #define       EXTN_BAD_INO             1      /* Bad blocks inode */
+ #define EXTN_ROOT_INO          2      /* Root inode */
+-#define EXTN_ACL_IDX_INO       3      /* ACL inode */
+-#define EXTN_ACL_DATA_INO      4      /* ACL inode */
+ #define EXTN_BOOT_LOADER_INO   5      /* Boot loader inode */
+ #define EXTN_UNDEL_DIR_INO     6      /* Undelete directory inode */
+ #define EXTN_RESIZE_INO                7      /* Reserved group descriptors inode */
+@@ -84,23 +82,25 @@
+ #define EXTN_MIN_BLOCK_SIZE           1024
+ #define       EXTN_MAX_BLOCK_SIZE             4096
+ #define EXTN_MIN_BLOCK_LOG_SIZE                 10
++
+ #ifdef __KERNEL__
+-# define EXTN_BLOCK_SIZE(s)           ((s)->s_blocksize)
+-#else
+-# define EXTN_BLOCK_SIZE(s)           (EXTN_MIN_BLOCK_SIZE << (s)->s_log_block_size)
+-#endif
+-#define EXTN_ACLE_PER_BLOCK(s)                (EXTN_BLOCK_SIZE(s) / sizeof (struct extN_acl_entry))
+-#define       EXTN_ADDR_PER_BLOCK(s)          (EXTN_BLOCK_SIZE(s) / sizeof (__u32))
+-#ifdef __KERNEL__
+-# define EXTN_BLOCK_SIZE_BITS(s)      ((s)->s_blocksize_bits)
+-#else
+-# define EXTN_BLOCK_SIZE_BITS(s)      ((s)->s_log_block_size + 10)
+-#endif
+-#ifdef __KERNEL__
+-#define       EXTN_ADDR_PER_BLOCK_BITS(s)     ((s)->u.extN_sb.s_addr_per_block_bits)
+-#define EXTN_INODE_SIZE(s)            ((s)->u.extN_sb.s_inode_size)
+-#define EXTN_FIRST_INO(s)             ((s)->u.extN_sb.s_first_ino)
++#define EXTN_SB(sb)   (&((sb)->u.extN_sb))
++#define EXTN_I(inode) (&((inode)->u.extN_i))
++
++#define EXTN_BLOCK_SIZE(s)            ((s)->s_blocksize)
++#define EXTN_BLOCK_SIZE_BITS(s)               ((s)->s_blocksize_bits)
++#define       EXTN_ADDR_PER_BLOCK_BITS(s)     (EXTN_SB(s)->s_addr_per_block_bits)
++#define EXTN_INODE_SIZE(s)            (EXTN_SB(s)->s_inode_size)
++#define EXTN_FIRST_INO(s)             (EXTN_SB(s)->s_first_ino)
+ #else
++
++/* Assume that user mode programs are passing in an extNfs superblock, not
++ * a kernel struct super_block.  This will allow us to call the feature-test
++ * macros from user land. */
++#define EXTN_SB(sb)   (sb)
++
++#define EXTN_BLOCK_SIZE(s)    (EXTN_MIN_BLOCK_SIZE << (s)->s_log_block_size)
++#define EXTN_BLOCK_SIZE_BITS(s)       ((s)->s_log_block_size + 10)
+ #define EXTN_INODE_SIZE(s)    (((s)->s_rev_level == EXTN_GOOD_OLD_REV) ? \
+                                EXTN_GOOD_OLD_INODE_SIZE : \
+                                (s)->s_inode_size)
+@@ -108,6 +108,7 @@
+                                EXTN_GOOD_OLD_FIRST_INO : \
+                                (s)->s_first_ino)
+ #endif
++#define EXTN_ADDR_PER_BLOCK(s)        (EXTN_BLOCK_SIZE(s) / sizeof (__u32))
+ /*
+  * Macro-instructions used to manage fragments
+@@ -116,36 +117,14 @@
+ #define       EXTN_MAX_FRAG_SIZE              4096
+ #define EXTN_MIN_FRAG_LOG_SIZE                  10
+ #ifdef __KERNEL__
+-# define EXTN_FRAG_SIZE(s)            ((s)->u.extN_sb.s_frag_size)
+-# define EXTN_FRAGS_PER_BLOCK(s)      ((s)->u.extN_sb.s_frags_per_block)
++# define EXTN_FRAG_SIZE(s)            (EXTN_SB(s)->s_frag_size)
++# define EXTN_FRAGS_PER_BLOCK(s)      (EXTN_SB(s)->s_frags_per_block)
+ #else
+ # define EXTN_FRAG_SIZE(s)            (EXTN_MIN_FRAG_SIZE << (s)->s_log_frag_size)
+ # define EXTN_FRAGS_PER_BLOCK(s)      (EXTN_BLOCK_SIZE(s) / EXTN_FRAG_SIZE(s))
+ #endif
+ /*
+- * ACL structures
+- */
+-struct extN_acl_header        /* Header of Access Control Lists */
+-{
+-      __u32   aclh_size;
+-      __u32   aclh_file_count;
+-      __u32   aclh_acle_count;
+-      __u32   aclh_first_acle;
+-};
+-
+-struct extN_acl_entry /* Access Control List Entry */
+-{
+-      __u32   acle_size;
+-      __u16   acle_perms;     /* Access permissions */
+-      __u16   acle_type;      /* Type of entry */
+-      __u16   acle_tag;       /* User or group identity */
+-      __u16   acle_pad1;
+-      __u32   acle_next;      /* Pointer on next entry for the */
+-                                      /* same inode or on next free entry */
+-};
+-
+-/*
+  * Structure of a blocks group descriptor
+  */
+ struct extN_group_desc
+@@ -163,15 +142,13 @@
+ /*
+  * Macro-instructions used to manage group descriptors
+  */
++# define EXTN_BLOCKS_PER_GROUP(s)     (EXTN_SB(s)->s_blocks_per_group)
++# define EXTN_INODES_PER_GROUP(s)     (EXTN_SB(s)->s_inodes_per_group)
+ #ifdef __KERNEL__
+-# define EXTN_BLOCKS_PER_GROUP(s)     ((s)->u.extN_sb.s_blocks_per_group)
+-# define EXTN_DESC_PER_BLOCK(s)               ((s)->u.extN_sb.s_desc_per_block)
+-# define EXTN_INODES_PER_GROUP(s)     ((s)->u.extN_sb.s_inodes_per_group)
+-# define EXTN_DESC_PER_BLOCK_BITS(s)  ((s)->u.extN_sb.s_desc_per_block_bits)
++# define EXTN_DESC_PER_BLOCK(s)               (EXTN_SB(s)->s_desc_per_block)
++# define EXTN_DESC_PER_BLOCK_BITS(s)  (EXTN_SB(s)->s_desc_per_block_bits)
+ #else
+-# define EXTN_BLOCKS_PER_GROUP(s)     ((s)->s_blocks_per_group)
+ # define EXTN_DESC_PER_BLOCK(s)               (EXTN_BLOCK_SIZE(s) / sizeof (struct extN_group_desc))
+-# define EXTN_INODES_PER_GROUP(s)     ((s)->s_inodes_per_group)
+ #endif
+ /*
+@@ -339,12 +316,13 @@
+   #define EXTN_MOUNT_WRITEBACK_DATA   0x0C00  /* No data ordering */
+ #define EXTN_MOUNT_UPDATE_JOURNAL     0x1000  /* Update the journal format */
+ #define EXTN_MOUNT_NO_UID32           0x2000  /* Disable 32-bit UIDs */
++#define EXTN_MOUNT_INDEX              0x4000  /* Enable directory index */
+ /* Compatibility, for having both ext2_fs.h and extN_fs.h included at once */
+ #ifndef _LINUX_EXT2_FS_H
+ #define clear_opt(o, opt)             o &= ~EXTN_MOUNT_##opt
+ #define set_opt(o, opt)                       o |= EXTN_MOUNT_##opt
+-#define test_opt(sb, opt)             ((sb)->u.extN_sb.s_mount_opt & \
++#define test_opt(sb, opt)             (EXTN_SB(sb)->s_mount_opt & \
+                                        EXTN_MOUNT_##opt)
+ #else
+ #define EXT2_MOUNT_NOLOAD             EXTN_MOUNT_NOLOAD
+@@ -441,17 +419,11 @@
+ /*EC*/        __u32   s_reserved[197];        /* Padding to the end of the block */
+ };
+-#ifdef __KERNEL__
+-#define EXTN_SB(sb)   (&((sb)->u.extN_sb))
+-#define EXTN_I(inode) (&((inode)->u.extN_i))
+-#else
+-/* Assume that user mode programs are passing in an extNfs superblock, not
+- * a kernel struct super_block.  This will allow us to call the feature-test
+- * macros from user land. */
+-#define EXTN_SB(sb)   (sb)
+-#endif
+-
+-#define NEXT_ORPHAN(inode) (inode)->u.extN_i.i_dtime
++#define NEXT_ORPHAN(inode) EXTN_I(inode)->i_dtime
++static inline struct inode *orphan_list_entry(struct list_head *l)
++{
++      return list_entry(l, struct inode, u.extN_i.i_orphan);
++}
+ /*
+  * Codes for operating systems
+@@ -512,7 +484,7 @@
+ #define EXTN_FEATURE_INCOMPAT_RECOVER         0x0004 /* Needs recovery */
+ #define EXTN_FEATURE_INCOMPAT_JOURNAL_DEV     0x0008 /* Journal device */
+-#define EXTN_FEATURE_COMPAT_SUPP      0
++#define EXTN_FEATURE_COMPAT_SUPP      EXTN_FEATURE_COMPAT_EXT_ATTR
+ #define EXTN_FEATURE_INCOMPAT_SUPP    (EXTN_FEATURE_INCOMPAT_FILETYPE| \
+                                        EXTN_FEATURE_INCOMPAT_RECOVER)
+ #define EXTN_FEATURE_RO_COMPAT_SUPP   (EXTN_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+@@ -575,6 +547,24 @@
+ #define EXTN_DIR_ROUND                        (EXTN_DIR_PAD - 1)
+ #define EXTN_DIR_REC_LEN(name_len)    (((name_len) + 8 + EXTN_DIR_ROUND) & \
+                                        ~EXTN_DIR_ROUND)
++/*
++ * Hash Tree Directory indexing
++ * (c) Daniel Phillips, 2001
++ */
++
++#define CONFIG_EXTN_INDEX
++
++#ifdef CONFIG_EXTN_INDEX
++  enum {extN_dx = 1};
++  #define is_dx(dir) (EXTN_I(dir)->i_flags & EXTN_INDEX_FL)
++#define EXTN_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXTN_LINK_MAX)
++#define EXTN_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
++#else
++  enum {extN_dx = 0};
++  #define is_dx(dir) 0
++#define EXTN_DIR_LINK_MAX(dir) ((dir)->i_nlink >= EXTN_LINK_MAX)
++#define EXTN_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2)
++#endif
+ #ifdef __KERNEL__
+ /*
+@@ -587,6 +577,24 @@
+       unsigned long block_group;
+ };
++/* Defined for extended attributes */
++#define CONFIG_EXTN_FS_XATTR y
++#ifndef ENOATTR
++#define ENOATTR ENODATA               /* No such attribute */
++#endif
++#ifndef ENOTSUP
++#define ENOTSUP EOPNOTSUPP    /* Operation not supported */
++#endif
++#ifndef XATTR_NAME_MAX
++#define XATTR_NAME_MAX   255  /* # chars in an extended attribute name */
++#define XATTR_SIZE_MAX 65536  /* size of an extended attribute value (64k) */
++#define XATTR_LIST_MAX 65536  /* size of extended attribute namelist (64k) */
++#endif
++#ifndef XATTR_CREATE
++#define XATTR_CREATE  1       /* set value, fail if attr already exists */
++#define XATTR_REPLACE 2       /* set value, fail if attr does not exist */
++#endif
++
+ /*
+  * Function prototypes
+  */
+@@ -628,6 +636,7 @@
+ extern unsigned long extN_count_free (struct buffer_head *, unsigned);
+ /* inode.c */
++extern int extN_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
+ extern struct buffer_head * extN_getblk (handle_t *, struct inode *, long, int, int *);
+ extern struct buffer_head * extN_bread (handle_t *, struct inode *, int, int, int *);
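The extN_fs.h changes above replace direct sb->u.extN_sb and inode->u.extN_i dereferences (as seen in the balloc.c and symlink.c hunks earlier in this patch) with the EXTN_SB() and EXTN_I() accessor macros, and give EXTN_SB() a user-land definition so the feature-test macros compile outside the kernel. A hedged, self-contained sketch of that pattern, using stand-in struct names rather than the real extN types:

    /* Sketch of the accessor-macro pattern; the "toy_" names are
     * stand-ins, not the real extN definitions. */
    #include <stdio.h>

    struct toy_sb_info { unsigned long s_inode_size; };
    struct toy_super   { struct { struct toy_sb_info extN_sb; } u; };

    #ifdef __KERNEL__
    /* Kernel build: reach into the in-memory union of the VFS superblock. */
    # define TOY_SB(sb)        (&((sb)->u.extN_sb))
    #else
    /* User build: callers pass the per-fs info directly, so macros built
     * on TOY_SB() compile unchanged in user land. */
    # define TOY_SB(sb)        (sb)
    #endif

    #define TOY_INODE_SIZE(s)  (TOY_SB(s)->s_inode_size)

    int main(void)
    {
    #ifdef __KERNEL__
        struct toy_super sb = { { { 128 } } };
    #else
        struct toy_sb_info sb = { 128 };
    #endif
        printf("inode size: %lu\n", TOY_INODE_SIZE(&sb));
        return 0;
    }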
+--- extN/extN.orig-include/extN_jbd.h  Tue Sep 24 15:41:41 2002
++++ include/linux/extN_jbd.h   Tue Sep 24 22:01:11 2002
+@@ -30,13 +30,19 @@
+ #define EXTN_SINGLEDATA_TRANS_BLOCKS  8
++/* Extended attributes may touch two data buffers, two bitmap buffers,
++ * and two group summaries. */
++
++#define EXTN_XATTR_TRANS_BLOCKS               8
++
+ /* Define the minimum size for a transaction which modifies data.  This
+  * needs to take into account the fact that we may end up modifying two
+  * quota files too (one for the group, one for the user quota).  The
+  * superblock only gets updated once, of course, so don't bother
+  * counting that again for the quota updates. */
+-#define EXTN_DATA_TRANS_BLOCKS                (3 * EXTN_SINGLEDATA_TRANS_BLOCKS - 2)
++#define EXTN_DATA_TRANS_BLOCKS                (3 * EXTN_SINGLEDATA_TRANS_BLOCKS + \
++                                       EXTN_XATTR_TRANS_BLOCKS - 2)
+ extern int extN_writepage_trans_blocks(struct inode *inode);
+@@ -63,6 +69,8 @@
+ #define EXTN_RESERVE_TRANS_BLOCKS     12
++#define EXTN_INDEX_EXTRA_TRANS_BLOCKS 8
++
+ int
+ extN_mark_iloc_dirty(handle_t *handle, 
+                    struct inode *inode,
+@@ -289,7 +297,7 @@
+               return 1;
+       if (test_opt(inode->i_sb, DATA_FLAGS) == EXTN_MOUNT_JOURNAL_DATA)
+               return 1;
+-      if (inode->u.extN_i.i_flags & EXTN_JOURNAL_DATA_FL)
++      if (EXTN_I(inode)->i_flags & EXTN_JOURNAL_DATA_FL)
+               return 1;
+       return 0;
+ }
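The extN_jbd.h hunk reserves journal credits for extended-attribute updates: EXTN_XATTR_TRANS_BLOCKS is folded into EXTN_DATA_TRANS_BLOCKS, raising it from 3*8 - 2 = 22 to 3*8 + 8 - 2 = 30 blocks per data-modifying transaction. A quick standalone check of that arithmetic (local copies of the values, not the kernel macros themselves):

    /* Verify the credit totals implied by the hunk above. */
    #include <stdio.h>

    #define SINGLEDATA_TRANS_BLOCKS 8
    #define XATTR_TRANS_BLOCKS      8

    int main(void)
    {
        int old_credits = 3 * SINGLEDATA_TRANS_BLOCKS - 2;                      /* 22 */
        int new_credits = 3 * SINGLEDATA_TRANS_BLOCKS + XATTR_TRANS_BLOCKS - 2; /* 30 */
        printf("data trans blocks per transaction: %d -> %d\n",
               old_credits, new_credits);
        return 0;
    }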