* Compiles after merging b1_4
lustre/lvfs/fsfilt_ext3.c
index 91513f8..c5ff7a9 100644
@@ -25,6 +25,8 @@
 
 #define DEBUG_SUBSYSTEM S_FILTER
 
+#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/jbd.h>
 #include <linux/slab.h>
 #include <linux/ext3_fs.h>
 #include <linux/ext3_jbd.h>
 #include <linux/version.h>
-/* XXX ugh */
+#include <linux/bitops.h>
+#include <linux/quota.h>
+#include <linux/quotaio_v1.h>
+#include <linux/quotaio_v2.h>
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- #include <linux/ext3_xattr.h>
+#include <linux/ext3_xattr.h>
 #else
- #include <linux/../../fs/ext3/xattr.h>
+#include <ext3/xattr.h>
 #endif
-#include <linux/kp30.h>
+
+#include <libcfs/kp30.h>
 #include <linux/lustre_fsfilt.h>
 #include <linux/obd.h>
 #include <linux/obd_class.h>
-#include <linux/module.h>
+#include <linux/lustre_quota.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+#include <linux/iobuf.h>
+#endif
+
+#ifdef EXT3_MULTIBLOCK_ALLOCATOR
+#include <linux/ext3_extents.h>
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
+# define lock_24kernel() lock_kernel()
+# define unlock_24kernel() unlock_kernel()
+#else
+# define lock_24kernel() do {} while (0)
+# define unlock_24kernel() do {} while (0)
+#endif
 
 static kmem_cache_t *fcb_cache;
-static atomic_t fcb_cache_count = ATOMIC_INIT(0);
 
 struct fsfilt_cb_data {
         struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
@@ -61,52 +81,59 @@ struct fsfilt_cb_data {
 #endif
 #define XATTR_LUSTRE_MDS_LOV_EA         "lov"
 
-#define EXT3_XATTR_INDEX_LUSTRE         5                         /* old */
-#define XATTR_LUSTRE_MDS_OBJID          "system.lustre_mds_objid" /* old */
-
 /*
  * We don't currently need any additional blocks for rmdir and
  * unlink transactions because we are storing the OST oa_id inside
  * the inode (which we will be changing anyways as part of this
  * transaction).
  */
-static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private)
+static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
+                               int logs)
 {
-        /* For updates to the last recieved file */
-        int nblocks = EXT3_DATA_TRANS_BLOCKS;
-        int blocksize, block_count = 0;
+        /* For updates to the last received file */
+        int nblocks = EXT3_SINGLEDATA_TRANS_BLOCKS;
+        journal_t *journal;
         void *handle;
 
         if (current->journal_info) {
-                CDEBUG(D_INODE, "increasing refcount on %p\n", current->journal_info);
+                CDEBUG(D_INODE, "increasing refcount on %p\n",
+                       current->journal_info);
                 goto journal_start;
         }
 
         switch(op) {
-        case FSFILT_OP_CREATE_LOG:
-                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS+EXT3_DATA_TRANS_BLOCKS;
-                op = FSFILT_OP_CREATE;
-                break;
-        case FSFILT_OP_UNLINK_LOG:
-                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS+EXT3_DATA_TRANS_BLOCKS;
-                op = FSFILT_OP_UNLINK;
-                break;
-        }
-
-        switch(op) {
         case FSFILT_OP_RMDIR:
         case FSFILT_OP_UNLINK:
+                /* delete one file + create/update logs for each stripe */
                 nblocks += EXT3_DELETE_TRANS_BLOCKS;
+                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
+                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                 break;
         case FSFILT_OP_RENAME:
                 /* modify additional directory */
-                nblocks += EXT3_DATA_TRANS_BLOCKS;
+                nblocks += EXT3_SINGLEDATA_TRANS_BLOCKS;
                 /* no break */
         case FSFILT_OP_SYMLINK:
                 /* additional block + block bitmap + GDT for long symlink */
                 nblocks += 3;
                 /* no break */
-        case FSFILT_OP_CREATE:
+        case FSFILT_OP_CREATE: {
+#if defined(EXT3_EXTENTS_FL) && defined(EXT3_INDEX_FL)
+                static int warned;
+                if (!warned) {
+                        if (!test_opt(inode->i_sb, EXTENTS)) {
+                                warned = 1;
+                        } else if (((EXT3_I(inode)->i_flags &
+                              cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL)) ==
+                              cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL))) {
+                                CWARN("extent-mapped directory found - contact "
+                                      "CFS: support@clusterfs.com\n");
+                                warned = 1;
+                        }
+                }
+#endif
+                /* no break */
+        }
         case FSFILT_OP_MKDIR:
         case FSFILT_OP_MKNOD:
                 /* modify one inode + block bitmap + GDT */
@@ -114,32 +141,50 @@ static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private)
                 /* no break */
         case FSFILT_OP_LINK:
                 /* modify parent directory */
-                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS+EXT3_DATA_TRANS_BLOCKS;
+                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
+                        EXT3_DATA_TRANS_BLOCKS;
+                /* create/update logs for each stripe */
+                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
+                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                 break;
         case FSFILT_OP_SETATTR:
                 /* Setattr on inode */
                 nblocks += 1;
+                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
+                        EXT3_DATA_TRANS_BLOCKS;
+                /* quota chown log for each stripe */
+                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
+                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                 break;
-        case FSFILT_OP_CANCEL_UNLINK_LOG:
-                blocksize = 1 << inode->i_blkbits;
-                block_count = (blocksize - 1) + LLOG_CHUNK_SIZE;
-                block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
-                block_count = block_count * EXT3_DATA_TRANS_BLOCKS + 2;
-                nblocks = 2 * 2 * block_count;
+        case FSFILT_OP_CANCEL_UNLINK:
+                /* blocks for log header bitmap update OR
+                 * blocks for catalog header bitmap update + unlink of logs */
+                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
+                        EXT3_DELETE_TRANS_BLOCKS * logs;
                 break;
         default: CERROR("unknown transaction start op %d\n", op);
-                 LBUG();
+                LBUG();
         }
 
         LASSERT(current->journal_info == desc_private);
+        journal = EXT3_SB(inode->i_sb)->s_journal;
+        if (nblocks > journal->j_max_transaction_buffers) {
+                CERROR("too many credits %d for op %ux%u using %d instead\n",
+                       nblocks, op, logs, journal->j_max_transaction_buffers);
+                nblocks = journal->j_max_transaction_buffers;
+        }
 
  journal_start:
-        lock_kernel();
+        LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
+        lock_24kernel();
         handle = journal_start(EXT3_JOURNAL(inode), nblocks);
-        unlock_kernel();
+        unlock_24kernel();
 
         if (!IS_ERR(handle))
                 LASSERT(current->journal_info == handle);
+        else
+                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
+                       op, nblocks, PTR_ERR(handle));
         return handle;
 }
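
A minimal caller-side sketch (not part of this patch) of how the new "logs"
argument is meant to be used: the caller passes the number of llog files,
typically the stripe count, so the credit estimate covers the per-stripe log
updates.  The fsfilt_start_log()/fsfilt_commit() wrapper names, obd, oti and
lsm are assumptions for illustration only.

void *handle;

handle = fsfilt_start_log(obd, inode, FSFILT_OP_UNLINK, oti,
                          lsm->lsm_stripe_count);
if (IS_ERR(handle))
        RETURN(PTR_ERR(handle));
/* ... modify the inode and its EAs under this transaction ... */
rc = fsfilt_commit(obd, inode, handle, 0 /* force_sync */);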
 
@@ -215,7 +260,7 @@ static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
         /* last_rcvd update */
         needed += EXT3_DATA_TRANS_BLOCKS;
 
-#if defined(CONFIG_QUOTA) && !defined(__x86_64__) /* XXX */
+#if defined(CONFIG_QUOTA)
         /* We assume that there will be 1 bit set in s_dquot.flags for each
          * quota file that is active.  This is at least true for now.
          */
@@ -242,7 +287,7 @@ static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
  */
 static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
                                    int niocount, struct niobuf_local *nb,
-                                   void *desc_private)
+                                   void *desc_private, int logs)
 {
         journal_t *journal;
         handle_t *handle;
@@ -269,9 +314,10 @@ static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
                 needed = journal->j_max_transaction_buffers;
         }
 
-        lock_kernel();
+        LASSERTF(needed > 0, "can't start %d credit transaction\n", needed);
+        lock_24kernel();
         handle = journal_start(journal, needed);
-        unlock_kernel();
+        unlock_24kernel();
         if (IS_ERR(handle)) {
                 CERROR("can't get handle for %d credits: rc = %ld\n", needed,
                        PTR_ERR(handle));
@@ -292,19 +338,21 @@ static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
         if (force_sync)
                 handle->h_sync = 1; /* recovery likes this */
 
-        lock_kernel();
+        lock_24kernel();
         rc = journal_stop(handle);
-        unlock_kernel();
+        unlock_24kernel();
 
-        // LASSERT(current->journal_info == NULL);
         return rc;
 }
 
 static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
-                                        void **wait_handle)
+                                    void **wait_handle)
 {
+        unsigned long tid;
         transaction_t *transaction;
-        unsigned long tid, rtid;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+        unsigned long rtid;
+#endif
         handle_t *handle = h;
         journal_t *journal;
         int rc;
@@ -340,14 +388,17 @@ static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
 
 static int fsfilt_ext3_commit_wait(struct inode *inode, void *h)
 {
+        journal_t *journal = EXT3_JOURNAL(inode);
         tid_t tid = (tid_t)(long)h;
 
         CDEBUG(D_INODE, "commit wait: %lu\n", (unsigned long) tid);
-       if (is_journal_aborted(EXT3_JOURNAL(inode)))
+        if (unlikely(is_journal_aborted(journal)))
                 return -EIO;
 
         log_wait_commit(EXT3_JOURNAL(inode), tid);
 
+        if (unlikely(is_journal_aborted(journal)))
+                return -EIO;
         return 0;
 }
 
@@ -411,71 +462,21 @@ static int fsfilt_ext3_iocontrol(struct inode * inode, struct file *file,
         RETURN(rc);
 }
 
-#undef INLINE_EA
-#undef OLD_EA
 static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
                               void *lmm, int lmm_size)
 {
-        int rc, old_ea = 0;
+        int rc;
 
-#ifdef INLINE_EA  /* can go away before 1.0 - just for testing bug 2097 now */
-        /* Nasty hack city - store stripe MD data in the block pointers if
-         * it will fit, because putting it in an EA currently kills the MDS
-         * performance.  We'll fix this with "fast EAs" in the future.
-         */
-        if (inode->i_blocks == 0 && lmm_size <= sizeof(EXT3_I(inode)->i_data) -
-                                            sizeof(EXT3_I(inode)->i_data[0])) {
-                unsigned old_size = EXT3_I(inode)->i_data[0];
-                if (old_size != 0) {
-                        LASSERT(old_size < sizeof(EXT3_I(inode)->i_data));
-                        CERROR("setting EA on %lu/%u again... interesting\n",
-                               inode->i_ino, inode->i_generation);
-                }
+        LASSERT(down_trylock(&inode->i_sem) != 0);
 
-                EXT3_I(inode)->i_data[0] = cpu_to_le32(lmm_size);
-                memcpy(&EXT3_I(inode)->i_data[1], lmm, lmm_size);
-                mark_inode_dirty(inode);
-                return 0;
-        }
-#endif
-#ifdef OLD_EA
-        /* keep this when we get rid of OLD_EA (too noisy during conversion) */
-        if (EXT3_I(inode)->i_file_acl /* || large inode EA flag */) {
+        if (EXT3_I(inode)->i_file_acl /* || large inode EA flag */)
                 CWARN("setting EA on %lu/%u again... interesting\n",
                        inode->i_ino, inode->i_generation);
-                old_ea = 1;
-        }
 
         lock_kernel();
-        /* this can go away before 1.0.  For bug 2097 testing only. */
-        rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_LUSTRE,
-                                   XATTR_LUSTRE_MDS_OBJID, lmm, lmm_size, 0);
-#else
-        lock_kernel();
         rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
                                    XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size, 0);
 
-        /* This tries to delete the old-format LOV EA, but only as long as we
-         * have successfully saved the new-format LOV EA (we can always try
-         * the conversion again the next time the file is accessed).  It is
-         * possible (although unlikely) that the new-format LOV EA couldn't be
-         * saved because it ran out of space but we would need a file striped
-         * over least 123 OSTs before the two EAs filled a 4kB block.
-         *
-         * This can be removed when all filesystems have converted to the
-         * new EA format, but otherwise adds little if any overhead.  If we
-         * wanted backward compatibility for existing files, we could keep
-         * the old EA around for a while but we'd have to clean it up later. */
-        if (rc >= 0 && old_ea) {
-                int err = ext3_xattr_set_handle(handle, inode,
-                                                EXT3_XATTR_INDEX_LUSTRE,
-                                                XATTR_LUSTRE_MDS_OBJID,
-                                                NULL, 0, 0);
-                if (err)
-                        CERROR("error deleting old LOV EA on %lu/%u: rc %d\n",
-                               inode->i_ino, inode->i_generation, err);
-        }
-#endif
         unlock_kernel();
 
         if (rc)
@@ -491,61 +492,9 @@ static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size)
 
         LASSERT(down_trylock(&inode->i_sem) != 0);
         lock_kernel();
-        /* Keep support for reading "inline EAs" until we convert
-         * users over to new format entirely.  See bug 841/2097. */
-        if (inode->i_blocks == 0 && EXT3_I(inode)->i_data[0]) {
-                unsigned size = le32_to_cpu(EXT3_I(inode)->i_data[0]);
-                void *handle;
-
-                LASSERT(size < sizeof(EXT3_I(inode)->i_data));
-                if (lmm) {
-                        if (size > lmm_size) {
-                                CERROR("inline EA on %lu/%u bad size %u > %u\n",
-                                       inode->i_ino, inode->i_generation,
-                                       size, lmm_size);
-                                return -ERANGE;
-                        }
-                        memcpy(lmm, &EXT3_I(inode)->i_data[1], size);
-                }
-
-#ifndef INLINE_EA
-                /* migrate LOV EA data to external block - keep same format */
-                CWARN("DEBUG: migrate inline EA for inode %lu/%u to block\n",
-                      inode->i_ino, inode->i_generation);
-
-                handle = journal_start(EXT3_JOURNAL(inode),
-                                       EXT3_XATTR_TRANS_BLOCKS);
-                if (!IS_ERR(handle)) {
-                        int err;
-                        rc = fsfilt_ext3_set_md(inode, handle,
-                                                &EXT3_I(inode)->i_data[1],size);
-                        if (rc == 0) {
-                                memset(EXT3_I(inode)->i_data, 0,
-                                       sizeof(EXT3_I(inode)->i_data));
-                                mark_inode_dirty(inode);
-                        }
-                        err = journal_stop(handle);
-                        if (err && rc == 0)
-                                rc = err;
-                } else {
-                        rc = PTR_ERR(handle);
-                }
-#endif
-                unlock_kernel();
-                return size;
-        }
 
         rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
                             XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size);
-        /* try old EA type if new one failed - MDS will convert it for us */
-        if (rc == -ENODATA) {
-                CDEBUG(D_INFO,"failed new LOV EA %d/%s from inode %lu: rc %d\n",
-                       EXT3_XATTR_INDEX_TRUSTED, XATTR_LUSTRE_MDS_LOV_EA,
-                       inode->i_ino, rc);
-
-                rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_LUSTRE,
-                                    XATTR_LUSTRE_MDS_OBJID, lmm, lmm_size);
-        }
         unlock_kernel();
 
         /* This gives us the MD size */
@@ -554,7 +503,7 @@ static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size)
 
         if (rc < 0) {
                 CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
-                       EXT3_XATTR_INDEX_LUSTRE, XATTR_LUSTRE_MDS_OBJID,
+                       EXT3_XATTR_INDEX_TRUSTED, XATTR_LUSTRE_MDS_LOV_EA,
                        inode->i_ino, rc);
                 memset(lmm, 0, lmm_size);
                 return (rc == -ENODATA) ? 0 : rc;
@@ -563,6 +512,44 @@ static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size)
         return rc;
 }
 
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct bio *bio)
+{
+        submit_bio(rw, bio);
+        return 0;
+}
+#else
+static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct kiobuf *bio)
+{
+        int rc, blk_per_page;
+
+        rc = brw_kiovec(rw, 1, &bio, inode->i_dev,
+                        KIOBUF_GET_BLOCKS(bio), 1 << inode->i_blkbits);
+        /*
+         * brw_kiovec() returns the number of bytes actually written.  If an
+         * error occurred after something was written, the error code is
+         * returned through kiobuf->errno.  (See bug 6854.)
+         */
+
+        blk_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+
+        if (rc != (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page) {
+                CERROR("short write?  expected %d, wrote %d (%d)\n",
+                       (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page,
+                       rc, bio->errno);
+        }
+        if (bio->errno != 0) {
+                CERROR("IO error. Wrote %d of %d (%d)\n",
+                       rc,
+                       (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page,
+                       bio->errno);
+                rc = bio->errno;
+        }
+
+        return rc;
+}
+#endif
+
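
A rough usage sketch for the 2.6 branch of fs_send_bio above (not from this
patch): the caller builds a struct bio for already-mapped file data and hands
it to the hook, which just forwards it to submit_bio().  bio_alloc() and
bio_add_page() are standard 2.6 block-layer calls; blocknr, my_end_io and
my_cookie are hypothetical names.

struct bio *bio = bio_alloc(GFP_NOIO, 1);               /* one page */

bio->bi_bdev    = inode->i_sb->s_bdev;
bio->bi_sector  = blocknr * (inode->i_sb->s_blocksize >> 9);
bio->bi_end_io  = my_end_io;            /* hypothetical completion handler */
bio->bi_private = my_cookie;            /* hypothetical per-request cookie */
bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);

rc = fsfilt_ext3_send_bio(WRITE, inode, bio);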
 static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
                                     loff_t *off)
 {
@@ -626,7 +613,6 @@ static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
         fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);
 
         OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
-        atomic_dec(&fcb_cache_count);
 }
 
 static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
@@ -639,17 +625,16 @@ static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
         if (fcb == NULL)
                 RETURN(-ENOMEM);
 
-        atomic_inc(&fcb_cache_count);
         fcb->cb_func = cb_func;
         fcb->cb_obd = obd;
         fcb->cb_last_rcvd = last_rcvd;
         fcb->cb_data = cb_data;
 
         CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
-        lock_kernel();
+        lock_24kernel();
         journal_callback_set(handle, fsfilt_ext3_cb_func,
                              (struct journal_callback *)fcb);
-        unlock_kernel();
+        unlock_24kernel();
 
         return 0;
 }
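
For illustration (not part of this patch), a sketch of how this hook is
typically consumed: a commit callback that only treats last_rcvd as stable
once the journal actually commits.  The callback prototype mirrors the
invocation in fsfilt_ext3_cb_func() above; the trailing parameters of
fsfilt_ext3_add_journal_cb() are not fully visible in this hunk, so the call
below assumes they are (handle, cb_func, cb_data) as used in its body, and
written_cb/cb_data are hypothetical names.

static void written_cb(struct obd_device *obd, __u64 last_rcvd,
                       void *cb_data, int error)
{
        /* e.g. advance obd->obd_last_committed and wake threads waiting
         * to acknowledge the client (illustrative only) */
}

/* registered while the transaction handle is still open;
 * argument order after last_rcvd is assumed, see note above */
rc = fsfilt_ext3_add_journal_cb(obd, last_rcvd, handle, written_cb, cb_data);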
@@ -684,12 +669,341 @@ static int fsfilt_ext3_sync(struct super_block *sb)
         return ext3_force_commit(sb);
 }
 
+#if defined(EXT3_MULTIBLOCK_ALLOCATOR) && (!defined(EXT3_EXT_CACHE_NO) || defined(EXT_CACHE_MARK))
+#warning "kernel code has old extents/mballoc patch, disabling"
+#undef EXT3_MULTIBLOCK_ALLOCATOR
+#endif
+#ifndef EXT3_EXTENTS_FL
+#define EXT3_EXTENTS_FL                        0x00080000 /* Inode uses extents */
+#endif
+
+#ifdef EXT3_MULTIBLOCK_ALLOCATOR
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+#define ext3_up_truncate_sem(inode)  up_write(&EXT3_I(inode)->truncate_sem);
+#define ext3_down_truncate_sem(inode)  down_write(&EXT3_I(inode)->truncate_sem);
+#else
+#define ext3_up_truncate_sem(inode)  up(&EXT3_I(inode)->truncate_sem);
+#define ext3_down_truncate_sem(inode)  down(&EXT3_I(inode)->truncate_sem);
+#endif
+
+#include <linux/lustre_version.h>
+#if EXT3_EXT_MAGIC == 0xf301
+#define ee_start e_start
+#define ee_block e_block
+#define ee_len   e_num
+#endif
+#ifndef EXT3_BB_MAX_BLOCKS
+#define ext3_mb_new_blocks(handle, inode, goal, count, aflags, err) \
+        ext3_new_blocks(handle, inode, count, goal, err)
+#endif
+
+struct bpointers {
+        unsigned long *blocks;
+        int *created;
+        unsigned long start;
+        int num;
+        int init_num;
+        int create;
+};
+
+static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
+                              unsigned long block, int *aflags)
+{
+        struct ext3_inode_info *ei = EXT3_I(inode);
+        unsigned long bg_start;
+        unsigned long colour;
+        int depth;
+
+        if (path) {
+                struct ext3_extent *ex;
+                depth = path->p_depth;
+
+                /* try to predict block placement */
+                if ((ex = path[depth].p_ext)) {
+#if 0
+                        /* This prefers to eat into a contiguous extent
+                         * rather than find an extent that the whole
+                         * request will fit into.  This can fragment data
+                         * block allocation and prevents our lovely 1M I/Os
+                         * from reaching the disk intact. */
+                        if (ex->ee_block + ex->ee_len == block)
+                                *aflags |= 1;
+#endif
+                        return ex->ee_start + (block - ex->ee_block);
+                }
+
+                /* the index appears to be empty;
+                 * try allocating starting from the index block itself */
+                if (path[depth].p_bh)
+                        return path[depth].p_bh->b_blocknr;
+        }
+
+        /* OK. use inode's group */
+        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
+                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
+        colour = (current->pid % 16) *
+                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+        return bg_start + colour + block;
+}
+
+static int ext3_ext_new_extent_cb(struct ext3_extents_tree *tree,
+                                  struct ext3_ext_path *path,
+                                  struct ext3_ext_cache *cex)
+{
+        struct inode *inode = tree->inode;
+        struct bpointers *bp = tree->private;
+        struct ext3_extent nex;
+        int count, err, goal;
+        unsigned long pblock;
+        unsigned long tgen;
+        loff_t new_i_size;
+        handle_t *handle;
+        int i, aflags = 0;
+
+        i = EXT_DEPTH(tree);
+        EXT_ASSERT(i == path->p_depth);
+        EXT_ASSERT(path[i].p_hdr);
+
+        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
+                err = EXT_CONTINUE;
+                goto map;
+        }
+
+        if (bp->create == 0) {
+                i = 0;
+                if (cex->ec_block < bp->start)
+                        i = bp->start - cex->ec_block;
+                if (i >= cex->ec_len)
+                        CERROR("nothing to do?! i = %d, e_num = %u\n",
+                                        i, cex->ec_len);
+                for (; i < cex->ec_len && bp->num; i++) {
+                        *(bp->created) = 0;
+                        bp->created++;
+                        *(bp->blocks) = 0;
+                        bp->blocks++;
+                        bp->num--;
+                        bp->start++;
+                }
+
+                return EXT_CONTINUE;
+        }
+
+        tgen = EXT_GENERATION(tree);
+        count = ext3_ext_calc_credits_for_insert(tree, path);
+        ext3_up_truncate_sem(inode);
+
+        lock_24kernel();
+        handle = journal_start(EXT3_JOURNAL(inode), count+EXT3_ALLOC_NEEDED+1);
+        unlock_24kernel();
+        if (IS_ERR(handle)) {
+                ext3_down_truncate_sem(inode);
+                return PTR_ERR(handle);
+        }
+
+        ext3_down_truncate_sem(inode);
+        if (tgen != EXT_GENERATION(tree)) {
+                /* the tree has changed, so the path may now be invalid */
+                lock_24kernel();
+                journal_stop(handle);
+                unlock_24kernel();
+                return EXT_REPEAT;
+        }
+
+        count = cex->ec_len;
+        goal = ext3_ext_find_goal(inode, path, cex->ec_block, &aflags);
+        aflags |= 2; /* blocks have already been reserved */
+        pblock = ext3_mb_new_blocks(handle, inode, goal, &count, aflags, &err);
+        if (!pblock)
+                goto out;
+        EXT_ASSERT(count <= cex->ec_len);
+
+        /* insert new extent */
+        nex.ee_block = cex->ec_block;
+        nex.ee_start = pblock;
+        nex.ee_len = count;
+        err = ext3_ext_insert_extent(handle, tree, path, &nex);
+        if (err)
+                goto out;
+
+        /*
+         * Store the length of the extent we actually inserted, so that
+         * ext3_ext_walk_space() continues scanning after that block
+         */
+        cex->ec_len = nex.ee_len;
+        cex->ec_start = nex.ee_start;
+        BUG_ON(nex.ee_len == 0);
+        BUG_ON(nex.ee_block != cex->ec_block);
+
+        /* correct on-disk inode size */
+        if (nex.ee_len > 0) {
+                new_i_size = (loff_t) nex.ee_block + nex.ee_len;
+                new_i_size = new_i_size << inode->i_blkbits;
+                if (new_i_size > EXT3_I(inode)->i_disksize) {
+                        EXT3_I(inode)->i_disksize = new_i_size;
+                        err = ext3_mark_inode_dirty(handle, inode);
+                }
+        }
+
+out:
+        lock_24kernel();
+        journal_stop(handle);
+        unlock_24kernel();
+map:
+        if (err >= 0) {
+                /* map blocks */
+                if (bp->num == 0) {
+                        CERROR("hmm. why do we find this extent?\n");
+                        CERROR("initial space: %lu:%u\n",
+                                bp->start, bp->init_num);
+                        CERROR("current extent: %u/%u/%u %d\n",
+                                cex->ec_block, cex->ec_len,
+                                cex->ec_start, cex->ec_type);
+                }
+                i = 0;
+                if (cex->ec_block < bp->start)
+                        i = bp->start - cex->ec_block;
+                if (i >= cex->ec_len)
+                        CERROR("nothing to do?! i = %d, e_num = %u\n",
+                                        i, cex->ec_len);
+                for (; i < cex->ec_len && bp->num; i++) {
+                        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
+                                *(bp->created) = 0;
+                        else
+                                *(bp->created) = 1;
+                        bp->created++;
+                        *(bp->blocks) = cex->ec_start + i;
+                        bp->blocks++;
+                        bp->num--;
+                        bp->start++;
+                }
+        }
+        return err;
+}
+
+int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
+                       unsigned long num, unsigned long *blocks,
+                       int *created, int create)
+{
+        struct ext3_extents_tree tree;
+        struct bpointers bp;
+        int err;
+
+        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
+                block, block + num, (unsigned) inode->i_ino);
+
+        ext3_init_tree_desc(&tree, inode);
+        tree.private = &bp;
+        bp.blocks = blocks;
+        bp.created = created;
+        bp.start = block;
+        bp.init_num = bp.num = num;
+        bp.create = create;
+
+        ext3_down_truncate_sem(inode);
+        err = ext3_ext_walk_space(&tree, block, num, ext3_ext_new_extent_cb);
+        ext3_ext_invalidate_cache(&tree);
+        ext3_up_truncate_sem(inode);
+
+        return err;
+}
+
+int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
+                                    int pages, unsigned long *blocks,
+                                    int *created, int create)
+{
+        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
+        int rc = 0, i = 0;
+        struct page *fp = NULL;
+        int clen = 0;
+
+        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
+                inode->i_ino, pages, (*page)->index);
+
+        /* pages are already sorted, so we just have to find
+         * contiguous runs and process each one as an extent */
+        while (i < pages) {
+                if (fp == NULL) {
+                        /* start new extent */
+                        fp = *page++;
+                        clen = 1;
+                        i++;
+                        continue;
+                } else if (fp->index + clen == (*page)->index) {
+                        /* continue the extent */
+                        page++;
+                        clen++;
+                        i++;
+                        continue;
+                }
+
+                /* process found extent */
+                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
+                                        clen * blocks_per_page, blocks,
+                                        created, create);
+                if (rc)
+                        GOTO(cleanup, rc);
+
+                /* look for next extent */
+                fp = NULL;
+                blocks += blocks_per_page * clen;
+                created += blocks_per_page * clen;
+        }
+
+        if (fp)
+                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
+                                        clen * blocks_per_page, blocks,
+                                        created, create);
+cleanup:
+        return rc;
+}
+#endif
+
 extern int ext3_map_inode_page(struct inode *inode, struct page *page,
                                unsigned long *blocks, int *created, int create);
-int fsfilt_ext3_map_inode_page(struct inode *inode, struct page *page,
-                               unsigned long *blocks, int *created, int create)
+int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
+                                   int pages, unsigned long *blocks,
+                                   int *created, int create)
+{
+        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
+        unsigned long *b;
+        int rc = 0, i, *cr;
+
+        for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
+                rc = ext3_map_inode_page(inode, *page, b, cr, create);
+                if (rc) {
+                        CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
+                               inode->i_ino, *b, *cr, create, rc);
+                        break;
+                }
+
+                b += blocks_per_page;
+                cr += blocks_per_page;
+        }
+        return rc;
+}
+
+int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
+                                int pages, unsigned long *blocks,
+                                int *created, int create,
+                                struct semaphore *optional_sem)
 {
-        return ext3_map_inode_page(inode, page, blocks, created, create);
+        int rc;
+#ifdef EXT3_MULTIBLOCK_ALLOCATOR
+        if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
+                rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
+                                                     blocks, created, create);
+                return rc;
+        }
+#endif
+        if (optional_sem != NULL)
+                down(optional_sem);
+        rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
+                                            created, create);
+        if (optional_sem != NULL)
+                up(optional_sem);
+
+        return rc;
 }
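
A minimal caller-side sketch (not from this patch) of the new multi-page
entry point, assuming the page array is already sorted by index as the
extent path requires; NBLOCKS, npages, alloc_sem and the error handling are
illustrative assumptions.

unsigned long blocks[NBLOCKS];  /* >= npages * (PAGE_SIZE >> blkbits) */
int created[NBLOCKS];
int rc;

/* pass a semaphore only if the block-mapped (non-extent) path needs
 * external serialization; NULL is fine otherwise */
rc = fsfilt_ext3_map_inode_pages(inode, pages, npages, blocks, created,
                                 1 /* create */, &alloc_sem);
if (rc)
        CERROR("map %d pages failed: rc = %d\n", npages, rc);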
 
 extern int ext3_prep_san_write(struct inode *inode, long *blocks,
@@ -764,10 +1078,10 @@ static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
         block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
 
         journal = EXT3_SB(inode->i_sb)->s_journal;
-        lock_kernel();
+        lock_24kernel();
         handle = journal_start(journal,
                                block_count * EXT3_DATA_TRANS_BLOCKS + 2);
-        unlock_kernel();
+        unlock_24kernel();
         if (IS_ERR(handle)) {
                 CERROR("can't start transaction\n");
                 return PTR_ERR(handle);
@@ -825,9 +1139,9 @@ out:
                 unlock_kernel();
         }
 
-        lock_kernel();
+        lock_24kernel();
         journal_stop(handle);
-        unlock_kernel();
+        unlock_24kernel();
 
         if (err == 0)
                 *offs = offset;
@@ -848,34 +1162,114 @@ static int fsfilt_ext3_setup(struct super_block *sb)
         return 0;
 }
 
+/* If fso is NULL, op is an FSFILT operation; otherwise op is the number of
+ * fso objects.  logs is the number of log files to update. */
+static int fsfilt_ext3_get_op_len(int op, struct fsfilt_objinfo *fso, int logs)
+{
+        if (!fso) {
+                switch (op) {
+                case FSFILT_OP_CREATE:
+                        /* directory leaf, index & indirect & EA */
+                        return 4 + 3 * logs;
+                case FSFILT_OP_UNLINK:
+                        return 3 * logs;
+                }
+        } else {
+                int i;
+                int needed = 0;
+                struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
+                int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
+                int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
+                for (i = 0; i < op; i++, fso++) {
+                        int nblocks = fso->fso_bufcnt * blockpp;
+                        int ndindirect = min(nblocks, addrpp + 1);
+                        int nindir = nblocks + ndindirect + 1;
+
+                        needed += nindir;
+                }
+                return needed + 3 * logs;
+        }
+
+        return 0;
+}
+
+static inline struct ext3_group_desc *
+get_group_desc(struct super_block *sb, int group)
+{
+        unsigned long desc_block, desc;
+        struct ext3_group_desc *gdp;
+
+        desc_block = group / EXT3_DESC_PER_BLOCK(sb);
+        desc = group % EXT3_DESC_PER_BLOCK(sb);
+        gdp = (struct ext3_group_desc *)
+              EXT3_SB(sb)->s_group_desc[desc_block]->b_data;
+
+        return gdp + desc;
+}
+
+static inline struct buffer_head *
+read_inode_bitmap(struct super_block *sb, unsigned long group)
+{
+        struct ext3_group_desc *desc;
+        struct buffer_head *bh;
+
+        desc = get_group_desc(sb, group);
+        bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
+
+        return bh;
+}
+
+static inline struct inode *ext3_iget_inuse(struct super_block *sb,
+                                     struct buffer_head *bitmap_bh,
+                                     int index, unsigned long ino)
+{
+        struct inode *inode = NULL;
+
+        if (ext3_test_bit(index, bitmap_bh->b_data))
+                inode = iget(sb, ino);
+
+        return inode;
+}
+
+#ifdef HAVE_QUOTA_SUPPORT
+# include "fsfilt_ext3_quota.h"
+#endif
+
 static struct fsfilt_operations fsfilt_ext3_ops = {
-        fs_type:                "ext3",
-        fs_owner:               THIS_MODULE,
-        fs_start:               fsfilt_ext3_start,
-        fs_brw_start:           fsfilt_ext3_brw_start,
-        fs_commit:              fsfilt_ext3_commit,
-        fs_commit_async:        fsfilt_ext3_commit_async,
-        fs_commit_wait:         fsfilt_ext3_commit_wait,
-        fs_setattr:             fsfilt_ext3_setattr,
-        fs_iocontrol:           fsfilt_ext3_iocontrol,
-        fs_set_md:              fsfilt_ext3_set_md,
-        fs_get_md:              fsfilt_ext3_get_md,
-        fs_readpage:            fsfilt_ext3_readpage,
-        fs_add_journal_cb:      fsfilt_ext3_add_journal_cb,
-        fs_statfs:              fsfilt_ext3_statfs,
-        fs_sync:                fsfilt_ext3_sync,
-        fs_map_inode_page:      fsfilt_ext3_map_inode_page,
-        fs_prep_san_write:      fsfilt_ext3_prep_san_write,
-        fs_write_record:        fsfilt_ext3_write_record,
-        fs_read_record:         fsfilt_ext3_read_record,
-        fs_setup:               fsfilt_ext3_setup,
+        .fs_type                = "ext3",
+        .fs_owner               = THIS_MODULE,
+        .fs_start               = fsfilt_ext3_start,
+        .fs_brw_start           = fsfilt_ext3_brw_start,
+        .fs_commit              = fsfilt_ext3_commit,
+        .fs_commit_async        = fsfilt_ext3_commit_async,
+        .fs_commit_wait         = fsfilt_ext3_commit_wait,
+        .fs_setattr             = fsfilt_ext3_setattr,
+        .fs_iocontrol           = fsfilt_ext3_iocontrol,
+        .fs_set_md              = fsfilt_ext3_set_md,
+        .fs_get_md              = fsfilt_ext3_get_md,
+        .fs_readpage            = fsfilt_ext3_readpage,
+        .fs_add_journal_cb      = fsfilt_ext3_add_journal_cb,
+        .fs_statfs              = fsfilt_ext3_statfs,
+        .fs_sync                = fsfilt_ext3_sync,
+        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
+        .fs_prep_san_write      = fsfilt_ext3_prep_san_write,
+        .fs_write_record        = fsfilt_ext3_write_record,
+        .fs_read_record         = fsfilt_ext3_read_record,
+        .fs_setup               = fsfilt_ext3_setup,
+        .fs_send_bio            = fsfilt_ext3_send_bio,
+        .fs_get_op_len          = fsfilt_ext3_get_op_len,
+#ifdef HAVE_QUOTA_SUPPORT
+        .fs_quotactl            = fsfilt_ext3_quotactl,
+        .fs_quotacheck          = fsfilt_ext3_quotacheck,
+        .fs_quotainfo           = fsfilt_ext3_quotainfo,
+        .fs_dquot               = fsfilt_ext3_dquot,
+#endif
 };
 
 static int __init fsfilt_ext3_init(void)
 {
         int rc;
 
-        //rc = ext3_xattr_register();
         fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
                                       sizeof(struct fsfilt_cb_data), 0,
                                       0, NULL, NULL);
@@ -894,17 +1288,8 @@ out:
 
 static void __exit fsfilt_ext3_exit(void)
 {
-        int rc;
-
         fsfilt_unregister_ops(&fsfilt_ext3_ops);
-        rc = kmem_cache_destroy(fcb_cache);
-
-        if (rc || atomic_read(&fcb_cache_count)) {
-                CERROR("can't free fsfilt callback cache: count %d, rc = %d\n",
-                       atomic_read(&fcb_cache_count), rc);
-        }
-
-        //rc = ext3_xattr_unregister();
+        LASSERT(kmem_cache_destroy(fcb_cache) == 0);
 }
 
 module_init(fsfilt_ext3_init);