--- /dev/null
+#ifndef __LVFS_H__
+#define __LVFS_H__
+
+#include <linux/kp30.h>
+
+#if defined __KERNEL__
+#include <linux/lvfs_linux.h>
+#endif
+
+#ifdef LIBLUSTRE
+#include <lvfs_user_fs.h>
+#endif
+
+/* simple.c */
+struct obd_ucred {
+ __u32 ouc_fsuid;
+ __u32 ouc_fsgid;
+ __u32 ouc_cap;
+ __u32 ouc_suppgid1;
+ __u32 ouc_suppgid2;
+};
+
+struct lvfs_callback_ops {
+ struct dentry *(*l_fid2dentry)(__u64 id_ino, __u32 gr_gen, void *data);
+};
+
+#define OBD_RUN_CTXT_MAGIC 0xC0FFEEAA
+#define OBD_CTXT_DEBUG /* development-only debugging */
+struct obd_run_ctxt {
+ struct vfsmount *pwdmnt;
+ struct dentry *pwd;
+ mm_segment_t fs;
+ struct obd_ucred ouc;
+ int ngroups;
+ struct lvfs_callback_ops cb_ops;
+#ifdef OBD_CTXT_DEBUG
+ __u32 magic;
+#endif
+};
+
+#ifdef OBD_CTXT_DEBUG
+#define OBD_SET_CTXT_MAGIC(ctxt) (ctxt)->magic = OBD_RUN_CTXT_MAGIC
+#else
+#define OBD_SET_CTXT_MAGIC(ctxt) do {} while(0)
+#endif
+
+/* lvfs_common.c */
+struct dentry *lvfs_fid2dentry(struct obd_run_ctxt *, __u64, __u32, void *data);
+
+#ifdef __KERNEL__
+
+void push_ctxt(struct obd_run_ctxt *save, struct obd_run_ctxt *new_ctx,
+ struct obd_ucred *cred);
+void pop_ctxt(struct obd_run_ctxt *saved, struct obd_run_ctxt *new_ctx,
+ struct obd_ucred *cred);
+struct dentry *simple_mkdir(struct dentry *dir, char *name, int mode);
+struct dentry *simple_mknod(struct dentry *dir, char *name, int mode);
+int lustre_fread(struct file *file, void *buf, int len, loff_t *off);
+int lustre_fwrite(struct file *file, const void *buf, int len, loff_t *off);
+int lustre_fsync(struct file *file);
+
+static inline void l_dput(struct dentry *de)
+{
+ if (!de || IS_ERR(de))
+ return;
+ //shrink_dcache_parent(de);
+ LASSERT(atomic_read(&de->d_count) > 0);
+ dput(de);
+}
+
+/* We need to hold the inode semaphore over the dcache lookup itself, or we
+ * run the risk of entering the filesystem lookup path concurrently on SMP
+ * systems, and instantiating two inodes for the same entry. We still
+ * protect against concurrent addition/removal races with the DLM locking.
+ */
+static inline struct dentry *ll_lookup_one_len(char *fid_name,
+ struct dentry *dparent,
+ int fid_namelen)
+{
+ struct dentry *dchild;
+
+ down(&dparent->d_inode->i_sem);
+ dchild = lookup_one_len(fid_name, dparent, fid_namelen);
+ up(&dparent->d_inode->i_sem);
+
+ return dchild;
+}
+
+static inline void ll_sleep(int t)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(t * HZ);
+ set_current_state(TASK_RUNNING);
+}
+#endif
+
+#define LL_FID_NAMELEN (16 + 1 + 8 + 1)
+static inline int ll_fid2str(char *str, __u64 id, __u32 generation)
+{
+ return sprintf(str, "%llx:%08x", (unsigned long long)id, generation);
+}
+
+#endif
--- /dev/null
+#ifndef __LVFS_LINUX_H__
+#define __LVFS_LINUX_H__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+
+#include <linux/lvfs.h>
+
+#define l_file file
+#define l_dentry dentry
+#define l_inode inode
+
+#define l_filp_open filp_open
+
+struct obd_run_ctxt;
+struct l_file *l_dentry_open(struct obd_run_ctxt *, struct l_dentry *,
+ int flags);
+
+#endif
--- /dev/null
+ fs/ext3/inode.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ fs/ext3/super.c | 3 +++
+ 2 files changed, 55 insertions(+)
+
+--- linux-2.6.0-test3/fs/ext3/inode.c~ext3-map_inode_page-2.6.0 2003-09-02 14:48:43.000000000 +0400
++++ linux-2.6.0-test3-alexey/fs/ext3/inode.c 2003-09-08 17:50:16.000000000 +0400
+@@ -3129,3 +3129,55 @@ int ext3_prep_san_write(struct inode *in
+ ret = ret2;
+ return ret;
+ }
++
++int ext3_map_inode_page(struct inode *inode, struct page *page,
++ unsigned long *blocks, int *created, int create)
++{
++ unsigned int blocksize, blocks_per_page;
++ unsigned long iblock;
++ struct buffer_head dummy;
++ void *handle;
++ int i, rc = 0, failed = 0, needed_blocks;
++
++ blocksize = inode->i_sb->s_blocksize;
++ blocks_per_page = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
++ iblock = page->index >> (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
++
++ for (i = 0; i < blocks_per_page; i++, iblock++) {
++ blocks[i] = ext3_bmap(inode->i_mapping, iblock);
++ if (blocks[i] == 0) {
++ failed++;
++ created[i] = -1;
++ } else {
++ created[i] = 0;
++ }
++ }
++
++ if (failed == 0 || create == 0)
++ return 0;
++
++ needed_blocks = ext3_writepage_trans_blocks(inode) * failed;
++ handle = ext3_journal_start(inode, needed_blocks);
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
++ iblock = page->index >> (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
++ for (i = 0; i < blocks_per_page; i++, iblock++) {
++ if (blocks[i] != 0)
++ continue;
++
++ rc = ext3_get_block_handle(handle, inode, iblock, &dummy, 1, 1);
++ if (rc) {
++ printk(KERN_INFO "ext3_map_inode_page: error reading "
++ "block %lu\n", iblock);
++ goto out;
++ }
++ blocks[i] = dummy.b_blocknr;
++ created[i] = 1;
++ }
++
++ out:
++ ext3_journal_stop(handle);
++ return rc;
++}
++
+--- linux-2.6.0-test3/fs/ext3/super.c~ext3-map_inode_page-2.6.0 2003-09-02 14:48:43.000000000 +0400
++++ linux-2.6.0-test3-alexey/fs/ext3/super.c 2003-09-08 17:48:33.000000000 +0400
+@@ -2094,6 +2094,9 @@ static void __exit exit_ext3_fs(void)
+ int ext3_prep_san_write(struct inode *inode, long *blocks,
+ int nblocks, loff_t newsize);
+ EXPORT_SYMBOL(ext3_prep_san_write);
++int ext3_map_inode_page(struct inode *inode, struct page *page,
++ unsigned long *blocks, int *created, int create);
++EXPORT_SYMBOL(ext3_map_inode_page);
+
+ MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
+ MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
+
+_
--- /dev/null
+diff -rup linux-2.4.20-uml/fs/jbd/transaction.c linux-2.4.21-rc2/fs/jbd/transaction.c
+--- linux-2.4.20-uml/fs/jbd/transaction.c Thu Nov 28 16:53:15 2002
++++ linux-2.4.21-rc2/fs/jbd/transaction.c Fri May 16 11:00:40 2003
+@@ -666,7 +673,8 @@ repeat:
+ spin_unlock(&journal_datalist_lock);
+ unlock_journal(journal);
+ /* commit wakes up all shadow buffers after IO */
+- sleep_on(&jh2bh(jh)->b_wait);
++ wait_event(jh2bh(jh)->b_wait,
++ jh->b_jlist != BJ_Shadow);
+ lock_journal(journal);
+ goto repeat;
+ }
--- /dev/null
+diff -rup linux-2.4.20-uml/fs/jbd/transaction.c linux-2.4.21-rc2/fs/jbd/transaction.c
+--- linux-2.4.20-uml/fs/jbd/transaction.c Thu Nov 28 16:53:15 2002
++++ linux-2.4.21-rc2/fs/jbd/transaction.c Fri May 16 11:00:40 2003
+@@ -1109,7 +1142,6 @@ int journal_dirty_metadata (handle_t *ha
+
+ spin_lock(&journal_datalist_lock);
+ set_bit(BH_JBDDirty, &bh->b_state);
+- set_buffer_flushtime(bh);
+
+ J_ASSERT_JH(jh, jh->b_transaction != NULL);
+
+@@ -2035,6 +2093,13 @@ void journal_file_buffer(struct journal_
+ spin_unlock(&journal_datalist_lock);
+ }
+
++static void jbd_refile_buffer(struct buffer_head *bh)
++{
++ if (buffer_dirty(bh) && (bh->b_list != BUF_DIRTY))
++ set_buffer_flushtime(bh);
++ refile_buffer(bh);
++}
++
+ /*
+ * Remove a buffer from its current buffer list in preparation for
+ * dropping it from its current transaction entirely. If the buffer has
+@@ -2055,7 +2120,7 @@ void __journal_refile_buffer(struct jour
+ __journal_unfile_buffer(jh);
+ jh->b_transaction = NULL;
+ /* Onto BUF_DIRTY for writeback */
+- refile_buffer(jh2bh(jh));
++ jbd_refile_buffer(jh2bh(jh));
+ return;
+ }
+
--- /dev/null
+diff -rup linux-2.4.20-uml/fs/jbd/transaction.c linux-2.4.21-rc2/fs/jbd/transaction.c
+--- linux-2.4.20-uml/fs/jbd/transaction.c Thu Nov 28 16:53:15 2002
++++ linux-2.4.21-rc2/fs/jbd/transaction.c Fri May 16 11:00:40 2003
+@@ -735,7 +743,8 @@ done_locked:
+ int offset;
+ char *source;
+
+- J_ASSERT_JH(jh, buffer_uptodate(jh2bh(jh)));
++ J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
++ "Possible IO failure.\n");
+ page = jh2bh(jh)->b_page;
+ offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+ source = kmap(page);
+diff -rup linux-2.4.20-uml/include/linux/jbd.h linux-2.4.21-rc2/include/linux/jbd.h
+--- linux-2.4.20-uml/include/linux/jbd.h Mon Aug 25 16:16:57 2003
++++ linux-2.4.21-rc2/include/linux/jbd.h Tue May 13 13:28:29 2003
+@@ -40,6 +40,15 @@
+ */
+ #undef JBD_PARANOID_WRITES
+
++/*
++ * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds
++ * certain classes of error which can occur due to failed IOs. Under
++ * normal use we want ext3 to continue after such errors, because
++ * hardware _can_ fail, but for debugging purposes when running tests on
++ * known-good hardware we may want to trap these errors.
++ */
++#undef JBD_PARANOID_IOFAIL
++
+ #ifdef CONFIG_JBD_DEBUG
+ /*
+ * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
+@@ -232,6 +272,23 @@ void buffer_assertion_failure(struct buf
+ #define J_ASSERT(assert) do { } while (0)
+ #endif /* JBD_ASSERTIONS */
+
++#if defined(JBD_PARANOID_IOFAIL)
++#define J_EXPECT(expr, why...) J_ASSERT(expr)
++#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr)
++#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr)
++#else
++#define __journal_expect(expr, why...) \
++ do { \
++ if (!(expr)) { \
++ printk(KERN_ERR "EXT3-fs unexpected failure: %s;\n", # expr); \
++ printk(KERN_ERR why); \
++ } \
++ } while (0)
++#define J_EXPECT(expr, why...) __journal_expect(expr, ## why)
++#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why)
++#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
++#endif
++
+ enum jbd_state_bits {
+ BH_JWrite
+ = BH_PrivateStart, /* 1 if being written to log (@@@ DEBUGGING) */
--- /dev/null
+ include/linux/fs.h | 1 +
+ mm/filemap.c | 3 +++
+ 2 files changed, 4 insertions(+)
+
+--- linux-2.6.0-test3-l25/include/linux/fs.h~removepage-2.6.0 2003-09-05 15:31:52.000000000 -0700
++++ linux-2.6.0-test3-l25-zab/include/linux/fs.h 2003-09-08 10:47:30.000000000 -0700
+@@ -311,6 +311,7 @@ struct address_space_operations {
+ int (*releasepage) (struct page *, int);
+ int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
+ loff_t offset, unsigned long nr_segs);
++ void (*removepage)(struct page *); /* called when page gets removed from the inode */
+ };
+
+ struct backing_dev_info;
+--- linux-2.6.0-test3-l25/mm/filemap.c~removepage-2.6.0 2003-08-08 21:34:39.000000000 -0700
++++ linux-2.6.0-test3-l25-zab/mm/filemap.c 2003-09-08 10:48:10.000000000 -0700
+@@ -81,6 +81,9 @@ void __remove_from_page_cache(struct pag
+ {
+ struct address_space *mapping = page->mapping;
+
++ if (mapping->a_ops->removepage)
++ mapping->a_ops->removepage(page);
++
+ radix_tree_delete(&mapping->page_tree, page->index);
+ list_del(&page->list);
+ page->mapping = NULL;
+
+_
--- /dev/null
+ include/asm-um/unistd.h | 2 ++
+ 1 files changed, 2 insertions(+)
+
+diff -puN include/asm-um/unistd.h~uml-2.6.0-fix include/asm-um/unistd.h
+--- linux-2.6.0-test3/include/asm-um/unistd.h~uml-2.6.0-fix 2003-09-04 18:39:45.000000000 +0400
++++ linux-2.6.0-test3-alexey/include/asm-um/unistd.h 2003-09-04 18:39:59.000000000 +0400
+@@ -6,8 +6,10 @@
+ #ifndef _UM_UNISTD_H_
+ #define _UM_UNISTD_H_
+
++#ifdef __KERNEL__
+ #include "linux/resource.h"
+ #include "asm/uaccess.h"
++#endif
+
+ extern long sys_open(const char *filename, int flags, int mode);
+ extern long sys_dup(unsigned int fildes);
+
+_
--- /dev/null
+fs/ext3/inode.c
+fs/ext3/super.c
--- /dev/null
+fs/jbd/transaction.c
--- /dev/null
+fs/jbd/transaction.c
--- /dev/null
+fs/jbd/transaction.c
+include/linux/jbd.h
--- /dev/null
+include/linux/fs.h
+mm/filemap.c
--- /dev/null
+include/asm-um/unistd.h
--- /dev/null
+.Xrefs
+config.log
+config.status
+configure
+Makefile
+Makefile.in
+.deps
+TAGS
+.*.cmd
--- /dev/null
+# Copyright (C) 2001 Cluster File Systems, Inc.
+#
+# This code is issued under the GNU General Public License.
+# See the file COPYING in this distribution
+DEFS=
+MODULE = lvfs
+
+
+if EXTN
+FSMOD = fsfilt_extN
+else
+FSMOD = fsfilt_ext3
+endif
+
+
+if LIBLUSTRE
+lib_LIBRARIES = liblvfs.a
+liblvfs_a_SOURCES = lvfs_user_fs.c
+
+#if MYSQL
+#liblvfs_a_SOURCES += lvfs_user_mysql.c
+#endif
+
+else
+modulefs_DATA = lvfs.o $(FSMOD).o fsfilt_reiserfs.o
+
+EXTRA_PROGRAMS = lvfs $(FSMOD) fsfilt_reiserfs
+lvfs_SOURCES = lvfs_common.c lvfs_linux.c fsfilt.c
+endif
+
+
+include $(top_srcdir)/Rules
--- /dev/null
+#ifndef EXPORT_SYMTAB
+# define EXPORT_SYMTAB
+#endif
+#define DEBUG_SUBSYSTEM S_FILTER
+
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/kp30.h>
+#include <linux/lustre_fsfilt.h>
+
+LIST_HEAD(fsfilt_types);
+
+static struct fsfilt_operations *fsfilt_search_type(const char *type)
+{
+ struct fsfilt_operations *found;
+ struct list_head *p;
+
+ list_for_each(p, &fsfilt_types) {
+ found = list_entry(p, struct fsfilt_operations, fs_list);
+ if (!strcmp(found->fs_type, type)) {
+ return found;
+ }
+ }
+ return NULL;
+}
+
+int fsfilt_register_ops(struct fsfilt_operations *fs_ops)
+{
+ struct fsfilt_operations *found;
+
+ /* lock fsfilt_types list */
+ if ((found = fsfilt_search_type(fs_ops->fs_type))) {
+ if (found != fs_ops) {
+ CERROR("different operations for type %s\n",
+ fs_ops->fs_type);
+ /* unlock fsfilt_types list */
+ RETURN(-EEXIST);
+ }
+ } else {
+ PORTAL_MODULE_USE;
+ list_add(&fs_ops->fs_list, &fsfilt_types);
+ }
+
+ /* unlock fsfilt_types list */
+ return 0;
+}
+
+void fsfilt_unregister_ops(struct fsfilt_operations *fs_ops)
+{
+ struct list_head *p;
+
+ /* lock fsfilt_types list */
+ list_for_each(p, &fsfilt_types) {
+ struct fsfilt_operations *found;
+
+ found = list_entry(p, typeof(*found), fs_list);
+ if (found == fs_ops) {
+ list_del(p);
+ PORTAL_MODULE_UNUSE;
+ break;
+ }
+ }
+ /* unlock fsfilt_types list */
+}
+
+struct fsfilt_operations *fsfilt_get_ops(const char *type)
+{
+ struct fsfilt_operations *fs_ops;
+
+ /* lock fsfilt_types list */
+ if (!(fs_ops = fsfilt_search_type(type))) {
+ char name[32];
+ int rc;
+
+ snprintf(name, sizeof(name) - 1, "fsfilt_%s", type);
+ name[sizeof(name) - 1] = '\0';
+
+ if (!(rc = request_module(name))) {
+ fs_ops = fsfilt_search_type(type);
+ CDEBUG(D_INFO, "Loaded module '%s'\n", name);
+ if (!fs_ops)
+ rc = -ENOENT;
+ }
+
+ if (rc) {
+ CERROR("Can't find %s interface\n", name);
+ RETURN(ERR_PTR(rc));
+ /* unlock fsfilt_types list */
+ }
+ }
+ try_module_get(fs_ops->fs_owner);
+ /* unlock fsfilt_types list */
+
+ return fs_ops;
+}
+
+void fsfilt_put_ops(struct fsfilt_operations *fs_ops)
+{
+ module_put(fs_ops->fs_owner);
+}
+
+
+EXPORT_SYMBOL(fsfilt_register_ops);
+EXPORT_SYMBOL(fsfilt_unregister_ops);
+EXPORT_SYMBOL(fsfilt_get_ops);
+EXPORT_SYMBOL(fsfilt_put_ops);
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * lustre/lib/fsfilt_ext3.c
+ * Lustre filesystem abstraction routines
+ *
+ * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ *
+ * This file is part of Lustre, http://www.lustre.org.
+ *
+ * Lustre is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Lustre is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Lustre; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define DEBUG_SUBSYSTEM S_FILTER
+
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/ext3_fs.h>
+#include <linux/ext3_jbd.h>
+#include <linux/version.h>
+/* XXX ugh */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+ #include <linux/ext3_xattr.h>
+#else
+ #include <linux/../../fs/ext3/xattr.h>
+#endif
+#include <linux/kp30.h>
+#include <linux/lustre_fsfilt.h>
+#include <linux/obd.h>
+#include <linux/obd_class.h>
+#include <linux/module.h>
+
+static kmem_cache_t *fcb_cache;
+static atomic_t fcb_cache_count = ATOMIC_INIT(0);
+
+struct fsfilt_cb_data {
+ struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
+ fsfilt_cb_t cb_func; /* MDS/OBD completion function */
+ struct obd_device *cb_obd; /* MDS/OBD completion device */
+ __u64 cb_last_rcvd; /* MDS/OST last committed operation */
+ void *cb_data; /* MDS/OST completion function data */
+};
+
+#define EXT3_XATTR_INDEX_LUSTRE 5
+#define XATTR_LUSTRE_MDS_OBJID "system.lustre_mds_objid"
+
+/*
+ * We don't currently need any additional blocks for rmdir and
+ * unlink transactions because we are storing the OST oa_id inside
+ * the inode (which we will be changing anyways as part of this
+ * transaction).
+ */
+static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private)
+{
+ /* For updates to the last received file */
+ int nblocks = EXT3_DATA_TRANS_BLOCKS;
+ void *handle;
+
+ LASSERT(current->journal_info == NULL);
+
+ switch(op) {
+ case FSFILT_OP_CREATE_LOG:
+ nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS+EXT3_DATA_TRANS_BLOCKS;
+ op = FSFILT_OP_CREATE;
+ break;
+ case FSFILT_OP_UNLINK_LOG:
+ nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS+EXT3_DATA_TRANS_BLOCKS;
+ op = FSFILT_OP_UNLINK;
+ break;
+ }
+
+ switch(op) {
+ case FSFILT_OP_RMDIR:
+ case FSFILT_OP_UNLINK:
+ nblocks += EXT3_DELETE_TRANS_BLOCKS;
+ break;
+ case FSFILT_OP_RENAME:
+ /* modify additional directory */
+ nblocks += EXT3_DATA_TRANS_BLOCKS;
+ /* no break */
+ case FSFILT_OP_SYMLINK:
+ /* additional block + block bitmap + GDT for long symlink */
+ nblocks += 3;
+ /* no break */
+ case FSFILT_OP_CREATE:
+ case FSFILT_OP_MKDIR:
+ case FSFILT_OP_MKNOD:
+ /* modify one inode + block bitmap + GDT */
+ nblocks += 3;
+ /* no break */
+ case FSFILT_OP_LINK:
+ /* modify parent directory */
+ nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS+EXT3_DATA_TRANS_BLOCKS;
+ break;
+ case FSFILT_OP_SETATTR:
+ /* Setattr on inode */
+ nblocks += 1;
+ break;
+ default: CERROR("unknown transaction start op %d\n", op);
+ LBUG();
+ }
+
+ LASSERT(current->journal_info == desc_private);
+ lock_kernel();
+ handle = journal_start(EXT3_JOURNAL(inode), nblocks);
+ unlock_kernel();
+
+ if (!IS_ERR(handle))
+ LASSERT(current->journal_info == handle);
+ return handle;
+}
+
+/*
+ * Calculate the number of buffer credits needed to write multiple pages in
+ * a single ext3 transaction. No, this shouldn't be here, but as yet ext3
+ * doesn't have a nice API for calculating this sort of thing in advance.
+ *
+ * See comment above ext3_writepage_trans_blocks for details. We assume
+ * no data journaling is being done, but it does allow for all of the pages
+ * being non-contiguous. If we are guaranteed contiguous pages we could
+ * reduce the number of (d)indirect blocks a lot.
+ *
+ * With N blocks per page and P pages, for each inode we have at most:
+ * N*P indirect
+ * min(N*P, blocksize/4 + 1) dindirect blocks
+ * niocount tindirect
+ *
+ * For the entire filesystem, we have at most:
+ * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
+ * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
+ * objcount inode blocks
+ * 1 superblock
+ * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
+ *
+ * 1 EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
+ */
+static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso)
+{
+ struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
+ int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
+ int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
+ int nbitmaps = 0;
+ int ngdblocks = 0;
+ int needed = objcount + 1;
+ int i;
+
+ for (i = 0; i < objcount; i++, fso++) {
+ int nblocks = fso->fso_bufcnt * blockpp;
+ int ndindirect = min(nblocks, addrpp + 1);
+ int nindir = nblocks + ndindirect + 1;
+
+ nbitmaps += nindir + nblocks;
+ ngdblocks += nindir + nblocks;
+
+ needed += nindir;
+ }
+
+ /* Assumes ext3 and ext3 have same sb_info layout at the start. */
+ if (nbitmaps > EXT3_SB(sb)->s_groups_count)
+ nbitmaps = EXT3_SB(sb)->s_groups_count;
+ if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
+ ngdblocks = EXT3_SB(sb)->s_gdb_count;
+
+ needed += nbitmaps + ngdblocks;
+
+ /* last_rcvd update */
+ needed += EXT3_DATA_TRANS_BLOCKS;
+
+#ifdef CONFIG_QUOTA
+ /* We assume that there will be 1 bit set in s_dquot.flags for each
+ * quota file that is active. This is at least true for now.
+ */
+ needed += hweight32(sb_any_quota_enabled(sb)) *
+ EXT3_SINGLEDATA_TRANS_BLOCKS;
+#endif
+
+ return needed;
+}
+
+/* We have to start a huge journal transaction here to hold all of the
+ * metadata for the pages being written here. This is necessitated by
+ * the fact that we do lots of prepare_write operations before we do
+ * any of the matching commit_write operations, so even if we split
+ * up to use "smaller" transactions none of them could complete until
+ * all of them were opened. By having a single journal transaction,
+ * we eliminate duplicate reservations for common blocks like the
+ * superblock and group descriptors or bitmaps.
+ *
+ * We will start the transaction here, but each prepare_write will
+ * add a refcount to the transaction, and each commit_write will
+ * remove a refcount. The transaction will be closed when all of
+ * the pages have been written.
+ */
+static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
+ int niocount, void *desc_private)
+{
+ journal_t *journal;
+ handle_t *handle;
+ int needed;
+ ENTRY;
+
+ LASSERT(current->journal_info == desc_private);
+ journal = EXT3_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
+ needed = fsfilt_ext3_credits_needed(objcount, fso);
+
+ /* The number of blocks we could _possibly_ dirty can very large.
+ * We reduce our request if it is absurd (and we couldn't get that
+ * many credits for a single handle anyways).
+ *
+ * At some point we have to limit the size of I/Os sent at one time,
+ * increase the size of the journal, or we have to calculate the
+ * actual journal requirements more carefully by checking all of
+ * the blocks instead of being maximally pessimistic. It remains to
+ * be seen if this is a real problem or not.
+ */
+ if (needed > journal->j_max_transaction_buffers) {
+ CERROR("want too many journal credits (%d) using %d instead\n",
+ needed, journal->j_max_transaction_buffers);
+ needed = journal->j_max_transaction_buffers;
+ }
+
+ lock_kernel();
+ handle = journal_start(journal, needed);
+ unlock_kernel();
+ if (IS_ERR(handle)) {
+ CERROR("can't get handle for %d credits: rc = %ld\n", needed,
+ PTR_ERR(handle));
+ } else {
+ LASSERT(handle->h_buffer_credits >= needed);
+ LASSERT(current->journal_info == handle);
+ }
+
+ RETURN(handle);
+}
+
+static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
+{
+ int rc;
+ handle_t *handle = h;
+
+ LASSERT(current->journal_info == handle);
+ if (force_sync)
+ handle->h_sync = 1; /* recovery likes this */
+
+ lock_kernel();
+ rc = journal_stop(handle);
+ unlock_kernel();
+
+ LASSERT(current->journal_info == NULL);
+ return rc;
+}
+
+static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
+ struct iattr *iattr, int do_trunc)
+{
+ struct inode *inode = dentry->d_inode;
+ int rc;
+
+ lock_kernel();
+
+ /* A _really_ horrible hack to avoid removing the data stored
+ * in the block pointers; this is really the "small" stripe MD data.
+ * We can avoid further hackery by virtue of the MDS file size being
+ * zero all the time (which doesn't invoke block truncate at unlink
+ * time), so we assert we never change the MDS file size from zero. */
+ if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
+ /* ATTR_SIZE would invoke truncate: clear it */
+ iattr->ia_valid &= ~ATTR_SIZE;
+ EXT3_I(inode)->i_disksize = inode->i_size = iattr->ia_size;
+
+ /* make sure _something_ gets set - so new inode
+ * goes to disk (probably won't work over XFS */
+ if (!(iattr->ia_valid & (ATTR_MODE | ATTR_MTIME | ATTR_CTIME))){
+ iattr->ia_valid |= ATTR_MODE;
+ iattr->ia_mode = inode->i_mode;
+ }
+ }
+
+ /* Don't allow setattr to change file type */
+ iattr->ia_mode = (inode->i_mode & S_IFMT)|(iattr->ia_mode & ~S_IFMT);
+
+ if (inode->i_op->setattr) {
+ rc = inode->i_op->setattr(dentry, iattr);
+ } else {
+ rc = inode_change_ok(inode, iattr);
+ if (!rc)
+ rc = inode_setattr(inode, iattr);
+ }
+
+ unlock_kernel();
+
+ return rc;
+}
+
+static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
+ void *lmm, int lmm_size)
+{
+ int rc;
+
+ /* Nasty hack city - store stripe MD data in the block pointers if
+ * it will fit, because putting it in an EA currently kills the MDS
+ * performance. We'll fix this with "fast EAs" in the future.
+ */
+ if (inode->i_blocks == 0 && lmm_size <= sizeof(EXT3_I(inode)->i_data) -
+ sizeof(EXT3_I(inode)->i_data[0])) {
+ /* XXX old_size is debugging only */
+ int old_size = EXT3_I(inode)->i_data[0];
+ if (old_size != 0) {
+ LASSERT(old_size < sizeof(EXT3_I(inode)->i_data));
+ CERROR("setting EA on %lu again... interesting\n",
+ inode->i_ino);
+ }
+
+ EXT3_I(inode)->i_data[0] = cpu_to_le32(lmm_size);
+ memcpy(&EXT3_I(inode)->i_data[1], lmm, lmm_size);
+ mark_inode_dirty(inode);
+ return 0;
+ } else {
+ down(&inode->i_sem);
+ lock_kernel();
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+ rc = ext3_xattr_set(handle, inode, EXT3_XATTR_INDEX_LUSTRE,
+ XATTR_LUSTRE_MDS_OBJID, lmm, lmm_size, 0);
+#else
+ rc = ext3_xattr_set_handle(handle, inode,
+ EXT3_XATTR_INDEX_LUSTRE,
+ XATTR_LUSTRE_MDS_OBJID, lmm,
+ lmm_size, 0);
+#endif
+ unlock_kernel();
+ up(&inode->i_sem);
+ }
+
+ if (rc)
+ CERROR("error adding MD data to inode %lu: rc = %d\n",
+ inode->i_ino, rc);
+ return rc;
+}
+
+static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size)
+{
+ int rc;
+
+ if (inode->i_blocks == 0 && EXT3_I(inode)->i_data[0]) {
+ int size = le32_to_cpu(EXT3_I(inode)->i_data[0]);
+ LASSERT(size < sizeof(EXT3_I(inode)->i_data));
+ if (lmm) {
+ if (size > lmm_size)
+ return -ERANGE;
+ memcpy(lmm, &EXT3_I(inode)->i_data[1], size);
+ }
+ return size;
+ }
+
+ down(&inode->i_sem);
+ lock_kernel();
+ rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_LUSTRE,
+ XATTR_LUSTRE_MDS_OBJID, lmm, lmm_size);
+ unlock_kernel();
+ up(&inode->i_sem);
+
+ /* This gives us the MD size */
+ if (lmm == NULL)
+ return (rc == -ENODATA) ? 0 : rc;
+
+ if (rc < 0) {
+ CDEBUG(D_INFO, "error getting EA %s from inode %lu: "
+ "rc = %d\n", XATTR_LUSTRE_MDS_OBJID, inode->i_ino, rc);
+ memset(lmm, 0, lmm_size);
+ return (rc == -ENODATA) ? 0 : rc;
+ }
+
+ return rc;
+}
+
+static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
+ loff_t *off)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ int rc = 0;
+
+ if (S_ISREG(inode->i_mode))
+ rc = file->f_op->read(file, buf, count, off);
+ else {
+ const int blkbits = inode->i_sb->s_blocksize_bits;
+ const int blksize = inode->i_sb->s_blocksize;
+
+ CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
+ count, inode->i_ino, *off);
+ while (count > 0) {
+ struct buffer_head *bh;
+
+ bh = NULL;
+ if (*off < inode->i_size) {
+ int err = 0;
+
+ bh = ext3_bread(NULL, inode, *off >> blkbits,
+ 0, &err);
+
+ CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);
+
+ if (bh) {
+ memcpy(buf, bh->b_data, blksize);
+ brelse(bh);
+ } else if (err) {
+ /* XXX in theory we should just fake
+ * this buffer and continue like ext3,
+ * especially if this is a partial read
+ */
+ CERROR("error read dir %lu+%llu: %d\n",
+ inode->i_ino, *off, err);
+ RETURN(err);
+ }
+ }
+ if (!bh) {
+ struct ext3_dir_entry_2 *fake = (void *)buf;
+
+ CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
+ memset(fake, 0, sizeof(*fake));
+ fake->rec_len = cpu_to_le16(blksize);
+ }
+ count -= blksize;
+ buf += blksize;
+ *off += blksize;
+ rc += blksize;
+ }
+ }
+
+ return rc;
+}
+
+static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
+{
+ struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;
+
+ fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);
+
+ OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
+ atomic_dec(&fcb_cache_count);
+}
+
+static int fsfilt_ext3_set_last_rcvd(struct obd_device *obd, __u64 last_rcvd,
+ void *handle, fsfilt_cb_t cb_func,
+ void *cb_data)
+{
+ struct fsfilt_cb_data *fcb;
+
+ OBD_SLAB_ALLOC(fcb, fcb_cache, GFP_NOFS, sizeof *fcb);
+ if (fcb == NULL)
+ RETURN(-ENOMEM);
+
+ atomic_inc(&fcb_cache_count);
+ fcb->cb_func = cb_func;
+ fcb->cb_obd = obd;
+ fcb->cb_last_rcvd = last_rcvd;
+ fcb->cb_data = cb_data;
+
+ CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
+ lock_kernel();
+ journal_callback_set(handle, fsfilt_ext3_cb_func,
+ (struct journal_callback *)fcb);
+ unlock_kernel();
+
+ return 0;
+}
+
+static int fsfilt_ext3_journal_data(struct file *filp)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+ /* bug 1576: enable data journaling on 2.5 when appropriate */
+ struct inode *inode = filp->f_dentry->d_inode;
+ EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
+#endif
+ return 0;
+}
+
+/*
+ * We need to hack the return value for the free inode counts because
+ * the current EA code requires one filesystem block per inode with EAs,
+ * so it is possible to run out of blocks before we run out of inodes.
+ *
+ * This can be removed when the ext3 EA code is fixed.
+ */
+static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
+{
+ struct kstatfs sfs;
+ int rc = vfs_statfs(sb, &sfs);
+
+ if (!rc && sfs.f_bfree < sfs.f_ffree) {
+ sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
+ sfs.f_ffree = sfs.f_bfree;
+ }
+
+ statfs_pack(osfs, &sfs);
+ return rc;
+}
+
+static int fsfilt_ext3_sync(struct super_block *sb)
+{
+ return ext3_force_commit(sb);
+}
+
+extern int ext3_prep_san_write(struct inode *inode, long *blocks,
+ int nblocks, loff_t newsize);
+static int fsfilt_ext3_prep_san_write(struct inode *inode, long *blocks,
+ int nblocks, loff_t newsize)
+{
+ return ext3_prep_san_write(inode, blocks, nblocks, newsize);
+}
+
+static int fsfilt_ext3_read_record(struct file * file, void *buf,
+ int size, loff_t *offs)
+{
+ struct buffer_head *bh;
+ unsigned long block, boffs;
+ struct inode *inode = file->f_dentry->d_inode;
+ int err;
+
+ if (inode->i_size < *offs + size) {
+ CERROR("file size %llu is too short for read %u@%llu\n",
+ inode->i_size, size, *offs);
+ return -EIO;
+ }
+
+ block = *offs >> inode->i_blkbits;
+ bh = ext3_bread(NULL, inode, block, 0, &err);
+ if (!bh) {
+ CERROR("can't read block: %d\n", err);
+ return err;
+ }
+
+ boffs = (unsigned)*offs % bh->b_size;
+ if (boffs + size > bh->b_size) {
+ CERROR("request crosses block's border. offset %llu, size %u\n",
+ *offs, size);
+ brelse(bh);
+ return -EIO;
+ }
+
+ memcpy(buf, bh->b_data + boffs, size);
+ brelse(bh);
+ *offs += size;
+ return 0;
+}
+
+/* Write @size bytes at *@offs into an ext3 file as journaled metadata,
+ * inside a private journal transaction. The record must fit within one
+ * filesystem block. If @force_sync, the transaction is committed
+ * synchronously. On success *@offs is advanced; returns 0 or -errno. */
+static int fsfilt_ext3_write_record(struct file *file, void *buf, int size,
+ loff_t *offs, int force_sync)
+{
+ struct buffer_head *bh;
+ unsigned long block, boffs;
+ struct inode *inode = file->f_dentry->d_inode;
+ loff_t old_size = inode->i_size;
+ journal_t *journal;
+ handle_t *handle;
+ int err;
+
+ /* One block of data plus bitmap/group-descriptor updates. */
+ journal = EXT3_SB(inode->i_sb)->s_journal;
+ handle = journal_start(journal, EXT3_DATA_TRANS_BLOCKS + 2);
+ if (IS_ERR(handle)) {
+ CERROR("can't start transaction\n");
+ return PTR_ERR(handle);
+ }
+
+ block = *offs >> inode->i_blkbits;
+ /* Extend i_size to the end of the target block if the write grows
+ * the file; re-check under i_sem to close the race. */
+ if (*offs + size > inode->i_size) {
+ down(&inode->i_sem);
+ if (*offs + size > inode->i_size)
+ inode->i_size = ((loff_t)block + 1) << inode->i_blkbits;
+ up(&inode->i_sem);
+ }
+
+ /* create=1: allocate the block if it does not exist yet. */
+ bh = ext3_bread(handle, inode, block, 1, &err);
+ if (!bh) {
+ CERROR("can't read/create block: %d\n", err);
+ goto out;
+ }
+
+ /* This is a hack only needed because ext3_get_block_handle() updates
+ * i_disksize after marking the inode dirty in ext3_splice_branch().
+ * We will fix that when we get a chance, as ext3_mark_inode_dirty()
+ * is not without cost, nor is it even exported.
+ */
+ if (inode->i_size > old_size)
+ mark_inode_dirty(inode);
+
+ boffs = (unsigned)*offs % bh->b_size;
+ if (boffs + size > bh->b_size) {
+ CERROR("request crosses block's border. offset %llu, size %u\n",
+ *offs, size);
+ err = -EIO;
+ goto out;
+ }
+
+ /* Journal write protocol: get write access, modify, dirty metadata. */
+ err = ext3_journal_get_write_access(handle, bh);
+ if (err) {
+ CERROR("journal_get_write_access() returned error %d\n", err);
+ goto out;
+ }
+ memcpy(bh->b_data + boffs, buf, size);
+ err = ext3_journal_dirty_metadata(handle, bh);
+ if (err) {
+ CERROR("journal_dirty_metadata() returned error %d\n", err);
+ goto out;
+ }
+
+ if (force_sync)
+ handle->h_sync = 1; /* recovery likes this */
+out:
+ if (bh)
+ brelse(bh);
+ journal_stop(handle);
+ if (err == 0)
+ *offs += size;
+ return err;
+}
+
+/* fsfilt method table for ext3, registered with the generic fsfilt layer.
+ * Uses standard C99 designated initializers instead of the obsolete GCC
+ * "field:" labeled-element extension. */
+static struct fsfilt_operations fsfilt_ext3_ops = {
+ .fs_type = "ext3",
+ .fs_owner = THIS_MODULE,
+ .fs_start = fsfilt_ext3_start,
+ .fs_brw_start = fsfilt_ext3_brw_start,
+ .fs_commit = fsfilt_ext3_commit,
+ .fs_setattr = fsfilt_ext3_setattr,
+ .fs_set_md = fsfilt_ext3_set_md,
+ .fs_get_md = fsfilt_ext3_get_md,
+ .fs_readpage = fsfilt_ext3_readpage,
+ .fs_journal_data = fsfilt_ext3_journal_data,
+ .fs_set_last_rcvd = fsfilt_ext3_set_last_rcvd,
+ .fs_statfs = fsfilt_ext3_statfs,
+ .fs_sync = fsfilt_ext3_sync,
+ .fs_prep_san_write = fsfilt_ext3_prep_san_write,
+ .fs_write_record = fsfilt_ext3_write_record,
+ .fs_read_record = fsfilt_ext3_read_record,
+};
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+
+#warning "fsfilt_ext3_init() and fsfilt_ext3_exit() aren't called on 2.6. MUST be fixed"
+
+/* Module init: create the slab cache for journal-commit callback
+ * descriptors and register the ext3 fsfilt operations. Returns 0 or
+ * -errno; the cache is destroyed again if registration fails. */
+static int __init fsfilt_ext3_init(void)
+{
+ int rc;
+
+ //rc = ext3_xattr_register();
+ fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
+ sizeof(struct fsfilt_cb_data), 0,
+ 0, NULL, NULL);
+ if (!fcb_cache) {
+ CERROR("error allocating fsfilt journal callback cache\n");
+ GOTO(out, rc = -ENOMEM);
+ }
+
+ rc = fsfilt_register_ops(&fsfilt_ext3_ops);
+
+ if (rc)
+ kmem_cache_destroy(fcb_cache);
+out:
+ return rc;
+}
+
+/* Module exit: unregister the ext3 operations and destroy the callback
+ * cache; complains if callbacks are still outstanding (a leak). */
+static void __exit fsfilt_ext3_exit(void)
+{
+ int rc;
+
+ fsfilt_unregister_ops(&fsfilt_ext3_ops);
+ /* 2.4 kmem_cache_destroy() returns non-zero if objects remain. */
+ rc = kmem_cache_destroy(fcb_cache);
+
+ if (rc || atomic_read(&fcb_cache_count)) {
+ CERROR("can't free fsfilt callback cache: count %d, rc = %d\n",
+ atomic_read(&fcb_cache_count), rc);
+ }
+
+ //rc = ext3_xattr_unregister();
+}
+
+MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
+MODULE_LICENSE("GPL");
+
+module_init(fsfilt_ext3_init);
+module_exit(fsfilt_ext3_exit);
+
+#endif
+
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * lustre/lib/fsfilt_extN.c
+ * Lustre filesystem abstraction routines
+ *
+ * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ *
+ * This file is part of Lustre, http://www.lustre.org.
+ *
+ * Lustre is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Lustre is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Lustre; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define DEBUG_SUBSYSTEM S_FILTER
+
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/extN_fs.h>
+#include <linux/extN_jbd.h>
+#include <linux/extN_xattr.h>
+#include <linux/kp30.h>
+#include <linux/lustre_fsfilt.h>
+#include <linux/obd.h>
+#include <linux/obd_class.h>
+#include <linux/module.h>
+
+/* Slab cache for journal-commit callback descriptors, plus a counter
+ * used at module unload to detect leaked callbacks. */
+static kmem_cache_t *fcb_cache;
+static atomic_t fcb_cache_count = ATOMIC_INIT(0);
+
+/* Per-transaction commit callback descriptor; freed by the callback. */
+struct fsfilt_cb_data {
+ struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
+ fsfilt_cb_t cb_func; /* MDS/OBD completion function */
+ struct obd_device *cb_obd; /* MDS/OBD completion device */
+ __u64 cb_last_rcvd; /* MDS/OST last committed operation */
+ void *cb_data; /* MDS/OST completion function data */
+};
+
+/* xattr namespace index and name used to store the stripe MD EA. */
+#define EXTN_XATTR_INDEX_LUSTRE 5
+#define XATTR_LUSTRE_MDS_OBJID "system.lustre_mds_objid"
+
+/*
+ * We don't currently need any additional blocks for rmdir and
+ * unlink transactions because we are storing the OST oa_id inside
+ * the inode (which we will be changing anyways as part of this
+ * transaction).
+ */
+/* Open a journal transaction sized for metadata operation @op on @inode.
+ * The second switch deliberately falls through ("no break") so each case
+ * accumulates the credits of the cheaper cases below it. Returns a
+ * handle_t* or an ERR_PTR. */
+static void *fsfilt_extN_start(struct inode *inode, int op, void *desc_private)
+{
+ /* For updates to the last received file */
+ int nblocks = EXTN_DATA_TRANS_BLOCKS;
+ void *handle;
+
+ LASSERT(current->journal_info == NULL);
+
+ /* The *_LOG variants add credits for the log record, then reduce
+ * to the base operation for the sizing switch below. */
+ switch(op) {
+ case FSFILT_OP_CREATE_LOG:
+ nblocks += EXTN_INDEX_EXTRA_TRANS_BLOCKS+EXTN_DATA_TRANS_BLOCKS;
+ op = FSFILT_OP_CREATE;
+ break;
+ case FSFILT_OP_UNLINK_LOG:
+ nblocks += EXTN_INDEX_EXTRA_TRANS_BLOCKS+EXTN_DATA_TRANS_BLOCKS;
+ op = FSFILT_OP_UNLINK;
+ break;
+ }
+
+ switch(op) {
+ case FSFILT_OP_RMDIR:
+ case FSFILT_OP_UNLINK:
+ nblocks += EXTN_DELETE_TRANS_BLOCKS;
+ break;
+ case FSFILT_OP_RENAME:
+ /* modify additional directory */
+ nblocks += EXTN_DATA_TRANS_BLOCKS;
+ /* no break */
+ case FSFILT_OP_SYMLINK:
+ /* additional block + block bitmap + GDT for long symlink */
+ nblocks += 3;
+ /* no break */
+ case FSFILT_OP_CREATE:
+ case FSFILT_OP_MKDIR:
+ case FSFILT_OP_MKNOD:
+ /* modify one inode + block bitmap + GDT */
+ nblocks += 3;
+ /* no break */
+ case FSFILT_OP_LINK:
+ /* modify parent directory */
+ nblocks += EXTN_INDEX_EXTRA_TRANS_BLOCKS+EXTN_DATA_TRANS_BLOCKS;
+ break;
+ case FSFILT_OP_SETATTR:
+ /* Setattr on inode */
+ nblocks += 1;
+ break;
+ default: CERROR("unknown transaction start op %d\n", op);
+ LBUG();
+ }
+
+ LASSERT(current->journal_info == desc_private);
+ /* journal_start() is not BKL-safe on its own in this kernel. */
+ lock_kernel();
+ handle = journal_start(EXTN_JOURNAL(inode), nblocks);
+ unlock_kernel();
+
+ if (!IS_ERR(handle))
+ LASSERT(current->journal_info == handle);
+ return handle;
+}
+
+/*
+ * Calculate the number of buffer credits needed to write multiple pages in
+ * a single extN transaction. No, this shouldn't be here, but as yet extN
+ * doesn't have a nice API for calculating this sort of thing in advance.
+ *
+ * See comment above extN_writepage_trans_blocks for details. We assume
+ * no data journaling is being done, but it does allow for all of the pages
+ * being non-contiguous. If we are guaranteed contiguous pages we could
+ * reduce the number of (d)indirect blocks a lot.
+ *
+ * With N blocks per page and P pages, for each inode we have at most:
+ * N*P indirect
+ * min(N*P, blocksize/4 + 1) dindirect blocks
+ * niocount tindirect
+ *
+ * For the entire filesystem, we have at most:
+ * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
+ * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
+ * objcount inode blocks
+ * 1 superblock
+ * 2 * EXTN_SINGLEDATA_TRANS_BLOCKS for the quota files
+ *
+ * 1 EXTN_DATA_TRANS_BLOCKS for the last_rcvd update.
+ */
+/* Compute a pessimistic upper bound on the journal credits needed to
+ * write all pages of @objcount objects described by @fso (see the block
+ * comment above for the accounting model). */
+static int fsfilt_extN_credits_needed(int objcount, struct fsfilt_objinfo *fso)
+{
+ struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
+ int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
+ int addrpp = EXTN_ADDR_PER_BLOCK(sb) * blockpp;
+ int nbitmaps = 0;
+ int ngdblocks = 0;
+ int needed = objcount + 1; /* inodes + superblock */
+ int i;
+
+ for (i = 0; i < objcount; i++, fso++) {
+ int nblocks = fso->fso_bufcnt * blockpp;
+ int ndindirect = min(nblocks, addrpp + 1);
+ int nindir = nblocks + ndindirect + 1;
+
+ nbitmaps += nindir + nblocks;
+ ngdblocks += nindir + nblocks;
+
+ needed += nindir;
+ }
+
+ /* Assumes ext3 and extN have the same sb_info layout at the start. */
+ if (nbitmaps > EXTN_SB(sb)->s_groups_count)
+ nbitmaps = EXTN_SB(sb)->s_groups_count;
+ if (ngdblocks > EXTN_SB(sb)->s_gdb_count)
+ ngdblocks = EXTN_SB(sb)->s_gdb_count;
+
+ needed += nbitmaps + ngdblocks;
+
+ /* last_rcvd update */
+ needed += EXTN_DATA_TRANS_BLOCKS;
+
+#ifdef CONFIG_QUOTA
+ /* We assume that there will be 1 bit set in s_dquot.flags for each
+ * quota file that is active. This is at least true for now.
+ */
+ needed += hweight32(sb_any_quota_enabled(sb)) *
+ EXTN_SINGLEDATA_TRANS_BLOCKS;
+#endif
+
+ return needed;
+}
+
+/* We have to start a huge journal transaction here to hold all of the
+ * metadata for the pages being written here. This is necessitated by
+ * the fact that we do lots of prepare_write operations before we do
+ * any of the matching commit_write operations, so even if we split
+ * up to use "smaller" transactions none of them could complete until
+ * all of them were opened. By having a single journal transaction,
+ * we eliminate duplicate reservations for common blocks like the
+ * superblock and group descriptors or bitmaps.
+ *
+ * We will start the transaction here, but each prepare_write will
+ * add a refcount to the transaction, and each commit_write will
+ * remove a refcount. The transaction will be closed when all of
+ * the pages have been written.
+ */
+/* Start the single large journal transaction covering a bulk read/write
+ * (see the block comment above). Credits are clamped to what the journal
+ * can grant a single handle. Returns a handle_t* or an ERR_PTR. */
+static void *fsfilt_extN_brw_start(int objcount, struct fsfilt_objinfo *fso,
+ int niocount, void *desc_private)
+{
+ journal_t *journal;
+ handle_t *handle;
+ int needed;
+ ENTRY;
+
+ LASSERT(current->journal_info == desc_private);
+ journal = EXTN_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
+ needed = fsfilt_extN_credits_needed(objcount, fso);
+
+ /* The number of blocks we could _possibly_ dirty can very large.
+ * We reduce our request if it is absurd (and we couldn't get that
+ * many credits for a single handle anyways).
+ *
+ * At some point we have to limit the size of I/Os sent at one time,
+ * increase the size of the journal, or we have to calculate the
+ * actual journal requirements more carefully by checking all of
+ * the blocks instead of being maximally pessimistic. It remains to
+ * be seen if this is a real problem or not.
+ */
+ if (needed > journal->j_max_transaction_buffers) {
+ CERROR("want too many journal credits (%d) using %d instead\n",
+ needed, journal->j_max_transaction_buffers);
+ needed = journal->j_max_transaction_buffers;
+ }
+
+ lock_kernel();
+ handle = journal_start(journal, needed);
+ unlock_kernel();
+ if (IS_ERR(handle)) {
+ CERROR("can't get handle for %d credits: rc = %ld\n", needed,
+ PTR_ERR(handle));
+ } else {
+ LASSERT(handle->h_buffer_credits >= needed);
+ LASSERT(current->journal_info == handle);
+ }
+
+ RETURN(handle);
+}
+
+/* Close the journal transaction behind handle @h. With @force_sync the
+ * commit is made synchronous (recovery wants the data on disk before we
+ * return). Returns the journal_stop() result. */
+static int fsfilt_extN_commit(struct inode *inode, void *h, int force_sync)
+{
+ handle_t *handle = h;
+ int err;
+
+ /* The handle must belong to the current task. */
+ LASSERT(current->journal_info == handle);
+
+ if (force_sync)
+ handle->h_sync = 1; /* recovery likes this */
+
+ lock_kernel();
+ err = journal_stop(handle);
+ unlock_kernel();
+
+ /* journal_stop() detaches the handle from the task. */
+ LASSERT(current->journal_info == NULL);
+
+ return err;
+}
+
+/* Apply attribute changes to @dentry's inode under the BKL. When
+ * @do_trunc is not set, a size change is applied directly to i_size /
+ * i_disksize instead of truncating (see the hack comment below). */
+static int fsfilt_extN_setattr(struct dentry *dentry, void *handle,
+ struct iattr *iattr, int do_trunc)
+{
+ struct inode *inode = dentry->d_inode;
+ int rc;
+
+ lock_kernel();
+
+ /* A _really_ horrible hack to avoid removing the data stored
+ * in the block pointers; this is really the "small" stripe MD data.
+ * We can avoid further hackery by virtue of the MDS file size being
+ * zero all the time (which doesn't invoke block truncate at unlink
+ * time), so we assert we never change the MDS file size from zero. */
+ if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
+ /* ATTR_SIZE would invoke truncate: clear it */
+ iattr->ia_valid &= ~ATTR_SIZE;
+ EXTN_I(inode)->i_disksize = inode->i_size = iattr->ia_size;
+
+ /* make sure _something_ gets set - so new inode
+ * goes to disk (probably won't work over XFS) */
+ if (!(iattr->ia_valid & (ATTR_MODE | ATTR_MTIME | ATTR_CTIME))){
+ iattr->ia_valid |= ATTR_MODE;
+ iattr->ia_mode = inode->i_mode;
+ }
+ }
+
+ /* Don't allow setattr to change file type */
+ iattr->ia_mode = (inode->i_mode & S_IFMT)|(iattr->ia_mode & ~S_IFMT);
+
+ if (inode->i_op->setattr) {
+ rc = inode->i_op->setattr(dentry, iattr);
+ } else {
+ rc = inode_change_ok(inode, iattr);
+ if (!rc)
+ rc = inode_setattr(inode, iattr);
+ }
+
+ unlock_kernel();
+
+ return rc;
+}
+
+/* Store stripe metadata @lmm (@lmm_size bytes) on @inode: in the unused
+ * block-pointer array (i_data) when the file has no blocks and the data
+ * fits, otherwise in the Lustre xattr. Returns 0 or -errno. */
+static int fsfilt_extN_set_md(struct inode *inode, void *handle,
+ void *lmm, int lmm_size)
+{
+ int rc;
+
+ /* Nasty hack city - store stripe MD data in the block pointers if
+ * it will fit, because putting it in an EA currently kills the MDS
+ * performance. We'll fix this with "fast EAs" in the future.
+ */
+ if (inode->i_blocks == 0 && lmm_size <= sizeof(EXTN_I(inode)->i_data) -
+ sizeof(EXTN_I(inode)->i_data[0])) {
+ /* XXX old_size is debugging only */
+ int old_size = EXTN_I(inode)->i_data[0];
+ if (old_size != 0) {
+ LASSERT(old_size < sizeof(EXTN_I(inode)->i_data));
+ CERROR("setting EA on %lu again... interesting\n",
+ inode->i_ino);
+ }
+
+ /* i_data[0] holds the little-endian size; payload follows. */
+ EXTN_I(inode)->i_data[0] = cpu_to_le32(lmm_size);
+ memcpy(&EXTN_I(inode)->i_data[1], lmm, lmm_size);
+ mark_inode_dirty(inode);
+ return 0;
+ } else {
+ down(&inode->i_sem);
+ lock_kernel();
+ rc = extN_xattr_set(handle, inode, EXTN_XATTR_INDEX_LUSTRE,
+ XATTR_LUSTRE_MDS_OBJID, lmm, lmm_size, 0);
+ unlock_kernel();
+ up(&inode->i_sem);
+ }
+
+ if (rc)
+ CERROR("error adding MD data to inode %lu: rc = %d\n",
+ inode->i_ino, rc);
+ return rc;
+}
+
+/* Retrieve stripe metadata stored by fsfilt_extN_set_md(): first try the
+ * i_data fast path, then the xattr. With @lmm == NULL only the size is
+ * returned. Returns the MD size, 0 if none, or -errno. */
+static int fsfilt_extN_get_md(struct inode *inode, void *lmm, int lmm_size)
+{
+ int rc;
+
+ /* Fast path: MD stashed in the unused block-pointer array. */
+ if (inode->i_blocks == 0 && EXTN_I(inode)->i_data[0]) {
+ int size = le32_to_cpu(EXTN_I(inode)->i_data[0]);
+ LASSERT(size < sizeof(EXTN_I(inode)->i_data));
+ if (lmm) {
+ if (size > lmm_size)
+ return -ERANGE;
+ memcpy(lmm, &EXTN_I(inode)->i_data[1], size);
+ }
+ return size;
+ }
+
+ down(&inode->i_sem);
+ lock_kernel();
+ rc = extN_xattr_get(inode, EXTN_XATTR_INDEX_LUSTRE,
+ XATTR_LUSTRE_MDS_OBJID, lmm, lmm_size);
+ unlock_kernel();
+ up(&inode->i_sem);
+
+ /* This gives us the MD size */
+ if (lmm == NULL)
+ return (rc == -ENODATA) ? 0 : rc;
+
+ if (rc < 0) {
+ CDEBUG(D_INFO, "error getting EA %s from inode %lu: "
+ "rc = %d\n", XATTR_LUSTRE_MDS_OBJID, inode->i_ino, rc);
+ /* Missing EA means no striping: zero the caller's buffer. */
+ memset(lmm, 0, lmm_size);
+ return (rc == -ENODATA) ? 0 : rc;
+ }
+
+ return rc;
+}
+
+/* Read @count bytes at *@off. Regular files go through f_op->read();
+ * directories are read block-by-block with extN_bread(), and holes are
+ * filled with an empty fake directory entry spanning the whole block.
+ * Returns bytes read or -errno.
+ *
+ * Fix: rec_len is a 16-bit little-endian field in extN_dir_entry_2, so
+ * it must be set with cpu_to_le16(); cpu_to_le32() truncates to 0 on
+ * big-endian machines, producing a corrupt directory block. */
+static ssize_t fsfilt_extN_readpage(struct file *file, char *buf, size_t count,
+ loff_t *off)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ int rc = 0;
+
+ if (S_ISREG(inode->i_mode))
+ rc = file->f_op->read(file, buf, count, off);
+ else {
+ const int blkbits = inode->i_sb->s_blocksize_bits;
+ const int blksize = inode->i_sb->s_blocksize;
+
+ CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
+ count, inode->i_ino, *off);
+ while (count > 0) {
+ struct buffer_head *bh;
+
+ bh = NULL;
+ if (*off < inode->i_size) {
+ int err = 0;
+
+ bh = extN_bread(NULL, inode, *off >> blkbits,
+ 0, &err);
+
+ CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);
+
+ if (bh) {
+ memcpy(buf, bh->b_data, blksize);
+ brelse(bh);
+ } else if (err) {
+ /* XXX in theory we should just fake
+ * this buffer and continue like extN,
+ * especially if this is a partial read
+ */
+ CERROR("error read dir %lu+%llu: %d\n",
+ inode->i_ino, *off, err);
+ RETURN(err);
+ }
+ }
+ if (!bh) {
+ /* Hole (or read past EOF): synthesize one empty
+ * dirent covering the entire block. */
+ struct extN_dir_entry_2 *fake = (void *)buf;
+
+ CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
+ memset(fake, 0, sizeof(*fake));
+ fake->rec_len = cpu_to_le16(blksize);
+ }
+ count -= blksize;
+ buf += blksize;
+ *off += blksize;
+ rc += blksize;
+ }
+ }
+
+ return rc;
+}
+
+/* Journal commit callback: hand the commit notification to the MDS/OBD
+ * completion function, then free the descriptor. The cast is valid
+ * because cb_jcb is the first member of struct fsfilt_cb_data. */
+static void fsfilt_extN_cb_func(struct journal_callback *jcb, int error)
+{
+ struct fsfilt_cb_data *cb = (struct fsfilt_cb_data *)jcb;
+
+ cb->cb_func(cb->cb_obd, cb->cb_last_rcvd, cb->cb_data, error);
+ OBD_SLAB_FREE(cb, fcb_cache, sizeof *cb);
+ atomic_dec(&fcb_cache_count);
+}
+
+/* Register @cb_func to run when the transaction behind @handle commits,
+ * so the MDS/OST can advance its last-committed counter. The descriptor
+ * is freed by fsfilt_extN_cb_func(). Returns 0 or -ENOMEM.
+ *
+ * Fix: RETURN() was used without a matching ENTRY, unbalancing the
+ * portals debug trace; add ENTRY and make both exits use RETURN(). */
+static int fsfilt_extN_set_last_rcvd(struct obd_device *obd, __u64 last_rcvd,
+ void *handle, fsfilt_cb_t cb_func,
+ void *cb_data)
+{
+ struct fsfilt_cb_data *fcb;
+ ENTRY;
+
+ OBD_SLAB_ALLOC(fcb, fcb_cache, GFP_NOFS, sizeof *fcb);
+ if (fcb == NULL)
+ RETURN(-ENOMEM);
+
+ atomic_inc(&fcb_cache_count);
+ fcb->cb_func = cb_func;
+ fcb->cb_obd = obd;
+ fcb->cb_last_rcvd = last_rcvd;
+ fcb->cb_data = cb_data;
+
+ CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
+ lock_kernel();
+ journal_callback_set(handle, fsfilt_extN_cb_func,
+ (struct journal_callback *)fcb);
+ unlock_kernel();
+
+ RETURN(0);
+}
+
+/* Switch the open file to data journaling mode by setting the per-inode
+ * JOURNAL_DATA flag. Always succeeds. */
+static int fsfilt_extN_journal_data(struct file *filp)
+{
+ struct inode *ino = filp->f_dentry->d_inode;
+
+ EXTN_I(ino)->i_flags |= EXTN_JOURNAL_DATA_FL;
+ return 0;
+}
+
+/*
+ * We need to hack the return value for the free inode counts because
+ * the current EA code requires one filesystem block per inode with EAs,
+ * so it is possible to run out of blocks before we run out of inodes.
+ *
+ * This can be removed when the extN EA code is fixed.
+ */
+static int fsfilt_extN_statfs(struct super_block *sb, struct obd_statfs *osfs)
+{
+ struct kstatfs sfs;
+ int rc = vfs_statfs(sb, &sfs);
+
+ if (!rc && sfs.f_bfree < sfs.f_ffree) {
+ sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
+ sfs.f_ffree = sfs.f_bfree;
+ }
+
+ statfs_pack(osfs, &sfs);
+ return rc;
+}
+
+/* Force a journal commit on @sb; thin wrapper over extN_force_commit(). */
+static int fsfilt_extN_sync(struct super_block *sb)
+{
+ return extN_force_commit(sb);
+}
+
+/* Pre-allocate @nblocks block mappings for a SAN direct write and extend
+ * the file to @newsize; delegates to the extN patch's helper. */
+extern int extN_prep_san_write(struct inode *inode, long *blocks,
+ int nblocks, loff_t newsize);
+static int fsfilt_extN_prep_san_write(struct inode *inode, long *blocks,
+ int nblocks, loff_t newsize)
+{
+ return extN_prep_san_write(inode, blocks, nblocks, newsize);
+}
+
+/* Read @size bytes at *@offs by mapping the block directly via
+ * extN_bread(); the request must fit within one filesystem block.
+ * Advances *@offs on success; returns 0 or -errno. */
+static int fsfilt_extN_read_record(struct file * file, void *buf,
+ int size, loff_t *offs)
+{
+ struct buffer_head *bh;
+ unsigned long block, boffs;
+ struct inode *inode = file->f_dentry->d_inode;
+ int err;
+
+ /* Refuse reads past EOF rather than short-reading. */
+ if (inode->i_size < *offs + size) {
+ CERROR("file size %llu is too short for read %u@%llu\n",
+ inode->i_size, size, *offs);
+ return -EIO;
+ }
+
+ block = *offs >> inode->i_blkbits;
+ /* NULL handle: read-only, no journal transaction needed. */
+ bh = extN_bread(NULL, inode, block, 0, &err);
+ if (!bh) {
+ CERROR("can't read block: %d\n", err);
+ return err;
+ }
+
+ boffs = (unsigned)*offs % bh->b_size;
+ if (boffs + size > bh->b_size) {
+ CERROR("request crosses block's border. offset %llu, size %u\n",
+ *offs, size);
+ brelse(bh);
+ return -EIO;
+ }
+
+ memcpy(buf, bh->b_data + boffs, size);
+ brelse(bh);
+ *offs += size;
+ return 0;
+}
+
+/* Write @size bytes at *@offs as journaled metadata inside a private
+ * transaction; the record must fit within one filesystem block. With
+ * @force_sync the commit is synchronous. Advances *@offs on success;
+ * returns 0 or -errno. */
+static int fsfilt_extN_write_record(struct file *file, void *buf, int size,
+ loff_t *offs, int force_sync)
+{
+ struct buffer_head *bh;
+ unsigned long block, boffs;
+ struct inode *inode = file->f_dentry->d_inode;
+ loff_t old_size = inode->i_size;
+ journal_t *journal;
+ handle_t *handle;
+ int err;
+
+ /* One data block plus bitmap/group-descriptor updates. */
+ journal = EXTN_SB(inode->i_sb)->s_journal;
+ handle = journal_start(journal, EXTN_DATA_TRANS_BLOCKS + 2);
+ if (IS_ERR(handle)) {
+ CERROR("can't start transaction\n");
+ return PTR_ERR(handle);
+ }
+
+ block = *offs >> inode->i_blkbits;
+ /* Grow i_size to the end of the target block; re-check under i_sem
+ * to close the race with concurrent extenders. */
+ if (*offs + size > inode->i_size) {
+ down(&inode->i_sem);
+ if (*offs + size > inode->i_size)
+ inode->i_size = ((loff_t)block + 1) << inode->i_blkbits;
+ up(&inode->i_sem);
+ }
+
+ /* create=1: allocate the block if it does not yet exist. */
+ bh = extN_bread(handle, inode, block, 1, &err);
+ if (!bh) {
+ CERROR("can't read/create block: %d\n", err);
+ goto out;
+ }
+
+ /* This is a hack only needed because extN_get_block_handle() updates
+ * i_disksize after marking the inode dirty in extN_splice_branch().
+ * We will fix that when we get a chance, as extN_mark_inode_dirty()
+ * is not without cost, nor is it even exported.
+ */
+ if (inode->i_size > old_size)
+ mark_inode_dirty(inode);
+
+ boffs = (unsigned)*offs % bh->b_size;
+ if (boffs + size > bh->b_size) {
+ CERROR("request crosses block's border. offset %llu, size %u\n",
+ *offs, size);
+ err = -EIO;
+ goto out;
+ }
+
+ /* Journal write protocol: get write access, modify, dirty metadata. */
+ err = extN_journal_get_write_access(handle, bh);
+ if (err) {
+ CERROR("journal_get_write_access() returned error %d\n", err);
+ goto out;
+ }
+ memcpy(bh->b_data + boffs, buf, size);
+ err = extN_journal_dirty_metadata(handle, bh);
+ if (err) {
+ CERROR("journal_dirty_metadata() returned error %d\n", err);
+ goto out;
+ }
+
+ if (force_sync)
+ handle->h_sync = 1; /* recovery likes this */
+out:
+ if (bh)
+ brelse(bh);
+ journal_stop(handle);
+ if (err == 0)
+ *offs += size;
+ return err;
+}
+
+/* fsfilt method table for extN, registered with the generic fsfilt layer.
+ * Uses standard C99 designated initializers instead of the obsolete GCC
+ * "field:" labeled-element extension. */
+static struct fsfilt_operations fsfilt_extN_ops = {
+ .fs_type = "extN",
+ .fs_owner = THIS_MODULE,
+ .fs_start = fsfilt_extN_start,
+ .fs_brw_start = fsfilt_extN_brw_start,
+ .fs_commit = fsfilt_extN_commit,
+ .fs_setattr = fsfilt_extN_setattr,
+ .fs_set_md = fsfilt_extN_set_md,
+ .fs_get_md = fsfilt_extN_get_md,
+ .fs_readpage = fsfilt_extN_readpage,
+ .fs_journal_data = fsfilt_extN_journal_data,
+ .fs_set_last_rcvd = fsfilt_extN_set_last_rcvd,
+ .fs_statfs = fsfilt_extN_statfs,
+ .fs_sync = fsfilt_extN_sync,
+ .fs_prep_san_write = fsfilt_extN_prep_san_write,
+ .fs_write_record = fsfilt_extN_write_record,
+ .fs_read_record = fsfilt_extN_read_record,
+};
+
+/* Module init: create the callback-descriptor slab cache and register
+ * the extN operations; undoes the cache on registration failure. */
+static int __init fsfilt_extN_init(void)
+{
+ int rc;
+
+ //rc = extN_xattr_register();
+ fcb_cache = kmem_cache_create("fsfilt_extN_fcb",
+ sizeof(struct fsfilt_cb_data), 0,
+ 0, NULL, NULL);
+ if (!fcb_cache) {
+ CERROR("error allocating fsfilt journal callback cache\n");
+ GOTO(out, rc = -ENOMEM);
+ }
+
+ rc = fsfilt_register_ops(&fsfilt_extN_ops);
+
+ if (rc)
+ kmem_cache_destroy(fcb_cache);
+out:
+ return rc;
+}
+
+/* Module exit: unregister the extN operations and destroy the callback
+ * cache; complains if callbacks are still outstanding (a leak). */
+static void __exit fsfilt_extN_exit(void)
+{
+ int rc;
+
+ fsfilt_unregister_ops(&fsfilt_extN_ops);
+ /* 2.4 kmem_cache_destroy() returns non-zero if objects remain. */
+ rc = kmem_cache_destroy(fcb_cache);
+
+ if (rc || atomic_read(&fcb_cache_count)) {
+ CERROR("can't free fsfilt callback cache: count %d, rc = %d\n",
+ atomic_read(&fcb_cache_count), rc);
+ }
+
+ //rc = extN_xattr_unregister();
+}
+
+MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_DESCRIPTION("Lustre extN Filesystem Helper v0.1");
+MODULE_LICENSE("GPL");
+
+module_init(fsfilt_extN_init);
+module_exit(fsfilt_extN_exit);
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * lustre/lib/fsfilt_reiserfs.c
+ * Lustre filesystem abstraction routines
+ *
+ * Copyright (C) 2002 Cluster File Systems, Inc.
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ *
+ * This file is part of Lustre, http://www.lustre.org.
+ *
+ * Lustre is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Lustre is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Lustre; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * NOTE - According to Hans Reiser, this could actually be implemented more
+ * efficiently than creating a directory and putting ASCII objids in it.
+ * Instead, we should return the reiserfs object ID as the lustre objid
+ * (although I'm not sure what impact that would have on backup/restore).
+ */
+
+#define DEBUG_SUBSYSTEM S_FILTER
+
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+#include <linux/init.h>
+#include <asm/statfs.h>
+#endif
+#include <linux/kp30.h>
+#include <linux/lustre_fsfilt.h>
+#include <linux/obd.h>
+#include <linux/obd_class.h>
+#include <linux/module.h>
+
+/* reiserfs has no exported transaction API here, so "start" returns a
+ * magic cookie that fsfilt_reiserfs_commit() validates. */
+static void *fsfilt_reiserfs_start(struct inode *inode, int op,
+ void *desc_private)
+{
+ return (void *)0xf00f00be;
+}
+
+/* Bulk-write "transaction" start: same fake magic cookie as above. */
+static void *fsfilt_reiserfs_brw_start(int objcount, struct fsfilt_objinfo *fso,
+ int niocount, void *desc_private)
+{
+ return (void *)0xf00f00be;
+}
+
+/* Transactions are faked (see fsfilt_reiserfs_start), so commit only
+ * validates the magic handle cookie; there is nothing to flush even
+ * when force_sync is set.
+ *
+ * Fix: add the missing trailing newline to the CERROR message, matching
+ * every other CERROR in these files. */
+static int fsfilt_reiserfs_commit(struct inode *inode, void *handle,
+ int force_sync)
+{
+ if (handle != (void *)0xf00f00be) {
+ CERROR("bad handle %p\n", handle);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Apply attribute changes to @dentry's inode under the BKL; when
+ * @do_trunc is not set, a size change is applied directly to i_size
+ * instead of truncating (same stripe-MD hack as the extN version).
+ *
+ * Fix: "!iattr->ia_valid & ATTR_MODE" parsed as
+ * "(!iattr->ia_valid) & ATTR_MODE", which is almost always 0, so the
+ * fallback ATTR_MODE was never set. The intended test (cf. the extN
+ * implementation) is !(iattr->ia_valid & ATTR_MODE). */
+static int fsfilt_reiserfs_setattr(struct dentry *dentry, void *handle,
+ struct iattr *iattr, int do_trunc)
+{
+ struct inode *inode = dentry->d_inode;
+ int rc;
+
+ lock_kernel();
+
+ /* A _really_ horrible hack to avoid removing the data stored
+ * in the block pointers; this is really the "small" stripe MD data.
+ * We can avoid further hackery by virtue of the MDS file size being
+ * zero all the time (which doesn't invoke block truncate at unlink
+ * time), so we assert we never change the MDS file size from zero.
+ */
+ if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
+ /* ATTR_SIZE would invoke truncate: clear it */
+ iattr->ia_valid &= ~ATTR_SIZE;
+ inode->i_size = iattr->ia_size;
+
+ /* make sure _something_ gets set - so new inode
+ * goes to disk (probably won't work over XFS)
+ */
+ if (!(iattr->ia_valid & ATTR_MODE)) {
+ iattr->ia_valid |= ATTR_MODE;
+ iattr->ia_mode = inode->i_mode;
+ }
+ }
+ if (inode->i_op->setattr)
+ rc = inode->i_op->setattr(dentry, iattr);
+ else
+ rc = inode_setattr(inode, iattr);
+
+ unlock_kernel();
+
+ return rc;
+}
+
+/* Stripe MD storage is not implemented for reiserfs; always -ENOSYS. */
+static int fsfilt_reiserfs_set_md(struct inode *inode, void *handle,
+ void *lmm, int lmm_size)
+{
+ /* XXX write stripe data into MDS file itself */
+ CERROR("not implemented yet\n");
+
+ return -ENOSYS;
+}
+
+/* Stripe MD retrieval is not implemented; a NULL @lmm size probe returns
+ * i_size (NOTE(review): loff_t truncated to int — assumed small here). */
+static int fsfilt_reiserfs_get_md(struct inode *inode, void *lmm, int lmm_size)
+{
+ if (lmm == NULL)
+ return inode->i_size;
+
+ CERROR("not implemented yet\n");
+ return -ENOSYS;
+}
+
+/* Plain pass-through to the file's read method (no directory special
+ * case, unlike the extN version). */
+static ssize_t fsfilt_reiserfs_readpage(struct file *file, char *buf, size_t count,
+ loff_t *offset)
+{
+ return file->f_op->read(file, buf, count, offset);
+}
+
+/* No journal commit-callback support for reiserfs: invoke the callback
+ * immediately (pretending the commit already happened) and warn at most
+ * once every 300 seconds. */
+static int fsfilt_reiserfs_set_last_rcvd(struct obd_device *obd,
+ __u64 last_rcvd, void *handle,
+ fsfilt_cb_t cb_func, void *cb_data)
+{
+ static long next = 0;
+
+ if (time_after(jiffies, next)) {
+ CERROR("no journal callback kernel patch, faking it...\n");
+ next = jiffies + 300 * HZ;
+ }
+
+ cb_func(obd, last_rcvd, cb_data, 0);
+
+ return 0;
+}
+
+/* Data journaling not implemented for reiserfs; warns and returns 0. */
+static int fsfilt_reiserfs_journal_data(struct file *filp)
+{
+ CERROR("not implemented yet\n");
+ return 0;
+}
+
+/* Straight vfs_statfs() packed into the obd_statfs wire format (no
+ * free-inode clamping, unlike the extN version). */
+static int fsfilt_reiserfs_statfs(struct super_block *sb, struct obd_statfs *osfs)
+{
+ struct statfs sfs;
+ int rc = vfs_statfs(sb, &sfs);
+
+ statfs_pack(osfs, &sfs);
+ return rc;
+}
+
+/* Forced commit not implemented for reiserfs; always -ENOSYS. */
+static int fsfilt_reiserfs_sync(struct super_block *sb)
+{
+ CERROR("not implemented yet\n");
+ return -ENOSYS;
+}
+
+/* fsfilt method table for reiserfs (record/SAN methods unimplemented and
+ * left NULL). Uses standard C99 designated initializers instead of the
+ * obsolete GCC "field:" labeled-element extension. */
+static struct fsfilt_operations fsfilt_reiserfs_ops = {
+ .fs_type = "reiserfs",
+ .fs_owner = THIS_MODULE,
+ .fs_start = fsfilt_reiserfs_start,
+ .fs_brw_start = fsfilt_reiserfs_brw_start,
+ .fs_commit = fsfilt_reiserfs_commit,
+ .fs_setattr = fsfilt_reiserfs_setattr,
+ .fs_set_md = fsfilt_reiserfs_set_md,
+ .fs_get_md = fsfilt_reiserfs_get_md,
+ .fs_readpage = fsfilt_reiserfs_readpage,
+ .fs_journal_data = fsfilt_reiserfs_journal_data,
+ .fs_set_last_rcvd = fsfilt_reiserfs_set_last_rcvd,
+ .fs_statfs = fsfilt_reiserfs_statfs,
+ .fs_sync = fsfilt_reiserfs_sync,
+};
+
+/* Module init: register the reiserfs fsfilt operations. */
+static int __init fsfilt_reiserfs_init(void)
+{
+ return fsfilt_register_ops(&fsfilt_reiserfs_ops);
+}
+
+/* Module exit: unregister the reiserfs fsfilt operations. */
+static void __exit fsfilt_reiserfs_exit(void)
+{
+ fsfilt_unregister_ops(&fsfilt_reiserfs_ops);
+}
+
+MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_DESCRIPTION("Lustre reiserfs Filesystem Helper v0.1");
+MODULE_LICENSE("GPL");
+
+module_init(fsfilt_reiserfs_init);
+module_exit(fsfilt_reiserfs_exit);
--- /dev/null
+/* Init/cleanup entry points for the per-filesystem fsfilt helper modules. */
+int fsfilt_ext3_init(void);
+void fsfilt_ext3_exit(void);
+
+int fsfilt_extN_init(void);
+void fsfilt_extN_exit(void);
+
+/* NOTE(review): the reiserfs module defines fsfilt_reiserfs_init/_exit,
+ * but these prototypes say "reiser" — confirm which spelling callers use. */
+int fsfilt_reiser_init(void);
+void fsfilt_reiser_exit(void);
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * lustre/lib/fsfilt_ext3.c
+ * Lustre filesystem abstraction routines
+ *
+ * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ *
+ * This file is part of Lustre, http://www.lustre.org.
+ *
+ * Lustre is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Lustre is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Lustre; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef EXPORT_SYMTAB
+# define EXPORT_SYMTAB
+#endif
+
+#define DEBUG_SUBSYSTEM S_FILTER
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <asm/unistd.h>
+#include <linux/jbd.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/ext3_fs.h>
+#include <linux/ext3_jbd.h>
+#include <linux/version.h>
+/* XXX ugh */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+ #include <linux/ext3_xattr.h>
+#else
+ #include <linux/../../fs/ext3/xattr.h>
+#endif
+#include <linux/kp30.h>
+#include <linux/lustre_fsfilt.h>
+#include <linux/obd.h>
+#include <linux/obd_class.h>
+#include <linux/module.h>
+#include <linux/lustre_compat25.h>
+#include <linux/lvfs.h>
+#include "lvfs_internal.h"
+
+#include <linux/obd.h>
+#include <linux/lustre_lib.h>
+
+/* Debugging checks only needed during development; compiled out unless
+ * OBD_CTXT_DEBUG. The msg argument is documentation only — it is not
+ * evaluated or printed. */
+#ifdef OBD_CTXT_DEBUG
+# define ASSERT_CTXT_MAGIC(magic) LASSERT((magic) == OBD_RUN_CTXT_MAGIC)
+# define ASSERT_NOT_KERNEL_CTXT(msg) LASSERT(!segment_eq(get_fs(), get_ds()))
+# define ASSERT_KERNEL_CTXT(msg) LASSERT(segment_eq(get_fs(), get_ds()))
+#else
+# define ASSERT_CTXT_MAGIC(magic) do {} while(0)
+# define ASSERT_NOT_KERNEL_CTXT(msg) do {} while(0)
+# define ASSERT_KERNEL_CTXT(msg) do {} while(0)
+#endif
+
+/* push / pop to root of obd store */
+void push_ctxt(struct obd_run_ctxt *save, struct obd_run_ctxt *new_ctx,
+ struct obd_ucred *uc)
+{
+ //ASSERT_NOT_KERNEL_CTXT("already in kernel context!\n");
+ ASSERT_CTXT_MAGIC(new_ctx->magic);
+ OBD_SET_CTXT_MAGIC(save);
+
+ /*
+ CDEBUG(D_INFO,
+ "= push %p->%p = cur fs %p pwd %p:d%d:i%d (%*s), pwdmnt %p:%d\n",
+ save, current, current->fs, current->fs->pwd,
+ atomic_read(¤t->fs->pwd->d_count),
+ atomic_read(¤t->fs->pwd->d_inode->i_count),
+ current->fs->pwd->d_name.len, current->fs->pwd->d_name.name,
+ current->fs->pwdmnt,
+ atomic_read(¤t->fs->pwdmnt->mnt_count));
+ */
+
+ save->fs = get_fs();
+ LASSERT(atomic_read(¤t->fs->pwd->d_count));
+ LASSERT(atomic_read(&new_ctx->pwd->d_count));
+ save->pwd = dget(current->fs->pwd);
+ save->pwdmnt = mntget(current->fs->pwdmnt);
+ save->ngroups = current->ngroups;
+
+ LASSERT(save->pwd);
+ LASSERT(save->pwdmnt);
+ LASSERT(new_ctx->pwd);
+ LASSERT(new_ctx->pwdmnt);
+
+ if (uc) {
+ save->ouc.ouc_fsuid = current->fsuid;
+ save->ouc.ouc_fsgid = current->fsgid;
+ save->ouc.ouc_cap = current->cap_effective;
+ save->ouc.ouc_suppgid1 = current->groups[0];
+ save->ouc.ouc_suppgid2 = current->groups[1];
+
+ current->fsuid = uc->ouc_fsuid;
+ current->fsgid = uc->ouc_fsgid;
+ current->cap_effective = uc->ouc_cap;
+ current->ngroups = 0;
+
+ if (uc->ouc_suppgid1 != -1)
+ current->groups[current->ngroups++] = uc->ouc_suppgid1;
+ if (uc->ouc_suppgid2 != -1)
+ current->groups[current->ngroups++] = uc->ouc_suppgid2;
+ }
+ set_fs(new_ctx->fs);
+ set_fs_pwd(current->fs, new_ctx->pwdmnt, new_ctx->pwd);
+
+ /*
+ CDEBUG(D_INFO,
+ "= push %p->%p = cur fs %p pwd %p:d%d:i%d (%*s), pwdmnt %p:%d\n",
+ new_ctx, current, current->fs, current->fs->pwd,
+ atomic_read(¤t->fs->pwd->d_count),
+ atomic_read(¤t->fs->pwd->d_inode->i_count),
+ current->fs->pwd->d_name.len, current->fs->pwd->d_name.name,
+ current->fs->pwdmnt,
+ atomic_read(¤t->fs->pwdmnt->mnt_count));
+ */
+}
+EXPORT_SYMBOL(push_ctxt);
+
+/* Undo a matching push_ctxt(): restore the task's saved fs context and
+ * credentials from @saved, dropping the references push_ctxt() took.
+ * @new_ctx must be the context that was pushed (asserted below). */
+void pop_ctxt(struct obd_run_ctxt *saved, struct obd_run_ctxt *new_ctx,
+ struct obd_ucred *uc)
+{
+ //printk("pc0");
+ ASSERT_CTXT_MAGIC(saved->magic);
+ //printk("pc1");
+ ASSERT_KERNEL_CTXT("popping non-kernel context!\n");
+
+ /*
+ CDEBUG(D_INFO,
+ " = pop %p==%p = cur %p pwd %p:d%d:i%d (%*s), pwdmnt %p:%d\n",
+ new_ctx, current, current->fs, current->fs->pwd,
+ atomic_read(&current->fs->pwd->d_count),
+ atomic_read(&current->fs->pwd->d_inode->i_count),
+ current->fs->pwd->d_name.len, current->fs->pwd->d_name.name,
+ current->fs->pwdmnt,
+ atomic_read(&current->fs->pwdmnt->mnt_count));
+ */
+
+ /* We must still be in the context that was pushed. */
+ LASSERT(current->fs->pwd == new_ctx->pwd);
+ LASSERT(current->fs->pwdmnt == new_ctx->pwdmnt);
+
+ set_fs(saved->fs);
+ set_fs_pwd(current->fs, saved->pwdmnt, saved->pwd);
+
+ /* Drop the references push_ctxt() took on the saved cwd. */
+ dput(saved->pwd);
+ mntput(saved->pwdmnt);
+ if (uc) {
+ current->fsuid = saved->ouc.ouc_fsuid;
+ current->fsgid = saved->ouc.ouc_fsgid;
+ current->cap_effective = saved->ouc.ouc_cap;
+ current->ngroups = saved->ngroups;
+ current->groups[0] = saved->ouc.ouc_suppgid1;
+ current->groups[1] = saved->ouc.ouc_suppgid2;
+ }
+
+ /*
+ CDEBUG(D_INFO,
+ "= pop %p->%p = cur fs %p pwd %p:d%d:i%d (%*s), pwdmnt %p:%d\n",
+ saved, current, current->fs, current->fs->pwd,
+ atomic_read(&current->fs->pwd->d_count),
+ atomic_read(&current->fs->pwd->d_inode->i_count),
+ current->fs->pwd->d_name.len, current->fs->pwd->d_name.name,
+ current->fs->pwdmnt,
+ atomic_read(&current->fs->pwdmnt->mnt_count));
+ */
+}
+EXPORT_SYMBOL(pop_ctxt);
+
+/* utility to make a file */
+struct dentry *simple_mknod(struct dentry *dir, char *name, int mode)
+{
+ struct dentry *dchild;
+ int err = 0;
+ ENTRY;
+
+ ASSERT_KERNEL_CTXT("kernel doing mknod outside kernel context\n");
+ CDEBUG(D_INODE, "creating file %*s\n", (int)strlen(name), name);
+
+ dchild = ll_lookup_one_len(name, dir, strlen(name));
+ if (IS_ERR(dchild))
+ GOTO(out_up, dchild);
+
+ if (dchild->d_inode) {
+ if (!S_ISREG(dchild->d_inode->i_mode))
+ GOTO(out_err, err = -EEXIST);
+
+ GOTO(out_up, dchild);
+ }
+
+ err = ll_vfs_create(dir->d_inode, dchild, (mode & ~S_IFMT) | S_IFREG,
+ NULL);
+ if (err)
+ GOTO(out_err, err);
+
+ RETURN(dchild);
+
+out_err:
+ dput(dchild);
+ dchild = ERR_PTR(err);
+out_up:
+ return dchild;
+}
+EXPORT_SYMBOL(simple_mknod);
+
+/* utility to make a directory */
+struct dentry *simple_mkdir(struct dentry *dir, char *name, int mode)
+{
+ struct dentry *dchild;
+ int err = 0;
+ ENTRY;
+
+ ASSERT_KERNEL_CTXT("kernel doing mkdir outside kernel context\n");
+ CDEBUG(D_INODE, "creating directory %*s\n", (int)strlen(name), name);
+ dchild = ll_lookup_one_len(name, dir, strlen(name));
+ if (IS_ERR(dchild))
+ GOTO(out_up, dchild);
+
+ if (dchild->d_inode) {
+ if (!S_ISDIR(dchild->d_inode->i_mode))
+ GOTO(out_err, err = -ENOTDIR);
+
+ GOTO(out_up, dchild);
+ }
+
+ err = vfs_mkdir(dir->d_inode, dchild, mode);
+ if (err)
+ GOTO(out_err, err);
+
+ RETURN(dchild);
+
+out_err:
+ dput(dchild);
+ dchild = ERR_PTR(err);
+out_up:
+ return dchild;
+}
+EXPORT_SYMBOL(simple_mkdir);
+
+/*
+ * Read a file from within kernel context. Prior to calling this
+ * function we should already have done a push_ctxt().
+ */
+int lustre_fread(struct file *file, void *buf, int len, loff_t *off)
+{
+ ASSERT_KERNEL_CTXT("kernel doing read outside kernel context\n");
+ if (!file || !file->f_op || !file->f_op->read || !off)
+ RETURN(-ENOSYS);
+
+ return file->f_op->read(file, buf, len, off);
+}
+EXPORT_SYMBOL(lustre_fread);
+
+/*
+ * Write a file from within kernel context. Prior to calling this
+ * function we should already have done a push_ctxt().
+ */
+int lustre_fwrite(struct file *file, const void *buf, int len, loff_t *off)
+{
+ ENTRY;
+ ASSERT_KERNEL_CTXT("kernel doing write outside kernel context\n");
+ if (!file)
+ RETURN(-ENOENT);
+ if (!file->f_op)
+ RETURN(-ENOSYS);
+ if (!off)
+ RETURN(-EINVAL);
+
+ if (!file->f_op->write)
+ RETURN(-EROFS);
+
+ RETURN(file->f_op->write(file, buf, len, off));
+}
+EXPORT_SYMBOL(lustre_fwrite);
+
+/*
+ * Sync a file from within kernel context. Prior to calling this
+ * function we should already have done a push_ctxt().
+ */
+int lustre_fsync(struct file *file)
+{
+ ENTRY;
+ ASSERT_KERNEL_CTXT("kernel doing sync outside kernel context\n");
+ if (!file || !file->f_op || !file->f_op->fsync)
+ RETURN(-ENOSYS);
+
+ RETURN(file->f_op->fsync(file, file->f_dentry, 0));
+}
+EXPORT_SYMBOL(lustre_fsync);
+
+struct l_file *l_dentry_open(struct obd_run_ctxt *ctxt, struct l_dentry *de,
+ int flags)
+{
+ mntget(ctxt->pwdmnt);
+ return dentry_open(de, ctxt->pwdmnt, flags);
+}
+EXPORT_SYMBOL(l_dentry_open);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+
/* Module init hook (2.4 kernels only): nothing to set up yet. */
static int __init lvfs_linux_init(void)
{
        RETURN(0);
}
+
/* Module exit hook (2.4 kernels only): nothing to tear down. */
static void __exit lvfs_linux_exit(void)
{

        return;
}
+
+MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_DESCRIPTION("Lustre VFS Filesystem Helper v0.1");
+MODULE_LICENSE("GPL");
+
+module_init(lvfs_linux_init);
+module_exit(lvfs_linux_exit);
+
+#else
+
#warning "lvfs_linux_init() and lvfs_linux_exit() aren't called on 2.6. MUST be fixed"
+
+
+#endif
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * Copyright (C) 2001-2003 Cluster File Systems, Inc.
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ *
+ * This file is part of Lustre, http://www.lustre.org.
+ *
+ * Lustre is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Lustre is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Lustre; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * OST<->MDS recovery logging infrastructure.
+ *
+ * Invariants in implementation:
+ * - we do not share logs among different OST<->MDS connections, so that
+ * if an OST or MDS fails it need only look at log(s) relevant to itself
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+#ifndef EXPORT_SYMTAB
+#define EXPORT_SYMTAB
+#endif
+
+#include <linux/fs.h>
+#include <linux/obd_class.h>
+#include <linux/lustre_log.h>
+#include <portals/list.h>
+
+/* Allocate a new log or catalog handle */
+struct llog_handle *llog_alloc_handle(void)
+{
+ struct llog_handle *loghandle;
+ ENTRY;
+
+ OBD_ALLOC(loghandle, sizeof(*loghandle));
+ if (loghandle == NULL)
+ RETURN(ERR_PTR(-ENOMEM));
+
+ sema_init(&loghandle->lgh_lock, 1);
+
+ RETURN(loghandle);
+}
+EXPORT_SYMBOL(llog_alloc_handle);
+
+
+void llog_free_handle(struct llog_handle *loghandle)
+{
+ if (!loghandle)
+ return;
+
+ if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)
+ list_del_init(&loghandle->u.phd.phd_entry);
+ if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
+ LASSERT(list_empty(&loghandle->u.chd.chd_head));
+
+ OBD_FREE(loghandle->lgh_hdr, LLOG_CHUNK_SIZE);
+ OBD_FREE(loghandle, sizeof(*loghandle));
+}
+EXPORT_SYMBOL(llog_free_handle);
+
+
/* Cancel record @index: clear its bit in the log header's bitmap and
 * persist the header.  When the log is flagged LLOG_F_ZAP_WHEN_EMPTY
 * and this cancel leaves only the header record (llh_count == 1) with
 * the log fully written out (lgh_last_idx at the end of the bitmap),
 * the whole log is destroyed instead.
 *
 * Returns 0 on success; LBUGs if the index was already clear. */
int llog_cancel_rec(struct llog_handle *loghandle, int index)
{
        struct llog_log_hdr *llh = loghandle->lgh_hdr;
        int rc = 0;
        ENTRY;

        CDEBUG(D_HA, "canceling %d in log "LPX64"\n",
               index, loghandle->lgh_id.lgl_oid);

        /* ext2_clear_bit returns the previous bit value; 0 means the
         * record was not marked live, which should be impossible here */
        if (!ext2_clear_bit(index, llh->llh_bitmap)) {
                CERROR("catalog index %u already clear?\n", index);
                LBUG();
        }

        llh->llh_count--;

        if (llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY &&
            llh->llh_count == 1 &&
            loghandle->lgh_last_idx == LLOG_BITMAP_BYTES * 8) {
                rc = llog_destroy(loghandle);
                if (rc)
                        CERROR("failure destroying log after last cancel: %d\n",
                               rc);
                LASSERT(rc == 0);
                RETURN(rc);
        }

        /* otherwise just rewrite the modified header in place */
        rc = llog_write_rec(loghandle, &llh->llh_hdr, NULL, 0, NULL, 0);
        if (rc)
                CERROR("failure re-writing header %d\n", rc);
        LASSERT(rc == 0);
        RETURN(rc);
}
EXPORT_SYMBOL(llog_cancel_rec);
+
/* Allocate the in-memory header for @handle, then either read the
 * existing on-disk header or (for a still-empty log, signalled by
 * LLOG_EEMPTY from llog_read_header) initialize a fresh header with
 * @flags and @uuid.  Also initializes the catalog/plain-specific list
 * member.  Returns 0 or a negative error; the header is freed again on
 * failure. */
int llog_init_handle(struct llog_handle *handle, int flags,
                     struct obd_uuid *uuid)
{
        int rc;
        struct llog_log_hdr *llh;
        ENTRY;
        LASSERT(handle->lgh_hdr == NULL);

        OBD_ALLOC(llh, sizeof(*llh));
        if (llh == NULL)
                RETURN(-ENOMEM);

        handle->lgh_hdr = llh;
        rc = llog_read_header(handle);
        if (rc == 0) {
                /* existing log: it must match what the caller expects */
                LASSERT(llh->llh_flags == flags);
                LASSERT(obd_uuid_equals(uuid, &llh->llh_tgtuuid));
                RETURN(0);
        } else if (rc != LLOG_EEMPTY) {
                GOTO(out, rc);
        }
        rc = 0;

        /* empty log: build a brand new header record */
        llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
        llh->llh_hdr.lrh_len = llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
        llh->llh_hdr.lrh_index = llh->llh_tail.lrt_index = 0;
        llh->llh_timestamp = LTIME_S(CURRENT_TIME);
        llh->llh_flags = flags;
        memcpy(&llh->llh_tgtuuid, uuid, sizeof(llh->llh_tgtuuid));
        llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);

        if (flags & LLOG_F_IS_CAT)
                INIT_LIST_HEAD(&handle->u.chd.chd_head);
        else if (llh->llh_flags & LLOG_F_IS_PLAIN)
                INIT_LIST_HEAD(&handle->u.phd.phd_entry);
        else
                LBUG();
 out:
        /* NOTE(review): on failure llh is freed but handle->lgh_hdr is
         * left pointing at it -- callers must not reuse the handle's
         * header after an error; consider NULLing lgh_hdr here */
        if (rc)
                OBD_FREE(llh, sizeof(*llh));
        return(rc);
}
EXPORT_SYMBOL(llog_init_handle);
+
+int llog_process_log(struct llog_handle *loghandle, llog_cb_t cb, void *data)
+{
+ struct llog_log_hdr *llh = loghandle->lgh_hdr;
+ void *buf;
+ __u64 cur_offset = LLOG_CHUNK_SIZE;
+ int rc = 0, index = 0;
+ ENTRY;
+
+ OBD_ALLOC(buf, PAGE_SIZE);
+ if (!buf)
+ RETURN(-ENOMEM);
+
+ while (rc == 0) {
+ struct llog_rec_hdr *rec;
+
+ /* there is likely a more efficient way than this */
+ while (index < LLOG_BITMAP_BYTES * 8 &&
+ !ext2_test_bit(index, llh->llh_bitmap))
+ ++index;
+
+ if (index >= LLOG_BITMAP_BYTES * 8)
+ break;
+
+ rc = llog_next_block(loghandle, 0, index,
+ &cur_offset, buf, PAGE_SIZE);
+ if (rc)
+ RETURN(rc);
+
+ rec = buf;
+
+ /* skip records in buffer until we are at the one we want */
+ while (rec->lrh_index < index) {
+ if (rec->lrh_index == 0)
+ RETURN(0); /* no more records */
+
+ cur_offset += rec->lrh_len;
+ rec = ((void *)rec + rec->lrh_len);
+
+ if ((void *)rec > buf + PAGE_SIZE) {
+ CERROR("log index %u not in log @ "LPU64"\n",
+ index, cur_offset);
+ LBUG(); /* record not in this buffer? */
+ }
+
+ rc = cb(loghandle, rec, data);
+ ++index;
+ }
+ }
+
+ RETURN(rc);
+}
+EXPORT_SYMBOL(llog_process_log);
+
+#if 0
/* (Compiled out under #if 0.)
 * Cancel llog records on a filter device: wraps llog_cancel_records()
 * in a push/pop of the filter's kernel context. */
int filter_log_cancel(struct obd_export *exp, struct lov_stripe_md *lsm,
                      int num_cookies, struct llog_cookie *logcookies,
                      int flags)
{
        struct obd_device *obd = exp->exp_obd;
        struct obd_run_ctxt saved;
        int rc;
        ENTRY;

        push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
        rc = llog_cancel_records(obd->u.filter.fo_catalog, num_cookies,
                                 logcookies);
        pop_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);

        RETURN(rc);
}
+
/* (Compiled out under #if 0.)
 * Write the initial header record for an empty log of fixed record
 * @size.  Refuses with -EBUSY if the file already has content. */
int llog_write_header(struct llog_handle *loghandle, int size)
{
        struct llog_log_hdr *llh;
        int rc;
        ENTRY;
        LASSERT(sizeof(*llh) == LLOG_CHUNK_SIZE);

        if (loghandle->lgh_file->f_dentry->d_inode->i_size)
                RETURN(-EBUSY);

        llh = loghandle->lgh_hdr;
        llh->llh_size = size;
        llh->llh_hdr.lrh_type = LLOG_OBJECT_MAGIC;
        llh->llh_hdr.lrh_len = llh->llh_tail.lrt_len = sizeof(*llh);
        llh->llh_timestamp = LTIME_S(CURRENT_TIME);
        llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);

        /* write the header record in the log */
        rc = llog_write_rec(loghandle, &llh->llh_hdr, NULL, 0, NULL, 0);
        if (rc > 0)
                rc = 0;
        RETURN(rc);
}
+#endif
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * Copyright (C) 2001-2003 Cluster File Systems, Inc.
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ *
+ * This file is part of Lustre, http://www.lustre.org.
+ *
+ * Lustre is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Lustre is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Lustre; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * OST<->MDS recovery logging infrastructure.
+ *
+ * Invariants in implementation:
+ * - we do not share logs among different OST<->MDS connections, so that
+ * if an OST or MDS fails it need only look at log(s) relevant to itself
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+#ifndef EXPORT_SYMTAB
+#define EXPORT_SYMTAB
+#endif
+
+#include <linux/fs.h>
+#include <linux/obd_class.h>
+#include <linux/lustre_log.h>
+#include <portals/list.h>
+
+/* Create a new log handle and add it to the open list.
+ * This log handle will be closed when all of the records in it are removed.
+ *
+ * Assumes caller has already pushed us into the kernel context and is locking.
+ */
+static struct llog_handle *llog_cat_new_log(struct llog_handle *cathandle)
+{
+ struct llog_handle *loghandle;
+ struct llog_log_hdr *llh;
+ struct llog_logid_rec rec;
+ int rc, index, bitmap_size, i;
+ ENTRY;
+
+ /* does this need a tgt uuid */
+ rc = llog_create(cathandle->lgh_obd, &loghandle, NULL, NULL);
+ if (rc)
+ RETURN(ERR_PTR(rc));
+
+ rc = llog_init_handle(loghandle,
+ LLOG_F_IS_PLAIN | LLOG_F_ZAP_WHEN_EMPTY,
+ &cathandle->lgh_hdr->llh_tgtuuid);
+ if (rc)
+ GOTO(out_destroy, rc);
+
+ /* Find first free entry */
+ llh = cathandle->lgh_hdr;
+ bitmap_size = sizeof(llh->llh_bitmap) * 8;
+ for (i = 0, index = llh->llh_count; i < bitmap_size; i++, index++) {
+ index %= bitmap_size;
+ if (ext2_set_bit(index, llh->llh_bitmap)) {
+ /* XXX This should trigger log clean up or similar */
+ CERROR("catalog index %d is still in use\n", index);
+ } else {
+ llh->llh_count = (index + 1) % bitmap_size;
+ break;
+ }
+ }
+ if (i == bitmap_size) {
+ CERROR("no free catalog slots for log...\n");
+ GOTO(out_destroy, rc = -ENOSPC);
+ }
+
+ CDEBUG(D_HA, "new recovery log "LPX64": catalog index %u\n",
+ loghandle->lgh_id.lgl_oid, index);
+
+ /* build the record for this log in the catalog */
+ rec.lid_hdr.lrh_len = sizeof(rec);
+ rec.lid_hdr.lrh_index = index;
+ rec.lid_hdr.lrh_type = LLOG_HDR_MAGIC;
+ rec.lid_id = loghandle->lgh_id;
+ rec.lid_tail.lrt_len = sizeof(rec);
+ rec.lid_tail.lrt_index = index;
+
+ /* update the catalog: header and record */
+ rc = llog_write_rec(cathandle, &rec.lid_hdr,
+ &loghandle->u.phd.phd_cookie, 1, NULL, index);
+ if (rc < 0) {
+ GOTO(out_destroy, rc);
+ }
+
+ cathandle->u.chd.chd_current_log = loghandle;
+ list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
+
+ out_destroy:
+ llog_destroy(loghandle);
+
+ RETURN(loghandle);
+}
+EXPORT_SYMBOL(llog_cat_new_log);
+
+/* Assumes caller has already pushed us into the kernel context and is locking.
+ * We return a lock on the handle to ensure nobody yanks it from us.
+ */
+int llog_cat_id2handle(struct llog_handle *cathandle, struct llog_handle **res,
+ struct llog_logid *logid)
+{
+ struct llog_handle *loghandle;
+ int rc = 0;
+ ENTRY;
+
+ if (cathandle == NULL)
+ RETURN(-EBADF);
+
+ list_for_each_entry(loghandle, &cathandle->u.chd.chd_head, u.phd.phd_entry) {
+ struct llog_logid *cgl = &loghandle->lgh_id;
+ if (cgl->lgl_oid == logid->lgl_oid) {
+ if (cgl->lgl_ogen != logid->lgl_ogen) {
+ CERROR("log "LPX64" generation %x != %x\n",
+ logid->lgl_oid, cgl->lgl_ogen,
+ logid->lgl_ogen);
+ continue;
+ }
+ GOTO(out, rc = 0);
+ }
+ }
+
+ rc = llog_create(cathandle->lgh_obd, &loghandle, logid, NULL);
+ if (rc) {
+ CERROR("error opening log id "LPX64":%x: rc %d\n",
+ logid->lgl_oid, logid->lgl_ogen, rc);
+ } else {
+ list_add(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
+ }
+
+out:
+ *res = loghandle;
+ RETURN(rc);
+}
+
/* Release a catalog: close every plain log attached to it, then the
 * catalog handle itself.  The _safe iterator is used because closing a
 * log is expected to unlink/free its entry (llog_free_handle does
 * list_del_init for plain logs). */
void llog_cat_put(struct llog_handle *cathandle)
{
        struct llog_handle *loghandle, *n;
        ENTRY;

        list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
                                 u.phd.phd_entry)
                llog_close(loghandle);
        llog_close(cathandle);
        EXIT;
}
EXPORT_SYMBOL(llog_cat_put);
+
/* Return the currently active log handle.  If there is no current log,
 * or it is full (lgh_last_idx has reached the end of the header
 * bitmap), a new one is started via llog_cat_new_log() -- but only when
 * @create is set; otherwise whatever is current (possibly NULL or full)
 * is returned as-is.
 *
 * May return NULL or an ERR_PTR from llog_cat_new_log(); callers must
 * check both.
 *
 * Assumes caller has already pushed us into the kernel context and is
 * holding the catalog lock. */
static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
                                                int create)
{
        struct llog_handle *loghandle = NULL;
        ENTRY;

        loghandle = cathandle->u.chd.chd_current_log;
        if (loghandle) {
                struct llog_log_hdr *llh = loghandle->lgh_hdr;
                /* still room in the bitmap => the log is still usable */
                if (loghandle->lgh_last_idx < sizeof(llh->llh_bitmap) * 8)
                        RETURN(loghandle);
        }

        if (create)
                loghandle = llog_cat_new_log(cathandle);
        RETURN(loghandle);
}
+
/* Add a single record to the recovery log(s) using a catalog.
 * Returns as llog_write_rec: negative on error, 0/1 on success
 * (1 when @reccookie was filled in).
 *
 * Lock order: the catalog lock is held only long enough to pick (and
 * lock) the current plain log, then dropped before the actual write.
 *
 * Assumes caller has already pushed us into the kernel context. */
int llog_cat_add_rec(struct llog_handle *cathandle, struct llog_rec_hdr *rec,
                     struct llog_cookie *reccookie, void *buf)
{
        struct llog_handle *loghandle;
        int rc;
        ENTRY;

        LASSERT(rec->lrh_len <= LLOG_CHUNK_SIZE);
        down(&cathandle->lgh_lock);
        loghandle = llog_cat_current_log(cathandle, 1);
        /* NOTE(review): llog_cat_current_log() can also return NULL,
         * which IS_ERR() does not catch -- verify that cannot happen
         * with create == 1 */
        if (IS_ERR(loghandle)) {
                up(&cathandle->lgh_lock);
                RETURN(PTR_ERR(loghandle));
        }
        down(&loghandle->lgh_lock);
        up(&cathandle->lgh_lock);

        rc = llog_write_rec(loghandle, rec, reccookie, 1, buf, -1);

        up(&loghandle->lgh_lock);
        RETURN(rc);
}
EXPORT_SYMBOL(llog_cat_add_rec);
+
+/* For each cookie in the cookie array, we clear the log in-use bit and either:
+ * - the log is empty, so mark it free in the catalog header and delete it
+ * - the log is not empty, just write out the log header
+ *
+ * The cookies may be in different log files, so we need to get new logs
+ * each time.
+ *
+ * Assumes caller has already pushed us into the kernel context.
+ */
+int llog_cat_cancel_records(struct llog_handle *cathandle, int count,
+ struct llog_cookie *cookies)
+{
+ int i, rc = 0;
+ ENTRY;
+
+ down(&cathandle->lgh_lock);
+ for (i = 0; i < count; i++, cookies++) {
+ struct llog_handle *loghandle;
+ struct llog_log_hdr *llh;
+ struct llog_logid *lgl = &cookies->lgc_lgl;
+ int res;
+
+ rc = llog_cat_id2handle(cathandle, &loghandle, lgl);
+ if (res) {
+ CERROR("Cannot find log "LPX64"\n", lgl->lgl_oid);
+ break;
+ }
+
+ down(&loghandle->lgh_lock);
+ llh = loghandle->lgh_hdr;
+ CDEBUG(D_HA, "cancelling "LPX64" index %u: %u\n",
+ lgl->lgl_oid, cookies->lgc_index,
+ ext2_test_bit(cookies->lgc_index, llh->llh_bitmap));
+ if (!ext2_clear_bit(cookies->lgc_index, llh->llh_bitmap)) {
+ CERROR("log index %u in "LPX64":%x already clear?\n",
+ cookies->lgc_index, lgl->lgl_oid, lgl->lgl_ogen);
+ } else if (--llh->llh_count == 1 &&
+ loghandle != llog_cat_current_log(cathandle, 0)) {
+ rc = llog_close(loghandle);
+ } else {
+ int ret = llog_write_rec(loghandle, &llh->llh_hdr,
+ NULL, 0, NULL, 0);
+ if (ret != 0) {
+ CERROR("error cancelling index %u: rc %d\n",
+ cookies->lgc_index, ret);
+ /* XXX mark handle bad? */
+ if (!rc)
+ rc = ret;
+ }
+ }
+ up(&loghandle->lgh_lock);
+ }
+ up(&cathandle->lgh_lock);
+
+ RETURN(rc);
+}
+EXPORT_SYMBOL(llog_cat_cancel_records);
+
+#if 0
/* (Compiled out under #if 0.)
 * Assumes caller has already pushed us into the kernel context.
 * Initializes a catalog: writes a fresh header when the file is empty,
 * otherwise reads the existing one (falling back to rewriting it if
 * the read fails). */
int llog_cat_init(struct llog_handle *cathandle, struct obd_uuid *tgtuuid)
{
        struct llog_log_hdr *llh;
        loff_t offset = 0;
        int rc = 0;
        ENTRY;

        LASSERT(sizeof(*llh) == LLOG_CHUNK_SIZE);

        down(&cathandle->lgh_lock);
        llh = cathandle->lgh_hdr;

        if (cathandle->lgh_file->f_dentry->d_inode->i_size == 0) {
                llog_write_rec(cathandle, &llh->llh_hdr, NULL, 0, NULL, 0);

write_hdr:
                rc = lustre_fwrite(cathandle->lgh_file, llh, LLOG_CHUNK_SIZE,
                                   &offset);
                if (rc != LLOG_CHUNK_SIZE) {
                        CERROR("error writing catalog header: rc %d\n", rc);
                        /* NOTE(review): llh is freed here but
                         * cathandle->lgh_hdr still points at it and is
                         * dereferenced below -- verify before re-enabling
                         * this code */
                        OBD_FREE(llh, sizeof(*llh));
                        if (rc >= 0)
                                rc = -ENOSPC;
                } else
                        rc = 0;
        } else {
                rc = lustre_fread(cathandle->lgh_file, llh, LLOG_CHUNK_SIZE,
                                  &offset);
                if (rc != LLOG_CHUNK_SIZE) {
                        CERROR("error reading catalog header: rc %d\n", rc);
                        /* Can we do much else if the header is bad? */
                        goto write_hdr;
                } else
                        rc = 0;
        }

        cathandle->lgh_tgtuuid = &llh->llh_tgtuuid;
        up(&cathandle->lgh_lock);
        RETURN(rc);
}
EXPORT_SYMBOL(llog_cat_init);
+
+#endif
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * Copyright (C) 2001-2003 Cluster File Systems, Inc.
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ *
+ * This file is part of Lustre, http://www.lustre.org.
+ *
+ * Lustre is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Lustre is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Lustre; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * OST<->MDS recovery logging infrastructure.
+ *
+ * Invariants in implementation:
+ * - we do not share logs among different OST<->MDS connections, so that
+ * if an OST or MDS fails it need only look at log(s) relevant to itself
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+#ifndef EXPORT_SYMTAB
+#define EXPORT_SYMTAB
+#endif
+
+#include <linux/fs.h>
+#include <linux/obd_class.h>
+#include <linux/lustre_log.h>
+#include <portals/list.h>
+#include <linux/lvfs.h>
+
/* Write a padding record of @len total bytes at the current f_pos so
 * that real records never straddle an LLOG_CHUNK_SIZE boundary.  The
 * pad is a record header (lrh_type == 0 marks it as padding), a gap,
 * and a record tail.  @len must be >= LLOG_MIN_REC_SIZE and 16-byte
 * aligned.  Returns 0, a negative write error, or -EIO on short write. */
static int llog_lvfs_pad(struct l_file *file, int len, int index)
{
        struct llog_rec_hdr rec;
        struct llog_rec_tail tail;
        int rc;
        ENTRY;

        LASSERT(len >= LLOG_MIN_REC_SIZE && (len & 0xf) == 0);

        tail.lrt_len = rec.lrh_len = len;
        tail.lrt_index = rec.lrh_index = index;
        rec.lrh_type = 0;

        rc = lustre_fwrite(file, &rec, sizeof(rec), &file->f_pos);
        if (rc != sizeof(rec)) {
                CERROR("error writing padding record: rc %d\n", rc);
                GOTO(out, rc < 0 ? rc : rc = -EIO);
        }

        /* skip over the pad body; only header and tail are written */
        file->f_pos += len - sizeof(rec) - sizeof(tail);
        rc = lustre_fwrite(file, &tail, sizeof(tail), &file->f_pos);
        if (rc != sizeof(tail)) {
                CERROR("error writing padding record: rc %d\n", rc);
                GOTO(out, rc < 0 ? rc : rc = -EIO);
        }
        rc = 0;
 out:
        RETURN(rc);
}
+
/* Write one log record at offset @off in @file.
 *
 * With @buf == NULL the record is written verbatim (rec->lrh_len bytes).
 * With @buf set, the on-disk layout is [rec header][buf, padded to a
 * multiple of the rounding unit][tail], and rec->lrh_len is REWRITTEN
 * to the total on-disk length -- callers see that side effect.
 *
 * f_pos is restored to its previous value if the write ended before it
 * (e.g. a header rewrite at offset 0 must not rewind an append stream).
 * Returns 0 or a negative error (-ENOSPC for short writes). */
static int llog_lvfs_write_blob(struct l_file *file, struct llog_rec_hdr *rec,
                                void *buf, loff_t off)
{
        int rc;
        struct llog_rec_tail end;
        loff_t saved_off = file->f_pos;
        int buflen;

        ENTRY;
        file->f_pos = off;

        if (!buf) {
                rc = lustre_fwrite(file, rec, rec->lrh_len, &file->f_pos);
                if (rc != rec->lrh_len) {
                        CERROR("error writing log record: rc %d\n", rc);
                        GOTO(out, rc < 0 ? rc : rc = -ENOSPC);
                }
                GOTO(out, rc = 0);
        }

        /* the buf case: header + rounded payload + tail */
        buflen = rec->lrh_len;
        rec->lrh_len = sizeof(*rec) + size_round(buflen) + sizeof(end);
        rc = lustre_fwrite(file, rec, sizeof(*rec), &file->f_pos);
        if (rc != sizeof(*rec)) {
                CERROR("error writing log hdr: rc %d\n", rc);
                GOTO(out, rc < 0 ? rc : rc = -ENOSPC);
        }

        rc = lustre_fwrite(file, buf, buflen, &file->f_pos);
        if (rc != buflen) {
                CERROR("error writing log buffer: rc %d\n", rc);
                GOTO(out, rc < 0 ? rc : rc = -ENOSPC);
        }

        /* skip the rounding gap, then write the tail */
        file->f_pos += size_round(buflen) - buflen;
        end.lrt_len = rec->lrh_len;
        end.lrt_index = rec->lrh_index;
        rc = lustre_fwrite(file, &end, sizeof(end), &file->f_pos);
        if (rc != sizeof(end)) {
                CERROR("error writing log tail: rc %d\n", rc);
                GOTO(out, rc < 0 ? rc : rc = -ENOSPC);
        }

        rc = 0;
 out:
        /* never let a rewrite at a lower offset rewind the stream */
        if (saved_off > file->f_pos)
                file->f_pos = saved_off;
        LASSERT(rc <= 0);
        RETURN(rc);
}
+
+static int llog_lvfs_read_blob(struct l_file *file, void *buf, int size,
+ loff_t off)
+{
+ loff_t offset = off;
+ int rc;
+ ENTRY;
+
+ rc = lustre_fread(file, buf, size, &offset);
+ if (rc != size) {
+ CERROR("error reading log record: rc %d\n", rc);
+ RETURN(-EIO);
+ }
+ RETURN(0);
+}
+
+static int llog_lvfs_read_header(struct llog_handle *handle)
+{
+ struct llog_rec_tail tail;
+ int rc;
+ ENTRY;
+
+ LASSERT(sizeof(*handle->lgh_hdr) == LLOG_CHUNK_SIZE);
+
+ if (handle->lgh_file->f_dentry->d_inode->i_size == 0) {
+ CERROR("not reading header from 0-byte log\n");
+ RETURN(LLOG_EEMPTY);
+ }
+
+ rc = llog_lvfs_read_blob(handle->lgh_file, handle->lgh_hdr,
+ LLOG_CHUNK_SIZE, 0);
+ if (rc)
+ CERROR("error reading log header\n");
+
+ rc = llog_lvfs_read_blob(handle->lgh_file, &tail, sizeof(tail),
+ handle->lgh_file->f_dentry->d_inode->i_size -
+ sizeof(tail));
+ if (rc)
+ CERROR("error reading log tail\n");
+
+ handle->lgh_last_idx = tail.lrt_index;
+
+ RETURN(rc);
+}
+
+/* returns negative in on error; 0 if success && reccookie == 0; 1 otherwise */
+/* appends if idx == -1, otherwise overwrites record idx. */
+static int llog_lvfs_write_rec(struct llog_handle *loghandle,
+ struct llog_rec_hdr *rec,
+ struct llog_cookie *reccookie, int cookiecount,
+ void *buf, int idx)
+{
+ struct llog_log_hdr *llh;
+ int reclen = rec->lrh_len, index, rc;
+ struct file *file;
+ loff_t offset;
+ size_t left;
+ ENTRY;
+
+ llh = loghandle->lgh_hdr;
+ file = loghandle->lgh_file;
+
+ if (idx != -1) {
+ loff_t saved_offset;
+
+ /* no header: only allowed to insert record 0 */
+ if (idx != 0 && !file->f_dentry->d_inode->i_size) {
+ CERROR("idx != -1 in empty log\n");
+ LBUG();
+ }
+
+ if (!loghandle->lgh_hdr->llh_size != rec->lrh_len)
+ RETURN(-EINVAL);
+
+ rc = llog_lvfs_write_blob(file, &llh->llh_hdr, NULL, 0);
+ /* we are done if we only write the header or on error */
+ if (rc || idx == 0)
+ RETURN(rc);
+
+ saved_offset = sizeof(*llh) + idx * rec->lrh_len;
+ rc = llog_lvfs_write_blob(file, rec, buf, saved_offset);
+ if (rc)
+ RETURN(rc);
+ }
+
+ /* Make sure that records don't cross a chunk boundary, so we can
+ * process them page-at-a-time if needed. If it will cross a chunk
+ * boundary, write in a fake (but referenced) entry to pad the chunk.
+ *
+ * We know that llog_current_log() will return a loghandle that is
+ * big enough to hold reclen, so all we care about is padding here.
+ */
+ left = LLOG_CHUNK_SIZE - (file->f_pos & (LLOG_CHUNK_SIZE - 1));
+ if (buf)
+ reclen = sizeof(*rec) + size_round(rec->lrh_len) +
+ sizeof(struct llog_rec_tail);
+
+ if (left != 0 && left < reclen) {
+ loghandle->lgh_last_idx++;
+ rc = llog_lvfs_pad(file, left, loghandle->lgh_last_idx);
+ if (rc)
+ RETURN(rc);
+ }
+
+ index = loghandle->lgh_last_idx++;
+ rec->lrh_index = index;
+ if (ext2_set_bit(index, llh->llh_bitmap)) {
+ CERROR("argh, index %u already set in log bitmap?\n", index);
+ LBUG(); /* should never happen */
+ }
+ llh->llh_count++;
+
+ offset = 0;
+ rc = llog_lvfs_write_blob(file, &llh->llh_hdr, NULL, 0);
+ if (rc)
+ RETURN(rc);
+
+ rc = llog_lvfs_write_blob(file, rec, buf, file->f_pos);
+ if (rc)
+ RETURN(rc);
+
+ CDEBUG(D_HA, "added record "LPX64": idx: %u, %u bytes\n",
+ loghandle->lgh_id.lgl_oid, index, rec->lrh_len);
+ if (rc == 0 && reccookie) {
+ reccookie->lgc_lgl = loghandle->lgh_id;
+ reccookie->lgc_index = index;
+ rc = 1;
+ }
+ RETURN(rc);
+}
+
/* Starting from (@cur_idx, *@cur_offset), locate the chunk-aligned
 * block containing record @next_idx and read @len bytes of it into
 * @buf.  @len must be a non-zero multiple of LLOG_CHUNK_SIZE.
 *
 * Returns 0 when the block was read (or the log ended), -ENOENT if the
 * record cannot be located, or a negative read error.  *cur_offset is
 * advanced as the scan proceeds. */
static int llog_lvfs_next_block(struct llog_handle *loghandle, int cur_idx,
                                int next_idx, __u64 *cur_offset, void *buf,
                                int len)
{
        int rc;
        ENTRY;

        if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
                RETURN(-EINVAL);

        CDEBUG(D_OTHER, "looking for log index %u (cur idx %u off "LPU64"\n",
               next_idx, cur_idx, *cur_offset);

        /* We can skip reading at least as many log blocks as the number of
         * minimum sized log records we are skipping. If it turns out that we
         * are not far enough along the log (because the actual records are
         * larger than minimum size) we just skip some more records. */
        while ((*cur_offset = (*cur_offset +
                               (next_idx - cur_idx) * LLOG_MIN_REC_SIZE) &
                              ~(LLOG_CHUNK_SIZE - 1)) <
               loghandle->lgh_file->f_dentry->d_inode->i_size) {
                struct llog_rec_hdr *rec;

                rc = lustre_fread(loghandle->lgh_file, buf, LLOG_CHUNK_SIZE,
                                  cur_offset);
                if (rc)
                        RETURN(rc);

                rec = buf;
                /* sanity check that the start of the new buffer is no farther
                 * than the record that we wanted. This shouldn't happen. */
                if (rec->lrh_index > next_idx) {
                        CERROR("missed desired record? %u > %u\n",
                               rec->lrh_index, next_idx);
                        RETURN(-ENOENT);
                }

                /* Check if last record in this buffer is higher than what we
                 * are looking for, or is zero (implying that this is the last
                 * buffer in the log). In conjunction with the previous test,
                 * this means that the record we are looking for is in the
                 * current buffer, or the client asked for a record beyond the
                 * end of the log, which is the client's problem. */
                rec = buf + LLOG_CHUNK_SIZE - sizeof(__u32);
                if (rec->lrh_index == 0)
                        RETURN(0);

                cur_idx = rec->lrh_index;
                if (cur_idx >= next_idx) {
                        /* found it: fill the rest of the caller's buffer
                         * with the following chunks */
                        while (rc == 0 && (len -= LLOG_CHUNK_SIZE) > 0) {
                                buf += LLOG_CHUNK_SIZE;
                                *cur_offset += LLOG_CHUNK_SIZE;

                                rc = lustre_fread(loghandle->lgh_file,
                                                  buf, LLOG_CHUNK_SIZE,
                                                  cur_offset);
                        }

                        RETURN(rc);
                }
        }

        RETURN(-ENOENT);
}
+
+/* This is a callback from the llog_* functions.
+ * Assumes caller has already pushed us into the kernel context. */
+static int llog_lvfs_create(struct obd_device *obd, struct llog_handle **res,
+ struct llog_logid *logid, char *name)
+{
+ char logname[24];
+ struct llog_handle *handle;
+ struct l_dentry *dchild;
+ struct obdo *oa = NULL;
+ int rc = 0, cleanup_phase = 1;
+ int open_flags = O_RDWR | O_CREAT | O_LARGEFILE;
+ ENTRY;
+
+ handle = llog_alloc_handle();
+ if (handle == NULL)
+ RETURN(-ENOMEM);
+ *res = handle;
+
+ if (logid != NULL) {
+ dchild = obd_lvfs_fid2dentry(obd->obd_log_exp, logid->lgl_oid,
+ logid->lgl_ogr);
+ if (IS_ERR(dchild)) {
+ rc = PTR_ERR(dchild);
+ CERROR("error looking up log file "LPX64":"LPX64
+ ": rc %d\n",
+ logid->lgl_oid, logid->lgl_ogr, rc);
+ GOTO(cleanup, rc);
+ }
+
+ cleanup_phase = 2;
+ if (dchild->d_inode == NULL) {
+ rc = -ENOENT;
+ CERROR("nonexistent log file "LPX64":"LPX64": rc %d\n",
+ logid->lgl_oid, logid->lgl_ogr, rc);
+ GOTO(cleanup, rc);
+ }
+
+ handle->lgh_file = l_dentry_open(&obd->obd_ctxt, dchild,
+ O_RDWR | O_LARGEFILE);
+ if (IS_ERR(handle->lgh_file)) {
+ rc = PTR_ERR(handle->lgh_file);
+ CERROR("error opening logfile "LPX64":"LPX64": rc %d\n",
+ logid->lgl_oid, logid->lgl_ogr, rc);
+ GOTO(cleanup, rc);
+ }
+ } else if (name) {
+ LASSERT(strlen(name) <= 18);
+ sprintf(logname, "LOGS/%s", name);
+
+ handle->lgh_file = l_filp_open(logname, open_flags, 0644);
+ if (IS_ERR(handle->lgh_file)) {
+ rc = PTR_ERR(handle->lgh_file);
+ CERROR("logfile creation %s: %d\n", logname, rc);
+ GOTO(cleanup, rc);
+ }
+ handle->lgh_id.lgl_oid =
+ handle->lgh_file->f_dentry->d_inode->i_ino;
+ handle->lgh_id.lgl_ogen =
+ handle->lgh_file->f_dentry->d_inode->i_generation;
+ } else {
+ oa = obdo_alloc();
+ if (oa == NULL)
+ GOTO(cleanup, rc = -ENOMEM);
+ /* XXX */
+ oa->o_gr = 1;
+ oa->o_valid = OBD_MD_FLGROUP;
+ rc = obd_create(obd->obd_log_exp, oa, NULL, NULL);
+ if (rc)
+ GOTO(cleanup, rc);
+ dchild = obd_lvfs_fid2dentry(obd->obd_log_exp, oa->o_id,
+ oa->o_gr);
+ if (IS_ERR(dchild))
+ GOTO(cleanup, rc = PTR_ERR(dchild));
+ cleanup_phase = 2;
+ handle->lgh_file = l_dentry_open(&obd->obd_ctxt, dchild,
+ open_flags);
+ if (IS_ERR(handle->lgh_file))
+ GOTO(cleanup, rc = PTR_ERR(handle->lgh_file));
+ handle->lgh_id.lgl_oid = oa->o_id;
+ handle->lgh_id.lgl_ogr = oa->o_gr;
+ }
+
+ handle->lgh_obd = obd;
+ RETURN(rc);
+cleanup:
+ switch (cleanup_phase) {
+ case 2:
+ l_dput(dchild);
+ case 1:
+ llog_free_handle(handle);
+ }
+ obdo_free(oa);
+ return rc;
+}
+
+/* Close the file backing this log and release the handle itself.
+ * Returns the filp_close() result; the handle is freed regardless. */
+static int llog_lvfs_close(struct llog_handle *handle)
+{
+        int err;
+        ENTRY;
+
+        err = filp_close(handle->lgh_file, 0);
+        if (err != 0)
+                CERROR("error closing log: rc %d\n", err);
+
+        llog_free_handle(handle);
+        RETURN(err);
+}
+
+/* Destroy the on-disk object backing this log.
+ *
+ * The object id/generation (and the owning obd) must be snapshotted into
+ * locals BEFORE closing: llog_lvfs_close() frees @handle via
+ * llog_free_handle(), so no field of @handle may be touched afterwards.
+ * The previous code dereferenced handle->lgh_obd after the close -- a
+ * use-after-free. */
+static int llog_lvfs_destroy(struct llog_handle *handle)
+{
+        struct obd_device *obd = handle->lgh_obd;
+        struct obdo *oa;
+        int rc;
+        ENTRY;
+
+        oa = obdo_alloc();
+        if (oa == NULL)
+                RETURN(-ENOMEM);
+
+        oa->o_id = handle->lgh_id.lgl_oid;
+        oa->o_gr = handle->lgh_id.lgl_ogr;
+        oa->o_generation = handle->lgh_id.lgl_ogen;
+        oa->o_valid = OBD_MD_FLGROUP | OBD_MD_FLGENER;
+
+        rc = llog_lvfs_close(handle);   /* frees @handle */
+        if (rc)
+                GOTO(out, rc);
+
+        rc = obd_destroy(obd->obd_log_exp, oa, NULL, NULL);
+ out:
+        obdo_free(oa);
+        RETURN(rc);
+}
+
+#if 0
+/* This is a callback from the llog_* functions.
+ * Assumes caller has already pushed us into the kernel context. */
+/* Close @loghandle belonging to catalog @cathandle; if the (non-catalog)
+ * log has become empty, also unlink its backing file from mds_logs_dir.
+ * Returns 0 or the first error from filp_close()/vfs_unlink(). */
+int mds_log_close(struct llog_handle *cathandle, struct llog_handle *loghandle)
+{
+        struct llog_log_hdr *llh = loghandle->lgh_hdr;
+        struct mds_obd *mds = &cathandle->lgh_obd->u.mds;
+        struct dentry *dchild = NULL;
+        int rc;
+        ENTRY;
+
+        /* If we are going to delete this log, grab a ref before we close
+         * it so we don't have to immediately do another lookup.
+         */
+        if (llh->llh_hdr.lrh_type != LLOG_CATALOG_MAGIC && llh->llh_count == 0){
+                CDEBUG(D_INODE, "deleting log file "LPX64":%x\n",
+                       loghandle->lgh_cookie.lgc_lgl.lgl_oid,
+                       loghandle->lgh_cookie.lgc_lgl.lgl_ogen);
+                /* i_sem taken here is released only in the dchild != NULL
+                 * branch below, after the unlink. */
+                down(&mds->mds_logs_dir->d_inode->i_sem);
+                dchild = dget(loghandle->lgh_file->f_dentry);
+                llog_delete_log(cathandle, loghandle);
+        } else {
+                CDEBUG(D_INODE, "closing log file "LPX64":%x\n",
+                       loghandle->lgh_cookie.lgc_lgl.lgl_oid,
+                       loghandle->lgh_cookie.lgc_lgl.lgl_ogen);
+        }
+
+        rc = filp_close(loghandle->lgh_file, 0);
+
+        llog_free_handle(loghandle); /* also removes loghandle from list */
+
+        if (dchild) {
+                /* Unlink the now-empty log; report the error but do not let
+                 * it override an earlier filp_close() failure. */
+                int err = vfs_unlink(mds->mds_logs_dir->d_inode, dchild);
+                if (err) {
+                        CERROR("error unlinking empty log %*s: rc %d\n",
+                               dchild->d_name.len, dchild->d_name.name, err);
+                        if (!rc)
+                                rc = err;
+                }
+                l_dput(dchild);
+                up(&mds->mds_logs_dir->d_inode->i_sem);
+        }
+        RETURN(rc);
+}
+
+/* This is a callback from the llog_* functions.
+ * Assumes caller has already pushed us into the kernel context. */
+/* Open an existing MDS log file identified by @logcookie.
+ * Returns a freshly allocated llog_handle, or ERR_PTR on failure. */
+struct llog_handle *mds_log_open(struct obd_device *obd,
+                                 struct llog_cookie *logcookie)
+{
+        struct ll_fid fid = { .id = logcookie->lgc_lgl.lgl_oid,
+                              .generation = logcookie->lgc_lgl.lgl_ogen,
+                              .f_type = S_IFREG };
+        struct llog_handle *loghandle;
+        struct dentry *dchild;
+        int rc;
+        ENTRY;
+
+        loghandle = llog_alloc_handle();
+        if (loghandle == NULL)
+                RETURN(ERR_PTR(-ENOMEM));
+
+        /* Serialize the lookup against concurrent create/unlink in the
+         * logs directory. */
+        down(&obd->u.mds.mds_logs_dir->d_inode->i_sem);
+        dchild = mds_fid2dentry(&obd->u.mds, &fid, NULL);
+        up(&obd->u.mds.mds_logs_dir->d_inode->i_sem);
+        if (IS_ERR(dchild)) {
+                rc = PTR_ERR(dchild);
+                CERROR("error looking up log file "LPX64":%x: rc %d\n",
+                       fid.id, fid.generation, rc);
+                GOTO(out, rc);
+        }
+
+        if (dchild->d_inode == NULL) {
+                rc = -ENOENT;
+                CERROR("nonexistent log file "LPX64":%x: rc %d\n",
+                       fid.id, fid.generation, rc);
+                GOTO(out_put, rc);
+        }
+
+        /* dentry_open does a dput(de) and mntput(mds->mds_vfsmnt) on error,
+         * which is why its failure path goes to "out" and not "out_put". */
+        mntget(obd->u.mds.mds_vfsmnt);
+        loghandle->lgh_file = dentry_open(dchild, obd->u.mds.mds_vfsmnt,
+                                          O_RDWR | O_LARGEFILE);
+        if (IS_ERR(loghandle->lgh_file)) {
+                rc = PTR_ERR(loghandle->lgh_file);
+                CERROR("error opening logfile "LPX64":%x: rc %d\n",
+                       fid.id, fid.generation, rc);
+                GOTO(out, rc);
+        }
+        memcpy(&loghandle->lgh_cookie, logcookie, sizeof(*logcookie));
+        loghandle->lgh_log_create = mds_log_create;
+        loghandle->lgh_log_open = mds_log_open;
+        loghandle->lgh_log_close = mds_log_close;
+        loghandle->lgh_obd = obd;
+
+        RETURN(loghandle);
+
+out_put:
+        l_dput(dchild);
+out:
+        llog_free_handle(loghandle);
+        return ERR_PTR(rc);
+}
+
+
+
+/* Get the MDS catalog handle: reopen the catalog recorded in the server
+ * data, or create a fresh one and persist its id.  Pushes/pops mds_ctxt
+ * internally.  Returns the handle or ERR_PTR on failure.
+ * NOTE(review): the return value of llog_init_catalog() is stored in rc
+ * but never checked -- confirm whether that error should propagate. */
+struct llog_handle *mds_get_catalog(struct obd_device *obd)
+{
+        struct mds_server_data *msd = obd->u.mds.mds_server_data;
+        struct obd_run_ctxt saved;
+        struct llog_handle *cathandle = NULL;
+        int rc = 0;
+        ENTRY;
+
+        push_ctxt(&saved, &obd->u.mds.mds_ctxt, NULL);
+
+        if (msd->msd_catalog_oid) {
+                struct llog_cookie catcookie;
+
+                catcookie.lgc_lgl.lgl_oid = le64_to_cpu(msd->msd_catalog_oid);
+                catcookie.lgc_lgl.lgl_ogen = le32_to_cpu(msd->msd_catalog_ogen);
+                cathandle = mds_log_open(obd, &catcookie);
+                if (IS_ERR(cathandle)) {
+                        CERROR("error opening catalog "LPX64":%x: rc %d\n",
+                               catcookie.lgc_lgl.lgl_oid,
+                               catcookie.lgc_lgl.lgl_ogen,
+                               (int)PTR_ERR(cathandle));
+                        /* Stale/broken cookie: zero it so a fresh catalog
+                         * gets created below. */
+                        msd->msd_catalog_oid = 0;
+                        msd->msd_catalog_ogen = 0;
+                }
+                /* ORPHANS FIXME: compare catalog UUID to msd_peeruuid */
+        }
+
+        if (!msd->msd_catalog_oid) {
+                struct llog_logid *lgl;
+
+                cathandle = mds_log_create(obd, "LOGS/catalog");
+                if (IS_ERR(cathandle)) {
+                        CERROR("error creating new catalog: rc %d\n",
+                               (int)PTR_ERR(cathandle));
+                        GOTO(out, cathandle);
+                }
+                lgl = &cathandle->lgh_cookie.lgc_lgl;
+                msd->msd_catalog_oid = cpu_to_le64(lgl->lgl_oid);
+                msd->msd_catalog_ogen = cpu_to_le32(lgl->lgl_ogen);
+                /* Persist the new catalog id before handing it out. */
+                rc = mds_update_server_data(obd, 1);
+                if (rc) {
+                        CERROR("error writing new catalog to disk: rc %d\n",rc);
+                        GOTO(out_handle, rc);
+                }
+        }
+
+        rc = llog_init_catalog(cathandle, &obd->u.mds.mds_lov_name);
+
+out:
+        pop_ctxt(&saved, &obd->u.mds.mds_ctxt, NULL);
+        RETURN(cathandle);
+
+out_handle:
+        mds_log_close(cathandle, cathandle);
+        cathandle = ERR_PTR(rc);
+        goto out;
+
+}
+
+static struct llog_handle *filter_log_create(struct obd_device *obd);
+
+/* This is a callback from the llog_* functions.
+ * Assumes caller has already pushed us into the kernel context. */
+/* Close @loghandle; if the (non-catalog) log is empty, also unlink its
+ * backing object under a PW lock on the parent.  Returns 0 or the first
+ * error from filp_close()/vfs_unlink(). */
+static int filter_log_close(struct llog_handle *cathandle,
+                            struct llog_handle *loghandle)
+{
+        struct llog_object_hdr *llh = loghandle->lgh_hdr;
+        struct file *file = loghandle->lgh_file;
+        struct dentry *dparent = NULL, *dchild = NULL;
+        struct lustre_handle parent_lockh;
+        struct llog_logid *lgl = &loghandle->lgh_cookie.lgc_lgl;
+        int rc;
+        ENTRY;
+
+        /* If we are going to delete this log, grab a ref before we close
+         * it so we don't have to immediately do another lookup. */
+        if (llh->llh_hdr.lth_type != LLOG_CATALOG_MAGIC && llh->llh_count == 0){
+                CDEBUG(D_INODE, "deleting log file "LPX64":%x\n",
+                       lgl->lgl_oid, lgl->lgl_ogen);
+                /* PW-lock the parent so the unlink below cannot race with
+                 * other users of this object. */
+                dparent = filter_parent_lock(loghandle->lgh_obd, S_IFREG,
+                                             lgl->lgl_oid,LCK_PW,&parent_lockh);
+                if (IS_ERR(dparent)) {
+                        rc = PTR_ERR(dparent);
+                        CERROR("error locking parent, orphan log %*s: rc %d\n",
+                               file->f_dentry->d_name.len,
+                               file->f_dentry->d_name.name, rc);
+                        /* NOTE(review): returns without filp_close() or
+                         * llog_free_handle() -- leaves the file open and
+                         * leaks the handle; confirm this is intended. */
+                        RETURN(rc);
+                } else {
+                        dchild = dget(file->f_dentry);
+                        llog_delete_log(cathandle, loghandle);
+                }
+        } else {
+                CDEBUG(D_INODE, "closing log file "LPX64":%x\n",
+                       lgl->lgl_oid, lgl->lgl_ogen);
+        }
+
+        rc = filp_close(file, 0);
+
+        llog_free_handle(loghandle); /* also removes loghandle from list */
+
+        if (dchild != NULL) {
+                /* Unlink the empty log; keep the first error for return. */
+                int err = vfs_unlink(dparent->d_inode, dchild);
+                if (err) {
+                        CERROR("error unlinking empty log %*s: rc %d\n",
+                               dchild->d_name.len, dchild->d_name.name, err);
+                        if (!rc)
+                                rc = err;
+                }
+                f_dput(dchild);
+                ldlm_lock_decref(&parent_lockh, LCK_PW);
+        }
+        RETURN(rc);
+}
+
+/* This is a callback from the llog_* functions.
+ * Assumes caller has already pushed us into the kernel context. */
+/* Open the filter object named by @logcookie and wrap it in a llog_handle.
+ * Validates existence and generation; returns ERR_PTR on failure.
+ *
+ * Fix: dentry_open() consumes the dentry and mount references on error
+ * (see the identical pattern in mds_log_open), so its failure path must
+ * NOT also f_dput(dchild) -- the old GOTO(out_dentry, ...) double-dropped
+ * the reference.  It now goes straight to out_handle. */
+static struct llog_handle *filter_log_open(struct obd_device *obd,
+                                           struct llog_cookie *logcookie)
+{
+        struct llog_logid *lgl = &logcookie->lgc_lgl;
+        struct llog_handle *loghandle;
+        struct dentry *dchild;
+        int rc;
+        ENTRY;
+
+        loghandle = llog_alloc_handle();
+        if (!loghandle)
+                RETURN(ERR_PTR(-ENOMEM));
+
+        dchild = filter_fid2dentry(obd, NULL, S_IFREG, lgl->lgl_oid);
+        if (IS_ERR(dchild))
+                GOTO(out_handle, rc = PTR_ERR(dchild));
+
+        if (dchild->d_inode == NULL) {
+                CERROR("logcookie references non-existent object %*s\n",
+                       dchild->d_name.len, dchild->d_name.name);
+                GOTO(out_dentry, rc = -ENOENT);
+        }
+
+        /* Object ids can be recycled; the generation check catches a
+         * cookie referring to a previous incarnation of the object. */
+        if (dchild->d_inode->i_generation != lgl->lgl_ogen) {
+                CERROR("logcookie for %*s had different generation %x != %x\n",
+                       dchild->d_name.len, dchild->d_name.name,
+                       dchild->d_inode->i_generation, lgl->lgl_ogen);
+                GOTO(out_dentry, rc = -ESTALE);
+        }
+
+        /* dentry_open does a dput(dchild) and mntput(mnt) on error */
+        mntget(obd->u.filter.fo_vfsmnt);
+        loghandle->lgh_file = dentry_open(dchild, obd->u.filter.fo_vfsmnt,
+                                          O_RDWR);
+        if (IS_ERR(loghandle->lgh_file)) {
+                rc = PTR_ERR(loghandle->lgh_file);
+                CERROR("error opening logfile %*s: rc %d\n",
+                       dchild->d_name.len, dchild->d_name.name, rc);
+                GOTO(out_handle, rc);
+        }
+        memcpy(&loghandle->lgh_cookie, logcookie, sizeof(*logcookie));
+        RETURN(loghandle);
+
+out_dentry:
+        f_dput(dchild);
+out_handle:
+        llog_free_handle(loghandle);
+        RETURN(ERR_PTR(rc));
+}
+
+#endif
+
+
+/* Method table for file-backed (lvfs) llogs.  lop_cancel is intentionally
+ * left unimplemented for now. */
+struct llog_operations llog_lvfs_ops = {
+        /* ISO C99 designated initializers instead of the obsolete GCC
+         * "field:" extension. */
+        .lop_write_rec   = llog_lvfs_write_rec,
+        .lop_next_block  = llog_lvfs_next_block,
+        .lop_read_header = llog_lvfs_read_header,
+        .lop_create      = llog_lvfs_create,
+        .lop_destroy     = llog_lvfs_destroy,
+        .lop_close       = llog_lvfs_close,
+        /* .lop_cancel   = llog_lvfs_cancel, */
+};
+
+EXPORT_SYMBOL(llog_lvfs_ops);
* If the append happened first, we should have truncated the file down.
*
* We pick the CHUNK_SIZE_MAX and APPEND_SIZE_MAX so that we cross a stripe.
+ *
+ * compile: mpicc -g -Wall -o write_append_truncate write_append_truncate.c
+ * run: mpirun -np 2 -machlist <hostlist file> write_append_truncate <file>
+ * or: pdsh -w <two hosts> write_append_truncate <file>
+ * or: prun -n 2 [-N 2] write_append_truncate <file>
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include "mpi.h"
+#define DEFAULT_ITER 50000
#define CHUNK_SIZE_MAX 123456
#define CHUNK_CHAR 'C'
void usage(char *prog)
{
printf("usage: %s <filename> [nloops]\n", prog);
- printf("%s must be run on 2 nodes\n", prog);
+ printf("%s must be run with 2 processes\n", prog);
+ MPI_Finalize();
exit(1);
}
printf(fmt, ap);
+ MPI_Finalize();
exit(1);
}
int main(int argc, char *argv[])
{
int n, nloops = 0, fd;
- int rank, ret;
+ int rank, size, ret;
int chunk_size, append_size, trunc_offset;
char append_buf[APPEND_SIZE_MAX];
char chunk_buf[CHUNK_SIZE_MAX];
if (error != MPI_SUCCESS)
rprintf(-1, -1, "MPI_Comm_rank failed: %d\n", error);
- error = MPI_Comm_size(MPI_COMM_WORLD, &n);
+ error = MPI_Comm_size(MPI_COMM_WORLD, &size);
if (error != MPI_SUCCESS)
rprintf(rank, -1, "MPI_Comm_size failed: %d\n", error);
- if (n != 2)
- rprintf(rank, -1, "%s: must run with 2 processes, not %d\n",
- prog, n);
+ if (size < 2)
+ rprintf(rank, -1, "%s: must run with at least 2 processes\n",
+ prog);
memset(append_buf, APPEND_CHAR, APPEND_SIZE_MAX);
memset(chunk_buf, CHUNK_CHAR, CHUNK_SIZE_MAX);
if (argc == 3)
nloops = strtoul(argv[2], NULL, 0);
if (nloops == 0)
- nloops = 100000;
+ nloops = DEFAULT_ITER;
if (rank == 0) {
fd = open(fname, O_WRONLY|O_CREAT|O_TRUNC, 0666);
trunc_offset = chunk_size + rand()%append_size;
if (rank == 0) {
if (n % 1000 == 0)
- printf("loop %5d: chunk %6d/%#06x, "
- "append %6d/%#06x, trunc @ %6d/%#06x\n",
+ printf("loop %5d: chunk %6d/%#07x, "
+ "append %6d/%#07x, trunc @ %6d/%#07x\n",
n, chunk_size, chunk_size, append_size,
append_size, trunc_offset, trunc_offset);
rprintf(rank, n, "start MPI_Barrier: %d\n",error);
/* Do the race */
- if (rank == n % 2) {
+ if (rank == n % size) {
//
done = 0;
do {
}
done += ret;
} while (done != append_size);
- } else if (rank == 1 - n % 2) {
+ } else if (rank == (n + 1) % size) {
ret = truncate(fname, (off_t)trunc_offset);
if (ret != 0)
rprintf(rank, n, "truncate @ %u: %s\n",
/* Check case 2: first truncate then append */
if (memcmp(read_buf+chunk_size, trunc_buf,
trunc_offset-chunk_size)) {
- printf("loop %d: append-after-TRUNC"
- " bad [%d-%d]/[%#x-%#x] != 0\n",
+ printf("loop %d: append-after-TRUNC bad"
+ " [%d-%d]/[%#x-%#x] != 0\n",
n, chunk_size, trunc_offset - 1,
chunk_size, trunc_offset - 1);
error = 1;
} else if (memcmp(read_buf+trunc_offset,
append_buf, append_size)) {
- printf("loop %d: APPEND-after-trunc"
- " bad [%d-%d]/[%#x-%#x] != %c\n",
+ printf("loop %d: APPEND-after-trunc bad"
+ " [%d-%d]/[%#x-%#x] != %c\n",
n, trunc_offset, append_size - 1,
trunc_offset, append_size - 1,
APPEND_CHAR);
if (rank == 0) {
char command[4096];
- printf("loop %5d: chunk %6d/%#06x, "
- "append %6d/%#06x, trunc @ %6d/%#06x\n",
+ printf("loop %5d: chunk %6d/%#07x, "
+ "append %6d/%#07x, trunc @ %6d/%#07x\n",
n, chunk_size, chunk_size, append_size,
append_size, trunc_offset, trunc_offset);
}
}
- printf("Finished after %d loops\n", n);
+ printf("rank %d, loop %d: finished\n", rank, n);
+ close(fd);
+
+ if (rank == 0) {
+ error = unlink(fname);
+ if (error < 0)
+ rprintf("unlink %s failed: %s\n",fname,strerror(errno));
+ }
MPI_Finalize();
return 0;