--- /dev/null
+diff -ru ../kernel-2.4.9/drivers/block/blkpg.c ../kernel-2.4.9-lustre/drivers/block/blkpg.c
+--- ../kernel-2.4.9/drivers/block/blkpg.c Wed Oct 31 17:50:05 2001
++++ ../kernel-2.4.9-lustre/drivers/block/blkpg.c Mon May 13 14:35:35 2002
+@@ -326,7 +326,43 @@
+
+ EXPORT_SYMBOL(blk_ioctl);
+
+- /*********************
++#define NUM_DEV_NO_WRITE 16
++static int dev_no_write[NUM_DEV_NO_WRITE];
++
++/*
++ * Debug code for turning block devices "read-only" (will discard writes
++ * silently). This is for filesystem crash/recovery testing.
++ */
++void dev_set_rdonly(kdev_t dev, int no_write)
++{
++ if (dev && no_write >= 0 && no_write < NUM_DEV_NO_WRITE) {
++ printk(KERN_WARNING "Turning device %s read-only\n",
++ bdevname(dev));
++ dev_no_write[no_write] = 0xdead0000 + dev;
++ }
++}
++
++int dev_check_rdonly(kdev_t dev) {
++ int i;
++
++ for (i = 0; i < NUM_DEV_NO_WRITE; i++) {
++ if ((dev_no_write[i] & 0xffff0000) == 0xdead0000 &&
++ dev == (dev_no_write[i] & 0xffff))
++ return 1;
++ }
++ return 0;
++}
++
++void dev_clear_rdonly(int no_write) {
++ if (no_write >= 0 && no_write < NUM_DEV_NO_WRITE) dev_no_write[no_write] = 0;
++}
++
++EXPORT_SYMBOL(dev_set_rdonly);
++EXPORT_SYMBOL(dev_check_rdonly);
++EXPORT_SYMBOL(dev_clear_rdonly);
++
++
++/*********************
+ * get_last_sector()
+ *
+ * Description: This function will read any inaccessible blocks at the end
+diff -ru ../kernel-2.4.9/drivers/block/loop.c ../kernel-2.4.9-lustre/drivers/block/loop.c
+--- ../kernel-2.4.9/drivers/block/loop.c Wed Oct 31 17:50:05 2001
++++ ../kernel-2.4.9-lustre/drivers/block/loop.c Mon May 13 14:23:05 2002
+@@ -482,6 +482,11 @@
+ spin_unlock_irq(&lo->lo_lock);
+
+ if (rw == WRITE) {
++#ifdef CONFIG_DEV_RDONLY
++ if (dev_check_rdonly(rbh->b_rdev))
++ goto err;
++#endif
++
+ if (lo->lo_flags & LO_FLAGS_READ_ONLY)
+ goto err;
+ } else if (rw == READA) {
+diff -ru ../kernel-2.4.9/drivers/ide/ide-disk.c ../kernel-2.4.9-lustre/drivers/ide/ide-disk.c
+--- ../kernel-2.4.9/drivers/ide/ide-disk.c Wed Oct 31 17:50:21 2001
++++ ../kernel-2.4.9-lustre/drivers/ide/ide-disk.c Mon May 13 14:23:05 2002
+@@ -374,6 +374,12 @@
+ */
+ static ide_startstop_t do_rw_disk (ide_drive_t *drive, struct request *rq, unsigned long block)
+ {
++#ifdef CONFIG_DEV_RDONLY
++ if (rq->cmd == WRITE && dev_check_rdonly(rq->rq_dev)) {
++ ide_end_request(1, HWGROUP(drive));
++ return ide_stopped;
++ }
++#endif
+ if (IDE_CONTROL_REG)
+ OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
+ OUT_BYTE(0x00, IDE_FEATURE_REG);
+diff -ru ../kernel-2.4.9/fs/jbd/commit.c ../kernel-2.4.9-lustre/fs/jbd/commit.c
+--- ../kernel-2.4.9/fs/jbd/commit.c Wed Oct 31 17:51:37 2001
++++ ../kernel-2.4.9-lustre/fs/jbd/commit.c Mon May 13 14:23:05 2002
+@@ -462,7 +462,7 @@
+ transaction's t_log_list queue, and metadata buffers are on
+ the t_iobuf_list queue.
+
+- Wait for the transactions in reverse order. That way we are
++ Wait for the buffers in reverse order. That way we are
+ less likely to be woken up until all IOs have completed, and
+ so we incur less scheduling load.
+ */
+@@ -583,6 +583,21 @@
+ journal_unlock_journal_head(descriptor);
+ }
+ lock_journal(journal);
++
++ /* Call any callbacks that had been registered for handles in this
++ * transaction. It is up to the callback to free any allocated
++ * memory.
++ */
++ {
++ struct list_head *p, *n;
++ list_for_each_safe(p, n, &commit_transaction->t_jcb) {
++ struct journal_callback *jcb;
++
++ jcb = list_entry(p, struct journal_callback, jcb_list);
++ list_del(p);
++ jcb->jcb_func(jcb);
++ }
++ }
+
+ /* End of a transaction! Finally, we can do checkpoint
+ processing: any buffers committed as a result of this
+diff -ru ../kernel-2.4.9/fs/jbd/journal.c ../kernel-2.4.9-lustre/fs/jbd/journal.c
+--- ../kernel-2.4.9/fs/jbd/journal.c Wed Oct 31 17:51:37 2001
++++ ../kernel-2.4.9-lustre/fs/jbd/journal.c Mon May 13 14:23:05 2002
+@@ -56,6 +56,7 @@
+ #endif
+ EXPORT_SYMBOL(journal_flush);
+ EXPORT_SYMBOL(journal_revoke);
++EXPORT_SYMBOL(journal_callback_set);
+
+ EXPORT_SYMBOL(journal_init_dev);
+ EXPORT_SYMBOL(journal_init_inode);
+diff -ru ../kernel-2.4.9/fs/jbd/transaction.c ../kernel-2.4.9-lustre/fs/jbd/transaction.c
+--- ../kernel-2.4.9/fs/jbd/transaction.c Sat Jan 26 01:42:21 2002
++++ ../kernel-2.4.9-lustre/fs/jbd/transaction.c Mon May 13 14:23:05 2002
+@@ -59,6 +59,7 @@
+ transaction->t_state = T_RUNNING;
+ transaction->t_tid = journal->j_transaction_sequence++;
+ transaction->t_expires = jiffies + journal->j_commit_interval;
++ INIT_LIST_HEAD(&transaction->t_jcb);
+
+ /* Set up the commit timer for the new transaction. */
+ J_ASSERT (!journal->j_commit_timer_active);
+@@ -202,6 +203,20 @@
+ return 0;
+ }
+
++/* Allocate a new handle. This should probably be in a slab... */
++static handle_t *get_handle(int nblocks)
++{
++ handle_t *handle = jbd_kmalloc(sizeof (handle_t), GFP_NOFS);
++ if (!handle)
++ return NULL;
++ memset(handle, 0, sizeof (handle_t));
++ handle->h_buffer_credits = nblocks;
++ handle->h_ref = 1;
++ INIT_LIST_HEAD(&handle->h_jcb);
++
++ return handle;
++}
++
+ /*
+ * Obtain a new handle.
+ *
+@@ -228,14 +243,11 @@
+ handle->h_ref++;
+ return handle;
+ }
+-
+- handle = jbd_kmalloc(sizeof (handle_t), GFP_NOFS);
++
++ handle = get_handle(nblocks);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+- memset (handle, 0, sizeof (handle_t));
+
+- handle->h_buffer_credits = nblocks;
+- handle->h_ref = 1;
+ current->journal_info = handle;
+
+ err = start_this_handle(journal, handle);
+@@ -334,14 +346,11 @@
+
+ if (is_journal_aborted(journal))
+ return ERR_PTR(-EIO);
+-
+- handle = jbd_kmalloc(sizeof (handle_t), GFP_NOFS);
++
++ handle = get_handle(nblocks);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+- memset (handle, 0, sizeof (handle_t));
+
+- handle->h_buffer_credits = nblocks;
+- handle->h_ref = 1;
+ current->journal_info = handle;
+
+ err = try_start_this_handle(journal, handle);
+@@ -1344,6 +1353,27 @@
+ #endif
+
+ /*
++ * Register a callback function for this handle. The function will be
++ * called when the transaction that this handle is part of has been
++ * committed to disk. There is no guarantee of ordering between handles
++ * within a single transaction, nor between callbacks registered on the
++ * same handle.
++ *
++ * The caller is responsible for allocating the journal_callback struct.
++ * This is to allow the caller to add as much extra data to the callback
++ * as needed, but reduce the overhead of multiple allocations. The caller
++ * should use the journal_callback_size() function to reserve enough space
++ * for the JBD-private part of the callback struct.
++ */
++void journal_callback_set(handle_t *handle, void (*func)(void *), void *cb_data)
++{
++ struct journal_callback *jcb = cb_data;
++
++ list_add(&jcb->jcb_list, &handle->h_jcb);
++ jcb->jcb_func = func;
++}
++
++/*
+ * All done for a particular handle.
+ *
+ * There is not much action needed here. We just return any remaining
+@@ -1409,7 +1439,11 @@
+ wake_up(&journal->j_wait_transaction_locked);
+ }
+
+- /*
++ /* Move callbacks from the handle to the transaction. */
++ list_splice(&handle->h_jcb, &transaction->t_jcb);
++ INIT_LIST_HEAD(&handle->h_jcb);
++
++ /*
+ * If the handle is marked SYNC, we need to set another commit
+ * going! We also want to force a commit if the current
+ * transaction is occupying too much of the log, or if the
+diff -ru ../kernel-2.4.9/include/linux/blkdev.h ../kernel-2.4.9-lustre/include/linux/blkdev.h
+--- ../kernel-2.4.9/include/linux/blkdev.h Thu May 9 12:59:13 2002
++++ ../kernel-2.4.9-lustre/include/linux/blkdev.h Mon May 13 14:23:05 2002
+@@ -257,5 +257,9 @@
+ #define blk_started_io(nsects) \
+ atomic_add(nsects, &queued_sectors);
+
++#define CONFIG_DEV_RDONLY
++void dev_set_rdonly(kdev_t, int);
++int dev_check_rdonly(kdev_t);
++void dev_clear_rdonly(int);
+ #endif
+
+diff -ru ../kernel-2.4.9/include/linux/jbd.h ../kernel-2.4.9-lustre/include/linux/jbd.h
+--- ../kernel-2.4.9/include/linux/jbd.h Thu May 9 12:59:02 2002
++++ ../kernel-2.4.9-lustre/include/linux/jbd.h Mon May 13 14:23:05 2002
+@@ -251,6 +251,13 @@
+ return bh->b_private;
+ }
+
++#define HAVE_JOURNAL_CALLBACK
++struct journal_callback {
++ struct list_head jcb_list;
++ void (*jcb_func)(void *);
++ /* user data goes here */
++};
++
+ struct jbd_revoke_table_s;
+
+ /* The handle_t type represents a single atomic update being performed
+@@ -281,6 +288,12 @@
+ operations */
+ int h_err;
+
++ /* List of application registered callbacks for this handle.
++ * The function(s) will be called after the transaction that
++ * this handle is part of has been committed to disk.
++ */
++ struct list_head h_jcb;
++
+ /* Flags */
+ unsigned int h_sync: 1; /* sync-on-close */
+ unsigned int h_jdata: 1; /* force data journaling */
+@@ -400,6 +413,10 @@
+
+ /* How many handles used this transaction? */
+ int t_handle_count;
++
++ /* List of registered callback functions for this transaction.
++ * Called when the transaction is committed. */
++ struct list_head t_jcb;
+ };
+
+
+@@ -549,7 +566,7 @@
+ #define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
+ #define JFS_LOADED 0x010 /* The journal superblock has been loaded */
+
+-/*
++/*
+ * Function declarations for the journaling transaction and buffer
+ * management
+ */
+@@ -647,6 +664,8 @@
+ extern int journal_try_to_free_buffers(journal_t *, struct page *, int);
+ extern int journal_stop(handle_t *);
+ extern int journal_flush (journal_t *);
++extern void journal_callback_set(handle_t *handle, void (*func)(void *),
++ void *cb_data);
+
+ extern void journal_lock_updates (journal_t *);
+ extern void journal_unlock_updates (journal_t *);