lustre/patches/patch-2.4.9-chaos14  (fs/lustre-release.git)
diff -ru ../kernel-2.4.9/drivers/block/blkpg.c ../kernel-2.4.9-lustre/drivers/block/blkpg.c
--- ../kernel-2.4.9/drivers/block/blkpg.c       Wed Oct 31 17:50:05 2001
+++ ../kernel-2.4.9-lustre/drivers/block/blkpg.c        Mon May 13 14:35:35 2002
@@ -326,7 +326,43 @@
 
 EXPORT_SYMBOL(blk_ioctl);
 
- /*********************
+#define NUM_DEV_NO_WRITE 16
+static int dev_no_write[NUM_DEV_NO_WRITE];
+
+/*
+ * Debug code for turning block devices "read-only" (will discard writes
+ * silently).  This is for filesystem crash/recovery testing.
+ */
+void dev_set_rdonly(kdev_t dev, int no_write)
+{
+       if (dev) {
+               printk(KERN_WARNING "Turning device %s read-only\n",
+                      bdevname(dev));
+               dev_no_write[no_write] = 0xdead0000 + dev;
+       }
+}
+
+int dev_check_rdonly(kdev_t dev) {
+       int i;
+
+       for (i = 0; i < NUM_DEV_NO_WRITE; i++) {
+               if ((dev_no_write[i] & 0xffff0000) == 0xdead0000 &&
+                   dev == (dev_no_write[i] & 0xffff))
+                       return 1;
+       }
+       return 0;
+}
+
+void dev_clear_rdonly(int no_write) {
+       dev_no_write[no_write] = 0;
+}
+
+EXPORT_SYMBOL(dev_set_rdonly);
+EXPORT_SYMBOL(dev_check_rdonly);
+EXPORT_SYMBOL(dev_clear_rdonly);
+
+
+/*********************
  * get_last_sector()
  *
  * Description: This function will read any inaccessible blocks at the end
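The hunk above wires a per-device "silently drop writes" switch into the block layer for filesystem crash/recovery testing. A minimal sketch of how a test harness might drive it follows; crash_test_window() and its slot argument are hypothetical names, only dev_set_rdonly(), dev_check_rdonly(), dev_clear_rdonly() and bdevname() come from the code above.

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

/* Hypothetical test helper: discard writes to "dev" while the test
 * simulates a crash window, then restore normal behaviour before
 * recovery runs.  "slot" picks an entry in the dev_no_write[] table. */
static void crash_test_window(kdev_t dev, int slot)
{
        dev_set_rdonly(dev, slot);      /* writes to dev are now dropped */

        /* ... run the filesystem operations whose effects should be lost ... */

        if (dev_check_rdonly(dev))
                printk(KERN_INFO "%s: still read-only, clearing\n",
                       bdevname(dev));

        dev_clear_rdonly(slot);         /* writes reach the device again */
}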
diff -ru ../kernel-2.4.9/drivers/block/loop.c ../kernel-2.4.9-lustre/drivers/block/loop.c
--- ../kernel-2.4.9/drivers/block/loop.c        Wed Oct 31 17:50:05 2001
+++ ../kernel-2.4.9-lustre/drivers/block/loop.c Mon May 13 14:23:05 2002
@@ -482,6 +482,11 @@
        spin_unlock_irq(&lo->lo_lock);
 
        if (rw == WRITE) {
+#ifdef CONFIG_DEV_RDONLY
+               if (dev_check_rdonly(rbh->b_rdev))
+                       goto err;
+#endif
+
                if (lo->lo_flags & LO_FLAGS_READ_ONLY)
                        goto err;
        } else if (rw == READA) {
diff -ru ../kernel-2.4.9/drivers/ide/ide-disk.c ../kernel-2.4.9-lustre/drivers/ide/ide-disk.c
--- ../kernel-2.4.9/drivers/ide/ide-disk.c      Wed Oct 31 17:50:21 2001
+++ ../kernel-2.4.9-lustre/drivers/ide/ide-disk.c       Mon May 13 14:23:05 2002
@@ -374,6 +374,12 @@
  */
 static ide_startstop_t do_rw_disk (ide_drive_t *drive, struct request *rq, unsigned long block)
 {
+#ifdef CONFIG_DEV_RDONLY
+       if (rq->cmd == WRITE && dev_check_rdonly(rq->rq_dev)) {
+               ide_end_request(1, HWGROUP(drive));
+               return ide_stopped;
+       }
+#endif
        if (IDE_CONTROL_REG)
                OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
        OUT_BYTE(0x00, IDE_FEATURE_REG);
diff -ru ../kernel-2.4.9/fs/jbd/commit.c ../kernel-2.4.9-lustre/fs/jbd/commit.c
--- ../kernel-2.4.9/fs/jbd/commit.c     Wed Oct 31 17:51:37 2001
+++ ../kernel-2.4.9-lustre/fs/jbd/commit.c      Mon May 13 14:23:05 2002
@@ -462,7 +462,7 @@
            transaction's t_log_list queue, and metadata buffers are on
            the t_iobuf_list queue.
 
-          Wait for the transactions in reverse order.  That way we are
+          Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
        */
@@ -566,8 +566,10 @@
 
        jbd_debug(3, "JBD: commit phase 6\n");
 
-       if (is_journal_aborted(journal))
+       if (is_journal_aborted(journal)) {
+               unlock_journal(journal);
                goto skip_commit;
+       }
 
        /* Done it all: now write the commit record.  We should have
         * cleaned up our previous buffers by now, so if we are in abort
@@ -577,9 +579,10 @@
        descriptor = journal_get_descriptor_buffer(journal);
        if (!descriptor) {
                __journal_abort_hard(journal);
+               unlock_journal(journal);
                goto skip_commit;
        }
-       
+
        /* AKPM: buglet - add `i' to tmp! */
        for (i = 0; i < jh2bh(descriptor)->b_size; i += 512) {
                journal_header_t *tmp =
@@ -600,7 +603,6 @@
                put_bh(bh);             /* One for getblk() */
                journal_unlock_journal_head(descriptor);
        }
-       lock_journal(journal);
 
        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
@@ -609,6 +611,25 @@
 
 skip_commit:
 
+       /* Call any callbacks that had been registered for handles in this
+        * transaction.  It is up to the callback to free any allocated
+        * memory.
+        */
+       if (!list_empty(&commit_transaction->t_jcb)) {
+               struct list_head *p, *n;
+               int error = is_journal_aborted(journal);
+
+               list_for_each_safe(p, n, &commit_transaction->t_jcb) {
+                       struct journal_callback *jcb;
+
+                       jcb = list_entry(p, struct journal_callback, jcb_list);
+                       list_del(p);
+                       jcb->jcb_func(jcb, error);
+               }
+       }
+
+       lock_journal(journal);
+
        jbd_debug(3, "JBD: commit phase 7\n");
 
        J_ASSERT(commit_transaction->t_sync_datalist == NULL);
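The loop added above runs each registered callback once the transaction has committed (or aborted), passing the journal's abort status, and the comment makes the callback responsible for freeing its own allocation. A rough sketch of what such a callback could look like on the caller's side follows; struct my_commit_cb and my_commit_callback() are hypothetical names, only the jcb_func signature and the freeing rule come from the patch.

#include <linux/kernel.h>
#include <linux/jbd.h>
#include <linux/slab.h>

/* Hypothetical caller-side callback data: a struct journal_callback at
 * offset 0, followed by whatever the caller wants to carry along. */
struct my_commit_cb {
        struct journal_callback jcb;    /* must be the first member */
        unsigned long cookie;           /* caller-specific data */
};

/* Matches jcb_func's signature; cb_data is the struct that was registered,
 * error is is_journal_aborted() at commit time. */
static void my_commit_callback(void *cb_data, int error)
{
        struct my_commit_cb *cb = cb_data;

        if (error)
                printk(KERN_ERR "commit aborted, dropping cookie %lu\n",
                       cb->cookie);
        /* ... otherwise the data covered by this transaction is on disk ... */

        kfree(cb);                      /* the callback frees its own memory */
}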
diff -ru ../kernel-2.4.9/fs/jbd/journal.c ../kernel-2.4.9-lustre/fs/jbd/journal.c
--- ../kernel-2.4.9/fs/jbd/journal.c    Wed Oct 31 17:51:37 2001
+++ ../kernel-2.4.9-lustre/fs/jbd/journal.c     Mon May 13 14:23:05 2002
@@ -56,6 +56,7 @@
 #endif
 EXPORT_SYMBOL(journal_flush);
 EXPORT_SYMBOL(journal_revoke);
+EXPORT_SYMBOL(journal_callback_set);
 
 EXPORT_SYMBOL(journal_init_dev);
 EXPORT_SYMBOL(journal_init_inode);
diff -ru ../kernel-2.4.9/fs/jbd/transaction.c ../kernel-2.4.9-lustre/fs/jbd/transaction.c
--- ../kernel-2.4.9/fs/jbd/transaction.c        Sat Jan 26 01:42:21 2002
+++ ../kernel-2.4.9-lustre/fs/jbd/transaction.c Mon May 13 14:23:05 2002
@@ -59,6 +59,7 @@
        transaction->t_state = T_RUNNING;
        transaction->t_tid = journal->j_transaction_sequence++;
        transaction->t_expires = jiffies + journal->j_commit_interval;
+       INIT_LIST_HEAD(&transaction->t_jcb);
 
        /* Set up the commit timer for the new transaction. */
        J_ASSERT (!journal->j_commit_timer_active);
@@ -202,6 +203,20 @@
        return 0;
 }
 
+/* Allocate a new handle.  This should probably be in a slab... */
+static handle_t *get_handle(int nblocks)
+{
+       handle_t *handle = jbd_kmalloc(sizeof (handle_t), GFP_NOFS);
+       if (!handle)
+               return NULL;
+       memset(handle, 0, sizeof (handle_t));
+       handle->h_buffer_credits = nblocks;
+       handle->h_ref = 1;
+       INIT_LIST_HEAD(&handle->h_jcb);
+
+       return handle;
+}
+
 /*
  * Obtain a new handle.  
 *
@@ -228,14 +243,11 @@
                handle->h_ref++;
                return handle;
        }
-       
-       handle = jbd_kmalloc(sizeof (handle_t), GFP_NOFS);
+
+       handle = get_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);
-       memset (handle, 0, sizeof (handle_t));
 
-       handle->h_buffer_credits = nblocks;
-       handle->h_ref = 1;
        current->journal_info = handle;
 
        err = start_this_handle(journal, handle);
@@ -334,14 +346,11 @@
        
        if (is_journal_aborted(journal))
                return ERR_PTR(-EIO);
-       
-       handle = jbd_kmalloc(sizeof (handle_t), GFP_NOFS);
+
+       handle = get_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);
-       memset (handle, 0, sizeof (handle_t));
 
-       handle->h_buffer_credits = nblocks;
-       handle->h_ref = 1;
        current->journal_info = handle;
 
        err = try_start_this_handle(journal, handle);
@@ -1328,6 +1337,29 @@
 #endif
 
 /*
+ * Register a callback function for this handle.  The function will be
+ * called when the transaction that this handle is part of has been
+ * committed to disk with the original callback data struct and the
+ * error status of the journal as parameters.  There is no guarantee of
+ * ordering between handles within a single transaction, nor between
+ * callbacks registered on the same handle.
+ *
+ * The caller is responsible for allocating the journal_callback struct.
+ * This is to allow the caller to add as much extra data to the callback
+ * as needed, but reduce the overhead of multiple allocations.  The caller
+ * allocated struct must start with a struct journal_callback at offset 0,
+ * and has the caller-specific data afterwards.
+ */
+void journal_callback_set(handle_t *handle, void (*func)(void *, int),
+                         void *cb_data)
+{
+       struct journal_callback *jcb = cb_data;
+
+       list_add(&jcb->jcb_list, &handle->h_jcb);
+       jcb->jcb_func = func;
+}
+
+/*
  * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
@@ -1409,7 +1439,10 @@
                        wake_up(&journal->j_wait_transaction_locked);
        }
 
-       /* 
+       /* Move callbacks from the handle to the transaction. */
+       list_splice(&handle->h_jcb, &transaction->t_jcb);
+
+       /*
         * If the handle is marked SYNC, we need to set another commit
         * going!  We also want to force a commit if the current
         * transaction is occupying too much of the log, or if the
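Taken together with the journal_stop() change above, which splices the handle's h_jcb list onto the transaction's t_jcb list, registration from a caller looks roughly like the sketch below. my_update(), the struct layout and the credit count are illustrative only; journal_start(), journal_callback_set() and journal_stop() are the JBD interfaces touched by this patch, and my_commit_callback() is the hypothetical callback sketched after the commit.c hunk.

#include <linux/jbd.h>
#include <linux/slab.h>

struct my_commit_cb {                   /* same layout as the earlier sketch */
        struct journal_callback jcb;    /* must be at offset 0 */
        unsigned long cookie;           /* caller data follows the jcb */
};

static void my_commit_callback(void *cb_data, int error);

static int my_update(journal_t *journal)
{
        handle_t *handle;
        struct my_commit_cb *cb;

        handle = journal_start(journal, 1);     /* 1 buffer credit, illustrative */
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        cb = kmalloc(sizeof(*cb), GFP_NOFS);
        if (cb) {
                cb->cookie = 42;
                /* queued on handle->h_jcb; moved to t_jcb at journal_stop(),
                 * invoked by the commit thread with the journal status */
                journal_callback_set(handle, my_commit_callback, cb);
        }

        /* ... journal_get_write_access() / journal_dirty_metadata() ... */

        return journal_stop(handle);
}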
diff -ru ../kernel-2.4.9/include/linux/blkdev.h ../kernel-2.4.9-lustre/include/linux/blkdev.h
--- ../kernel-2.4.9/include/linux/blkdev.h      Thu May  9 12:59:13 2002
+++ ../kernel-2.4.9-lustre/include/linux/blkdev.h       Mon May 13 14:23:05 2002
@@ -257,5 +257,9 @@
 #define blk_started_io(nsects)                         \
        atomic_add(nsects, &queued_sectors);
 
+#define CONFIG_DEV_RDONLY
+void dev_set_rdonly(kdev_t, int);
+int dev_check_rdonly(kdev_t);
+void dev_clear_rdonly(int);
 #endif
 
diff -ru ../kernel-2.4.9/include/linux/jbd.h ../kernel-2.4.9-lustre/include/linux/jbd.h
--- ../kernel-2.4.9/include/linux/jbd.h Thu May  9 12:59:02 2002
+++ ../kernel-2.4.9-lustre/include/linux/jbd.h  Mon May 13 14:23:05 2002
@@ -251,6 +251,13 @@
        return bh->b_private;
 }
 
+#define HAVE_JOURNAL_CALLBACK_STATUS
+struct journal_callback {
+       struct list_head jcb_list;
+       void (*jcb_func)(void *cb_data, int error);
+       /* user data goes here */
+};
+
 struct jbd_revoke_table_s;
 
 /* The handle_t type represents a single atomic update being performed
@@ -281,6 +288,12 @@
           operations */
        int                     h_err;
 
+       /* List of application registered callbacks for this handle.
+        * The function(s) will be called after the transaction that
+        * this handle is part of has been committed to disk.
+        */
+       struct list_head        h_jcb;
+
        /* Flags */
        unsigned int    h_sync:         1;      /* sync-on-close */
        unsigned int    h_jdata:        1;      /* force data journaling */
@@ -400,6 +413,10 @@
 
        /* How many handles used this transaction? */
        int t_handle_count;
+
+       /* List of registered callback functions for this transaction.
+        * Called when the transaction is committed. */
+       struct list_head        t_jcb;
 };
 
 
@@ -647,6 +664,8 @@
 extern int      journal_try_to_free_buffers(journal_t *, struct page *, int);
 extern int      journal_stop(handle_t *);
 extern int      journal_flush (journal_t *);
+extern void     journal_callback_set(handle_t *handle, void (*func)(void *, int),
+                                     void *cb_data);
 
 extern void     journal_lock_updates (journal_t *);
 extern void     journal_unlock_updates (journal_t *);
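The HAVE_JOURNAL_CALLBACK_STATUS define added to jbd.h gives dependent code (Lustre, in this tree) a compile-time way to tell whether the patched callback interface, including its error argument, is present. One possible guard is sketched below; fs_journal_callback_set() is an illustrative wrapper name and the fallback branch is only one of several ways unpatched kernels could be handled.

#include <linux/jbd.h>

#ifdef HAVE_JOURNAL_CALLBACK_STATUS
/* Patched JBD: commit-time callbacks carry the journal's abort status. */
#define fs_journal_callback_set(handle, func, data) \
        journal_callback_set(handle, func, data)
#else
/* Unpatched JBD: no callback support; callers would have to fall back to,
 * e.g., flushing the journal synchronously before reporting completion. */
#define fs_journal_callback_set(handle, func, data) do { } while (0)
#endif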