b=20298 (Merge head ldiskfs and b1_8 ldiskfs)
fs/lustre-release.git: ldiskfs/kernel_patches/patches/ext4-mmp-sles11.patch
Index: linux-2.6.27.21-0.1/fs/ext4/super.c
===================================================================
--- linux-2.6.27.21-0.1.orig/fs/ext4/super.c    2009-07-07 14:36:58.000000000 +0530
+++ linux-2.6.27.21-0.1/fs/ext4/super.c 2009-07-07 14:38:06.000000000 +0530
@@ -39,6 +39,8 @@
 #include <linux/log2.h>
 #include <linux/crc16.h>
 #include <asm/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/utsname.h>
 
 #include "ext4.h"
 #include "ext4_jbd2.h"
@@ -599,6 +601,8 @@
                invalidate_bdev(sbi->journal_bdev);
                ext4_blkdev_remove(sbi);
        }
+       if (sbi->s_mmp_tsk)
+               kthread_stop(sbi->s_mmp_tsk);
        sb->s_fs_info = NULL;
        kfree(sbi);
        return;
@@ -807,7 +811,6 @@
        if (!test_opt(sb, DELALLOC))
                seq_puts(seq, ",nodelalloc");
 
-
        if (sbi->s_stripe)
                seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
        /*
@@ -830,6 +833,325 @@
 }
 
 
+
+/*
+ * Write the MMP block using WRITE_SYNC to try to get the block on-disk
+ * faster.
+ */
+static int write_mmp_block(struct buffer_head *bh)
+{
+       mark_buffer_dirty(bh);
+       lock_buffer(bh);
+       bh->b_end_io = end_buffer_write_sync;
+       get_bh(bh);
+       submit_bh(WRITE_SYNC, bh);
+       wait_on_buffer(bh);
+       if (unlikely(!buffer_uptodate(bh)))
+               return 1;
+
+       return 0;
+}
+
+/*
+ * Read the MMP block. It _must_ be read from disk and hence we clear the
+ * uptodate flag on the buffer.
+ */
+static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+                         unsigned long mmp_block)
+{
+       struct mmp_struct *mmp;
+
+       if (*bh)
+               clear_buffer_uptodate(*bh);
+
+       /* This would be sb_bread(sb, mmp_block), except we need to be sure
+        * that the MD RAID device cache has been bypassed, and that the read
+        * is not blocked in the elevator. */
+       if (!*bh)
+               *bh = sb_getblk(sb, mmp_block);
+       if (*bh) {
+               get_bh(*bh);
+               lock_buffer(*bh);
+               (*bh)->b_end_io = end_buffer_read_sync;
+               submit_bh(READ_SYNC, *bh);
+               wait_on_buffer(*bh);
+               if (!buffer_uptodate(*bh)) {
+                       brelse(*bh);
+                       *bh = NULL;
+               }
+       }
+       if (!*bh) {
+               ext4_warning(sb, __FUNCTION__,
+                            "Error while reading MMP block %lu", mmp_block);
+               return -EIO;
+       }
+
+       mmp = (struct mmp_struct *)((*bh)->b_data);
+       if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * Dump as much information as possible to help the admin.
+ */
+static void dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
+                        const char *function, const char *msg)
+{
+       ext4_warning(sb, function, msg);
+       ext4_warning(sb, function, "MMP failure info: last update time: %llu, "
+                    "last update node: %s, last update device: %s\n",
+                    le64_to_cpu(mmp->mmp_time), mmp->mmp_nodename,
+                    mmp->mmp_bdevname);
+}
+
+/*
+ * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
+ */
+static int kmmpd(void *data)
+{
+       struct super_block *sb = (struct super_block *) data;
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       struct buffer_head *bh = NULL;
+       struct mmp_struct *mmp;
+       unsigned long mmp_block;
+       u32 seq = 0;
+       unsigned long failed_writes = 0;
+       int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
+       unsigned mmp_check_interval;
+       unsigned long last_update_time;
+       unsigned long diff;
+       int retval;
+
+       mmp_block = le64_to_cpu(es->s_mmp_block);
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+
+       mmp = (struct mmp_struct *)(bh->b_data);
+       mmp->mmp_time = cpu_to_le64(get_seconds());
+       /*
+        * Start with the higher mmp_check_interval and reduce it if
+        * the MMP block is being updated on time.
+        */
+       mmp_check_interval = max(5 * mmp_update_interval,
+                                EXT4_MMP_MIN_CHECK_INTERVAL);
+       mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+       bdevname(bh->b_bdev, mmp->mmp_bdevname);
+
+       memcpy(mmp->mmp_nodename, init_utsname()->nodename,
+              sizeof(mmp->mmp_nodename));
+
+       while (!kthread_should_stop()) {
+               if (++seq > EXT4_MMP_SEQ_MAX)
+                       seq = 1;
+
+               mmp->mmp_seq = cpu_to_le32(seq);
+               mmp->mmp_time = cpu_to_le64(get_seconds());
+               last_update_time = jiffies;
+
+               retval = write_mmp_block(bh);
+               /*
+                * Don't spew too many error messages. Print one every
+                * (s_mmp_update_interval * 60) seconds.
+                */
+               if (retval && (failed_writes % 60) == 0) {
+                       ext4_error(sb, __FUNCTION__,
+                                  "Error writing to MMP block");
+                       failed_writes++;
+               }
+
+               if (!(le32_to_cpu(es->s_feature_incompat) &
+                   EXT4_FEATURE_INCOMPAT_MMP)) {
+                       ext4_warning(sb, __FUNCTION__, "kmmpd being stopped "
+                                    "since MMP feature has been disabled.");
+                       EXT4_SB(sb)->s_mmp_tsk = 0;
+                       goto failed;
+               }
+
+               if (sb->s_flags & MS_RDONLY) {
+                       ext4_warning(sb, __FUNCTION__, "kmmpd being stopped "
+                                    "since filesystem has been remounted as "
+                                    "readonly.");
+                       EXT4_SB(sb)->s_mmp_tsk = 0;
+                       goto failed;
+               }
+
+               diff = jiffies - last_update_time;
+               if (diff < mmp_update_interval * HZ)
+                       schedule_timeout_interruptible(mmp_update_interval *
+                                                      HZ - diff);
+
+               /*
+                * We need to make sure that more than mmp_check_interval
+                * seconds have not passed since writing. If that has happened
+                * we need to check if the MMP block is as we left it.
+                */
+               diff = jiffies - last_update_time;
+               if (diff > mmp_check_interval * HZ) {
+                       struct buffer_head *bh_check = NULL;
+                       struct mmp_struct *mmp_check;
+
+                       retval = read_mmp_block(sb, &bh_check, mmp_block);
+                       if (retval) {
+                               EXT4_SB(sb)->s_mmp_tsk = 0;
+                               goto failed;
+                       }
+
+                       mmp_check = (struct mmp_struct *)(bh_check->b_data);
+                       if (mmp->mmp_time != mmp_check->mmp_time ||
+                           memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
+                                  sizeof(mmp->mmp_nodename)))
+                               dump_mmp_msg(sb, mmp_check, __FUNCTION__,
+                                            "Error while updating MMP info. "
+                                            "The filesystem seems to have "
+                                            "been multiply mounted.");
+
+                       put_bh(bh_check);
+               }
+
+               /*
+                * Adjust the mmp_check_interval depending on how much time
+                * it took for the MMP block to be written.
+                */
+               mmp_check_interval = max(5 * diff / HZ,
+                                (unsigned long) EXT4_MMP_MIN_CHECK_INTERVAL);
+               mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+       }
+
+       /*
+        * Unmount seems to be clean.
+        */
+       mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
+       mmp->mmp_time = cpu_to_le64(get_seconds());
+
+       retval = write_mmp_block(bh);
+
+failed:
+       brelse(bh);
+       return retval;
+}
+
+/*
+ * Get a random new sequence number but make sure it is not greater than
+ * EXT4_MMP_SEQ_MAX.
+ */
+static unsigned int mmp_new_seq(void)
+{
+       u32 new_seq;
+
+       do {
+               get_random_bytes(&new_seq, sizeof(u32));
+       } while (new_seq > EXT4_MMP_SEQ_MAX);
+
+       return new_seq;
+}
+
+/*
+ * Protect the filesystem from being mounted more than once.
+ */
+static int ext4_multi_mount_protect(struct super_block *sb,
+                                   unsigned long mmp_block)
+{
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       struct buffer_head *bh = NULL;
+       struct mmp_struct *mmp = NULL;
+       u32 seq;
+       unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
+       int retval;
+
+       if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
+           mmp_block >= ext4_blocks_count(es)) {
+               ext4_warning(sb, __FUNCTION__,
+                            "Invalid MMP block in superblock");
+               goto failed;
+       }
+
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+
+       mmp = (struct mmp_struct *)(bh->b_data);
+
+       if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
+               mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;
+
+       /*
+        * If check_interval in MMP block is larger, use that instead of
+        * update_interval from the superblock.
+        */
+       if (le16_to_cpu(mmp->mmp_check_interval) > mmp_check_interval)
+               mmp_check_interval = le16_to_cpu(mmp->mmp_check_interval);
+
+       seq = le32_to_cpu(mmp->mmp_seq);
+       if (seq == EXT4_MMP_SEQ_CLEAN)
+               goto skip;
+
+       if (seq == EXT4_MMP_SEQ_FSCK) {
+               dump_mmp_msg(sb, mmp, __FUNCTION__,
+                            "fsck is running on the filesystem");
+               goto failed;
+       }
+
+       schedule_timeout_uninterruptible(HZ * (2 * mmp_check_interval + 1));
+
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+       mmp = (struct mmp_struct *)(bh->b_data);
+       if (seq != le32_to_cpu(mmp->mmp_seq)) {
+               dump_mmp_msg(sb, mmp, __FUNCTION__,
+                            "Device is already active on another node.");
+               goto failed;
+       }
+
+skip:
+       /*
+        * write a new random sequence number.
+        */
+       mmp->mmp_seq = seq = cpu_to_le32(mmp_new_seq());
+
+       retval = write_mmp_block(bh);
+       if (retval)
+               goto failed;
+
+       /*
+        * wait for MMP interval and check mmp_seq.
+        */
+       schedule_timeout_uninterruptible(HZ * (2 * mmp_check_interval + 1));
+
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+       mmp = (struct mmp_struct *)(bh->b_data);
+       if (seq != le32_to_cpu(mmp->mmp_seq)) {
+               dump_mmp_msg(sb, mmp, __FUNCTION__,
+                            "Device is already active on another node.");
+               goto failed;
+       }
+
+       /*
+        * Start a kernel thread to update the MMP block periodically.
+        */
+       EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, sb, "kmmpd-%02x:%02x",
+                                            MAJOR(sb->s_dev),
+                                            MINOR(sb->s_dev));
+       if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
+               EXT4_SB(sb)->s_mmp_tsk = 0;
+               ext4_warning(sb, __FUNCTION__, "Unable to create kmmpd thread "
+                            "for %s.", sb->s_id);
+               goto failed;
+       }
+
+       brelse(bh);
+       return 0;
+
+failed:
+       brelse(bh);
+       return 1;
+}
+
 static struct inode *ext4_nfs_get_inode(struct super_block *sb,
                u64 ino, u32 generation)
 {
@@ -2370,6 +2692,11 @@
                          EXT4_HAS_INCOMPAT_FEATURE(sb,
                                    EXT4_FEATURE_INCOMPAT_RECOVER));
 
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
+           !(sb->s_flags & MS_RDONLY))
+               if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
+                       goto failed_mount3;
+
        /*
         * The first inode we look at is the journal inode.  Don't try
         * root first: it may be modified in the journal!
@@ -2570,6 +2897,8 @@
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
+       if (sbi->s_mmp_tsk)
+               kthread_stop(sbi->s_mmp_tsk);
 failed_mount2:
        for (i = 0; i < db_count; i++)
                brelse(sbi->s_group_desc[i]);
@@ -3085,7 +3414,7 @@
        unsigned long old_sb_flags;
        struct ext4_mount_options old_opts;
        ext4_group_t g;
-       int err;
+       int err = 0;
 #ifdef CONFIG_QUOTA
        int i;
 #endif
@@ -3210,6 +3539,13 @@
                                goto restore_opts;
                        if (!ext4_setup_super(sb, es, 0))
                                sb->s_flags &= ~MS_RDONLY;
+                       if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+                                                   EXT4_FEATURE_INCOMPAT_MMP))
+                               if (ext4_multi_mount_protect(sb,
+                                               le64_to_cpu(es->s_mmp_block))) {
+                                       err = -EROFS;
+                                       goto restore_opts;
+                               }
                }
        }
 #ifdef CONFIG_QUOTA
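
The super.c hunks above wire MMP into mount, unmount and remount: ext4_fill_super() calls ext4_multi_mount_protect() before touching the journal, ext4_put_super() and the mount failure path stop kmmpd, and a remount from read-only back to read-write repeats the handshake. The handshake itself is: read the MMP block, skip the wait if the last unmount was clean, refuse if fsck owns the device, otherwise sleep roughly twice the check interval and verify the sequence did not move, write a fresh random sequence, wait again and re-verify, and only then start kmmpd. The user-space sketch below models that handshake against an ordinary 1 KiB file standing in for the MMP block; the struct mmp_sim type, the mmp_* helper names and the use of pread()/pwrite()/sleep() are assumptions made for the illustration, not code from the patch.

/* Illustrative user-space model of the ext4_multi_mount_protect() handshake.
 * Assumption: "path" is a 1 KiB file playing the role of the on-disk MMP block,
 * already seeded with the little-endian MMP magic in its first field. */
#include <endian.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#define MMP_MAGIC     0x004D4D50U
#define MMP_SEQ_CLEAN 0xFF4D4D50U
#define MMP_SEQ_FSCK  0xE24D4D50U
#define MMP_SEQ_MAX   0xE24D4D4FU

struct mmp_sim {                        /* mirrors struct mmp_struct from the patch */
        uint32_t magic;
        uint32_t seq;
        uint64_t time;
        char     nodename[64];
        char     bdevname[32];
        uint16_t check_interval;
        uint16_t pad1;
        uint32_t pad2[227];
};

static int mmp_read(int fd, struct mmp_sim *m)
{
        if (pread(fd, m, sizeof(*m), 0) != (ssize_t)sizeof(*m))
                return -1;
        return le32toh(m->magic) == MMP_MAGIC ? 0 : -1;
}

static int mmp_write(int fd, const struct mmp_sim *m)
{
        if (pwrite(fd, m, sizeof(*m), 0) != (ssize_t)sizeof(*m))
                return -1;
        return fsync(fd);
}

/* Returns 0 if the "device" may be used, 1 if it looks active elsewhere. */
static int mmp_protect(const char *path)
{
        struct mmp_sim m;
        uint32_t seq, check = 5;        /* EXT4_MMP_MIN_CHECK_INTERVAL */
        int busy = 1;                   /* assume the worst until proven free */
        int fd = open(path, O_RDWR);

        if (fd < 0)
                return 1;
        if (mmp_read(fd, &m))
                goto out;
        if (le16toh(m.check_interval) > check)
                check = le16toh(m.check_interval);

        seq = le32toh(m.seq);
        if (seq == MMP_SEQ_FSCK)
                goto out;                       /* fsck owns the device */
        if (seq != MMP_SEQ_CLEAN) {
                sleep(2 * check + 1);           /* let any live updater bump the seq */
                if (mmp_read(fd, &m) || le32toh(m.seq) != seq)
                        goto out;               /* someone else is touching the block */
        }

        /* Claim the block with a pseudo-random sequence and make sure it sticks. */
        seq = (uint32_t)(random() % MMP_SEQ_MAX) + 1;
        m.seq = htole32(seq);
        if (mmp_write(fd, &m))
                goto out;
        sleep(2 * check + 1);
        if (mmp_read(fd, &m) == 0 && le32toh(m.seq) == seq)
                busy = 0;               /* safe: a real mount would now start kmmpd */
out:
        close(fd);
        return busy;
}

int main(int argc, char **argv)
{
        return argc == 2 ? mmp_protect(argv[1]) : 2;
}

Run concurrently against the same seeded file, at most one instance of this sketch should exit 0; the loser observes the sequence moving during its wait, which is exactly the race the kernel code guards against.
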
Index: linux-2.6.27.21-0.1/fs/ext4/ext4.h
===================================================================
--- linux-2.6.27.21-0.1.orig/fs/ext4/ext4.h     2009-07-07 14:36:00.000000000 +0530
+++ linux-2.6.27.21-0.1/fs/ext4/ext4.h  2009-07-07 14:36:58.000000000 +0530
@@ -660,7 +660,7 @@
        __le16  s_want_extra_isize;     /* New inodes should reserve # bytes */
        __le32  s_flags;                /* Miscellaneous flags */
        __le16  s_raid_stride;          /* RAID stride */
-       __le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
+       __le16  s_mmp_update_interval;  /* # seconds to wait in MMP checking */
        __le64  s_mmp_block;            /* Block for multi-mount protection */
        __le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
        __u8    s_log_groups_per_flex;  /* FLEX_BG group size */
@@ -777,7 +777,8 @@
                                         EXT4_FEATURE_INCOMPAT_META_BG| \
                                         EXT4_FEATURE_INCOMPAT_EXTENTS| \
                                         EXT4_FEATURE_INCOMPAT_64BIT| \
-                                        EXT4_FEATURE_INCOMPAT_FLEX_BG)
+                                        EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+                                        EXT4_FEATURE_INCOMPAT_MMP)
 #define EXT4_FEATURE_RO_COMPAT_SUPP    (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
                                         EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -981,6 +982,39 @@
 #endif
 
 /*
+ * This structure will be used for multiple mount protection. It will be
+ * written into the block number saved in the s_mmp_block field in the
+ * superblock. Programs that check MMP should assume that if
+ * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe
+ * to use the filesystem, regardless of how old the timestamp is.
+ */
+#define EXT4_MMP_MAGIC     0x004D4D50U /* ASCII for MMP */
+#define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */
+#define EXT4_MMP_SEQ_FSCK  0xE24D4D50U /* mmp_seq value when being fscked */
+#define EXT4_MMP_SEQ_MAX   0xE24D4D4FU /* maximum valid mmp_seq value */
+
+struct mmp_struct {
+       __le32  mmp_magic;
+       __le32  mmp_seq;
+       __le64  mmp_time;
+       char    mmp_nodename[64];
+       char    mmp_bdevname[32];
+       __le16  mmp_check_interval;
+       __le16  mmp_pad1;
+       __le32  mmp_pad2[227];
+};
+
+/*
+ * Default interval in seconds to update the MMP sequence number.
+ */
+#define EXT4_MMP_UPDATE_INTERVAL   1
+
+/*
+ * Minimum interval for MMP checking in seconds.
+ */
+#define EXT4_MMP_MIN_CHECK_INTERVAL    5
+
+/*
  * Function prototypes
  */
 
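
The ext4.h hunk above pins down the on-disk format: mmp_struct is padded with 227 __le32 words so it fills exactly 1024 bytes (one block at the smallest ext4 block size), and the magic and sequence constants all carry the ASCII bytes "MMP" in their low three bytes. A quick way to sanity-check such a layout outside the kernel is a throwaway translation unit built around offsetof() and _Static_assert(); the struct mmp_disk mirror below uses <stdint.h> types and is an assumption of this sketch, not part of the patch.

/* Compile-time check that a user-space mirror of struct mmp_struct has the
 * intended on-disk layout: 1024 bytes, fields at fixed offsets. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mmp_disk {
        uint32_t mmp_magic;             /* EXT4_MMP_MAGIC, 0x004D4D50 ("MMP") */
        uint32_t mmp_seq;               /* sequence bumped by kmmpd */
        uint64_t mmp_time;              /* seconds since the epoch, LE on disk */
        char     mmp_nodename[64];      /* node that last touched the block */
        char     mmp_bdevname[32];      /* device name on that node */
        uint16_t mmp_check_interval;    /* seconds a checker should wait */
        uint16_t mmp_pad1;
        uint32_t mmp_pad2[227];         /* pad out to a full 1 KiB block */
};

_Static_assert(offsetof(struct mmp_disk, mmp_nodename) == 16, "nodename offset");
_Static_assert(offsetof(struct mmp_disk, mmp_check_interval) == 112, "interval offset");
_Static_assert(sizeof(struct mmp_disk) == 1024, "mmp block must be 1024 bytes");

int main(void)
{
        printf("mmp_struct mirror: %zu bytes\n", sizeof(struct mmp_disk));
        return 0;
}

If any field were added or resized, the asserts would fail at compile time instead of silently shifting the on-disk layout.
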
Index: linux-2.6.27.21-0.1/fs/ext4/ext4_sb.h
===================================================================
--- linux-2.6.27.21-0.1.orig/fs/ext4/ext4_sb.h  2009-07-07 14:36:00.000000000 +0530
+++ linux-2.6.27.21-0.1/fs/ext4/ext4_sb.h       2009-07-07 14:36:58.000000000 +0530
@@ -149,6 +149,8 @@
 
        unsigned int s_log_groups_per_flex;
        struct flex_groups *s_flex_groups;
+
+       struct task_struct *s_mmp_tsk;  /* Kernel thread for multiple mount protection */
 };
 
 #endif /* _EXT4_SB */
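
Taken together the three files form a heartbeat protocol: the superblock (ext4.h) points at the MMP block and advertises s_mmp_update_interval, ext4_sb.h carries the task pointer for the updater thread, and kmmpd in super.c rewrites the block roughly every update interval while any competing mounter must watch the sequence stand still for about twice mmp_check_interval before claiming the device. The loop below is a compressed user-space model of the kmmpd side only: the sequence wrap at EXT4_MMP_SEQ_MAX, the timestamp refresh, and the widening of the advertised check interval when a round runs slow. The heartbeat() function, struct mmp_state and the use of time()/sleep() are illustrative assumptions, not code from the patch.

/* User-space model of the kmmpd heartbeat: bump the sequence, stamp the time,
 * and advertise a check interval based on how long each round actually took. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define MMP_SEQ_MAX            0xE24D4D4FU
#define MMP_UPDATE_INTERVAL    1        /* seconds between heartbeats */
#define MMP_MIN_CHECK_INTERVAL 5        /* floor for the advertised interval */

struct mmp_state {
        uint32_t seq;
        uint64_t time;
        uint16_t check_interval;
};

static void heartbeat(struct mmp_state *m, int rounds)
{
        uint32_t seq = 0;

        while (rounds-- > 0) {
                time_t start = time(NULL);
                unsigned long scaled;

                if (++seq > MMP_SEQ_MAX)        /* wrap well below the CLEAN/FSCK codes */
                        seq = 1;
                m->seq = seq;
                m->time = (uint64_t)start;      /* kmmpd also rewrites the block here */

                sleep(MMP_UPDATE_INTERVAL);     /* stands in for the write + timeout */

                /* Widen the advertised interval if a round ran slow, as kmmpd does
                 * with max(5 * diff / HZ, EXT4_MMP_MIN_CHECK_INTERVAL). */
                scaled = 5 * (unsigned long)(time(NULL) - start);
                m->check_interval = (uint16_t)(scaled > MMP_MIN_CHECK_INTERVAL ?
                                               scaled : MMP_MIN_CHECK_INTERVAL);
                printf("seq=%u check_interval=%u\n",
                       (unsigned)m->seq, (unsigned)m->check_interval);
        }
}

int main(void)
{
        struct mmp_state m = { 0, 0, 0 };

        heartbeat(&m, 3);
        return 0;
}
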