fs/lustre-release.git: ldiskfs/kernel_patches/patches/ext4-mmp-sles11.patch
Index: linux-stage/fs/ext4/super.c
===================================================================
--- linux-stage.orig/fs/ext4/super.c
+++ linux-stage/fs/ext4/super.c
@@ -39,6 +39,8 @@
 #include <linux/log2.h>
 #include <linux/crc16.h>
 #include <asm/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/utsname.h>
 
 #include "ext4.h"
 #include "ext4_jbd2.h"
@@ -600,6 +602,8 @@ static void ext4_put_super(struct super_
                invalidate_bdev(sbi->journal_bdev);
                ext4_blkdev_remove(sbi);
        }
+       if (sbi->s_mmp_tsk)
+               kthread_stop(sbi->s_mmp_tsk);
        sb->s_fs_info = NULL;
        kfree(sbi);
        return;
@@ -808,7 +812,6 @@ static int ext4_show_options(struct seq_
        if (!test_opt(sb, DELALLOC))
                seq_puts(seq, ",nodelalloc");
 
-
        if (sbi->s_stripe)
                seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
        /*
@@ -831,6 +834,340 @@ static int ext4_show_options(struct seq_
 }
 
 
+
+/*
+ * Write the MMP block using WRITE_SYNC to try to get the block on-disk
+ * faster.
+ */
+static int write_mmp_block(struct buffer_head *bh)
+{
+       mark_buffer_dirty(bh);
+       lock_buffer(bh);
+       bh->b_end_io = end_buffer_write_sync;
+       get_bh(bh);
+       submit_bh(WRITE_SYNC, bh);
+       wait_on_buffer(bh);
+       if (unlikely(!buffer_uptodate(bh)))
+               return 1;
+
+       return 0;
+}
+
+/*
+ * Read the MMP block. It _must_ be read from disk and hence we clear the
+ * uptodate flag on the buffer.
+ */
+static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+                         unsigned long mmp_block)
+{
+       struct mmp_struct *mmp;
+
+       if (*bh)
+               clear_buffer_uptodate(*bh);
+
+       /* This would be sb_bread(sb, mmp_block), except we need to be sure
+        * that the MD RAID device cache has been bypassed, and that the read
+        * is not blocked in the elevator. */
+       if (!*bh)
+               *bh = sb_getblk(sb, mmp_block);
+       if (*bh) {
+               get_bh(*bh);
+               lock_buffer(*bh);
+               (*bh)->b_end_io = end_buffer_read_sync;
+               submit_bh(READ_SYNC, *bh);
+               wait_on_buffer(*bh);
+               if (!buffer_uptodate(*bh)) {
+                       brelse(*bh);
+                       *bh = NULL;
+               }
+       }
+       if (!*bh) {
+               ext4_warning(sb, __func__,
+                            "Error while reading MMP block %lu", mmp_block);
+               return -EIO;
+       }
+
+       mmp = (struct mmp_struct *)((*bh)->b_data);
+       if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * Dump as much information as possible to help the admin.
+ */
+static void dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
+                        const char *function, const char *msg)
+{
+       ext4_warning(sb, function, msg);
+       ext4_warning(sb, function, "MMP failure info: last update time: %llu, "
+                    "last update node: %s, last update device: %s\n",
+                    (long long unsigned int)le64_to_cpu(mmp->mmp_time),
+                    mmp->mmp_nodename, mmp->mmp_bdevname);
+}
+
+/*
+ * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
+ */
+static int kmmpd(void *data)
+{
+       struct super_block *sb = (struct super_block *) data;
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       struct buffer_head *bh = NULL;
+       struct mmp_struct *mmp;
+       unsigned long mmp_block;
+       u32 seq = 0;
+       unsigned long failed_writes = 0;
+       int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
+       unsigned mmp_check_interval;
+       unsigned long last_update_time;
+       unsigned long diff;
+       int retval;
+
+       mmp_block = le64_to_cpu(es->s_mmp_block);
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+
+       mmp = (struct mmp_struct *)(bh->b_data);
+       mmp->mmp_time = cpu_to_le64(get_seconds());
+       /*
+        * Start with the higher mmp_check_interval and reduce it if
+        * the MMP block is being updated on time.
+        */
+       mmp_check_interval = max(5 * mmp_update_interval,
+                                EXT4_MMP_MIN_CHECK_INTERVAL);
+       mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+       bdevname(bh->b_bdev, mmp->mmp_bdevname);
+
+       memcpy(mmp->mmp_nodename, init_utsname()->sysname,
+              sizeof(mmp->mmp_nodename));
+
+       while (!kthread_should_stop()) {
+               if (++seq > EXT4_MMP_SEQ_MAX)
+                       seq = 1;
+
+               mmp->mmp_seq = cpu_to_le32(seq);
+               mmp->mmp_time = cpu_to_le64(get_seconds());
+               last_update_time = jiffies;
+
+               retval = write_mmp_block(bh);
+               /*
+                * Don't spew too many error messages. Print one every
+                * (s_mmp_update_interval * 60) seconds.
+                */
+               if (retval && (failed_writes % 60) == 0) {
+                       ext4_error(sb, __func__,
+                                  "Error writing to MMP block");
+                       failed_writes++;
+               }
+
+               if (!(le32_to_cpu(es->s_feature_incompat) &
+                   EXT4_FEATURE_INCOMPAT_MMP)) {
+                       ext4_warning(sb, __func__, "kmmpd being stopped "
+                                    "since MMP feature has been disabled.");
+                       EXT4_SB(sb)->s_mmp_tsk = 0;
+                       goto failed;
+               }
+
+               if (sb->s_flags & MS_RDONLY) {
+                       ext4_warning(sb, __func__, "kmmpd being stopped "
+                                    "since filesystem has been remounted as "
+                                    "readonly.");
+                       EXT4_SB(sb)->s_mmp_tsk = 0;
+                       goto failed;
+               }
+
+               diff = jiffies - last_update_time;
+               if (diff < mmp_update_interval * HZ)
+                       schedule_timeout_interruptible(EXT4_MMP_UPDATE_INTERVAL*
+                                                      HZ - diff);
+
+               /*
+                * We need to make sure that more than mmp_check_interval
+                * seconds have not passed since writing. If that has happened
+                * we need to check if the MMP block is as we left it.
+                */
+               diff = jiffies - last_update_time;
+               if (diff > mmp_check_interval * HZ) {
+                       struct buffer_head *bh_check = NULL;
+                       struct mmp_struct *mmp_check;
+
+                       retval = read_mmp_block(sb, &bh_check, mmp_block);
+                       if (retval) {
+                               EXT4_SB(sb)->s_mmp_tsk = 0;
+                               goto failed;
+                       }
+
+                       mmp_check = (struct mmp_struct *)(bh_check->b_data);
+                       if (mmp->mmp_time != mmp_check->mmp_time ||
+                           memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
+                                  sizeof(mmp->mmp_nodename)))
+                               dump_mmp_msg(sb, mmp_check, __func__,
+                                            "Error while updating MMP info. "
+                                            "The filesystem seems to have "
+                                            "been multiply mounted.");
+
+                       put_bh(bh_check);
+               }
+
+               /*
+                * Adjust the mmp_check_interval depending on how much time
+                * it took for the MMP block to be written.
+                */
+               mmp_check_interval = max(5 * diff / HZ,
+                                (unsigned long) EXT4_MMP_MIN_CHECK_INTERVAL);
+               mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+       }
+
+       /*
+        * Unmount seems to be clean.
+        */
+       mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
+       mmp->mmp_time = cpu_to_le64(get_seconds());
+
+       retval = write_mmp_block(bh);
+
+failed:
+       brelse(bh);
+       return retval;
+}
+
+/*
+ * Get a random new sequence number but make sure it is not greater than
+ * EXT4_MMP_SEQ_MAX.
+ */
+static unsigned int mmp_new_seq(void)
+{
+       u32 new_seq;
+
+       do {
+               get_random_bytes(&new_seq, sizeof(u32));
+       } while (new_seq > EXT4_MMP_SEQ_MAX);
+
+       return new_seq;
+}
+
+/*
+ * Protect the filesystem from being mounted more than once.
+ */
+static int ext4_multi_mount_protect(struct super_block *sb,
+                                   unsigned long mmp_block)
+{
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       struct buffer_head *bh = NULL;
+       struct mmp_struct *mmp = NULL;
+       u32 seq;
+       unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
+       unsigned int wait_time = 0;
+       int retval;
+
+       if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
+           mmp_block >= ext4_blocks_count(es)) {
+               ext4_warning(sb, __func__,
+                            "Invalid MMP block in superblock");
+               goto failed;
+       }
+
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+
+       mmp = (struct mmp_struct *)(bh->b_data);
+
+       if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
+               mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;
+
+       /*
+        * If check_interval in MMP block is larger, use that instead of
+        * update_interval from the superblock.
+        */
+       if (mmp->mmp_check_interval > mmp_check_interval)
+               mmp_check_interval = mmp->mmp_check_interval;
+
+       seq = le32_to_cpu(mmp->mmp_seq);
+       if (seq == EXT4_MMP_SEQ_CLEAN)
+               goto skip;
+
+       if (seq == EXT4_MMP_SEQ_FSCK) {
+               dump_mmp_msg(sb, mmp, __func__,
+                            "fsck is running on the filesystem");
+               goto failed;
+       }
+
+       wait_time = min(mmp_check_interval * 2 + 1,
+                       mmp_check_interval + 60);
+
+       /* Print MMP interval if more than 20 secs. */
+       if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4)
+               ext4_warning(sb, __func__, "MMP interval %u higher than "
+                            "expected, please wait.\n", wait_time * 2);
+
+       if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
+               ext4_warning(sb, __func__, "MMP startup interrupted, failing mount\n");
+               goto failed;
+       }
+
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+       mmp = (struct mmp_struct *)(bh->b_data);
+       if (seq != le32_to_cpu(mmp->mmp_seq)) {
+               dump_mmp_msg(sb, mmp, __func__,
+                            "Device is already active on another node.");
+               goto failed;
+       }
+
+skip:
+       /*
+        * write a new random sequence number.
+        */
+       mmp->mmp_seq = seq = cpu_to_le32(mmp_new_seq());
+
+       retval = write_mmp_block(bh);
+       if (retval)
+               goto failed;
+
+       /*
+        * wait for MMP interval and check mmp_seq.
+        */
+       if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
+               ext4_warning(sb, __func__, "MMP startup interrupted, failing mount\n");
+               goto failed;
+       }
+
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+       mmp = (struct mmp_struct *)(bh->b_data);
+       if (seq != le32_to_cpu(mmp->mmp_seq)) {
+               dump_mmp_msg(sb, mmp, __func__,
+                            "Device is already active on another node.");
+               goto failed;
+       }
+
+       /*
+        * Start a kernel thread to update the MMP block periodically.
+        */
+       EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, sb, "kmmpd-%02x:%02x",
+                                            MAJOR(sb->s_dev),
+                                            MINOR(sb->s_dev));
+       if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
+               EXT4_SB(sb)->s_mmp_tsk = 0;
+               ext4_warning(sb, __func__, "Unable to create kmmpd thread "
+                            "for %s.", sb->s_id);
+               goto failed;
+       }
+
+       brelse(bh);
+       return 0;
+
+failed:
+       brelse(bh);
+       return 1;
+}
+
 static struct inode *ext4_nfs_get_inode(struct super_block *sb,
                u64 ino, u32 generation)
 {
@@ -2371,6 +2708,11 @@ static int ext4_fill_super(struct super_
                          EXT4_HAS_INCOMPAT_FEATURE(sb,
                                    EXT4_FEATURE_INCOMPAT_RECOVER));
 
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
+           !(sb->s_flags & MS_RDONLY))
+               if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
+                       goto failed_mount3;
+
        /*
         * The first inode we look at is the journal inode.  Don't try
         * root first: it may be modified in the journal!
@@ -2571,6 +2913,8 @@ failed_mount3:
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
+       if (sbi->s_mmp_tsk)
+               kthread_stop(sbi->s_mmp_tsk);
 failed_mount2:
        for (i = 0; i < db_count; i++)
                brelse(sbi->s_group_desc[i]);
@@ -3086,7 +3430,7 @@ static int ext4_remount(struct super_blo
        unsigned long old_sb_flags;
        struct ext4_mount_options old_opts;
        ext4_group_t g;
-       int err;
+       int err = 0;
 #ifdef CONFIG_QUOTA
        int i;
 #endif
@@ -3211,6 +3555,13 @@ static int ext4_remount(struct super_blo
                                goto restore_opts;
                        if (!ext4_setup_super(sb, es, 0))
                                sb->s_flags &= ~MS_RDONLY;
+                       if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+                                                   EXT4_FEATURE_INCOMPAT_MMP))
+                               if (ext4_multi_mount_protect(sb,
+                                               le64_to_cpu(es->s_mmp_block))) {
+                                       err = -EROFS;
+                                       goto restore_opts;
+                               }
                }
        }
 #ifdef CONFIG_QUOTA
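
The mount-time handshake implemented by ext4_multi_mount_protect() above reduces to: read mmp_seq, wait slightly longer than the check interval, re-read, and refuse the mount if the sequence has moved, since a moving sequence means another node's kmmpd is updating the block. Below is a minimal userspace sketch of that handshake for illustration only; it is not part of this patch or of any existing tool, and the device path, block size and MMP block number are assumed command-line inputs.

/*
 * Hypothetical userspace sketch of the MMP mount handshake: read mmp_seq,
 * sleep past the check interval, re-read, and treat a changed sequence as
 * "device in use elsewhere".  Field layout mirrors struct mmp_struct.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <endian.h>

#define MMP_MAGIC     0x004D4D50U   /* EXT4_MMP_MAGIC */
#define MMP_SEQ_CLEAN 0xFF4D4D50U   /* EXT4_MMP_SEQ_CLEAN */
#define MMP_SEQ_FSCK  0xE24D4D50U   /* EXT4_MMP_SEQ_FSCK */

struct mmp_block {
	uint32_t magic;
	uint32_t seq;
	uint64_t time;
	char     nodename[64];
	char     bdevname[32];
	uint16_t check_interval;
	uint16_t pad1;
	uint32_t pad2[227];
};

static int read_mmp(int fd, off_t off, struct mmp_block *m)
{
	if (pread(fd, m, sizeof(*m), off) != (ssize_t)sizeof(*m))
		return -1;
	return le32toh(m->magic) == MMP_MAGIC ? 0 : -1;
}

int main(int argc, char **argv)
{
	struct mmp_block m;
	unsigned long blocksize, block;
	uint32_t seq1, seq2;
	off_t off;
	int fd;

	if (argc != 4) {
		fprintf(stderr, "usage: %s <device> <blocksize> <mmp_block>\n",
			argv[0]);
		return 1;
	}
	blocksize = strtoul(argv[2], NULL, 0);
	block = strtoul(argv[3], NULL, 0);
	off = (off_t)blocksize * block;

	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || read_mmp(fd, off, &m) < 0) {
		fprintf(stderr, "cannot read a valid MMP block\n");
		return 1;
	}

	seq1 = le32toh(m.seq);
	if (seq1 == MMP_SEQ_CLEAN) {
		printf("clean: filesystem was unmounted cleanly\n");
		return 0;
	}
	if (seq1 == MMP_SEQ_FSCK) {
		printf("busy: fsck marker set by node %.64s\n", m.nodename);
		return 1;
	}

	/* Same idea as the kernel code: wait a bit more than one check
	 * interval, then see whether anyone bumped the sequence. */
	sleep(2u * le16toh(m.check_interval) + 1);

	if (read_mmp(fd, off, &m) < 0)
		return 1;
	seq2 = le32toh(m.seq);
	if (seq1 == seq2)
		printf("idle: mmp_seq unchanged (%u)\n", seq1);
	else
		printf("active: mmp_seq changed (%u -> %u), in use on %.64s\n",
		       seq1, seq2, m.nodename);
	return seq1 == seq2 ? 0 : 1;
}

In the patch itself the check is performed twice, once against the sequence found on disk and once more after a fresh random sequence from mmp_new_seq() has been written; the sketch keeps only the first round for brevity.
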
Index: linux-stage/fs/ext4/ext4.h
===================================================================
--- linux-stage.orig/fs/ext4/ext4.h
+++ linux-stage/fs/ext4/ext4.h
@@ -660,7 +660,7 @@ struct ext4_super_block {
        __le16  s_want_extra_isize;     /* New inodes should reserve # bytes */
        __le32  s_flags;                /* Miscellaneous flags */
        __le16  s_raid_stride;          /* RAID stride */
-       __le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
+       __le16  s_mmp_update_interval;  /* # seconds to wait in MMP checking */
        __le64  s_mmp_block;            /* Block for multi-mount protection */
        __le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
        __u8    s_log_groups_per_flex;  /* FLEX_BG group size */
@@ -777,7 +777,8 @@ static inline int ext4_valid_inum(struct
                                         EXT4_FEATURE_INCOMPAT_META_BG| \
                                         EXT4_FEATURE_INCOMPAT_EXTENTS| \
                                         EXT4_FEATURE_INCOMPAT_64BIT| \
-                                        EXT4_FEATURE_INCOMPAT_FLEX_BG)
+                                        EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+                                        EXT4_FEATURE_INCOMPAT_MMP)
 #define EXT4_FEATURE_RO_COMPAT_SUPP    (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
                                         EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -981,6 +982,39 @@ do {                                                                       \
 #endif
 
 /*
+ * This structure will be used for multiple mount protection. It will be
+ * written into the block number saved in the s_mmp_block field in the
+ * superblock. Programs that check MMP should assume that if
+ * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe
+ * to use the filesystem, regardless of how old the timestamp is.
+ */
+#define EXT4_MMP_MAGIC     0x004D4D50U /* ASCII for MMP */
+#define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */
+#define EXT4_MMP_SEQ_FSCK  0xE24D4D50U /* mmp_seq value when being fscked */
+#define EXT4_MMP_SEQ_MAX   0xE24D4D4FU /* maximum valid mmp_seq value */
+
+struct mmp_struct {
+       __le32  mmp_magic;
+       __le32  mmp_seq;
+       __le64  mmp_time;
+       char    mmp_nodename[64];
+       char    mmp_bdevname[32];
+       __le16  mmp_check_interval;
+       __le16  mmp_pad1;
+       __le32  mmp_pad2[227];
+};
+
+/*
+ * Default interval in seconds to update the MMP sequence number.
+ */
+#define EXT4_MMP_UPDATE_INTERVAL   1
+
+/*
+ * Minimum interval for MMP checking in seconds.
+ */
+#define EXT4_MMP_MIN_CHECK_INTERVAL    5
+
+/*
+ * Function prototypes
+ */
 
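
The field widths of struct mmp_struct above are chosen so the structure fills exactly 1 KiB (4 + 4 + 8 + 64 + 32 + 2 + 2 + 227*4 = 1024 bytes) and therefore fits even the smallest ext4 block size. A minimal compile-time check of that arithmetic, sketched with fixed-width userspace types standing in for the kernel's __le* types (an illustration, not part of the patch):

#include <stdint.h>

struct mmp_layout {
	uint32_t mmp_magic;             /*    4 bytes */
	uint32_t mmp_seq;               /*    4 bytes */
	uint64_t mmp_time;              /*    8 bytes */
	char     mmp_nodename[64];      /*   64 bytes */
	char     mmp_bdevname[32];      /*   32 bytes */
	uint16_t mmp_check_interval;    /*    2 bytes */
	uint16_t mmp_pad1;              /*    2 bytes */
	uint32_t mmp_pad2[227];         /*  908 bytes */
};                                      /* 1024 bytes total, no padding */

/* Fails to compile if the layout ever stops filling exactly one 1 KiB block. */
typedef char mmp_layout_is_1024[sizeof(struct mmp_layout) == 1024 ? 1 : -1];
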
Index: linux-stage/fs/ext4/ext4_sb.h
===================================================================
--- linux-stage.orig/fs/ext4/ext4_sb.h
+++ linux-stage/fs/ext4/ext4_sb.h
@@ -149,6 +149,8 @@ struct ext4_sb_info {
 
        unsigned int s_log_groups_per_flex;
        struct flex_groups *s_flex_groups;
+
+       struct task_struct *s_mmp_tsk;  /* Kernel thread for multiple mount protection */
 };
 
 #endif /* _EXT4_SB */
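
The new s_mmp_tsk pointer ties the pieces together: kthread_run() in ext4_multi_mount_protect() starts kmmpd, the thread loops until kthread_should_stop(), and ext4_put_super() or the failed_mount3 path calls kthread_stop(). The stand-alone module below is a hypothetical sketch of that lifecycle under assumed names (demo_thread, demo_tsk, "kmmpd-demo"); it is not code from the patch.

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *demo_tsk;	/* plays the role of s_mmp_tsk */

static int demo_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* periodic work goes here, like kmmpd's MMP block update */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* returned to whoever calls kthread_stop() */
}

static int __init demo_init(void)
{
	demo_tsk = kthread_run(demo_thread, NULL, "kmmpd-demo");
	if (IS_ERR(demo_tsk)) {
		int err = PTR_ERR(demo_tsk);

		demo_tsk = NULL;	/* mirrors s_mmp_tsk being cleared on failure */
		return err;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	if (demo_tsk)
		kthread_stop(demo_tsk);	/* mirrors ext4_put_super() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
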