1 Set the underlying block device "read only" and silently discard writes
2 to the device at the block layer. This allows the block device queue
3 to drain quickly for controlled failback of the device.
5 At one time this patch was required to avoid crashes in the JBD layer during
6 failover, but it may also be possible to just allow the inflight IO to
7 complete and have Lustre handle this more gracefully.
9 Index: linux-2.6.22.5/block/ll_rw_blk.c
10 ===================================================================
11 --- linux-2.6.22.5.orig/block/ll_rw_blk.c
12 +++ linux-2.6.22.5/block/ll_rw_blk.c
13 @@ -3101,6 +3101,8 @@ static inline int should_fail_request(st
15 #endif /* CONFIG_FAIL_MAKE_REQUEST */
17 +int dev_check_rdonly(struct block_device *bdev);
20 * generic_make_request: hand a buffer to its device driver for I/O
21 * @bio: The bio describing the location in memory and on the device.
22 @@ -3185,6 +3187,12 @@ end_io:
24 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
26 + /* this is cfs's dev_rdonly check */
27 + if (bio->bi_rw == WRITE &&
28 + dev_check_rdonly(bio->bi_bdev)) {
29 + bio_endio(bio, bio->bi_size, 0);
33 if (should_fail_request(bio))
35 @@ -3850,6 +3858,91 @@ void swap_io_context(struct io_context *
38 EXPORT_SYMBOL(swap_io_context);
40 + * Debug code for turning block devices "read-only" (will discard writes
41 + * silently). This is for filesystem crash/recovery testing.
45 + struct deventry *next;
48 +static struct deventry *devlist = NULL;
49 +static spinlock_t devlock = SPIN_LOCK_UNLOCKED;
51 +int dev_check_rdonly(struct block_device *bdev)
53 + struct deventry *cur;
54 + if (!bdev) return 0;
55 + spin_lock(&devlock);
58 + if (bdev->bd_dev == cur->dev) {
59 + spin_unlock(&devlock);
64 + spin_unlock(&devlock);
68 +void dev_set_rdonly(struct block_device *bdev)
70 + struct deventry *newdev, *cur;
74 + newdev = kmalloc(sizeof(struct deventry), GFP_KERNEL);
78 + spin_lock(&devlock);
81 + if (bdev->bd_dev == cur->dev) {
82 + spin_unlock(&devlock);
88 + newdev->dev = bdev->bd_dev;
89 + newdev->next = devlist;
91 + spin_unlock(&devlock);
92 + printk(KERN_WARNING "Turning device %s (%#x) read-only\n",
93 + bdev->bd_disk ? bdev->bd_disk->disk_name : "", bdev->bd_dev);
96 +void dev_clear_rdonly(struct block_device *bdev)
98 + struct deventry *cur, *last = NULL;
100 + spin_lock(&devlock);
103 + if (bdev->bd_dev == cur->dev) {
105 + last->next = cur->next;
107 + devlist = cur->next;
108 + spin_unlock(&devlock);
110 + printk(KERN_WARNING "Removing read-only on %s (%#x)\n",
111 + bdev->bd_disk ? bdev->bd_disk->disk_name :
112 + "unknown block", bdev->bd_dev);
118 + spin_unlock(&devlock);
121 +EXPORT_SYMBOL(dev_set_rdonly);
122 +EXPORT_SYMBOL(dev_clear_rdonly);
123 +EXPORT_SYMBOL(dev_check_rdonly);
127 Index: linux-2.6.22.5/fs/block_dev.c
128 ===================================================================
129 --- linux-2.6.22.5.orig/fs/block_dev.c
130 +++ linux-2.6.22.5/fs/block_dev.c
131 @@ -1294,6 +1294,7 @@ static int __blkdev_put(struct block_dev
132 if (bdev != bdev->bd_contains)
133 victim = bdev->bd_contains;
134 bdev->bd_contains = NULL;
135 + dev_clear_rdonly(bdev);
138 mutex_unlock(&bdev->bd_mutex);
139 Index: linux-2.6.22.5/include/linux/fs.h
140 ===================================================================
141 --- linux-2.6.22.5.orig/include/linux/fs.h
142 +++ linux-2.6.22.5/include/linux/fs.h
143 @@ -1744,6 +1744,10 @@ struct bio;
144 extern void submit_bio(int, struct bio *);
145 extern int bdev_read_only(struct block_device *);
147 +#define HAVE_CLEAR_RDONLY_ON_PUT
148 +extern void dev_set_rdonly(struct block_device *bdev);
149 +extern int dev_check_rdonly(struct block_device *bdev);
150 +extern void dev_clear_rdonly(struct block_device *bdev);
151 extern int set_blocksize(struct block_device *, int);
152 extern int sb_set_blocksize(struct super_block *, int);
153 extern int sb_min_blocksize(struct super_block *, int);