Whamcloud - gitweb
de7804d9d729a97c9f26c7a3856cc4a5929a93c6
[fs/lustre-release.git] / lustre / kernel_patches / patches / dev_read_only-2.6.27-vanilla.patch
1 Index: linux-2.6.27.21-0.1/block/blk-core.c
2 ===================================================================
3 --- linux-2.6.27.21-0.1.orig/block/blk-core.c   2009-04-23 02:12:51.000000000 -0600
4 +++ linux-2.6.27.21-0.1/block/blk-core.c        2009-05-22 08:38:02.000000000 -0600
5 @@ -1335,6 +1335,8 @@
6  
7  #endif /* CONFIG_FAIL_MAKE_REQUEST */
8  
9 +int dev_check_rdonly(struct block_device *bdev); /* forward decl; defined later in this file and declared in linux/fs.h by this patch */
10 +
11  /*
12   * Check whether this bio extends beyond the end of the device.
13   */
14 @@ -1436,6 +1438,12 @@
15  
16                 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
17                         goto end_io;
18
19 +               /* cfs dev_rdonly check: silently discard writes to a device
20 +                  forced read-only by dev_set_rdonly() (crash testing) */
21 +               if (bio_rw(bio) == WRITE && dev_check_rdonly(bio->bi_bdev)) {
22 +                       bio_endio(bio, 0); /* 2.6.24+ signature: bio_endio(bio, error) */
23 +                       break;
24 +               }
25                 if (should_fail_request(bio))
26                         goto end_io;
27 @@ -2189,6 +2197,91 @@
28  }
29  EXPORT_SYMBOL(kblockd_flush_work);
30  
31 +/*
32 + * Debug code for turning block devices "read-only" (will discard writes
33 + * silently).  This is for filesystem crash/recovery testing.
34 + */
35 +struct deventry {
36 +       dev_t dev;              /* device number forced read-only */
37 +       struct deventry *next;  /* singly-linked list, head is devlist */
38 +};
39 +
40 +static struct deventry *devlist = NULL;
41 +static DEFINE_SPINLOCK(devlock); /* guards devlist; SPIN_LOCK_UNLOCKED is deprecated */
42 +
43 +int dev_check_rdonly(struct block_device *bdev) /* 1 if bdev was marked by dev_set_rdonly() */
44 +{
45 +       struct deventry *cur;
46 +       if (!bdev) return 0;
47 +       spin_lock(&devlock);
48 +       cur = devlist;
49 +       while (cur) {
50 +               if (bdev->bd_dev == cur->dev) {
51 +                       spin_unlock(&devlock);
52 +                       return 1;
53 +               }
54 +               cur = cur->next;
55 +       }
56 +       spin_unlock(&devlock);
57 +       return 0;
58 +}
59 +
60 +void dev_set_rdonly(struct block_device *bdev)
61 +{
62 +       struct deventry *newdev, *cur;
63 +
64 +       if (!bdev)
65 +               return;
66 +       newdev = kmalloc(sizeof(*newdev), GFP_KERNEL); /* may sleep; allocated outside devlock */
67 +       if (!newdev)
68 +               return;
69 +
70 +       spin_lock(&devlock);
71 +       cur = devlist;
72 +       while (cur) {
73 +               if (bdev->bd_dev == cur->dev) {
74 +                       spin_unlock(&devlock);
75 +                       kfree(newdev); /* already marked read-only */
76 +                       return;
77 +               }
78 +               cur = cur->next;
79 +       }
80 +       newdev->dev = bdev->bd_dev;
81 +       newdev->next = devlist;
82 +       devlist = newdev;
83 +       spin_unlock(&devlock);
84 +       printk(KERN_WARNING "Turning device %s (%#x) read-only\n",
85 +              bdev->bd_disk ? bdev->bd_disk->disk_name : "unknown block", bdev->bd_dev);
86 +}
87 +
88 +void dev_clear_rdonly(struct block_device *bdev)
89 +{
90 +       struct deventry *cur, *last = NULL;
91 +       if (!bdev) return;
92 +       spin_lock(&devlock);
93 +       cur = devlist;
94 +       while (cur) {
95 +               if (bdev->bd_dev == cur->dev) {
96 +                       if (last)
97 +                               last->next = cur->next;
98 +                       else
99 +                               devlist = cur->next;
100 +                       spin_unlock(&devlock);
101 +                       kfree(cur);
102 +                       printk(KERN_WARNING "Removing read-only on %s (%#x)\n",
103 +                              bdev->bd_disk ? bdev->bd_disk->disk_name :
104 +                                              "unknown block", bdev->bd_dev);
105 +                       return;
106 +               }
107 +               last = cur;
108 +               cur = cur->next;
109 +       }
110 +       spin_unlock(&devlock);
111 +}
112 +
113 +EXPORT_SYMBOL(dev_set_rdonly);
114 +EXPORT_SYMBOL(dev_clear_rdonly);
115 +EXPORT_SYMBOL(dev_check_rdonly);
116  int __init blk_dev_init(void)
117  {
118         kblockd_workqueue = create_workqueue("kblockd");
119 Index: linux-2.6.27.21-0.1/fs/block_dev.c
120 ===================================================================
121 --- linux-2.6.27.21-0.1.orig/fs/block_dev.c     2009-04-23 02:12:56.000000000 -0600
122 +++ linux-2.6.27.21-0.1/fs/block_dev.c  2009-05-22 08:38:02.000000000 -0600
123 @@ -1208,6 +1208,7 @@
124                 if (bdev != bdev->bd_contains)
125                         victim = bdev->bd_contains;
126                 bdev->bd_contains = NULL;
127 +               dev_clear_rdonly(bdev); /* cfs: drop forced read-only state when the bdev is released */
128         }
129         unlock_kernel();
130         mutex_unlock(&bdev->bd_mutex);
131 Index: linux-2.6.27.21-0.1/include/linux/fs.h
132 ===================================================================
133 --- linux-2.6.27.21-0.1.orig/include/linux/fs.h 2009-05-22 08:38:00.000000000 -0600
134 +++ linux-2.6.27.21-0.1/include/linux/fs.h      2009-05-22 08:38:02.000000000 -0600
135 @@ -1898,6 +1898,10 @@
136  extern void submit_bio(int, struct bio *);
137  extern int bdev_read_only(struct block_device *);
138  #endif
139 +#define HAVE_CLEAR_RDONLY_ON_PUT /* lets Lustre detect that dev_clear_rdonly() is hooked into blkdev put */
140 +extern void dev_set_rdonly(struct block_device *bdev);
141 +extern int dev_check_rdonly(struct block_device *bdev);
142 +extern void dev_clear_rdonly(struct block_device *bdev);
143 extern int set_blocksize(struct block_device *, int);
144 extern int sb_set_blocksize(struct super_block *, int);
145 extern int sb_min_blocksize(struct super_block *, int);