lustre/kernel_patches/patches/dev_read_only-2.6-suse.patch (b=3244)
diff -ur linux-2.6.5-lnxi.orig/drivers/block/ll_rw_blk.c linux-2.6.5-lnxi/drivers/block/ll_rw_blk.c
--- linux-2.6.5-lnxi.orig/drivers/block/ll_rw_blk.c     2005-04-11 10:16:14.278505679 -0700
+++ linux-2.6.5-lnxi/drivers/block/ll_rw_blk.c  2005-04-11 09:42:22.750936924 -0700
@@ -2458,6 +2458,8 @@ static inline void blk_partition_remap(s
        }
 }
 
+int dev_check_rdonly(struct block_device *bdev);
+
 /**
  * generic_make_request: hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
@@ -2546,6 +2548,13 @@ end_io:
                if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))
                        goto end_io;
 
+               /* CFS dev_rdonly check: discard writes to read-only devices */
+               if (bio->bi_rw == WRITE &&
+                               dev_check_rdonly(bio->bi_bdev)) {
+                       bio_endio(bio, bio->bi_size, 0);
+                       break;
+               }
+
                /*
                 * If this device has partitions, remap block n
                 * of partition p to block n+start(p) of the disk.
@@ -3078,6 +3087,92 @@ void swap_io_context(struct io_context *
 }
 
 /*
+ * Debug code for turning block devices "read-only" (will discard writes
+ * silently).  This is for filesystem crash/recovery testing.
+ */
+struct deventry {
+       dev_t dev;
+       struct deventry *next;
+};
+
+static struct deventry *devlist = NULL;
+static spinlock_t devlock = SPIN_LOCK_UNLOCKED;
+
+int dev_check_rdonly(struct block_device *bdev)
+{
+       struct deventry *cur;
+       if (!bdev) return 0;
+       spin_lock(&devlock);
+       cur = devlist;
+       while (cur) {
+               if (bdev->bd_dev == cur->dev) {
+                       spin_unlock(&devlock);
+                       return 1;
+               }
+               cur = cur->next;
+       }
+       spin_unlock(&devlock);
+       return 0;
+}
+
+void dev_set_rdonly(struct block_device *bdev)
+{
+       struct deventry *newdev, *cur;
+
+       if (!bdev)
+               return;
+       newdev = kmalloc(sizeof(struct deventry), GFP_KERNEL);
+       if (!newdev)
+               return;
+
+       spin_lock(&devlock);
+       cur = devlist;
+       while (cur) {
+               if (bdev->bd_dev == cur->dev) {
+                       spin_unlock(&devlock);
+                       kfree(newdev);
+                       return;
+               }
+               cur = cur->next;
+       }
+       newdev->dev = bdev->bd_dev;
+       newdev->next = devlist;
+       devlist = newdev;
+       spin_unlock(&devlock);
+       printk(KERN_WARNING "Turning device %s (%#x) read-only\n",
+              bdev->bd_disk ? bdev->bd_disk->disk_name : "", bdev->bd_dev);
+}
+
+void dev_clear_rdonly(struct block_device *bdev)
+{
+       struct deventry *cur, *last = NULL;
+       if (!bdev) return;
+       spin_lock(&devlock);
+       cur = devlist;
+       while (cur) {
+               if (bdev->bd_dev == cur->dev) {
+                       if (last)
+                               last->next = cur->next;
+                       else
+                               devlist = cur->next;
+                       spin_unlock(&devlock);
+                       kfree(cur);
+                       printk(KERN_WARNING "Removing read-only on %s (%#x)\n",
+                              bdev->bd_disk ? bdev->bd_disk->disk_name :
+                                              "unknown block", bdev->bd_dev);
+                       return;
+               }
+               last = cur;
+               cur = cur->next;
+       }
+       spin_unlock(&devlock);
+}
+
+EXPORT_SYMBOL(dev_set_rdonly);
+EXPORT_SYMBOL(dev_clear_rdonly);
+EXPORT_SYMBOL(dev_check_rdonly);
+
+/*
  * sysfs parts below
  */
 struct queue_sysfs_entry {
diff -ur linux-2.6.5-lnxi.orig/fs/block_dev.c linux-2.6.5-lnxi/fs/block_dev.c
--- linux-2.6.5-lnxi.orig/fs/block_dev.c        2004-11-11 07:28:30.000000000 -0800
+++ linux-2.6.5-lnxi/fs/block_dev.c     2005-04-11 09:49:01.891407856 -0700
@@ -739,6 +739,7 @@ int blkdev_put(struct block_device *bdev
        }
        unlock_kernel();
        up(&bdev->bd_sem);
+       dev_clear_rdonly(bdev);
        bdput(bdev);
        return ret;
 }
diff -ur linux-2.6.5-lnxi.orig/include/linux/fs.h linux-2.6.5-lnxi/include/linux/fs.h
--- linux-2.6.5-lnxi.orig/include/linux/fs.h    2004-11-11 07:28:45.000000000 -0800
+++ linux-2.6.5-lnxi/include/linux/fs.h 2005-04-11 09:43:27.423116140 -0700
@@ -1385,6 +1385,10 @@ extern void file_kill(struct file *f);
 struct bio;
 extern int submit_bio(int, struct bio *);
 extern int bdev_read_only(struct block_device *);
+#define HAVE_CLEAR_RDONLY_ON_PUT
+void dev_set_rdonly(struct block_device *bdev);
+int dev_check_rdonly(struct block_device *bdev);
+void dev_clear_rdonly(struct block_device *bdev);
 extern int set_blocksize(struct block_device *, int);
 extern int sb_set_blocksize(struct super_block *, int);
 extern int sb_min_blocksize(struct super_block *, int);
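
For illustration (not part of the patch): the three symbols exported from ll_rw_blk.c form a small interface that a filesystem test harness could drive to simulate a crashed disk. The sketch below assumes the caller already reaches the device through its superblock; the helper name fsfilt_simulate_crash() is hypothetical. The HAVE_CLEAR_RDONLY_ON_PUT macro that the patch adds to fs.h can be tested at compile time to detect a patched kernel, as done here.

/*
 * Hypothetical caller of the interface added by this patch.  Marks the
 * device read-only so that WRITE bios submitted through
 * generic_make_request() complete without ever reaching the media.
 */
#include <linux/fs.h>
#include <linux/kernel.h>

#ifdef HAVE_CLEAR_RDONLY_ON_PUT                /* defined by this patch in fs.h */
static void fsfilt_simulate_crash(struct super_block *sb)
{
        struct block_device *bdev = sb->s_bdev;

        dev_set_rdonly(bdev);                  /* start discarding writes */

        if (dev_check_rdonly(bdev))
                printk(KERN_INFO "%s: discarding writes to simulate a crash\n",
                       sb->s_id);

        /*
         * The flag is dropped either explicitly with dev_clear_rdonly(bdev)
         * or automatically when the device is finally released, because the
         * patch hooks blkdev_put().
         */
}
#endif /* HAVE_CLEAR_RDONLY_ON_PUT */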