lustre/kernel_patches/patches/dev_read_only-2.6-fc5.patch

diff -rup linux-2.6.16.i686.orig/block/ll_rw_blk.c linux-2.6.16.i686/block/ll_rw_blk.c
--- linux-2.6.16.i686.orig/block/ll_rw_blk.c    2007-05-29 15:24:36.000000000 +0300
+++ linux-2.6.16.i686/block/ll_rw_blk.c 2007-05-29 15:33:50.000000000 +0300
@@ -2940,6 +2940,8 @@ static void handle_bad_sector(struct bio
        set_bit(BIO_EOF, &bio->bi_flags);
 }
 
+int dev_check_rdonly(struct block_device *bdev);
+
 /**
  * generic_make_request: hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
@@ -3020,6 +3022,12 @@ end_io:
 
                if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
                        goto end_io;
+               /* this is cfs's dev_rdonly check */
+               if (bio->bi_rw == WRITE &&
+                   dev_check_rdonly(bio->bi_bdev)) {
+                       bio_endio(bio, bio->bi_size, 0);
+                       break;
+               }
 
                /*
                 * If this device has partitions, remap block n
@@ -3593,6 +3601,91 @@ void swap_io_context(struct io_context *
        *ioc2 = temp;
 }
 EXPORT_SYMBOL(swap_io_context);
+/*
+ * Debug code for turning block devices "read-only" (will discard writes
+ * silently).  This is for filesystem crash/recovery testing.
+ */
+struct deventry {
+       dev_t dev;
+       struct deventry *next;
+};
+
+static struct deventry *devlist = NULL;
+static spinlock_t devlock = SPIN_LOCK_UNLOCKED;
+
+int dev_check_rdonly(struct block_device *bdev)
+{
+       struct deventry *cur;
+       if (!bdev) return 0;
+       spin_lock(&devlock);
+       cur = devlist;
+       while (cur) {
+               if (bdev->bd_dev == cur->dev) {
+                       spin_unlock(&devlock);
+                       return 1;
+               }
+               cur = cur->next;
+       }
+       spin_unlock(&devlock);
+       return 0;
+}
+
+void dev_set_rdonly(struct block_device *bdev)
+{
+       struct deventry *newdev, *cur;
+
+       if (!bdev)
+               return;
+       newdev = kmalloc(sizeof(struct deventry), GFP_KERNEL);
+       if (!newdev)
+               return;
+
+       spin_lock(&devlock);
+       cur = devlist;
+       while (cur) {
+               if (bdev->bd_dev == cur->dev) {
+                       spin_unlock(&devlock);
+                       kfree(newdev);
+                       return;
+               }
+               cur = cur->next;
+       }
+       newdev->dev = bdev->bd_dev;
+       newdev->next = devlist;
+       devlist = newdev;
+       spin_unlock(&devlock);
+       printk(KERN_WARNING "Turning device %s (%#x) read-only\n",
+              bdev->bd_disk ? bdev->bd_disk->disk_name : "", bdev->bd_dev);
+}
+
+void dev_clear_rdonly(struct block_device *bdev)
+{
+       struct deventry *cur, *last = NULL;
+       if (!bdev) return;
+       spin_lock(&devlock);
+       cur = devlist;
+       while (cur) {
+               if (bdev->bd_dev == cur->dev) {
+                       if (last)
+                               last->next = cur->next;
+                       else
+                               devlist = cur->next;
+                       spin_unlock(&devlock);
+                       kfree(cur);
+                       printk(KERN_WARNING "Removing read-only on %s (%#x)\n",
+                              bdev->bd_disk ? bdev->bd_disk->disk_name :
+                                              "unknown block", bdev->bd_dev);
+                       return;
+               }
+               last = cur;
+               cur = cur->next;
+       }
+       spin_unlock(&devlock);
+}
+
+EXPORT_SYMBOL(dev_set_rdonly);
+EXPORT_SYMBOL(dev_clear_rdonly);
+EXPORT_SYMBOL(dev_check_rdonly);
 
 /*
  * sysfs parts below
diff -rup linux-2.6.16.i686.orig/fs/block_dev.c linux-2.6.16.i686/fs/block_dev.c
--- linux-2.6.16.i686.orig/fs/block_dev.c       2006-03-20 07:53:29.000000000 +0200
+++ linux-2.6.16.i686/fs/block_dev.c    2007-05-29 15:35:00.000000000 +0300
@@ -60,6 +60,7 @@ static void kill_bdev(struct block_devic
 {
        invalidate_bdev(bdev, 1);
        truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+       dev_clear_rdonly(bdev);
 }
 
 int set_blocksize(struct block_device *bdev, int size)
diff -rup linux-2.6.16.i686.orig/include/linux/fs.h linux-2.6.16.i686/include/linux/fs.h
--- linux-2.6.16.i686.orig/include/linux/fs.h   2007-05-29 15:24:38.000000000 +0300
+++ linux-2.6.16.i686/include/linux/fs.h        2007-05-29 15:33:50.000000000 +0300
@@ -1541,6 +1541,10 @@ extern void file_kill(struct file *f);
 struct bio;
 extern void submit_bio(int, struct bio *);
 extern int bdev_read_only(struct block_device *);
+#define HAVE_CLEAR_RDONLY_ON_PUT
+void dev_set_rdonly(struct block_device *bdev);
+int dev_check_rdonly(struct block_device *bdev);
+void dev_clear_rdonly(struct block_device *bdev);
 extern int set_blocksize(struct block_device *, int);
 extern int sb_set_blocksize(struct super_block *, int);
 extern int sb_min_blocksize(struct super_block *, int);
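
The three functions exported above are intended to be driven from an external
module (Lustre's OBD code, in practice).  The sketch below is purely
illustrative and not part of the patch: a minimal test module, assuming a
2.6.16 kernel with this patch applied, that marks a hypothetical /dev/sdb1
read-only on load and restores it on unload.  The module name and device path
are made up for the example; open_bdev_excl()/close_bdev_excl() are the stock
2.6.16 helpers for resolving a path to a struct block_device.

/*
 * rdonly_test.c -- illustrative use of the dev_*_rdonly() interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/err.h>

static struct block_device *bdev;

static int __init rdonly_test_init(void)
{
        /* Open the device read-write (flags == 0); the holder is this module. */
        bdev = open_bdev_excl("/dev/sdb1", 0, THIS_MODULE);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        dev_set_rdonly(bdev);   /* from here on, writes are silently dropped */
        if (!dev_check_rdonly(bdev))
                printk(KERN_ERR "rdonly_test: device not marked read-only?\n");
        return 0;
}

static void __exit rdonly_test_exit(void)
{
        dev_clear_rdonly(bdev); /* restore normal write behaviour */
        close_bdev_excl(bdev);
}

module_init(rdonly_test_init);
module_exit(rdonly_test_exit);
MODULE_LICENSE("GPL");

Note that the fs/block_dev.c hunk also calls dev_clear_rdonly() from
kill_bdev(), so the read-only marking does not persist once the device is
finally released (hence the HAVE_CLEAR_RDONLY_ON_PUT define in fs.h).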