1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5 * Author: Zach Brown <zab@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include <linux/module.h>
23 #include <linux/major.h>
24 #include <linux/smp.h>
25 #include <linux/hdreg.h>
27 #define DEBUG_SUBSYSTEM S_PTLBD
29 #include <linux/lustre_ha.h>
30 #include <linux/obd_support.h>
31 #include <linux/lustre_idl.h>
32 #include <linux/obd_ptlbd.h>
36 * assign proper major number
38 * discover actual block sizes?
39 * allow more than one sector per io
41 * restrict single ops to sequential block io
42 * ddn target addresses need to be 32 bit
43 * cant get to addresses after 0xFFFF0000
46 #define PTLBD_MAJOR 253
47 #define PTLBD_MAX_MINOR 1
49 #define MAJOR_NR PTLBD_MAJOR
50 #define LOCAL_END_REQUEST
51 #include <linux/blk.h>
52 #include <linux/blkdev.h>
53 #include <linux/blkpg.h>
54 #include <linux/devfs_fs_kernel.h>
/*
 * Per-minor tables handed to the 2.4 block layer in ptlbd_blk_init():
 * registered as blksize_size[], blk_size[], hardsect_size[] and
 * max_sectors[] for PTLBD_MAJOR.  Values are filled in at init time.
 */
56 static int ptlbd_size_size[PTLBD_MAX_MINOR];
57 static int ptlbd_size[PTLBD_MAX_MINOR];
58 static int ptlbd_hardsect_size[PTLBD_MAX_MINOR];
59 static int ptlbd_max_sectors[PTLBD_MAX_MINOR];
60 //RHism static char ptlbd_dev_varyio[PTLBD_MAX_MINOR];
63 * per minor state, indexed by minor.
/*
 * NOTE(review): only a single instance is tracked today, despite the
 * "indexed by minor" comment above -- presumably a placeholder until
 * multiple minors are supported.  Confirm against the full source.
 */
66 static struct ptlbd_obd *one_for_now;
/*
 * Register a ptlbd instance with this driver.
 * NOTE(review): the body is elided in this excerpt; presumably it
 * stashes @ptlbd in one_for_now for ptlbd_get_minor() -- confirm
 * against the full source.
 */
68 void ptlbd_blk_register(struct ptlbd_obd *ptlbd)
/*
 * Look up the per-minor state for @minor.
 * Returns ERR_PTR(-ENODEV) when the minor is out of range; otherwise
 * (in lines elided from this excerpt) presumably returns one_for_now.
 */
75 static struct ptlbd_obd * ptlbd_get_minor(int minor)
78 if ( minor >= PTLBD_MAX_MINOR )
79 RETURN( ERR_PTR(-ENODEV) );
/*
 * Resolve the ptlbd instance backing @inode's block device.
 * Returns ERR_PTR(-EINVAL) for a NULL inode, otherwise delegates to
 * ptlbd_get_minor() on the inode's device minor.
 */
83 static struct ptlbd_obd * ptlbd_get_inode(struct inode *inode)
87 if ( inode == NULL ) /* can this really happen? */
88 RETURN( ERR_PTR(-EINVAL) );
90 return ptlbd_get_minor(MINOR(inode->i_rdev));
/*
 * open() for the ptlbd block device.
 * Lazily connects to the server: if no remote import handle has been
 * established yet (cookie == 0), perform the connect on first open.
 * NOTE(review): error-return and refcount lines are elided in this
 * excerpt -- confirm the open count is taken against the full source.
 */
93 static int ptlbd_open(struct inode *inode, struct file *file)
95 struct ptlbd_obd *ptlbd = ptlbd_get_inode(inode);
100 RETURN(PTR_ERR(ptlbd));
102 if (! ptlbd->bd_import->imp_remote_handle.cookie)
103 if (ptlbd_do_connect(ptlbd))
/*
 * ioctl() for the ptlbd block device.
 * Handles fake-geometry (HDIO_GETGEO), max_sectors query, and forwards
 * generic block ioctls to blk_ioctl(); requires CAP_SYS_ADMIN.
 * NOTE(review): switch scaffolding and several returns are elided in
 * this excerpt -- visible lines documented and one bug fixed below.
 */
111 static int ptlbd_ioctl(struct inode *inode, struct file *file,
112 unsigned int cmd, unsigned long arg)
114 struct ptlbd_obd *ptlbd;
116 __u16 major, minor, dev;
117 struct hd_geometry geo;
/* block ioctls are privileged */
119 if ( ! capable(CAP_SYS_ADMIN) )
122 ptlbd = ptlbd_get_inode(inode);
124 RETURN( PTR_ERR(ptlbd) );
126 major = MAJOR(inode->i_rdev);
127 minor = MINOR(inode->i_rdev);
/*
 * HDIO_GETGEO: synthesize cylinders from the 2.4 blk_size[] table so
 * the total capacity stays consistent with heads * sectors.
 */
135 geo.cylinders = blk_size[major][minor]/
136 (geo.heads * geo.sectors);
137 if (copy_to_user((void *) arg, &geo, sizeof(geo)))
/*
 * BLKSECTGET: copy exactly one int entry.  This previously used
 * sizeof(arg) (size of an unsigned long), which on 64-bit over-read
 * the kernel max_sectors[] slot and over-wrote the user buffer.
 */
144 ret = copy_to_user((void *) arg,
145 & max_sectors[major][minor], sizeof(max_sectors[major][minor]));
/* generic block ioctl, then flush server-side state it may invalidate */
149 ret = blk_ioctl(dev, cmd, arg);
150 ptlbd_send_flush_req(ptlbd, PTLBD_FLUSH);
163 ret = blk_ioctl(dev, cmd, arg);
166 case BLKSECTSET: /* don't allow setting of max_sectors */
168 case BLKRRPART: /* not a partitionable device */
/*
 * release() for the ptlbd block device.
 * Drops the open reference; the last closer tears down the server
 * connection via ptlbd_do_disconnect().
 */
177 static int ptlbd_release(struct inode *inode, struct file *file)
179 struct ptlbd_obd *ptlbd = ptlbd_get_inode(inode);
183 RETURN( PTR_ERR(ptlbd) );
185 if (--ptlbd->refcount == 0)
186 ptlbd_do_disconnect(ptlbd);
/*
 * Complete a request and all of its buffer_heads.
 * Caller must hold io_request_lock (see ptlbd_end_request_getlock()).
 * NOTE(review): `uptodate` is set in lines elided from this excerpt,
 * presumably from req->errors -- confirm against the full source.
 */
191 static void ptlbd_end_request_havelock(struct request *req)
193 struct buffer_head *bh;
/* walk the bh chain, accounting each as finished I/O */
199 while( (bh = req->bh) != NULL ) {
200 blk_finished_io(bh->b_size >> 9);
201 req->bh = bh->b_reqnext;
202 bh->b_reqnext = NULL;
203 bh->b_end_io(bh, uptodate);
/* return the request struct to the block layer */
205 blkdev_release_request(req);
/*
 * Lock-taking wrapper around ptlbd_end_request_havelock() for callers
 * that do not already hold io_request_lock.
 * NOTE(review): `flags` is declared in a line elided from this excerpt.
 */
209 static void ptlbd_end_request_getlock(struct request *req)
213 spin_lock_irqsave(&io_request_lock, flags);
214 ptlbd_end_request_havelock(req);
215 spin_unlock_irqrestore(&io_request_lock, flags);
/*
 * Request-queue handler: drain the queue, sending each request's
 * buffer_head chain to the server with ptlbd_send_rw_req().
 * Entered with io_request_lock held (2.4 block layer convention); the
 * lock is dropped around the network I/O and re-taken to complete.
 */
219 static void ptlbd_request(request_queue_t *q)
221 struct ptlbd_obd *ptlbd;
227 while ( !QUEUE_EMPTY ) {
229 ptlbd = ptlbd_get_minor(MINOR(req->rq_dev));
231 blkdev_dequeue_request(req);
/* device was closed underneath us: fail the request */
233 if ( ptlbd->refcount <= 0 ) {
235 ptlbd_end_request_havelock(req);
/* drop the lock while we block on the network round-trip */
239 spin_unlock_irq(&io_request_lock);
241 if ( req->cmd == READ )
246 errors = ptlbd_send_rw_req(ptlbd, cmd, req->bh);
248 spin_lock_irq(&io_request_lock);
251 req->errors += errors;
253 ptlbd_end_request_havelock(req);
/*
 * File operations for the ptlbd block device.
 * NOTE(review): the .open initializer (presumably ptlbd_open) is in a
 * line elided from this excerpt.
 */
257 static struct block_device_operations ptlbd_ops = {
258 .owner = THIS_MODULE,
260 .release = ptlbd_release,
261 .ioctl = ptlbd_ioctl,
/*
 * Module init for the block half of ptlbd: register the major,
 * publish the per-minor size/sector tables to the 2.4 block layer,
 * set up the request queue, and seed per-minor defaults
 * (4k blocks, 4k hard sectors, 16G device size).
 */
264 int ptlbd_blk_init(void)
270 ret = register_blkdev(PTLBD_MAJOR, "ptlbd", &ptlbd_ops);
274 blk_size[PTLBD_MAJOR] = ptlbd_size;
275 blksize_size[PTLBD_MAJOR] = ptlbd_size_size;
276 hardsect_size[PTLBD_MAJOR] = ptlbd_hardsect_size;
277 max_sectors[PTLBD_MAJOR] = ptlbd_max_sectors;
/* queue is headactive by default; we dequeue ourselves, so disable */
279 blk_init_queue(BLK_DEFAULT_QUEUE(PTLBD_MAJOR), ptlbd_request);
280 blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
282 for ( i = 0 ; i < PTLBD_MAX_MINOR ; i++) {
283 ptlbd_size_size[i] = 4096;
284 /* avoid integer overflow */
285 ptlbd_size[i] = (16*1024*((1024*1024) >> BLOCK_SIZE_BITS));
286 ptlbd_hardsect_size[i] = 4096;
/* cap a single request at one bulk RPC's worth of 512-byte sectors */
287 ptlbd_max_sectors[i] = PTLRPC_MAX_BRW_PAGES * (4096/512);
/*
 * Module teardown: release the request queue and the major number
 * acquired in ptlbd_blk_init().
 */
293 void ptlbd_blk_exit(void)
296 blk_cleanup_queue(BLK_DEFAULT_QUEUE(PTLBD_MAJOR));
297 unregister_blkdev(PTLBD_MAJOR, "ptlbd");