1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5 * Author: Zach Brown <zab@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include <linux/module.h>
23 #include <linux/major.h>
24 #include <linux/smp.h>
25 #include <linux/hdreg.h>
27 #define DEBUG_SUBSYSTEM S_PTLBD
29 #include <linux/lustre_lite.h>
30 #include <linux/lustre_ha.h>
31 #include <linux/obd_support.h>
32 #include <linux/lustre_idl.h>
33 #include <linux/obd_ptlbd.h>
37 * assign proper major number
39 * discover actual block sizes?
40 * allow more than one sector per io
42 * restrict single ops to sequential block io
43 * ddn target addresses need to be 32 bit
44 * cant get to addresses after 0xFFFF0000
47 #define PTLBD_MAJOR 253
48 #define PTLBD_MAX_MINOR 1
50 #define MAJOR_NR PTLBD_MAJOR
51 #define LOCAL_END_REQUEST
52 #include <linux/blk.h>
53 #include <linux/blkdev.h>
54 #include <linux/blkpg.h>
55 #include <linux/devfs_fs_kernel.h>
/*
 * Per-minor parameter tables wired into the 2.4 block layer's global
 * arrays (blk_size[], blksize_size[], hardsect_size[], max_sectors[])
 * by ptlbd_blk_init().  Indexed by minor number.
 */
57 static int ptlbd_size_size[PTLBD_MAX_MINOR];
58 static int ptlbd_size[PTLBD_MAX_MINOR];
59 static int ptlbd_hardsect_size[PTLBD_MAX_MINOR];
60 static int ptlbd_max_sectors[PTLBD_MAX_MINOR];
61 //RHism static char ptlbd_dev_varyio[PTLBD_MAX_MINOR];
64 * per minor state, indexed by minor.
/*
 * NOTE(review): despite the "indexed by minor" comment above, only a
 * single ptlbd_obd pointer is kept here ("one_for_now"), so in practice
 * exactly one device instance is supported at a time.
 */
67 static struct ptlbd_obd *one_for_now;
/*
 * Record the obd instance so the block-layer callbacks (which only get
 * a minor number) can find it.  NOTE(review): the body is not visible
 * in this excerpt; presumably it stores ptlbd into one_for_now above —
 * confirm against the full source.
 */
69 void ptlbd_blk_register(struct ptlbd_obd *ptlbd)
/*
 * Translate a minor number into its ptlbd_obd state.
 * Returns ERR_PTR(-ENODEV) for minors beyond the static tables;
 * the success path (returning the registered instance) falls in lines
 * missing from this excerpt.
 */
76 static struct ptlbd_obd * ptlbd_get_minor(int minor)
79         if ( minor >= PTLBD_MAX_MINOR )
80                 RETURN( ERR_PTR(-ENODEV) );
/*
 * Convenience wrapper: map a device inode to its ptlbd_obd via the
 * inode's minor number.  Returns ERR_PTR(-EINVAL) on a NULL inode,
 * otherwise whatever ptlbd_get_minor() yields (possibly an ERR_PTR).
 */
84 static struct ptlbd_obd * ptlbd_get_inode(struct inode *inode)
88         if ( inode == NULL ) /* can this really happen? */
89                 RETURN( ERR_PTR(-EINVAL) );
91         return ptlbd_get_minor(MINOR(inode->i_rdev));
/*
 * block_device_operations.open: look up the per-device state and, if no
 * import connection has been established yet (remote handle cookie is
 * zero), connect to the server lazily on first open.  Error-path
 * RETURNs and the refcount bump are in lines missing from this excerpt.
 */
94 static int ptlbd_open(struct inode *inode, struct file *file)
96         struct ptlbd_obd *ptlbd = ptlbd_get_inode(inode);
101                 RETURN(PTR_ERR(ptlbd));
        /* a zero cookie means we never connected; do it on demand */
103         if (! ptlbd->bd_import->imp_remote_handle.cookie)
104                 if (ptlbd_do_connect(ptlbd))
/*
 * block_device_operations.ioctl: handles HDIO_GETGEO (fakes a geometry
 * from blk_size[]), BLKSECTGET (reports max_sectors), flush, and
 * forwards generic block ioctls to blk_ioctl().  Requires
 * CAP_SYS_ADMIN for everything.  The switch statement itself and most
 * case labels fall in lines missing from this excerpt.
 */
112 static int ptlbd_ioctl(struct inode *inode, struct file *file,
113                        unsigned int cmd, unsigned long arg)
115         struct ptlbd_obd *ptlbd;
117         __u16 major, minor, dev;
118         struct hd_geometry geo;
        /* privileged callers only — all ioctls are gated here */
120         if ( ! capable(CAP_SYS_ADMIN) )
123         ptlbd = ptlbd_get_inode(inode);
125                 RETURN( PTR_ERR(ptlbd) );
127         major = MAJOR(inode->i_rdev);
128         minor = MINOR(inode->i_rdev);
        /* HDIO_GETGEO: derive cylinders from total size / (heads*sectors) */
136                         geo.cylinders = blk_size[major][minor]/
137                                 (geo.heads * geo.sectors);
138                         if (copy_to_user((void *) arg, &geo, sizeof(geo)))
        /*
         * NOTE(review): sizeof(arg) is sizeof(unsigned long), not the
         * size of the int element being copied — on 64-bit this copies
         * 8 bytes from a 4-byte slot (and into the user's buffer).
         * Should almost certainly be sizeof(max_sectors[major][minor]).
         */
145                         ret = copy_to_user((void *) arg,
146                                            & max_sectors[major][minor], sizeof(arg));
        /* flush path: run the generic ioctl, then push a flush to the server */
150                         ret = blk_ioctl(dev, cmd, arg);
151                         ptlbd_send_flush_req(ptlbd, PTLBD_FLUSH);
        /* default-ish path: forward to the generic block ioctl handler */
164                         ret = blk_ioctl(dev, cmd, arg);
167                 case BLKSECTSET:        /* don't allow setting of max_sectors */
169                 case BLKRRPART:         /* not a partitionable device */
/*
 * block_device_operations.release: drop one open reference; when the
 * last opener goes away, tear down the server connection.  The
 * error-path RETURN and final return are in lines missing from this
 * excerpt.
 */
178 static int ptlbd_release(struct inode *inode, struct file *file)
180         struct ptlbd_obd *ptlbd = ptlbd_get_inode(inode);
184                 RETURN( PTR_ERR(ptlbd) );
        /* last close: disconnect the import set up by ptlbd_open() */
186         if (--ptlbd->refcount == 0)
187                 ptlbd_do_disconnect(ptlbd);
/*
 * Complete a request while io_request_lock is already held: walk the
 * request's buffer_head chain, account the finished I/O, signal each
 * bh's completion callback, then release the request back to the block
 * layer.  NOTE(review): "uptodate" is declared/derived in lines missing
 * from this excerpt — presumably from req->errors; confirm against the
 * full source.
 */
192 static void ptlbd_end_request_havelock(struct request *req)
194         struct buffer_head *bh;
200         while( (bh = req->bh) != NULL ) {
201                 blk_finished_io(bh->b_size >> 9);
        /* unlink bh from the chain before its end_io can recycle it */
202                 req->bh = bh->b_reqnext;
203                 bh->b_reqnext = NULL;
204                 bh->b_end_io(bh, uptodate);
206         blkdev_release_request(req);
/*
 * Lock-taking wrapper around ptlbd_end_request_havelock() for callers
 * that do not already hold io_request_lock.  NOTE(review): the
 * "unsigned long flags" declaration falls in a line missing from this
 * excerpt.
 */
210 static void ptlbd_end_request_getlock(struct request *req)
214         spin_lock_irqsave(&io_request_lock, flags);
215         ptlbd_end_request_havelock(req);
216         spin_unlock_irqrestore(&io_request_lock, flags);
/*
 * 2.4 block-layer request function (entered with io_request_lock held):
 * drain the queue, sending each request's buffer_head chain over the
 * network.  The lock is dropped around the blocking network call
 * (ptlbd_send_rw_req) and retaken before completing the request, per
 * the 2.4 request-function contract.  Several lines (req fetch, cmd
 * selection for the write path, continue/cleanup) are missing from this
 * excerpt.
 */
220 static void ptlbd_request(request_queue_t *q)
222         struct ptlbd_obd *ptlbd;
228         while ( !QUEUE_EMPTY ) {
230                 ptlbd = ptlbd_get_minor(MINOR(req->rq_dev));
232                 blkdev_dequeue_request(req);
        /* device has no openers (or lookup raced with teardown): fail the I/O */
234                 if ( ptlbd->refcount <= 0 ) {
236                         ptlbd_end_request_havelock(req);
        /* drop the lock: the network round-trip below can block */
240                 spin_unlock_irq(&io_request_lock);
242                 if ( req->cmd == READ )
247                 errors = ptlbd_send_rw_req(ptlbd, cmd, req->bh);
249                 spin_lock_irq(&io_request_lock);
252                         req->errors += errors;
254                 ptlbd_end_request_havelock(req);
/*
 * Device methods registered with register_blkdev() in ptlbd_blk_init().
 * NOTE(review): the ".open = ptlbd_open," initializer falls in a line
 * missing from this excerpt.
 */
258 static struct block_device_operations ptlbd_ops = {
259         .owner = THIS_MODULE,
261         .release = ptlbd_release,
262         .ioctl = ptlbd_ioctl,
/*
 * Module-init-time block setup: register the "ptlbd" major, wire the
 * per-minor parameter tables into the 2.4 global block arrays,
 * initialize the request queue, and fill in each minor's geometry
 * constants.  Declarations of ret/i, error handling, and the return are
 * in lines missing from this excerpt.
 */
265 int ptlbd_blk_init(void)
271         ret = register_blkdev(PTLBD_MAJOR, "ptlbd", &ptlbd_ops);
        /* publish our per-minor tables through the 2.4 global arrays */
275         blk_size[PTLBD_MAJOR] = ptlbd_size;
276         blksize_size[PTLBD_MAJOR] = ptlbd_size_size;
277         hardsect_size[PTLBD_MAJOR] = ptlbd_hardsect_size;
278         max_sectors[PTLBD_MAJOR] = ptlbd_max_sectors;
280         blk_init_queue(BLK_DEFAULT_QUEUE(PTLBD_MAJOR), ptlbd_request);
        /* headactive=0: requests are dequeued before being worked on */
281         blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
283         for ( i = 0 ; i < PTLBD_MAX_MINOR ; i++) {
284                 ptlbd_size_size[i] = 4096;
285                 /* avoid integer overflow */
        /* 16 GB expressed in BLOCK_SIZE (1 KB) units, without overflowing int */
286                 ptlbd_size[i] = (16*1024*((1024*1024) >> BLOCK_SIZE_BITS));
287                 ptlbd_hardsect_size[i] = 4096;
        /* cap a single request at PTL_MD_MAX_IOV 4 KB pages, in 512-byte sectors */
288                 ptlbd_max_sectors[i] = PTL_MD_MAX_IOV * (4096/512);
/*
 * Module-exit-time teardown: undo ptlbd_blk_init() — destroy the
 * default request queue and release the major number.  NOTE(review):
 * unlike init, the global blk_size[]/max_sectors[] etc. slots are not
 * visibly cleared here — confirm against the full source whether that
 * happens elsewhere.
 */
294 void ptlbd_blk_exit(void)
297         blk_cleanup_queue(BLK_DEFAULT_QUEUE(PTLBD_MAJOR));
298         unregister_blkdev(PTLBD_MAJOR, "ptlbd");