};
struct lloop_device {
- int lo_number;
- int lo_refcnt;
- loff_t lo_offset;
- loff_t lo_sizelimit;
- int lo_flags;
- int (*ioctl)(struct lloop_device *, int cmd,
- unsigned long arg);
+ int lo_number;
+ int lo_refcnt;
+ loff_t lo_offset;
+ loff_t lo_sizelimit;
+ int lo_flags;
+ int (*ioctl)(struct lloop_device *, int cmd,
+ unsigned long arg);
- struct file *lo_backing_file;
- struct block_device *lo_device;
- unsigned lo_blocksize;
+ struct file *lo_backing_file;
+ struct block_device *lo_device;
+ unsigned lo_blocksize;
- int old_gfp_mask;
+ int old_gfp_mask;
spinlock_t lo_lock;
struct bio *lo_bio;
int lo_state;
struct semaphore lo_sem;
struct mutex lo_ctl_mutex;
- cfs_atomic_t lo_pending;
- cfs_waitq_t lo_bh_wait;
+ cfs_atomic_t lo_pending;
+ wait_queue_head_t lo_bh_wait;
- struct request_queue *lo_queue;
+ struct request_queue *lo_queue;
- const struct lu_env *lo_env;
- struct cl_io lo_io;
- struct ll_dio_pages lo_pvec;
+ const struct lu_env *lo_env;
+ struct cl_io lo_io;
+ struct ll_dio_pages lo_pvec;
- /* data to handle bio for lustre. */
- struct lo_request_data {
- struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
- loff_t lrd_offsets[LLOOP_MAX_SEGMENTS];
- } lo_requests[1];
+ /* Data used to handle a bio request on behalf of Lustre. */
+ struct lo_request_data {
+ struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
+ loff_t lrd_offsets[LLOOP_MAX_SEGMENTS];
+ } lo_requests[1];
};
/*
spin_unlock_irqrestore(&lo->lo_lock, flags);
cfs_atomic_inc(&lo->lo_pending);
- if (cfs_waitq_active(&lo->lo_bh_wait))
- cfs_waitq_signal(&lo->lo_bh_wait);
+ if (waitqueue_active(&lo->lo_bh_wait))
+ wake_up(&lo->lo_bh_wait);
}
/*
loop_add_bio(lo, old_bio);
LL_MRF_RETURN(0);
err:
- cfs_bio_io_error(old_bio, old_bio->bi_size);
+ bio_io_error(old_bio);
LL_MRF_RETURN(0);
}
while (bio) {
struct bio *tmp = bio->bi_next;
bio->bi_next = NULL;
- cfs_bio_endio(bio, bio->bi_size, ret);
+ bio_endio(bio, ret);
bio = tmp;
}
}
up(&lo->lo_sem);
for (;;) {
- cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
+ wait_event(lo->lo_bh_wait, loop_active(lo));
if (!cfs_atomic_read(&lo->lo_pending)) {
int exiting = 0;
spin_lock_irq(&lo->lo_lock);
int error;
loff_t size;
- if (!cfs_try_module_get(THIS_MODULE))
- return -ENODEV;
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
error = -EBUSY;
if (lo->lo_state != LLOOP_UNBOUND)
#endif
/* queue parameters */
- CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
- blk_queue_logical_block_size(lo->lo_queue,
- (unsigned short)PAGE_CACHE_SIZE);
blk_queue_max_hw_sectors(lo->lo_queue,
LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
return 0;
out:
- /* This is safe: open() is still holding a reference. */
- cfs_module_put(THIS_MODULE);
- return error;
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+ return error;
}
static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
spin_lock_irq(&lo->lo_lock);
lo->lo_state = LLOOP_RUNDOWN;
spin_unlock_irq(&lo->lo_lock);
- cfs_waitq_signal(&lo->lo_bh_wait);
+ wake_up(&lo->lo_bh_wait);
down(&lo->lo_sem);
lo->lo_backing_file = NULL;
bd_set_size(bdev, 0);
mapping_set_gfp_mask(filp->f_mapping, gfp);
lo->lo_state = LLOOP_UNBOUND;
- fput(filp);
- /* This is safe: open() is still holding a reference. */
- cfs_module_put(THIS_MODULE);
- return 0;
+ fput(filp);
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+ return 0;
}
-#ifdef HAVE_BLKDEV_PUT_2ARGS
static int lo_open(struct block_device *bdev, fmode_t mode)
{
struct lloop_device *lo = bdev->bd_disk->private_data;
-#else
-static int lo_open(struct inode *inode, struct file *file)
-{
- struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
-#endif
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
return 0;
}
-#ifdef HAVE_BLKDEV_PUT_2ARGS
-static int lo_release(struct gendisk *disk, fmode_t mode)
-{
- struct lloop_device *lo = disk->private_data;
+#ifdef HAVE_BLKDEV_RELEASE_RETURN_INT
+static int
#else
-static int lo_release(struct inode *inode, struct file *file)
-{
- struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
+static void
#endif
+lo_release(struct gendisk *disk, fmode_t mode)
+{
+ struct lloop_device *lo = disk->private_data;
mutex_lock(&lo->lo_ctl_mutex);
- --lo->lo_refcnt;
+ --lo->lo_refcnt;
mutex_unlock(&lo->lo_ctl_mutex);
-
- return 0;
+#ifdef HAVE_BLKDEV_RELEASE_RETURN_INT
+ return 0;
+#endif
}
/* lloop device node's ioctl function. */
-#ifdef HAVE_BLKDEV_PUT_2ARGS
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct lloop_device *lo = bdev->bd_disk->private_data;
struct inode *inode = NULL;
int err = 0;
-#else
-static int lo_ioctl(struct inode *inode, struct file *unused,
- unsigned int cmd, unsigned long arg)
-{
- struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
- struct block_device *bdev = inode->i_bdev;
- int err = 0;
-#endif
mutex_lock(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_DETACH: {
err = loop_clr_fd(lo, bdev, 2);
if (err == 0)
- ll_blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
+ blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
break;
}
err = loop_set_fd(lo, NULL, bdev, file);
if (err) {
fput(file);
- ll_blkdev_put(bdev, 0);
+ blkdev_put(bdev, 0);
}
break;
bdev = lo->lo_device;
err = loop_clr_fd(lo, bdev, 1);
if (err == 0)
- ll_blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
+ blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
break;
}
mutex_init(&lo->lo_ctl_mutex);
sema_init(&lo->lo_sem, 0);
- cfs_waitq_init(&lo->lo_bh_wait);
+ init_waitqueue_head(&lo->lo_bh_wait);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
disk->major = lloop_major;
blk_cleanup_queue(loop_dev[i].lo_queue);
put_disk(disks[i]);
}
- if (ll_unregister_blkdev(lloop_major, "lloop"))
- CWARN("lloop: cannot unregister blkdev\n");
- else
- CDEBUG(D_CONFIG, "unregistered lloop major %d\n", lloop_major);
+ unregister_blkdev(lloop_major, "lloop");
OBD_FREE(disks, max_loop * sizeof(*disks));
OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));