* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <lustre_lite.h>
#include "llite_internal.h"
-#define LLOOP_MAX_SEGMENTS PTLRPC_MAX_BRW_PAGES
+#define LLOOP_MAX_SEGMENTS LNET_MAX_IOV
/* Possible states of device */
enum {
int old_gfp_mask;
- cfs_spinlock_t lo_lock;
- struct bio *lo_bio;
- struct bio *lo_biotail;
- int lo_state;
- cfs_semaphore_t lo_sem;
- cfs_mutex_t lo_ctl_mutex;
+ spinlock_t lo_lock;
+ struct bio *lo_bio;
+ struct bio *lo_biotail;
+ int lo_state;
+ struct semaphore lo_sem;
+ struct mutex lo_ctl_mutex;
cfs_atomic_t lo_pending;
cfs_waitq_t lo_bh_wait;
static int max_loop = MAX_LOOP_DEFAULT;
static struct lloop_device *loop_dev;
static struct gendisk **disks;
-static cfs_mutex_t lloop_mutex;
+static struct mutex lloop_mutex;
static void *ll_iocontrol_magic = NULL;
static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
bio_for_each_segment(bvec, bio, i) {
BUG_ON(bvec->bv_offset != 0);
- BUG_ON(bvec->bv_len != CFS_PAGE_SIZE);
+ BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
pages[page_count] = bvec->bv_page;
offsets[page_count] = offset;
* Unfortunately this is NOT easy to fix.
* Thoughts on solution:
* 0. Define a reserved pool for cl_pages, which could be a list of
- * pre-allocated cl_pages from cl_page_kmem;
+ * pre-allocated cl_pages;
* 1. Define a new operation in cl_object_operations{}, say clo_depth,
* which reports how many layers this lustre object has. Generally
* speaking, the depth would be 2, one for llite, and one for lovsub.
*/
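/*
 * A minimal sketch (not part of this patch) of how item 1 above might look.
 * Both the clo_depth hook and the lloop_reserve_npages() helper below are
 * hypothetical illustrations, not existing Lustre API.
 */
#if 0	/* illustration only, never compiled */
struct cl_object_operations {
	/* ... existing operations ... */
	/* number of stacked layers, e.g. 2 for llite + lovsub */
	int (*clo_depth)(const struct lu_env *env,
			 const struct cl_object *obj);
};

/* size a reserved pool: one pre-allocated cl_page per layer per bio segment */
static unsigned int lloop_reserve_npages(const struct lu_env *env,
					 struct cl_object *obj)
{
	int depth = obj->co_ops->clo_depth ?
		    obj->co_ops->clo_depth(env, obj) : 2;

	return depth * LLOOP_MAX_SEGMENTS;
}
#endif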
static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
{
- unsigned long flags;
-
- cfs_spin_lock_irqsave(&lo->lo_lock, flags);
- if (lo->lo_biotail) {
- lo->lo_biotail->bi_next = bio;
- lo->lo_biotail = bio;
- } else
- lo->lo_bio = lo->lo_biotail = bio;
- cfs_spin_unlock_irqrestore(&lo->lo_lock, flags);
-
- cfs_atomic_inc(&lo->lo_pending);
- if (cfs_waitq_active(&lo->lo_bh_wait))
- cfs_waitq_signal(&lo->lo_bh_wait);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lo->lo_lock, flags);
+ if (lo->lo_biotail) {
+ lo->lo_biotail->bi_next = bio;
+ lo->lo_biotail = bio;
+ } else
+ lo->lo_bio = lo->lo_biotail = bio;
+ spin_unlock_irqrestore(&lo->lo_lock, flags);
+
+ cfs_atomic_inc(&lo->lo_pending);
+ if (cfs_waitq_active(&lo->lo_bh_wait))
+ cfs_waitq_signal(&lo->lo_bh_wait);
}
/*
*/
static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
{
- struct bio *first;
- struct bio **bio;
- unsigned int count = 0;
- unsigned int page_count = 0;
- int rw;
-
- cfs_spin_lock_irq(&lo->lo_lock);
- first = lo->lo_bio;
- if (unlikely(first == NULL)) {
- cfs_spin_unlock_irq(&lo->lo_lock);
- return 0;
- }
+ struct bio *first;
+ struct bio **bio;
+ unsigned int count = 0;
+ unsigned int page_count = 0;
+ int rw;
+
+ spin_lock_irq(&lo->lo_lock);
+ first = lo->lo_bio;
+ if (unlikely(first == NULL)) {
+ spin_unlock_irq(&lo->lo_lock);
+ return 0;
+ }
/* TODO: need to split the bio, too bad. */
LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);
lo->lo_bio = NULL;
}
*req = first;
- cfs_spin_unlock_irq(&lo->lo_lock);
- return count;
+ spin_unlock_irq(&lo->lo_lock);
+ return count;
}
static ll_mrf_ret
CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
(unsigned long long)old_bio->bi_sector, old_bio->bi_size);
- cfs_spin_lock_irq(&lo->lo_lock);
- inactive = (lo->lo_state != LLOOP_BOUND);
- cfs_spin_unlock_irq(&lo->lo_lock);
+ spin_lock_irq(&lo->lo_lock);
+ inactive = (lo->lo_state != LLOOP_BOUND);
+ spin_unlock_irq(&lo->lo_lock);
if (inactive)
goto err;
int refcheck;
int ret = 0;
- daemonize("lloop%d", lo->lo_number);
-
set_user_nice(current, -20);
lo->lo_state = LLOOP_BOUND;
/*
* up sem, we are running
*/
- cfs_up(&lo->lo_sem);
-
- for (;;) {
- cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
- if (!cfs_atomic_read(&lo->lo_pending)) {
- int exiting = 0;
- cfs_spin_lock_irq(&lo->lo_lock);
- exiting = (lo->lo_state == LLOOP_RUNDOWN);
- cfs_spin_unlock_irq(&lo->lo_lock);
+ up(&lo->lo_sem);
+
+ for (;;) {
+ cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
+ if (!cfs_atomic_read(&lo->lo_pending)) {
+ int exiting = 0;
+ spin_lock_irq(&lo->lo_lock);
+ exiting = (lo->lo_state == LLOOP_RUNDOWN);
+ spin_unlock_irq(&lo->lo_lock);
if (exiting)
break;
}
cl_env_put(env, &refcheck);
out:
- cfs_up(&lo->lo_sem);
+ up(&lo->lo_sem);
return ret;
}
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->lo_blocksize = CFS_PAGE_SIZE;
+ lo->lo_blocksize = PAGE_CACHE_SIZE;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
lo->lo_queue->unplug_fn = loop_unplug;
#endif
- /* queue parameters */
- CLASSERT(CFS_PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
- blk_queue_logical_block_size(lo->lo_queue,
- (unsigned short)CFS_PAGE_SIZE);
- blk_queue_max_hw_sectors(lo->lo_queue,
- LLOOP_MAX_SEGMENTS << (CFS_PAGE_SHIFT - 9));
- blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
+ /* queue parameters */
+ CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+ blk_queue_logical_block_size(lo->lo_queue,
+ (unsigned short)PAGE_CACHE_SIZE);
+ blk_queue_max_hw_sectors(lo->lo_queue,
+ LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+ blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
set_capacity(disks[lo->lo_number], size);
bd_set_size(bdev, size << 9);
- set_blocksize(bdev, lo->lo_blocksize);
+ set_blocksize(bdev, lo->lo_blocksize);
- cfs_create_thread(loop_thread, lo, CLONE_KERNEL);
- cfs_down(&lo->lo_sem);
- return 0;
+ kthread_run(loop_thread, lo, "lloop%d", lo->lo_number);
+ down(&lo->lo_sem);
+ return 0;
- out:
+out:
/* This is safe: open() is still holding a reference. */
cfs_module_put(THIS_MODULE);
return error;
if (filp == NULL)
return -EINVAL;
- cfs_spin_lock_irq(&lo->lo_lock);
- lo->lo_state = LLOOP_RUNDOWN;
- cfs_spin_unlock_irq(&lo->lo_lock);
- cfs_waitq_signal(&lo->lo_bh_wait);
+ spin_lock_irq(&lo->lo_lock);
+ lo->lo_state = LLOOP_RUNDOWN;
+ spin_unlock_irq(&lo->lo_lock);
+ cfs_waitq_signal(&lo->lo_bh_wait);
- cfs_down(&lo->lo_sem);
+ down(&lo->lo_sem);
lo->lo_backing_file = NULL;
lo->ioctl = NULL;
lo->lo_device = NULL;
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
lo->lo_flags = 0;
- ll_invalidate_bdev(bdev, 0);
+ invalidate_bdev(bdev);
set_capacity(disks[lo->lo_number], 0);
bd_set_size(bdev, 0);
mapping_set_gfp_mask(filp->f_mapping, gfp);
return 0;
}
-#ifdef HAVE_BLKDEV_PUT_2ARGS
static int lo_open(struct block_device *bdev, fmode_t mode)
{
struct lloop_device *lo = bdev->bd_disk->private_data;
-#else
-static int lo_open(struct inode *inode, struct file *file)
-{
- struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
-#endif
- cfs_mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
- cfs_mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
-#ifdef HAVE_BLKDEV_PUT_2ARGS
static int lo_release(struct gendisk *disk, fmode_t mode)
{
struct lloop_device *lo = disk->private_data;
-#else
-static int lo_release(struct inode *inode, struct file *file)
-{
- struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
-#endif
- cfs_mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&lo->lo_ctl_mutex);
--lo->lo_refcnt;
- cfs_mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
/* lloop device node's ioctl function. */
-#ifdef HAVE_BLKDEV_PUT_2ARGS
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct lloop_device *lo = bdev->bd_disk->private_data;
struct inode *inode = NULL;
int err = 0;
-#else
-static int lo_ioctl(struct inode *inode, struct file *unused,
- unsigned int cmd, unsigned long arg)
-{
- struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
- struct block_device *bdev = inode->i_bdev;
- int err = 0;
-#endif
- cfs_mutex_lock(&lloop_mutex);
+ mutex_lock(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_DETACH: {
err = loop_clr_fd(lo, bdev, 2);
if (err == 0)
- ll_blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
+ blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
break;
}
err = -EINVAL;
break;
}
- cfs_mutex_unlock(&lloop_mutex);
+ mutex_unlock(&lloop_mutex);
return err;
}
CWARN("Enter llop_ioctl\n");
- cfs_mutex_lock(&lloop_mutex);
+ mutex_lock(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_ATTACH: {
struct lloop_device *lo_free = NULL;
err = loop_set_fd(lo, NULL, bdev, file);
if (err) {
fput(file);
- ll_blkdev_put(bdev, 0);
+ blkdev_put(bdev, 0);
}
break;
bdev = lo->lo_device;
err = loop_clr_fd(lo, bdev, 1);
if (err == 0)
- ll_blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
+ blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
break;
}
}
out:
- cfs_mutex_unlock(&lloop_mutex);
+ mutex_unlock(&lloop_mutex);
out1:
if (rcp)
*rcp = err;
goto out_mem3;
}
- cfs_mutex_init(&lloop_mutex);
+ mutex_init(&lloop_mutex);
for (i = 0; i < max_loop; i++) {
struct lloop_device *lo = &loop_dev[i];
if (!lo->lo_queue)
goto out_mem4;
- cfs_mutex_init(&lo->lo_ctl_mutex);
- cfs_sema_init(&lo->lo_sem, 0);
- cfs_waitq_init(&lo->lo_bh_wait);
- lo->lo_number = i;
- cfs_spin_lock_init(&lo->lo_lock);
+ mutex_init(&lo->lo_ctl_mutex);
+ sema_init(&lo->lo_sem, 0);
+ cfs_waitq_init(&lo->lo_bh_wait);
+ lo->lo_number = i;
+ spin_lock_init(&lo->lo_lock);
disk->major = lloop_major;
disk->first_minor = i;
disk->fops = &lo_fops;
blk_cleanup_queue(loop_dev[i].lo_queue);
put_disk(disks[i]);
}
- if (ll_unregister_blkdev(lloop_major, "lloop"))
- CWARN("lloop: cannot unregister blkdev\n");
- else
- CDEBUG(D_CONFIG, "unregistered lloop major %d\n", lloop_major);
+ unregister_blkdev(lloop_major, "lloop");
OBD_FREE(disks, max_loop * sizeof(*disks));
OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));