* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/kthread.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <lustre_lib.h>
-#include <lustre_lite.h>
#include "llite_internal.h"
#define LLOOP_MAX_SEGMENTS LNET_MAX_IOV
loff_t lo_offset;
loff_t lo_sizelimit;
int lo_flags;
- int (*ioctl)(struct lloop_device *, int cmd,
- unsigned long arg);
-
struct file *lo_backing_file;
struct block_device *lo_device;
unsigned lo_blocksize;
- int old_gfp_mask;
+ gfp_t old_gfp_mask;
spinlock_t lo_lock;
struct bio *lo_bio;
int lo_state;
struct semaphore lo_sem;
struct mutex lo_ctl_mutex;
- cfs_atomic_t lo_pending;
+ atomic_t lo_pending;
wait_queue_head_t lo_bh_wait;
struct request_queue *lo_queue;
{
const struct lu_env *env = lo->lo_env;
struct cl_io *io = &lo->lo_io;
- struct inode *inode = lo->lo_backing_file->f_dentry->d_inode;
+ struct dentry *de = lo->lo_backing_file->f_path.dentry;
+ struct inode *inode = de->d_inode;
struct cl_object *obj = ll_i2info(inode)->lli_clob;
pgoff_t offset;
int ret;
- int i;
+#ifdef HAVE_BVEC_ITER
+ struct bvec_iter iter;
+ struct bio_vec bvec;
+#else
+ int iter;
+ struct bio_vec *bvec;
+#endif
int rw;
- obd_count page_count = 0;
- struct bio_vec *bvec;
+ size_t page_count = 0;
struct bio *bio;
ssize_t bytes;
for (bio = head; bio != NULL; bio = bio->bi_next) {
LASSERT(rw == bio->bi_rw);
- offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
- bio_for_each_segment(bvec, bio, i) {
- BUG_ON(bvec->bv_offset != 0);
+#ifdef HAVE_BVEC_ITER
+ offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+ bio_for_each_segment(bvec, bio, iter) {
+ BUG_ON(bvec.bv_offset != 0);
+ BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
+
+ pages[page_count] = bvec.bv_page;
+ offsets[page_count] = offset;
+ page_count++;
+ offset += bvec.bv_len;
+#else
+ offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
+ bio_for_each_segment(bvec, bio, iter) {
+ BUG_ON(bvec->bv_offset != 0);
BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
- pages[page_count] = bvec->bv_page;
- offsets[page_count] = offset;
- page_count++;
- offset += bvec->bv_len;
- }
- LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
- }
+ pages[page_count] = bvec->bv_page;
+ offsets[page_count] = offset;
+ page_count++;
+ offset += bvec->bv_len;
+#endif
+ }
+ LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
+ }
ll_stats_ops_tally(ll_i2sbi(inode),
(rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
lo->lo_bio = lo->lo_biotail = bio;
spin_unlock_irqrestore(&lo->lo_lock, flags);
- cfs_atomic_inc(&lo->lo_pending);
+ atomic_inc(&lo->lo_pending);
if (waitqueue_active(&lo->lo_bh_wait))
wake_up(&lo->lo_bh_wait);
}
rw = first->bi_rw;
bio = &lo->lo_bio;
while (*bio && (*bio)->bi_rw == rw) {
- CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
- (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
- page_count, (*bio)->bi_vcnt);
- if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
- break;
-
+#ifdef HAVE_BVEC_ITER
+ CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
+ (unsigned long long)(*bio)->bi_iter.bi_sector,
+ (*bio)->bi_iter.bi_size, page_count, (*bio)->bi_vcnt);
+#else
+ CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
+ (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+ page_count, (*bio)->bi_vcnt);
+#endif
+ if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
+ break;
page_count += (*bio)->bi_vcnt;
count++;
if (!lo)
goto err;
- CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
- (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+#ifdef HAVE_BVEC_ITER
+ CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
+ (unsigned long long)old_bio->bi_iter.bi_sector,
+ old_bio->bi_iter.bi_size);
+#else
+ CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
+ (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+#endif
spin_lock_irq(&lo->lo_lock);
inactive = (lo->lo_state != LLOOP_BOUND);
static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
{
+	/* Run the Lustre-backed transfer once for the whole bio chain, then
+	 * complete each bio in the chain with that single result. */
- int ret;
- ret = do_bio_lustrebacked(lo, bio);
- while (bio) {
- struct bio *tmp = bio->bi_next;
- bio->bi_next = NULL;
+ int ret;
+
+ ret = do_bio_lustrebacked(lo, bio);
+ while (bio) {
+ struct bio *tmp = bio->bi_next;
+
+ bio->bi_next = NULL;
+	/* Newer kernels (4.3+) carry the status in bio->bi_error and
+	 * bio_endio() takes one argument; older kernels pass the error
+	 * code directly as the second bio_endio() argument. */
+#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
+ bio->bi_error = ret;
+ bio_endio(bio);
+#else
bio_endio(bio, ret);
- bio = tmp;
- }
+#endif
+ bio = tmp;
+ }
}
static inline int loop_active(struct lloop_device *lo)
{
+	/* Wake condition for the lloop kernel thread: there is pending I/O,
+	 * or the device is in rundown and the thread must notice and exit. */
- return cfs_atomic_read(&lo->lo_pending) ||
- (lo->lo_state == LLOOP_RUNDOWN);
+ return atomic_read(&lo->lo_pending) ||
+ (lo->lo_state == LLOOP_RUNDOWN);
}
/*
for (;;) {
wait_event(lo->lo_bh_wait, loop_active(lo));
- if (!cfs_atomic_read(&lo->lo_pending)) {
+ if (!atomic_read(&lo->lo_pending)) {
int exiting = 0;
spin_lock_irq(&lo->lo_lock);
exiting = (lo->lo_state == LLOOP_RUNDOWN);
} else {
times++;
}
- if ((times & 127) == 0) {
- CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
- total_count, times, total_count / times);
- }
+ if ((times & 127) == 0) {
+ CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
+ total_count, times, total_count / times);
+ }
- LASSERT(bio != NULL);
- LASSERT(count <= cfs_atomic_read(&lo->lo_pending));
- loop_handle_bio(lo, bio);
- cfs_atomic_sub(count, &lo->lo_pending);
- }
- cl_env_put(env, &refcheck);
+ LASSERT(bio != NULL);
+ LASSERT(count <= atomic_read(&lo->lo_pending));
+ loop_handle_bio(lo, bio);
+ atomic_sub(count, &lo->lo_pending);
+ }
+ cl_env_put(env, &refcheck);
out:
up(&lo->lo_sem);
- return ret;
+ return ret;
}
static int loop_set_fd(struct lloop_device *lo, struct file *unused,
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
- lo->ioctl = NULL;
lo->lo_sizelimit = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
int count)
{
struct file *filp = lo->lo_backing_file;
- int gfp = lo->old_gfp_mask;
+ gfp_t gfp = lo->old_gfp_mask;
if (lo->lo_state != LLOOP_BOUND)
return -ENXIO;
down(&lo->lo_sem);
lo->lo_backing_file = NULL;
- lo->ioctl = NULL;
lo->lo_device = NULL;
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct lloop_device *lo = bdev->bd_disk->private_data;
- struct inode *inode = NULL;
- int err = 0;
+ struct lloop_device *lo = bdev->bd_disk->private_data;
+ int err = 0;
mutex_lock(&lloop_mutex);
switch (cmd) {
break;
}
- case LL_IOC_LLOOP_INFO: {
- struct lu_fid fid;
+ case LL_IOC_LLOOP_INFO: {
+ struct inode *inode;
+ struct lu_fid fid;
if (lo->lo_backing_file == NULL) {
err = -ENOENT;
break;
}
- if (inode == NULL)
- inode = lo->lo_backing_file->f_dentry->d_inode;
- if (lo->lo_state == LLOOP_BOUND)
+ inode = lo->lo_backing_file->f_path.dentry->d_inode;
+ if (inode != NULL && lo->lo_state == LLOOP_BOUND)
fid = ll_i2info(inode)->lli_fid;
else
fid_zero(&fid);
- if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid)))
- err = -EFAULT;
- break;
+ if (copy_to_user((struct lu_fid __user *)arg,
+ &fid, sizeof(fid)))
+ err = -EFAULT;
+ break;
}
default:
CWARN("Enter llop_ioctl\n");
mutex_lock(&lloop_mutex);
- switch (cmd) {
- case LL_IOC_LLOOP_ATTACH: {
- struct lloop_device *lo_free = NULL;
- int i;
+ switch (cmd) {
+ case LL_IOC_LLOOP_ATTACH: {
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct lloop_device *lo_free = NULL;
+ int i;
for (i = 0; i < max_loop; i++, lo = NULL) {
lo = &loop_dev[i];
lo_free = lo;
continue;
}
- if (lo->lo_backing_file->f_dentry->d_inode ==
- file->f_dentry->d_inode)
- break;
- }
- if (lo || !lo_free)
- GOTO(out, err = -EBUSY);
+ if (lo->lo_backing_file->f_path.dentry->d_inode ==
+ inode)
+ break;
+ }
+ if (lo || !lo_free)
+ GOTO(out, err = -EBUSY);
lo = lo_free;
dev = MKDEV(lloop_major, lo->lo_number);
/* quit if the used pointer is writable */
- if (put_user((long)old_encode_dev(dev), (long*)arg))
- GOTO(out, err = -EFAULT);
+ if (put_user((long)old_encode_dev(dev), (long __user *)arg))
+ GOTO(out, err = -EFAULT);
bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
if (IS_ERR(bdev))
module_exit(lloop_exit);
CFS_MODULE_PARM(max_loop, "i", int, 0444, "maximum of lloop_device");
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre virtual block device");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");