* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/kthread.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <lustre_lib.h>
-#include <lustre_lite.h>
#include "llite_internal.h"
#define LLOOP_MAX_SEGMENTS LNET_MAX_IOV
loff_t lo_offset;
loff_t lo_sizelimit;
int lo_flags;
- int (*ioctl)(struct lloop_device *, int cmd,
- unsigned long arg);
-
struct file *lo_backing_file;
struct block_device *lo_device;
unsigned lo_blocksize;
- int old_gfp_mask;
+ gfp_t old_gfp_mask;
spinlock_t lo_lock;
struct bio *lo_bio;
int lo_state;
struct semaphore lo_sem;
struct mutex lo_ctl_mutex;
- cfs_atomic_t lo_pending;
+ atomic_t lo_pending;
wait_queue_head_t lo_bh_wait;
struct request_queue *lo_queue;
struct cl_object *obj = ll_i2info(inode)->lli_clob;
pgoff_t offset;
int ret;
- int i;
+#ifdef HAVE_BVEC_ITER
+ struct bvec_iter iter;
+ struct bio_vec bvec;
+#else
+ int iter;
+ struct bio_vec *bvec;
+#endif
int rw;
- obd_count page_count = 0;
- struct bio_vec *bvec;
+ size_t page_count = 0;
struct bio *bio;
ssize_t bytes;
for (bio = head; bio != NULL; bio = bio->bi_next) {
LASSERT(rw == bio->bi_rw);
- offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
- bio_for_each_segment(bvec, bio, i) {
- BUG_ON(bvec->bv_offset != 0);
+#ifdef HAVE_BVEC_ITER
+ offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+ bio_for_each_segment(bvec, bio, iter) {
+ BUG_ON(bvec.bv_offset != 0);
+ BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
+
+ pages[page_count] = bvec.bv_page;
+ offsets[page_count] = offset;
+ page_count++;
+ offset += bvec.bv_len;
+#else
+ offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
+ bio_for_each_segment(bvec, bio, iter) {
+ BUG_ON(bvec->bv_offset != 0);
BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
- pages[page_count] = bvec->bv_page;
- offsets[page_count] = offset;
- page_count++;
- offset += bvec->bv_len;
- }
- LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
- }
+ pages[page_count] = bvec->bv_page;
+ offsets[page_count] = offset;
+ page_count++;
+ offset += bvec->bv_len;
+#endif
+ }
+ LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
+ }
ll_stats_ops_tally(ll_i2sbi(inode),
(rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
lo->lo_bio = lo->lo_biotail = bio;
spin_unlock_irqrestore(&lo->lo_lock, flags);
- cfs_atomic_inc(&lo->lo_pending);
+ atomic_inc(&lo->lo_pending);
if (waitqueue_active(&lo->lo_bh_wait))
wake_up(&lo->lo_bh_wait);
}
rw = first->bi_rw;
bio = &lo->lo_bio;
while (*bio && (*bio)->bi_rw == rw) {
- CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
- (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
- page_count, (*bio)->bi_vcnt);
- if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
- break;
-
+#ifdef HAVE_BVEC_ITER
+ CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
+ (unsigned long long)(*bio)->bi_iter.bi_sector,
+ (*bio)->bi_iter.bi_size, page_count, (*bio)->bi_vcnt);
+#else
+ CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
+ (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+ page_count, (*bio)->bi_vcnt);
+#endif
+ if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
+ break;
page_count += (*bio)->bi_vcnt;
count++;
if (!lo)
goto err;
- CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
- (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+#ifdef HAVE_BVEC_ITER
+ CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
+ (unsigned long long)old_bio->bi_iter.bi_sector,
+ old_bio->bi_iter.bi_size);
+#else
+ CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
+ (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+#endif
spin_lock_irq(&lo->lo_lock);
inactive = (lo->lo_state != LLOOP_BOUND);
loop_add_bio(lo, old_bio);
LL_MRF_RETURN(0);
err:
- cfs_bio_io_error(old_bio, old_bio->bi_size);
+ bio_io_error(old_bio);
LL_MRF_RETURN(0);
}
while (bio) {
struct bio *tmp = bio->bi_next;
bio->bi_next = NULL;
- cfs_bio_endio(bio, bio->bi_size, ret);
+ bio_endio(bio, ret);
bio = tmp;
}
}
static inline int loop_active(struct lloop_device *lo)
{
- return cfs_atomic_read(&lo->lo_pending) ||
- (lo->lo_state == LLOOP_RUNDOWN);
+ /* Non-zero while bios are queued (lo_pending) or rundown is in
+  * progress; used as the wait_event() wake-up condition in the
+  * loop service thread. */
+ return atomic_read(&lo->lo_pending) ||
+ (lo->lo_state == LLOOP_RUNDOWN);
}
/*
for (;;) {
wait_event(lo->lo_bh_wait, loop_active(lo));
- if (!cfs_atomic_read(&lo->lo_pending)) {
+ if (!atomic_read(&lo->lo_pending)) {
int exiting = 0;
spin_lock_irq(&lo->lo_lock);
exiting = (lo->lo_state == LLOOP_RUNDOWN);
} else {
times++;
}
- if ((times & 127) == 0) {
- CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
- total_count, times, total_count / times);
- }
-
- LASSERT(bio != NULL);
- LASSERT(count <= cfs_atomic_read(&lo->lo_pending));
- loop_handle_bio(lo, bio);
- cfs_atomic_sub(count, &lo->lo_pending);
- }
- cl_env_put(env, &refcheck);
+ if ((times & 127) == 0) {
+ CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
+ total_count, times, total_count / times);
+ }
+
+ LASSERT(bio != NULL);
+ LASSERT(count <= atomic_read(&lo->lo_pending));
+ loop_handle_bio(lo, bio);
+ atomic_sub(count, &lo->lo_pending);
+ }
+ cl_env_put(env, &refcheck);
out:
up(&lo->lo_sem);
- return ret;
+ return ret;
}
static int loop_set_fd(struct lloop_device *lo, struct file *unused,
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
- lo->ioctl = NULL;
lo->lo_sizelimit = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
int count)
{
struct file *filp = lo->lo_backing_file;
- int gfp = lo->old_gfp_mask;
+ gfp_t gfp = lo->old_gfp_mask;
if (lo->lo_state != LLOOP_BOUND)
return -ENXIO;
down(&lo->lo_sem);
lo->lo_backing_file = NULL;
- lo->ioctl = NULL;
lo->lo_device = NULL;
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
return 0;
}
+/*
+ * Block-device ->release() hook for the lloop gendisk: drop one open
+ * reference under lo_ctl_mutex.  HAVE_BLKDEV_RELEASE_RETURN_INT selects
+ * the return type — presumably tracking the kernel's change of
+ * block_device_operations::release from int to void; confirm against
+ * the configure-time check that defines the macro.
+ */
-static int lo_release(struct gendisk *disk, fmode_t mode)
+#ifdef HAVE_BLKDEV_RELEASE_RETURN_INT
+static int
+#else
+static void
+#endif
+lo_release(struct gendisk *disk, fmode_t mode)
{
- struct lloop_device *lo = disk->private_data;
+ struct lloop_device *lo = disk->private_data;
mutex_lock(&lo->lo_ctl_mutex);
- --lo->lo_refcnt;
+ --lo->lo_refcnt;
mutex_unlock(&lo->lo_ctl_mutex);
-
- return 0;
+#ifdef HAVE_BLKDEV_RELEASE_RETURN_INT
+ return 0;
+#endif
}
/* lloop device node's ioctl function. */
case LL_IOC_LLOOP_INFO: {
struct lu_fid fid;
- LASSERT(lo->lo_backing_file != NULL);
+ if (lo->lo_backing_file == NULL) {
+ err = -ENOENT;
+ break;
+ }
if (inode == NULL)
inode = lo->lo_backing_file->f_dentry->d_inode;
if (lo->lo_state == LLOOP_BOUND)
else
fid_zero(&fid);
- if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid)))
- err = -EFAULT;
- break;
+ if (copy_to_user((struct lu_fid __user *)arg,
+ &fid, sizeof(fid)))
+ err = -EFAULT;
+ break;
}
default:
dev = MKDEV(lloop_major, lo->lo_number);
/* quit if the used pointer is writable */
- if (put_user((long)old_encode_dev(dev), (long*)arg))
- GOTO(out, err = -EFAULT);
+ if (put_user((long)old_encode_dev(dev), (long __user *)arg))
+ GOTO(out, err = -EFAULT);
bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
if (IS_ERR(bdev))