-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*
*/
-#ifndef AUTOCONF_INCLUDED
-#include <linux/config.h>
-#endif
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
-#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/suspend.h>
struct bio *lo_biotail;
int lo_state;
cfs_semaphore_t lo_sem;
- cfs_semaphore_t lo_ctl_mutex;
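+        /* lo_ctl_mutex only ever provides mutual exclusion (it is locked
+         * and unlocked in the same context), so a plain mutex can replace
+         * the old binary semaphore */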
+ cfs_mutex_t lo_ctl_mutex;
cfs_atomic_t lo_pending;
cfs_waitq_t lo_bh_wait;
static int max_loop = MAX_LOOP_DEFAULT;
static struct lloop_device *loop_dev;
static struct gendisk **disks;
-static cfs_semaphore_t lloop_mutex;
+static cfs_mutex_t lloop_mutex;
static void *ll_iocontrol_magic = NULL;
static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
ll_stats_ops_tally(ll_i2sbi(inode),
(rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
- page_count << PAGE_CACHE_SHIFT);
+ page_count);
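+        /* the LPROC_LL_BRW_* counters are tallied in units of pages, not
+         * bytes, hence no PAGE_CACHE_SHIFT scaling in the call above */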
pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
pvec->ldp_nr = page_count;
- /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
- * write those pages into OST. Even worse case is that more pages
- * would be asked to write out to swap space, and then finally get here
- * again.
- * Unfortunately this is NOT easy to fix.
- * Thoughts on solution:
- * 0. Define a reserved pool for cl_pages, which could be a list of
- * pre-allocated cl_pages from cl_page_kmem;
- * 1. Define a new operation in cl_object_operations{}, says clo_depth,
- * which measures how many layers for this lustre object. Generally
- * speaking, the depth would be 2, one for llite, and one for lovsub.
- * However, for SNS, there will be more since we need additional page
- * to store parity;
- * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
- * pool. Afterwards, the clio would allocate the pages from reserved
- * pool, this guarantees we neeedn't allocate the cl_pages from
- * generic cl_page slab cache.
- * Of course, if there is NOT enough pages in the pool, we might
- * be asked to write less pages once, this purely depends on
- * implementation. Anyway, we should be careful to avoid deadlocking.
- */
- LOCK_INODE_MUTEX(inode);
- bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
- UNLOCK_INODE_MUTEX(inode);
- cl_io_fini(env, io);
- return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
+        /* FIXME: ll_direct_rw_pages has to allocate many cl_page{}s to
+         * write those pages into OST. An even worse case is when more
+         * pages need to be written out to swap space and then finally
+         * end up here again.
+         * Unfortunately this is NOT easy to fix.
+         * Thoughts on a solution:
+         * 0. Define a reserved pool for cl_pages, which could be a list
+         *    of cl_pages pre-allocated from cl_page_kmem (see the sketch
+         *    below);
+         * 1. Define a new operation in cl_object_operations{}, say
+         *    clo_depth, which measures how many layers this lustre
+         *    object has. Generally speaking, the depth would be 2, one
+         *    for llite and one for lovsub. However, for SNS there will
+         *    be more, since we need an additional page to store parity;
+         * 2. Reserve (page_count * depth) cl_pages from the reserved
+         *    pool. Afterwards, clio would allocate pages from the
+         *    reserved pool; this guarantees we needn't allocate cl_pages
+         *    from the generic cl_page slab cache.
+         *    Of course, if there are NOT enough pages in the pool, we
+         *    might be asked to write fewer pages at a time; this purely
+         *    depends on the implementation. Anyway, we should be careful
+         *    to avoid deadlock.
+         */
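+        /* A minimal sketch of item 0 above (a reserved cl_page pool).
+         * The names cl_page_pool, cpp_lock, cpp_free, cpp_nr and
+         * cl_page_reserve are hypothetical and do not exist in this
+         * tree; they only illustrate the shape of the idea:
+         *
+         *   struct cl_page_pool {
+         *           cfs_spinlock_t cpp_lock;  // protects cpp_free
+         *           cfs_list_t     cpp_free;  // pre-allocated cl_pages
+         *           unsigned int   cpp_nr;    // entries currently free
+         *   };
+         *
+         *   // reserve page_count * depth entries up front, or fail so
+         *   // the caller can fall back to writing fewer pages at a time
+         *   int cl_page_reserve(struct cl_page_pool *pool, unsigned nr);
+         */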
+ mutex_lock(&inode->i_mutex);
+ bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
+ mutex_unlock(&inode->i_mutex);
+ cl_io_fini(env, io);
+ return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
}
/*
return 0;
}
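+/* kernels 2.6.39+ dropped the per-queue unplug callback when the block
+ * plugging code was reworked, so loop_unplug is only compiled when
+ * struct request_queue still has an unplug_fn member */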
+#ifdef HAVE_REQUEST_QUEUE_UNPLUG_FN
/*
* kick off io on the underlying address space
*/
clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
blk_run_address_space(lo->lo_backing_file->f_mapping);
}
+#endif
static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
{
*/
blk_queue_make_request(lo->lo_queue, loop_make_request);
lo->lo_queue->queuedata = lo;
+#ifdef HAVE_REQUEST_QUEUE_UNPLUG_FN
lo->lo_queue->unplug_fn = loop_unplug;
+#endif
/* queue parameters */
- blk_queue_logical_block_size(lo->lo_queue, CFS_PAGE_SIZE);
- blk_queue_max_sectors(lo->lo_queue,
- LLOOP_MAX_SEGMENTS << (CFS_PAGE_SHIFT - 9));
- blk_queue_max_phys_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
- blk_queue_max_hw_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
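+        /* blk_queue_max_sectors() and blk_queue_max_{phys,hw}_segments()
+         * were replaced by blk_queue_max_hw_sectors() and
+         * blk_queue_max_segments() around kernel 2.6.34; the CLASSERT
+         * below checks that the page size still fits in the unsigned
+         * short taken by blk_queue_logical_block_size() */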
+ CLASSERT(CFS_PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+ blk_queue_logical_block_size(lo->lo_queue,
+ (unsigned short)CFS_PAGE_SIZE);
+ blk_queue_max_hw_sectors(lo->lo_queue,
+ LLOOP_MAX_SEGMENTS << (CFS_PAGE_SHIFT - 9));
+ blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
set_capacity(disks[lo->lo_number], size);
bd_set_size(bdev, size << 9);
set_blocksize(bdev, lo->lo_blocksize);
- cfs_kernel_thread(loop_thread, lo, CLONE_KERNEL);
+ cfs_create_thread(loop_thread, lo, CLONE_KERNEL);
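+        /* lo_sem starts at 0, so the cfs_down() below blocks until
+         * loop_thread has started and released lo_sem */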
cfs_down(&lo->lo_sem);
return 0;
struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
#endif
- cfs_down(&lo->lo_ctl_mutex);
+ cfs_mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
- cfs_up(&lo->lo_ctl_mutex);
+ cfs_mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
#endif
- cfs_down(&lo->lo_ctl_mutex);
+ cfs_mutex_lock(&lo->lo_ctl_mutex);
--lo->lo_refcnt;
- cfs_up(&lo->lo_ctl_mutex);
+ cfs_mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
unsigned int cmd, unsigned long arg)
{
struct lloop_device *lo = bdev->bd_disk->private_data;
+ struct inode *inode = NULL;
+ int err = 0;
#else
static int lo_ioctl(struct inode *inode, struct file *unused,
unsigned int cmd, unsigned long arg)
{
struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
struct block_device *bdev = inode->i_bdev;
-#endif
int err = 0;
+#endif
- cfs_down(&lloop_mutex);
+ cfs_mutex_lock(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_DETACH: {
err = loop_clr_fd(lo, bdev, 2);
}
case LL_IOC_LLOOP_INFO: {
- __u64 ino = 0;
+ struct lu_fid fid;
+ LASSERT(lo->lo_backing_file != NULL);
+ if (inode == NULL)
+ inode = lo->lo_backing_file->f_dentry->d_inode;
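+                /* return the file's cluster-wide Lustre FID instead of
+                 * the old 64-bit inode number; userspace must now pass
+                 * a struct lu_fid buffer for this ioctl */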
if (lo->lo_state == LLOOP_BOUND)
- ino = lo->lo_backing_file->f_dentry->d_inode->i_ino;
+ fid = ll_i2info(inode)->lli_fid;
+ else
+ fid_zero(&fid);
- if (put_user(ino, (__u64 *)arg))
+ if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid)))
err = -EFAULT;
break;
}
err = -EINVAL;
break;
}
- cfs_up(&lloop_mutex);
+ cfs_mutex_unlock(&lloop_mutex);
return err;
}
CWARN("Enter llop_ioctl\n");
- cfs_down(&lloop_mutex);
+ cfs_mutex_lock(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_ATTACH: {
struct lloop_device *lo_free = NULL;
if (put_user((long)old_encode_dev(dev), (long*)arg))
GOTO(out, err = -EFAULT);
- bdev = open_by_devnum(dev, file->f_mode);
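+                /* open_by_devnum() was removed in kernel 2.6.38 in
+                 * favour of blkdev_get_by_dev(dev, mode, holder) */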
+ bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
if (IS_ERR(bdev))
GOTO(out, err = PTR_ERR(bdev));
}
out:
- cfs_up(&lloop_mutex);
+ cfs_mutex_unlock(&lloop_mutex);
out1:
if (rcp)
*rcp = err;
goto out_mem3;
}
- cfs_init_mutex(&lloop_mutex);
+ cfs_mutex_init(&lloop_mutex);
for (i = 0; i < max_loop; i++) {
struct lloop_device *lo = &loop_dev[i];
if (!lo->lo_queue)
goto out_mem4;
- cfs_init_mutex(&lo->lo_ctl_mutex);
- cfs_init_mutex_locked(&lo->lo_sem);
+ cfs_mutex_init(&lo->lo_ctl_mutex);
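+                /* equivalent of the old locked-semaphore init: start at
+                 * 0 so the first cfs_down() blocks until the loop thread
+                 * signals startup */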
+ cfs_sema_init(&lo->lo_sem, 0);
cfs_waitq_init(&lo->lo_bh_wait);
lo->lo_number = i;
cfs_spin_lock_init(&lo->lo_lock);