X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Flloop.c;h=b8ae0b50f440e6643c1639e67ed7f48367c4f67f;hp=fa98a4942692a352661477ce9a5bd12c06bc7d1a;hb=5e5e4ae2be4bc377f0f896163ae59bf338c4250c;hpb=f2a9374170e4522b9d2ac3b7096cf2912339d480

diff --git a/lustre/llite/lloop.c b/lustre/llite/lloop.c
index fa98a49..b8ae0b5 100644
--- a/lustre/llite/lloop.c
+++ b/lustre/llite/lloop.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -29,8 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011 Whamcloud, Inc.
- *
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -82,12 +79,10 @@
  *
  */
 
-#ifndef AUTOCONF_INCLUDED
-#include 
-#endif
-
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -97,7 +92,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -112,10 +106,9 @@
 #include 
 #include 
 
-#include 
 #include "llite_internal.h"
 
-#define LLOOP_MAX_SEGMENTS        PTLRPC_MAX_BRW_PAGES
+#define LLOOP_MAX_SEGMENTS	LNET_MAX_IOV
 
 /* Possible states of device */
 enum {
@@ -125,40 +118,37 @@ enum {
 };
 
 struct lloop_device {
-        int lo_number;
-        int lo_refcnt;
-        loff_t lo_offset;
-        loff_t lo_sizelimit;
-        int lo_flags;
-        int (*ioctl)(struct lloop_device *, int cmd,
-                     unsigned long arg);
-
-        struct file *lo_backing_file;
-        struct block_device *lo_device;
-        unsigned lo_blocksize;
-
-        int old_gfp_mask;
-
-        cfs_spinlock_t lo_lock;
-        struct bio *lo_bio;
-        struct bio *lo_biotail;
-        int lo_state;
-        cfs_semaphore_t lo_sem;
-        cfs_semaphore_t lo_ctl_mutex;
-        cfs_atomic_t lo_pending;
-        cfs_waitq_t lo_bh_wait;
-
-        struct request_queue *lo_queue;
-
-        const struct lu_env *lo_env;
-        struct cl_io lo_io;
-        struct ll_dio_pages lo_pvec;
-
-        /* data to handle bio for lustre. */
-        struct lo_request_data {
-                struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
-                loff_t lrd_offsets[LLOOP_MAX_SEGMENTS];
-        } lo_requests[1];
+	int lo_number;
+	int lo_refcnt;
+	loff_t lo_offset;
+	loff_t lo_sizelimit;
+	int lo_flags;
+	struct file *lo_backing_file;
+	struct block_device *lo_device;
+	unsigned lo_blocksize;
+
+	gfp_t old_gfp_mask;
+
+	spinlock_t lo_lock;
+	struct bio *lo_bio;
+	struct bio *lo_biotail;
+	int lo_state;
+	struct semaphore lo_sem;
+	struct mutex lo_ctl_mutex;
+	atomic_t lo_pending;
+	wait_queue_head_t lo_bh_wait;
+
+	struct request_queue *lo_queue;
+
+	const struct lu_env *lo_env;
+	struct cl_io lo_io;
+	struct ll_dio_pages lo_pvec;
+
+	/* data to handle bio for lustre. */
+	struct lo_request_data {
+		struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
+		loff_t lrd_offsets[LLOOP_MAX_SEGMENTS];
+	} lo_requests[1];
 };
@@ -173,7 +163,7 @@ static int lloop_major;
 static int max_loop = MAX_LOOP_DEFAULT;
 static struct lloop_device *loop_dev;
 static struct gendisk **disks;
-static cfs_semaphore_t lloop_mutex;
+static struct mutex lloop_mutex;
 static void *ll_iocontrol_magic = NULL;
 
 static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
@@ -198,14 +188,20 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 {
 	const struct lu_env *env = lo->lo_env;
 	struct cl_io *io = &lo->lo_io;
-        struct inode *inode = lo->lo_backing_file->f_dentry->d_inode;
+	struct dentry *de = lo->lo_backing_file->f_path.dentry;
+	struct inode *inode = de->d_inode;
 	struct cl_object *obj = ll_i2info(inode)->lli_clob;
 	pgoff_t offset;
 	int ret;
-        int i;
+#ifdef HAVE_BVEC_ITER
+	struct bvec_iter iter;
+	struct bio_vec bvec;
+#else
+	int iter;
+	struct bio_vec *bvec;
+#endif
 	int rw;
-        obd_count page_count = 0;
-        struct bio_vec *bvec;
+	size_t page_count = 0;
 	struct bio *bio;
 	ssize_t bytes;
 
@@ -228,18 +224,31 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 	for (bio = head; bio != NULL; bio = bio->bi_next) {
 		LASSERT(rw == bio->bi_rw);
 
-                offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
-                bio_for_each_segment(bvec, bio, i) {
-                        BUG_ON(bvec->bv_offset != 0);
-                        BUG_ON(bvec->bv_len != CFS_PAGE_SIZE);
+#ifdef HAVE_BVEC_ITER
+		offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+		bio_for_each_segment(bvec, bio, iter) {
+			BUG_ON(bvec.bv_offset != 0);
+			BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
 
-                        pages[page_count] = bvec->bv_page;
-                        offsets[page_count] = offset;
-                        page_count++;
-                        offset += bvec->bv_len;
-                }
-                LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
-        }
+			pages[page_count] = bvec.bv_page;
+			offsets[page_count] = offset;
+			page_count++;
+			offset += bvec.bv_len;
+		}
+#else
+		offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
+		bio_for_each_segment(bvec, bio, iter) {
+			BUG_ON(bvec->bv_offset != 0);
+			BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+
+			pages[page_count] = bvec->bv_page;
+			offsets[page_count] = offset;
+			page_count++;
+			offset += bvec->bv_len;
+		}
+#endif
+		LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
+	}
 
 	ll_stats_ops_tally(ll_i2sbi(inode),
 			(rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
@@ -248,32 +256,32 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 	pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
 	pvec->ldp_nr = page_count;
 
-        /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
-         * write those pages into OST. Even worse case is that more pages
-         * would be asked to write out to swap space, and then finally get here
-         * again.
-         * Unfortunately this is NOT easy to fix.
-         * Thoughts on solution:
-         * 0. Define a reserved pool for cl_pages, which could be a list of
-         *    pre-allocated cl_pages from cl_page_kmem;
-         * 1. Define a new operation in cl_object_operations{}, says clo_depth,
-         *    which measures how many layers for this lustre object. Generally
-         *    speaking, the depth would be 2, one for llite, and one for lovsub.
-         *    However, for SNS, there will be more since we need additional page
-         *    to store parity;
-         * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
-         *    pool. Afterwards, the clio would allocate the pages from reserved
-         *    pool, this guarantees we neeedn't allocate the cl_pages from
-         *    generic cl_page slab cache.
-         *    Of course, if there is NOT enough pages in the pool, we might
-         *    be asked to write less pages once, this purely depends on
-         *    implementation. Anyway, we should be careful to avoid deadlocking.
-         */
-        LOCK_INODE_MUTEX(inode);
-        bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
-        UNLOCK_INODE_MUTEX(inode);
-        cl_io_fini(env, io);
-        return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
+	/* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
+	 * write those pages into OST. An even worse case is that more pages
+	 * would be asked to write out to swap space, and then finally get here
+	 * again.
+	 * Unfortunately this is NOT easy to fix.
+	 * Thoughts on solution:
+	 * 0. Define a reserved pool for cl_pages, which could be a list of
+	 *    pre-allocated cl_pages;
+	 * 1. Define a new operation in cl_object_operations{}, say clo_depth,
+	 *    which measures how many layers this lustre object has. Generally
+	 *    speaking, the depth would be 2, one for llite, and one for lovsub.
+	 *    However, for SNS, there will be more since we need an additional
+	 *    page to store parity;
+	 * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
+	 *    pool. Afterwards, the clio would allocate the pages from the
+	 *    reserved pool; this guarantees we needn't allocate the cl_pages
+	 *    from the generic cl_page slab cache.
+	 *    Of course, if there are NOT enough pages in the pool, we might
+	 *    be asked to write fewer pages at once; this purely depends on
+	 *    implementation. Anyway, we should be careful to avoid deadlocking.
+	 */
+	mutex_lock(&inode->i_mutex);
+	bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
+	mutex_unlock(&inode->i_mutex);
+	cl_io_fini(env, io);
+	return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
 }
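Note on the hunk above: with HAVE_BVEC_ITER kernels (3.14 and later), bio_for_each_segment() yields struct bio_vec by value through a struct bvec_iter, while older kernels yield a struct bio_vec pointer indexed by an int, which is why the dereference style differs between the two branches. A minimal sketch of the same compat pattern; fill_page_vector() is a hypothetical helper written for illustration, not part of lloop.c:

#include <linux/bio.h>

/* Sketch: gather a bio's pages the way do_bio_lustrebacked() does,
 * portable across the bvec_iter API change. HAVE_BVEC_ITER mirrors
 * the autoconf test used by the patch. */
static size_t fill_page_vector(struct bio *bio, struct page **pages,
			       size_t max)
{
	size_t n = 0;
#ifdef HAVE_BVEC_ITER
	struct bvec_iter iter;
	struct bio_vec bvec;		/* by value on kernels >= 3.14 */

	bio_for_each_segment(bvec, bio, iter) {
		if (n >= max)
			break;
		pages[n++] = bvec.bv_page;
	}
#else
	int iter;
	struct bio_vec *bvec;		/* by pointer on older kernels */

	bio_for_each_segment(bvec, bio, iter) {
		if (n >= max)
			break;
		pages[n++] = bvec->bv_page;
	}
#endif
	return n;
}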
 
 /*
@@ -281,19 +289,19 @@
  */
 static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
 {
-        unsigned long flags;
-
-        cfs_spin_lock_irqsave(&lo->lo_lock, flags);
-        if (lo->lo_biotail) {
-                lo->lo_biotail->bi_next = bio;
-                lo->lo_biotail = bio;
-        } else
-                lo->lo_bio = lo->lo_biotail = bio;
-        cfs_spin_unlock_irqrestore(&lo->lo_lock, flags);
-
-        cfs_atomic_inc(&lo->lo_pending);
-        if (cfs_waitq_active(&lo->lo_bh_wait))
-                cfs_waitq_signal(&lo->lo_bh_wait);
+	unsigned long flags;
+
+	spin_lock_irqsave(&lo->lo_lock, flags);
+	if (lo->lo_biotail) {
+		lo->lo_biotail->bi_next = bio;
+		lo->lo_biotail = bio;
+	} else
+		lo->lo_bio = lo->lo_biotail = bio;
+	spin_unlock_irqrestore(&lo->lo_lock, flags);
+
+	atomic_inc(&lo->lo_pending);
+	if (waitqueue_active(&lo->lo_bh_wait))
+		wake_up(&lo->lo_bh_wait);
 }
 
 /*
@@ -301,18 +309,18 @@
  */
 static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
 {
-        struct bio *first;
-        struct bio **bio;
-        unsigned int count = 0;
-        unsigned int page_count = 0;
-        int rw;
-
-        cfs_spin_lock_irq(&lo->lo_lock);
-        first = lo->lo_bio;
-        if (unlikely(first == NULL)) {
-                cfs_spin_unlock_irq(&lo->lo_lock);
-                return 0;
-        }
+	struct bio *first;
+	struct bio **bio;
+	unsigned int count = 0;
+	unsigned int page_count = 0;
+	int rw;
+
+	spin_lock_irq(&lo->lo_lock);
+	first = lo->lo_bio;
+	if (unlikely(first == NULL)) {
+		spin_unlock_irq(&lo->lo_lock);
+		return 0;
+	}
 
 	/* TODO: need to split the bio, too bad. */
 	LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);
 
@@ -320,12 +328,17 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
 	rw = first->bi_rw;
 	bio = &lo->lo_bio;
 	while (*bio && (*bio)->bi_rw == rw) {
-                CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
-                       (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
-                       page_count, (*bio)->bi_vcnt);
-                if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
-                        break;
-
+#ifdef HAVE_BVEC_ITER
+		CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt %u\n",
+		       (unsigned long long)(*bio)->bi_iter.bi_sector,
+		       (*bio)->bi_iter.bi_size, page_count, (*bio)->bi_vcnt);
+#else
+		CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt %u\n",
+		       (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+		       page_count, (*bio)->bi_vcnt);
+#endif
+		if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
+			break;
 		page_count += (*bio)->bi_vcnt;
 		count++;
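For orientation, the while loop in the hunk above is the whole batching policy of loop_get_bio(): keep taking queued bios while they share a direction (bi_rw) and the running segment total still fits LLOOP_MAX_SEGMENTS, so one backend call can service many small bios. The same policy in isolation, on a simplified request type; struct req and batch_requests() are illustrative stand-ins, not driver code, and tail-pointer upkeep is omitted:

/* Illustrative stand-ins for struct bio and the lo_bio queue. */
struct req {
	struct req *next;
	int write;		/* direction, like bio->bi_rw */
	unsigned nr_segs;	/* pages, like bio->bi_vcnt */
};

/* Detach a run of same-direction requests whose combined segment
 * count fits max_segs; returns how many requests were taken. */
static unsigned batch_requests(struct req **queue, struct req **batch,
			       unsigned max_segs)
{
	struct req **p = queue;
	struct req *rest;
	unsigned segs = 0, taken = 0;
	int write;

	if (*p == NULL)
		return 0;
	if ((*p)->nr_segs > max_segs)
		return 0;	/* would need splitting; the driver LASSERTs */

	write = (*p)->write;
	*batch = *p;
	while (*p != NULL && (*p)->write == write &&
	       segs + (*p)->nr_segs <= max_segs) {
		segs += (*p)->nr_segs;
		taken++;
		p = &(*p)->next;
	}
	rest = *p;
	*p = NULL;		/* terminate the detached batch */
	*queue = rest;		/* remainder stays queued */
	return taken;
}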
@@ -341,11 +354,12 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
 		lo->lo_bio = NULL;
 	}
 	*req = first;
-        cfs_spin_unlock_irq(&lo->lo_lock);
-        return count;
+	spin_unlock_irq(&lo->lo_lock);
+	return count;
 }
 
-static int loop_make_request(struct request_queue *q, struct bio *old_bio)
+static ll_mrf_ret
+loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
 	struct lloop_device *lo = q->queuedata;
 	int rw = bio_rw(old_bio);
@@ -354,12 +368,18 @@ loop_make_request(struct request_queue *q, struct bio *old_bio)
 	if (!lo)
 		goto err;
 
-        CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
-               (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+#ifdef HAVE_BVEC_ITER
+	CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
+	       (unsigned long long)old_bio->bi_iter.bi_sector,
+	       old_bio->bi_iter.bi_size);
+#else
+	CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
+	       (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+#endif
 
-        cfs_spin_lock_irq(&lo->lo_lock);
-        inactive = (lo->lo_state != LLOOP_BOUND);
-        cfs_spin_unlock_irq(&lo->lo_lock);
+	spin_lock_irq(&lo->lo_lock);
+	inactive = (lo->lo_state != LLOOP_BOUND);
+	spin_unlock_irq(&lo->lo_lock);
 	if (inactive)
 		goto err;
@@ -373,12 +393,13 @@ loop_make_request(struct request_queue *q, struct bio *old_bio)
 		goto err;
 	}
 	loop_add_bio(lo, old_bio);
-        return 0;
+	LL_MRF_RETURN(0);
 err:
-        cfs_bio_io_error(old_bio, old_bio->bi_size);
-        return 0;
+	bio_io_error(old_bio);
+	LL_MRF_RETURN(0);
 }
 
+#ifdef HAVE_REQUEST_QUEUE_UNPLUG_FN
 /*
  * kick off io on the underlying address space
  */
@@ -389,23 +410,31 @@ static void loop_unplug(struct request_queue *q)
 	clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
 	blk_run_address_space(lo->lo_backing_file->f_mapping);
 }
+#endif
 
 static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
 {
-        int ret;
-        ret = do_bio_lustrebacked(lo, bio);
-        while (bio) {
-                struct bio *tmp = bio->bi_next;
-                bio->bi_next = NULL;
-                cfs_bio_endio(bio, bio->bi_size, ret);
-                bio = tmp;
-        }
+	int ret;
+
+	ret = do_bio_lustrebacked(lo, bio);
+	while (bio) {
+		struct bio *tmp = bio->bi_next;
+
+		bio->bi_next = NULL;
+#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
+		bio->bi_error = ret;
+		bio_endio(bio);
+#else
+		bio_endio(bio, ret);
+#endif
+		bio = tmp;
+	}
 }
 
 static inline int loop_active(struct lloop_device *lo)
 {
-        return cfs_atomic_read(&lo->lo_pending) ||
-                (lo->lo_state == LLOOP_RUNDOWN);
+	return atomic_read(&lo->lo_pending) ||
+	       (lo->lo_state == LLOOP_RUNDOWN);
 }
 
 /*
@@ -421,11 +450,9 @@ static int loop_thread(void *data)
 	unsigned long total_count = 0;
 
 	struct lu_env *env;
-        int refcheck;
+	__u16 refcheck;
 	int ret = 0;
 
-        daemonize("lloop%d", lo->lo_number);
-
 	set_user_nice(current, -20);
 
 	lo->lo_state = LLOOP_BOUND;
@@ -442,15 +469,15 @@ static int loop_thread(void *data)
 	/*
 	 * up sem, we are running
 	 */
-        cfs_up(&lo->lo_sem);
-
-        for (;;) {
-                cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
-                if (!cfs_atomic_read(&lo->lo_pending)) {
-                        int exiting = 0;
-                        cfs_spin_lock_irq(&lo->lo_lock);
-                        exiting = (lo->lo_state == LLOOP_RUNDOWN);
-                        cfs_spin_unlock_irq(&lo->lo_lock);
+	up(&lo->lo_sem);
+
+	for (;;) {
+		wait_event(lo->lo_bh_wait, loop_active(lo));
+		if (!atomic_read(&lo->lo_pending)) {
+			int exiting = 0;
+			spin_lock_irq(&lo->lo_lock);
+			exiting = (lo->lo_state == LLOOP_RUNDOWN);
+			spin_unlock_irq(&lo->lo_lock);
 			if (exiting)
 				break;
 		}
@@ -469,21 +496,21 @@ static int loop_thread(void *data)
 		} else {
 			times++;
 		}
-                if ((times & 127) == 0) {
-                        CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
-                               total_count, times, total_count / times);
-                }
-
-                LASSERT(bio != NULL);
-                LASSERT(count <= cfs_atomic_read(&lo->lo_pending));
-                loop_handle_bio(lo, bio);
-                cfs_atomic_sub(count, &lo->lo_pending);
-        }
-        cl_env_put(env, &refcheck);
+		if ((times & 127) == 0) {
+			CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
+			       total_count, times, total_count / times);
+		}
+
+		LASSERT(bio != NULL);
+		LASSERT(count <= atomic_read(&lo->lo_pending));
+		loop_handle_bio(lo, bio);
+		atomic_sub(count, &lo->lo_pending);
+	}
+	cl_env_put(env, &refcheck);
 out:
-        cfs_up(&lo->lo_sem);
-        return ret;
+	up(&lo->lo_sem);
+	return ret;
 }
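In the hunks above, loop_thread() no longer daemonize()s itself; the patch starts it with kthread_run() (in loop_set_fd(), below) and uses lo_sem, initialized locked, as the start/stop handshake: the binder blocks in down() until the thread has set LLOOP_BOUND and called up(), and the unbinder blocks the same way until the thread exits. Note that, like the original, the patch does not check kthread_run()'s ERR_PTR return. A condensed sketch of the handshake under hypothetical names (struct dev, worker_fn, bind):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/semaphore.h>

/* Hypothetical miniature of the lo_sem handshake. */
struct dev {
	struct semaphore sem;	/* sema_init(&sem, 0): starts locked */
	int state;
};

static int worker_fn(void *data)
{
	struct dev *d = data;

	d->state = 1;		/* LLOOP_BOUND in the driver */
	up(&d->sem);		/* wake the binder in bind() */
	/* ... service requests until asked to run down ... */
	up(&d->sem);		/* wake the unbinder on exit */
	return 0;
}

static int bind(struct dev *d, int minor)
{
	struct task_struct *t;

	t = kthread_run(worker_fn, d, "lloop%d", minor);
	if (IS_ERR(t))
		return PTR_ERR(t);	/* the patch itself skips this check */
	down(&d->sem);		/* wait until the worker is running */
	return 0;
}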
 
 static int loop_set_fd(struct lloop_device *lo, struct file *unused,
@@ -495,8 +522,8 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 	int error;
 	loff_t size;
 
-        if (!cfs_try_module_get(THIS_MODULE))
-                return -ENODEV;
+	if (!try_module_get(THIS_MODULE))
+		return -ENODEV;
 
 	error = -EBUSY;
 	if (lo->lo_state != LLOOP_UNBOUND)
@@ -524,11 +551,10 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 
 	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
-        lo->lo_blocksize = CFS_PAGE_SIZE;
+	lo->lo_blocksize = PAGE_CACHE_SIZE;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;
-        lo->ioctl = NULL;
 	lo->lo_sizelimit = 0;
 	lo->old_gfp_mask = mapping_gfp_mask(mapping);
 	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
@@ -541,36 +567,35 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 	 */
 	blk_queue_make_request(lo->lo_queue, loop_make_request);
 	lo->lo_queue->queuedata = lo;
+#ifdef HAVE_REQUEST_QUEUE_UNPLUG_FN
 	lo->lo_queue->unplug_fn = loop_unplug;
+#endif
 
-        /* queue parameters */
-        CLASSERT(CFS_PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
-        blk_queue_logical_block_size(lo->lo_queue,
-                                     (unsigned short)CFS_PAGE_SIZE);
-        blk_queue_max_hw_sectors(lo->lo_queue,
-                                 LLOOP_MAX_SEGMENTS << (CFS_PAGE_SHIFT - 9));
-        blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
+	/* queue parameters */
+	blk_queue_max_hw_sectors(lo->lo_queue,
+				 LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+	blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
 
 	set_capacity(disks[lo->lo_number], size);
 	bd_set_size(bdev, size << 9);
-        set_blocksize(bdev, lo->lo_blocksize);
+	set_blocksize(bdev, lo->lo_blocksize);
 
-        cfs_create_thread(loop_thread, lo, CLONE_KERNEL);
-        cfs_down(&lo->lo_sem);
-        return 0;
+	kthread_run(loop_thread, lo, "lloop%d", lo->lo_number);
+	down(&lo->lo_sem);
+	return 0;
 
- out:
-        /* This is safe: open() is still holding a reference. */
-        cfs_module_put(THIS_MODULE);
-        return error;
+out:
+	/* This is safe: open() is still holding a reference. */
+	module_put(THIS_MODULE);
+	return error;
 }
 
 static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
 		       int count)
 {
 	struct file *filp = lo->lo_backing_file;
-        int gfp = lo->old_gfp_mask;
+	gfp_t gfp = lo->old_gfp_mask;
 
 	if (lo->lo_state != LLOOP_BOUND)
 		return -ENXIO;
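One subtle line in loop_set_fd() above is the mapping_set_gfp_mask() call: clearing __GFP_IO and __GFP_FS on the backing file's mapping keeps page allocations for that file from entering I/O or filesystem reclaim, which could otherwise queue more writeback to this same loop device and deadlock. The pattern in isolation; pin_mapping_nofs() is an illustrative name, not from the patch:

#include <linux/pagemap.h>

/* Mirrors the gfp-mask handling in loop_set_fd()/loop_clr_fd(). */
static gfp_t pin_mapping_nofs(struct address_space *mapping)
{
	gfp_t old = mapping_gfp_mask(mapping);

	/* Page allocations for this mapping may no longer recurse into
	 * I/O or filesystem reclaim; both could re-enter this device. */
	mapping_set_gfp_mask(mapping, old & ~(__GFP_IO | __GFP_FS));
	return old;	/* saved as old_gfp_mask, restored on detach */
}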
@@ -581,106 +606,97 @@ static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
 	if (filp == NULL)
 		return -EINVAL;
 
-        cfs_spin_lock_irq(&lo->lo_lock);
-        lo->lo_state = LLOOP_RUNDOWN;
-        cfs_spin_unlock_irq(&lo->lo_lock);
-        cfs_waitq_signal(&lo->lo_bh_wait);
+	spin_lock_irq(&lo->lo_lock);
+	lo->lo_state = LLOOP_RUNDOWN;
+	spin_unlock_irq(&lo->lo_lock);
+	wake_up(&lo->lo_bh_wait);
 
-        cfs_down(&lo->lo_sem);
+	down(&lo->lo_sem);
 	lo->lo_backing_file = NULL;
-        lo->ioctl = NULL;
 	lo->lo_device = NULL;
 	lo->lo_offset = 0;
 	lo->lo_sizelimit = 0;
 	lo->lo_flags = 0;
-        ll_invalidate_bdev(bdev, 0);
+	invalidate_bdev(bdev);
 	set_capacity(disks[lo->lo_number], 0);
 	bd_set_size(bdev, 0);
 	mapping_set_gfp_mask(filp->f_mapping, gfp);
 	lo->lo_state = LLOOP_UNBOUND;
-        fput(filp);
-        /* This is safe: open() is still holding a reference. */
-        cfs_module_put(THIS_MODULE);
-        return 0;
+	fput(filp);
+	/* This is safe: open() is still holding a reference. */
+	module_put(THIS_MODULE);
+	return 0;
 }
 
-#ifdef HAVE_BLKDEV_PUT_2ARGS
 static int lo_open(struct block_device *bdev, fmode_t mode)
 {
 	struct lloop_device *lo = bdev->bd_disk->private_data;
-#else
-static int lo_open(struct inode *inode, struct file *file)
-{
-        struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
-#endif
 
-        cfs_down(&lo->lo_ctl_mutex);
+	mutex_lock(&lo->lo_ctl_mutex);
 	lo->lo_refcnt++;
-        cfs_up(&lo->lo_ctl_mutex);
+	mutex_unlock(&lo->lo_ctl_mutex);
 
 	return 0;
 }
 
-#ifdef HAVE_BLKDEV_PUT_2ARGS
-static int lo_release(struct gendisk *disk, fmode_t mode)
-{
-        struct lloop_device *lo = disk->private_data;
+#ifdef HAVE_BLKDEV_RELEASE_RETURN_INT
+static int
 #else
-static int lo_release(struct inode *inode, struct file *file)
-{
-        struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
+static void
 #endif
+lo_release(struct gendisk *disk, fmode_t mode)
+{
+	struct lloop_device *lo = disk->private_data;
 
-        cfs_down(&lo->lo_ctl_mutex);
-        --lo->lo_refcnt;
-        cfs_up(&lo->lo_ctl_mutex);
-
-        return 0;
+	mutex_lock(&lo->lo_ctl_mutex);
+	--lo->lo_refcnt;
+	mutex_unlock(&lo->lo_ctl_mutex);
+#ifdef HAVE_BLKDEV_RELEASE_RETURN_INT
+	return 0;
+#endif
 }
 
 /* lloop device node's ioctl function. */
-#ifdef HAVE_BLKDEV_PUT_2ARGS
 static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 		    unsigned int cmd, unsigned long arg)
 {
-        struct lloop_device *lo = bdev->bd_disk->private_data;
-        struct inode *inode = lo->lo_backing_file->f_dentry->d_inode;
-#else
-static int lo_ioctl(struct inode *inode, struct file *unused,
-                    unsigned int cmd, unsigned long arg)
-{
-        struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
-        struct block_device *bdev = inode->i_bdev;
-#endif
-        int err = 0;
+	struct lloop_device *lo = bdev->bd_disk->private_data;
+	int err = 0;
 
-        cfs_down(&lloop_mutex);
+	mutex_lock(&lloop_mutex);
 	switch (cmd) {
 	case LL_IOC_LLOOP_DETACH: {
 		err = loop_clr_fd(lo, bdev, 2);
 		if (err == 0)
-                        ll_blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
+			blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
 		break;
 	}
 
-        case LL_IOC_LLOOP_INFO: {
-                struct lu_fid fid;
+	case LL_IOC_LLOOP_INFO: {
+		struct inode *inode;
+		struct lu_fid fid;
 
-                if (lo->lo_state == LLOOP_BOUND)
+		if (lo->lo_backing_file == NULL) {
+			err = -ENOENT;
+			break;
+		}
+		inode = lo->lo_backing_file->f_path.dentry->d_inode;
+		if (inode != NULL && lo->lo_state == LLOOP_BOUND)
 			fid = ll_i2info(inode)->lli_fid;
 		else
 			fid_zero(&fid);
 
-                if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid)))
-                        err = -EFAULT;
-                break;
+		if (copy_to_user((struct lu_fid __user *)arg,
+				 &fid, sizeof(fid)))
+			err = -EFAULT;
+		break;
 	}
 
 	default:
 		err = -EINVAL;
 		break;
 	}
-        cfs_up(&lloop_mutex);
+	mutex_unlock(&lloop_mutex);
 
 	return err;
 }
@@ -716,11 +732,12 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
 
 	CWARN("Enter llop_ioctl\n");
 
-        cfs_down(&lloop_mutex);
-        switch (cmd) {
-        case LL_IOC_LLOOP_ATTACH: {
-                struct lloop_device *lo_free = NULL;
-                int i;
+	mutex_lock(&lloop_mutex);
+	switch (cmd) {
+	case LL_IOC_LLOOP_ATTACH: {
+		struct inode *inode = file->f_path.dentry->d_inode;
+		struct lloop_device *lo_free = NULL;
+		int i;
 
 		for (i = 0; i < max_loop; i++, lo = NULL) {
 			lo = &loop_dev[i];
@@ -729,21 +746,21 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
 				lo_free = lo;
 				continue;
 			}
-                        if (lo->lo_backing_file->f_dentry->d_inode ==
-                            file->f_dentry->d_inode)
-                                break;
-                }
-                if (lo || !lo_free)
-                        GOTO(out, err = -EBUSY);
+			if (lo->lo_backing_file->f_path.dentry->d_inode ==
+			    inode)
+				break;
+		}
+		if (lo || !lo_free)
+			GOTO(out, err = -EBUSY);
 
 		lo = lo_free;
 		dev = MKDEV(lloop_major, lo->lo_number);
 
 		/* quit if the used pointer is writable */
-                if (put_user((long)old_encode_dev(dev), (long*)arg))
-                        GOTO(out, err = -EFAULT);
+		if (put_user((long)old_encode_dev(dev), (long __user *)arg))
+			GOTO(out, err = -EFAULT);
 
-                bdev = open_by_devnum(dev, file->f_mode);
+		bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
 		if (IS_ERR(bdev))
 			GOTO(out, err = PTR_ERR(bdev));
 
@@ -751,7 +768,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
 		err = loop_set_fd(lo, NULL, bdev, file);
 		if (err) {
 			fput(file);
-                        ll_blkdev_put(bdev, 0);
+			blkdev_put(bdev, 0);
 		}
 
 		break;
@@ -775,7 +792,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
 		bdev = lo->lo_device;
 		err = loop_clr_fd(lo, bdev, 1);
 		if (err == 0)
-                        ll_blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
+			blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
 
 		break;
 	}
@@ -786,7 +803,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
 	}
 
 out:
-        cfs_up(&lloop_mutex);
+	mutex_unlock(&lloop_mutex);
 out1:
 	if (rcp)
 		*rcp = err;
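For reference, the attach protocol that lloop_ioctl() implements above is two-step: the ioctl is issued on an ordinary Lustre file, the driver writes the old-encoded block device number back through the long * argument, and the caller then opens that device node. A hedged userspace sketch; LL_IOC_LLOOP_ATTACH is assumed to come from the installed lustre_user.h header, lloop_attach() is an illustrative name, and error reporting is trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <lustre/lustre_user.h>	/* LL_IOC_LLOOP_ATTACH, assumed path */

/* Illustrative only: attach a Lustre file as an lloop block device. */
int lloop_attach(const char *lustre_file)
{
	long dev = 0;
	int fd = open(lustre_file, O_RDWR);

	if (fd < 0)
		return -1;
	/* the driver fills 'dev' via put_user(old_encode_dev(dev), arg) */
	if (ioctl(fd, LL_IOC_LLOOP_ATTACH, &dev) < 0)
		return -1;
	/* old 16-bit encoding: major in the high byte, minor in the low */
	printf("attached as block device %ld:%ld\n",
	       (dev >> 8) & 0xff, dev & 0xff);
	return fd;	/* the driver takes its own file reference */
}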
@@ -832,7 +849,7 @@ static int __init lloop_init(void)
 		goto out_mem3;
 	}
 
-        cfs_init_mutex(&lloop_mutex);
+	mutex_init(&lloop_mutex);
 
 	for (i = 0; i < max_loop; i++) {
 		struct lloop_device *lo = &loop_dev[i];
@@ -842,11 +859,11 @@ static int __init lloop_init(void)
 		if (!lo->lo_queue)
 			goto out_mem4;
 
-                cfs_init_mutex(&lo->lo_ctl_mutex);
-                cfs_init_mutex_locked(&lo->lo_sem);
-                cfs_waitq_init(&lo->lo_bh_wait);
-                lo->lo_number = i;
-                cfs_spin_lock_init(&lo->lo_lock);
+		mutex_init(&lo->lo_ctl_mutex);
+		sema_init(&lo->lo_sem, 0);
+		init_waitqueue_head(&lo->lo_bh_wait);
+		lo->lo_number = i;
+		spin_lock_init(&lo->lo_lock);
 
 		disk->major = lloop_major;
 		disk->first_minor = i;
 		disk->fops = &lo_fops;
@@ -877,29 +894,28 @@ out_mem1:
 	return -ENOMEM;
 }
 
-static void lloop_exit(void)
+static void __exit lloop_exit(void)
 {
-        int i;
-
-        ll_iocontrol_unregister(ll_iocontrol_magic);
-        for (i = 0; i < max_loop; i++) {
-                del_gendisk(disks[i]);
-                blk_cleanup_queue(loop_dev[i].lo_queue);
-                put_disk(disks[i]);
-        }
-        if (ll_unregister_blkdev(lloop_major, "lloop"))
-                CWARN("lloop: cannot unregister blkdev\n");
-        else
-                CDEBUG(D_CONFIG, "unregistered lloop major %d\n", lloop_major);
-
-        OBD_FREE(disks, max_loop * sizeof(*disks));
-        OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));
+	int i;
+
+	ll_iocontrol_unregister(ll_iocontrol_magic);
+	for (i = 0; i < max_loop; i++) {
+		del_gendisk(disks[i]);
+		blk_cleanup_queue(loop_dev[i].lo_queue);
+		put_disk(disks[i]);
+	}
+	unregister_blkdev(lloop_major, "lloop");
+
+	OBD_FREE(disks, max_loop * sizeof(*disks));
+	OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));
 }
 
-module_init(lloop_init);
-module_exit(lloop_exit);
-
-CFS_MODULE_PARM(max_loop, "i", int, 0444, "maximum of lloop_device");
-MODULE_AUTHOR("Sun Microsystems, Inc. ");
+module_param(max_loop, int, 0444);
+MODULE_PARM_DESC(max_loop, "maximum number of lloop devices");
+MODULE_AUTHOR("OpenSFS, Inc. ");
 MODULE_DESCRIPTION("Lustre virtual block device");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
 MODULE_LICENSE("GPL");
+
+module_init(lloop_init);
+module_exit(lloop_exit);