/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations prepare_write and/or commit_write are not available on the
 * backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 */
#include <linux/module.h>

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>		/* for invalidate_bdev() */
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/pagevec.h>
#include <linux/kthread.h>		/* for kthread_run() */

#include <asm/uaccess.h>

#include <lustre_lib.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#define LLOOP_MAX_SEGMENTS        LNET_MAX_IOV

/* Possible states of device */
enum {
        LLOOP_UNBOUND,
        LLOOP_BOUND,
        LLOOP_RUNDOWN,
};

struct lloop_device {
        int                   lo_number;
        int                   lo_refcnt;
        loff_t                lo_offset;
        loff_t                lo_sizelimit;
        int                   lo_flags;

        struct file          *lo_backing_file;
        struct block_device  *lo_device;
        unsigned              lo_blocksize;

        int                   old_gfp_mask;

        spinlock_t            lo_lock;
        struct bio           *lo_bio;
        struct bio           *lo_biotail;
        int                   lo_state;
        struct semaphore      lo_sem;
        struct mutex          lo_ctl_mutex;
        atomic_t              lo_pending;
        wait_queue_head_t     lo_bh_wait;

        struct request_queue *lo_queue;

        const struct lu_env  *lo_env;
        struct cl_io          lo_io;
        struct ll_dio_pages   lo_pvec;

        /* data to handle bio for lustre. */
        struct lo_request_data {
                struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
                loff_t       lrd_offsets[LLOOP_MAX_SEGMENTS];
        } lo_requests[1];
};

/* Possible device flags */
enum {
        LO_FLAGS_READ_ONLY = 1,
};
static int lloop_major;
#define MAX_LOOP_DEFAULT 16
static int max_loop = MAX_LOOP_DEFAULT;
static struct lloop_device *loop_dev;
static struct gendisk **disks;
static struct mutex lloop_mutex;
static void *ll_iocontrol_magic = NULL;
static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
{
        loff_t size, offset, loopsize;

        /* Compute loopsize in bytes */
        size = i_size_read(file->f_mapping->host);
        offset = lo->lo_offset;
        loopsize = size - offset;
        if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
                loopsize = lo->lo_sizelimit;

        /*
         * Unfortunately, if we want to do I/O on the device,
         * the number of 512-byte sectors has to fit into a sector_t.
         */
        return loopsize >> 9;
}
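/*
 * Illustrative example (not from the original source): with lo_offset = 0 and
 * lo_sizelimit = 0, a 1 GiB backing file makes get_loop_size() return
 * 1073741824 >> 9 = 2097152 sectors of 512 bytes, a value that must fit into
 * sector_t for the block layer to address the device.
 */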
static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
{
        const struct lu_env *env = lo->lo_env;
        struct cl_io *io = &lo->lo_io;
        struct inode *inode = lo->lo_backing_file->f_dentry->d_inode;
        struct cl_object *obj = ll_i2info(inode)->lli_clob;
        pgoff_t offset;
        int ret;
        int i;
        int rw;
        obd_count page_count = 0;
        struct bio_vec *bvec;
        struct bio *bio;
        ssize_t bytes;

        struct ll_dio_pages *pvec = &lo->lo_pvec;
        struct page **pages = pvec->ldp_pages;
        loff_t *offsets = pvec->ldp_offsets;

        truncate_inode_pages(inode->i_mapping, 0);

        /* initialize the IO */
        memset(io, 0, sizeof(*io));
        ret = cl_io_init(env, io, CIT_MISC, obj);
        if (ret)
                return io->ci_result;
        io->ci_lockreq = CILR_NEVER;

        LASSERT(head != NULL);
        rw = head->bi_rw;
        for (bio = head; bio != NULL; bio = bio->bi_next) {
                LASSERT(rw == bio->bi_rw);

                offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
                bio_for_each_segment(bvec, bio, i) {
                        BUG_ON(bvec->bv_offset != 0);
                        BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);

                        pages[page_count] = bvec->bv_page;
                        offsets[page_count] = offset;
                        page_count++;
                        offset += bvec->bv_len;
                }
                LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
        }

        ll_stats_ops_tally(ll_i2sbi(inode),
                           (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
                           page_count);

        pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
        pvec->ldp_nr = page_count;

        /* FIXME: in ll_direct_rw_pages(), it has to allocate many cl_page{}s to
         * write those pages into OST. The even worse case is that more pages
         * would be asked to write out to swap space, and then finally get here
         * again.
         * Unfortunately this is NOT easy to fix.
         * Thoughts on a solution (see the illustrative sketch after this
         * function):
         * 0. Define a reserved pool of cl_pages, which could be a list of
         *    pre-allocated cl_pages;
         * 1. Define a new operation in cl_object_operations{}, say clo_depth,
         *    which measures how many layers this lustre object has. Generally
         *    speaking, the depth would be 2: one for llite and one for lovsub.
         *    However, for SNS there will be more, since we need an additional
         *    page to store parity;
         * 2. Reserve (page_count * depth) cl_pages from the reserved pool.
         *    Afterwards, clio would allocate pages from the reserved pool;
         *    this guarantees we needn't allocate the cl_pages from the
         *    generic cl_page slab cache.
         *    Of course, if there are NOT enough pages in the pool, we might
         *    be asked to write fewer pages at once; this purely depends on the
         *    implementation. Anyway, we should be careful to avoid deadlocks.
         */
        mutex_lock(&inode->i_mutex);
        bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
        mutex_unlock(&inode->i_mutex);
        cl_io_fini(env, io);
        return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
}
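/*
 * Illustrative sketch only (compiled out): one way the reserved cl_page pool
 * described in the FIXME above could be organized.  Everything below is an
 * assumption for illustration; the names (lloop_clpage_pool, lcp_*) and the
 * helper are hypothetical and are not part of CLIO or of this driver.
 */
#if 0
struct lloop_clpage_pool {
        spinlock_t       lcp_lock;     /* protects the free counter/list */
        struct list_head lcp_free;     /* pre-allocated cl_page placeholders */
        unsigned int     lcp_nr_free;  /* entries currently available */
};

/* Reserve up to @nr entries; the caller may be granted fewer and must then
 * submit a smaller I/O, as outlined in step 2 of the FIXME above. */
static unsigned int lloop_clpage_reserve(struct lloop_clpage_pool *pool,
                                         unsigned int nr)
{
        unsigned int granted;

        spin_lock(&pool->lcp_lock);
        granted = min(nr, pool->lcp_nr_free);
        pool->lcp_nr_free -= granted;
        spin_unlock(&pool->lcp_lock);
        return granted;
}
#endif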
/*
 * Add bio to back of pending list
 */
static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
{
        unsigned long flags;

        spin_lock_irqsave(&lo->lo_lock, flags);
        if (lo->lo_biotail) {
                lo->lo_biotail->bi_next = bio;
                lo->lo_biotail = bio;
        } else
                lo->lo_bio = lo->lo_biotail = bio;
        spin_unlock_irqrestore(&lo->lo_lock, flags);

        atomic_inc(&lo->lo_pending);
        if (waitqueue_active(&lo->lo_bh_wait))
                wake_up(&lo->lo_bh_wait);
}
/*
 * Grab first pending buffer
 */
static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
{
        struct bio *first;
        struct bio **bio;
        unsigned int count = 0;
        unsigned int page_count = 0;
        int rw;

        spin_lock_irq(&lo->lo_lock);
        first = lo->lo_bio;
        if (unlikely(first == NULL)) {
                spin_unlock_irq(&lo->lo_lock);
                return 0;
        }

        /* TODO: need to split the bio, too bad. */
        rw = first->bi_rw;
        bio = &lo->lo_bio;
        while (*bio && (*bio)->bi_rw == rw) {
                CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt %u\n",
                       (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
                       page_count, (*bio)->bi_vcnt);
                if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
                        break;

                page_count += (*bio)->bi_vcnt;
                count++;
                bio = &(*bio)->bi_next;
        }
        if (*bio) {
                /* Some of the bios can't be merged. */
                lo->lo_bio = *bio;
                *bio = NULL;
        } else {
                /* Hit the end of queue */
                lo->lo_biotail = NULL;
        }
        *req = first;
        spin_unlock_irq(&lo->lo_lock);
        return count;
}
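/*
 * Illustrative example (not from the original source): if the pending list
 * holds three READ bios of 4, 8 and 2 pages followed by a WRITE bio, the loop
 * above stops at the WRITE, so loop_get_bio() hands back a chain of three
 * bios (count = 3) covering 14 pages, provided 14 <= LLOOP_MAX_SEGMENTS.
 */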
/* Return type assumed void here; some Lustre trees wrap it in a
 * kernel-compatibility macro that may return a value. */
static void
loop_make_request(struct request_queue *q, struct bio *old_bio)
{
        struct lloop_device *lo = q->queuedata;
        int rw = bio_rw(old_bio);
        int inactive;

        if (lo == NULL)
                goto err;

        CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
               (unsigned long long)old_bio->bi_sector, old_bio->bi_size);

        spin_lock_irq(&lo->lo_lock);
        inactive = (lo->lo_state != LLOOP_BOUND);
        spin_unlock_irq(&lo->lo_lock);
        if (inactive)
                goto err;

        if (rw == WRITE) {
                if (lo->lo_flags & LO_FLAGS_READ_ONLY)
                        goto err;
        } else if (rw == READA) {
                rw = READ;
        } else if (rw != READ) {
                CERROR("lloop: unknown command (%x)\n", rw);
                goto err;
        }
        loop_add_bio(lo, old_bio);
        return;
err:
        bio_io_error(old_bio);
}
#ifdef HAVE_REQUEST_QUEUE_UNPLUG_FN
/*
 * kick off io on the underlying address space
 */
static void loop_unplug(struct request_queue *q)
{
        struct lloop_device *lo = q->queuedata;

        clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
        blk_run_address_space(lo->lo_backing_file->f_mapping);
}
#endif
static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
{
        int ret;

        ret = do_bio_lustrebacked(lo, bio);
        /* complete every bio in the chain with the status of the transfer */
        while (bio) {
                struct bio *tmp = bio->bi_next;

                bio->bi_next = NULL;
                bio_endio(bio, ret);
                bio = tmp;
        }
}

static inline int loop_active(struct lloop_device *lo)
{
        return atomic_read(&lo->lo_pending) ||
               (lo->lo_state == LLOOP_RUNDOWN);
}
/*
 * worker thread that handles reads/writes to file backed loop devices,
 * to avoid blocking in our make_request_fn.
 */
static int loop_thread(void *data)
{
        struct lloop_device *lo = data;
        struct bio *bio;
        unsigned int count;
        unsigned long times = 0;
        unsigned long total_count = 0;
        struct lu_env *env;
        int refcheck;
        int ret = 0;

        set_user_nice(current, -20);

        lo->lo_state = LLOOP_BOUND;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                GOTO(out, ret = PTR_ERR(env));

        lo->lo_env = env;
        memset(&lo->lo_pvec, 0, sizeof(lo->lo_pvec));
        lo->lo_pvec.ldp_pages   = lo->lo_requests[0].lrd_pages;
        lo->lo_pvec.ldp_offsets = lo->lo_requests[0].lrd_offsets;

        /*
         * up sem, we are running
         */
        up(&lo->lo_sem);
        for (;;) {
                wait_event(lo->lo_bh_wait, loop_active(lo));
                if (!atomic_read(&lo->lo_pending)) {
                        int exiting = 0;

                        spin_lock_irq(&lo->lo_lock);
                        exiting = (lo->lo_state == LLOOP_RUNDOWN);
                        spin_unlock_irq(&lo->lo_lock);
                        if (exiting)
                                break;
                }

                bio = NULL;
                count = loop_get_bio(lo, &bio);
                if (!count) {
                        CWARN("lloop(minor: %d): missing bio\n", lo->lo_number);
                        continue;
                }

                total_count += count;
                if (total_count < count) {      /* overflow */
                        total_count = count;
                        times = 1;
                } else {
                        times++;
                }
                if ((times & 127) == 0) {
                        CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
                               total_count, times, total_count / times);
                }

                LASSERT(bio != NULL);
                LASSERT(count <= atomic_read(&lo->lo_pending));
                loop_handle_bio(lo, bio);
                atomic_sub(count, &lo->lo_pending);
        }
        cl_env_put(env, &refcheck);

out:
        up(&lo->lo_sem);
        return ret;
}
static int loop_set_fd(struct lloop_device *lo, struct file *unused,
                       struct block_device *bdev, struct file *file)
{
        struct inode *inode;
        struct address_space *mapping;
        int lo_flags = 0;
        int error;
        loff_t size;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        error = -EBUSY;
        if (lo->lo_state != LLOOP_UNBOUND)
                goto out;

        mapping = file->f_mapping;
        inode = mapping->host;

        error = -EINVAL;
        if (!S_ISREG(inode->i_mode) || inode->i_sb->s_magic != LL_SUPER_MAGIC)
                goto out;

        if (!(file->f_mode & FMODE_WRITE))
                lo_flags |= LO_FLAGS_READ_ONLY;

        size = get_loop_size(lo, file);

        if ((loff_t)(sector_t)size != size) {
                error = -EFBIG;
                goto out;
        }

        /* remove all pages in cache so as dirty pages not to be existent. */
        truncate_inode_pages(mapping, 0);

        set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

        lo->lo_blocksize = PAGE_CACHE_SIZE;
        lo->lo_device = bdev;
        lo->lo_flags = lo_flags;
        lo->lo_backing_file = file;
        lo->lo_sizelimit = 0;
        lo->old_gfp_mask = mapping_gfp_mask(mapping);
        mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

        lo->lo_bio = lo->lo_biotail = NULL;

        /*
         * set queue make_request_fn, and add limits based on lower level
         * device
         */
        blk_queue_make_request(lo->lo_queue, loop_make_request);
        lo->lo_queue->queuedata = lo;
#ifdef HAVE_REQUEST_QUEUE_UNPLUG_FN
        lo->lo_queue->unplug_fn = loop_unplug;
#endif

        /* queue parameters */
        blk_queue_max_hw_sectors(lo->lo_queue,
                                 LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
        blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);

        set_capacity(disks[lo->lo_number], size);
        bd_set_size(bdev, size << 9);

        set_blocksize(bdev, lo->lo_blocksize);

        kthread_run(loop_thread, lo, "lloop%d", lo->lo_number);
        down(&lo->lo_sem);
        return 0;

out:
        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
        return error;
}
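/*
 * Illustrative arithmetic (not from the original source): with 4 KiB pages
 * and LLOOP_MAX_SEGMENTS = LNET_MAX_IOV (256 in Lustre trees of this
 * vintage), the limits set in loop_set_fd() allow at most 256 one-page
 * segments per request, i.e. 256 << (12 - 9) = 2048 sectors, or 1 MiB.
 */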
static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
                       int count)
{
        struct file *filp = lo->lo_backing_file;
        int gfp = lo->old_gfp_mask;

        if (lo->lo_state != LLOOP_BOUND)
                return -ENXIO;

        if (lo->lo_refcnt > count)      /* we needed one fd for the ioctl */
                return -EBUSY;

        if (filp == NULL)
                return -EINVAL;

        spin_lock_irq(&lo->lo_lock);
        lo->lo_state = LLOOP_RUNDOWN;
        spin_unlock_irq(&lo->lo_lock);
        wake_up(&lo->lo_bh_wait);

        down(&lo->lo_sem);
        lo->lo_backing_file = NULL;
        lo->lo_device = NULL;
        lo->lo_offset = 0;
        lo->lo_sizelimit = 0;
        lo->lo_flags = 0;
        invalidate_bdev(bdev);
        set_capacity(disks[lo->lo_number], 0);
        bd_set_size(bdev, 0);
        mapping_set_gfp_mask(filp->f_mapping, gfp);
        lo->lo_state = LLOOP_UNBOUND;
        fput(filp);
        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
        return 0;
}
static int lo_open(struct block_device *bdev, fmode_t mode)
{
        struct lloop_device *lo = bdev->bd_disk->private_data;

        mutex_lock(&lo->lo_ctl_mutex);
        lo->lo_refcnt++;
        mutex_unlock(&lo->lo_ctl_mutex);

        return 0;
}

#ifdef HAVE_BLKDEV_RELEASE_RETURN_INT
static int
#else
static void
#endif
lo_release(struct gendisk *disk, fmode_t mode)
{
        struct lloop_device *lo = disk->private_data;

        mutex_lock(&lo->lo_ctl_mutex);
        --lo->lo_refcnt;
        mutex_unlock(&lo->lo_ctl_mutex);
#ifdef HAVE_BLKDEV_RELEASE_RETURN_INT
        return 0;
#endif
}
/* lloop device node's ioctl function. */
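/*
 * Illustrative userspace sketch (not part of this driver): querying the FID
 * of the backing file and detaching through the block device node.  The
 * device path and the omitted error handling are assumptions for the example.
 *
 *      int fd = open("/dev/lloop0", O_RDONLY);
 *      struct lu_fid fid;
 *
 *      ioctl(fd, LL_IOC_LLOOP_INFO, &fid);     // FID of the backing file
 *      ioctl(fd, LL_IOC_LLOOP_DETACH, 0);      // unbind, see loop_clr_fd()
 *      close(fd);
 */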
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
                    unsigned int cmd, unsigned long arg)
{
        struct lloop_device *lo = bdev->bd_disk->private_data;
        struct inode *inode = NULL;
        int err = 0;

        mutex_lock(&lloop_mutex);
        switch (cmd) {
        case LL_IOC_LLOOP_DETACH: {
                err = loop_clr_fd(lo, bdev, 2);
                if (err == 0)
                        blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
                break;
        }

        case LL_IOC_LLOOP_INFO: {
                struct lu_fid fid;

                if (lo->lo_backing_file == NULL) {
                        err = -ENOENT;
                        break;
                }
                if (inode == NULL)
                        inode = lo->lo_backing_file->f_dentry->d_inode;
                if (lo->lo_state == LLOOP_BOUND)
                        fid = ll_i2info(inode)->lli_fid;
                else
                        fid_zero(&fid);

                if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid)))
                        err = -EFAULT;
                break;
        }

        default:
                err = -EINVAL;
                break;
        }
        mutex_unlock(&lloop_mutex);

        return err;
}
static struct block_device_operations lo_fops = {
        .owner   = THIS_MODULE,
        .open    = lo_open,
        .release = lo_release,
        .ioctl   = lo_ioctl,
};
/* dynamic iocontrol callback.
 * This callback is registered in lloop_init and will be called by
 * ll_iocontrol_call.
 *
 * This is a llite regular file ioctl function. It takes the responsibility
 * of attaching or detaching a file to or from an lloop device by device
 * number.
 */
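/*
 * Illustrative userspace sketch (not part of this driver): attaching a Lustre
 * regular file through this callback and detaching it again by device number.
 * The file path and the omitted error handling are assumptions for the
 * example.
 *
 *      long dev;
 *      int fd = open("/mnt/lustre/backing_file", O_RDWR);
 *
 *      ioctl(fd, LL_IOC_LLOOP_ATTACH, &dev);       // dev receives the
 *                                                  // old_encode_dev() number
 *      ...                                         // use the lloop device
 *      ioctl(fd, LL_IOC_LLOOP_DETACH_BYDEV, dev);  // release it
 *      close(fd);
 */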
static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
                                   unsigned int cmd, unsigned long arg,
                                   void *magic, int *rcp)
{
        struct lloop_device *lo = NULL;
        struct block_device *bdev = NULL;
        int err = 0;
        dev_t dev;

        if (magic != ll_iocontrol_magic)
                return LLIOC_CONT;

        if (disks == NULL)
                GOTO(out1, err = -ENODEV);

        CWARN("Enter lloop_ioctl\n");

        mutex_lock(&lloop_mutex);
        switch (cmd) {
        case LL_IOC_LLOOP_ATTACH: {
                struct lloop_device *lo_free = NULL;
                int i;

                for (i = 0; i < max_loop; i++, lo = NULL) {
                        lo = &loop_dev[i];
                        if (lo->lo_state == LLOOP_UNBOUND) {
                                if (!lo_free)
                                        lo_free = lo;
                                continue;
                        }
                        if (lo->lo_backing_file->f_dentry->d_inode ==
                            file->f_dentry->d_inode)
                                break;
                }
                if (lo || !lo_free)
                        GOTO(out, err = -EBUSY);

                lo = lo_free;
                dev = MKDEV(lloop_major, lo->lo_number);

                /* quit if the used pointer is writable */
                if (put_user((long)old_encode_dev(dev), (long *)arg))
                        GOTO(out, err = -EFAULT);

                bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
                if (IS_ERR(bdev))
                        GOTO(out, err = PTR_ERR(bdev));

                get_file(file);
                err = loop_set_fd(lo, NULL, bdev, file);
                if (err) {
                        fput(file);
                        blkdev_put(bdev, 0);
                }

                break;
        }

        case LL_IOC_LLOOP_DETACH_BYDEV: {
                int minor;

                dev = old_decode_dev(arg);
                if (MAJOR(dev) != lloop_major)
                        GOTO(out, err = -EINVAL);

                minor = MINOR(dev);
                if (minor > max_loop - 1)
                        GOTO(out, err = -EINVAL);

                lo = &loop_dev[minor];
                if (lo->lo_state != LLOOP_BOUND)
                        GOTO(out, err = -EINVAL);

                bdev = lo->lo_device;
                err = loop_clr_fd(lo, bdev, 1);
                if (err == 0)
                        blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */

                break;
        }

        default:
                err = -EINVAL;
                break;
        }

out:
        mutex_unlock(&lloop_mutex);
out1:
        if (rcp)
                *rcp = err;
        return LLIOC_STOP;
}
static int __init lloop_init(void)
{
        int i;
        unsigned int cmdlist[] = {
                LL_IOC_LLOOP_ATTACH,
                LL_IOC_LLOOP_DETACH_BYDEV,
        };

        if (max_loop < 1 || max_loop > 256) {
                max_loop = MAX_LOOP_DEFAULT;
                CWARN("lloop: invalid max_loop (must be between"
                      " 1 and 256), using default (%u)\n", max_loop);
        }

        lloop_major = register_blkdev(0, "lloop");
        if (lloop_major < 0)
                return -EIO;

        CDEBUG(D_CONFIG, "registered lloop major %d with %u minors\n",
               lloop_major, max_loop);

        ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist);
        if (ll_iocontrol_magic == NULL)
                goto out_mem1;

        OBD_ALLOC_WAIT(loop_dev, max_loop * sizeof(*loop_dev));
        if (!loop_dev)
                goto out_mem1;

        OBD_ALLOC_WAIT(disks, max_loop * sizeof(*disks));
        if (!disks)
                goto out_mem2;

        for (i = 0; i < max_loop; i++) {
                disks[i] = alloc_disk(1);
                if (!disks[i])
                        goto out_mem3;
        }

        mutex_init(&lloop_mutex);

        for (i = 0; i < max_loop; i++) {
                struct lloop_device *lo = &loop_dev[i];
                struct gendisk *disk = disks[i];

                lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
                if (!lo->lo_queue)
                        goto out_mem4;

                mutex_init(&lo->lo_ctl_mutex);
                sema_init(&lo->lo_sem, 0);
                init_waitqueue_head(&lo->lo_bh_wait);
                lo->lo_number = i;
                spin_lock_init(&lo->lo_lock);
                disk->major = lloop_major;
                disk->first_minor = i;
                disk->fops = &lo_fops;
                sprintf(disk->disk_name, "lloop%d", i);
                disk->private_data = lo;
                disk->queue = lo->lo_queue;
        }

        /* We cannot fail after we call this, so another loop! */
        for (i = 0; i < max_loop; i++)
                add_disk(disks[i]);

        return 0;

out_mem4:
        while (i--)
                blk_cleanup_queue(loop_dev[i].lo_queue);
        i = max_loop;
out_mem3:
        while (i--)
                put_disk(disks[i]);
        OBD_FREE(disks, max_loop * sizeof(*disks));
out_mem2:
        OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));
out_mem1:
        unregister_blkdev(lloop_major, "lloop");
        ll_iocontrol_unregister(ll_iocontrol_magic);
        CERROR("lloop: ran out of memory\n");
        return -ENOMEM;
}
static void lloop_exit(void)
{
        int i;

        ll_iocontrol_unregister(ll_iocontrol_magic);
        for (i = 0; i < max_loop; i++) {
                del_gendisk(disks[i]);
                blk_cleanup_queue(loop_dev[i].lo_queue);
                put_disk(disks[i]);
        }
        unregister_blkdev(lloop_major, "lloop");

        OBD_FREE(disks, max_loop * sizeof(*disks));
        OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));
}
module_init(lloop_init);
module_exit(lloop_exit);

CFS_MODULE_PARM(max_loop, "i", int, 0444, "maximum number of lloop devices");
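/*
 * Illustrative usage (not from the original source): assuming the module is
 * built as lloop.ko, the number of minors can be raised at load time, e.g.
 *
 *      modprobe lloop max_loop=64
 *
 * Values outside 1-256 fall back to MAX_LOOP_DEFAULT (16); see lloop_init().
 */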
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre virtual block device");
MODULE_LICENSE("GPL");