/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/*
 * linux/drivers/block/loop.c
 *
 * Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations prepare_write and/or commit_write are not available on the
 * backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use its own CAP_* category instead of CAP_SYS_ADMIN
 */

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for invalidate_bdev() */
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/pagevec.h>

#include <asm/uaccess.h>

#include <lustre_lib.h>
#include <lustre_lite.h>
#include "llite_internal.h"
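
/*
 * Cap each assembled lloop request at the maximum page count of a single
 * Lustre bulk (brw) RPC, so one merged request maps onto one bulk transfer.
 */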
#define LLOOP_MAX_SEGMENTS        PTLRPC_MAX_BRW_PAGES

/* Possible states of device */
enum {
        LLOOP_UNBOUND,
        LLOOP_BOUND,
        LLOOP_RUNDOWN,
};

struct lloop_device {
        int                   lo_number;
        int                   lo_refcnt;
        loff_t                lo_offset;
        loff_t                lo_sizelimit;
        int                   lo_flags;
        int                 (*ioctl)(struct lloop_device *, int cmd,
                                     unsigned long arg);

        struct file          *lo_backing_file;
        struct block_device  *lo_device;
        unsigned              lo_blocksize;

        int                   old_gfp_mask;

        cfs_spinlock_t        lo_lock;
        struct bio           *lo_bio;
        struct bio           *lo_biotail;
        int                   lo_state;
        cfs_semaphore_t       lo_sem;
        cfs_mutex_t           lo_ctl_mutex;
        cfs_atomic_t          lo_pending;
        cfs_waitq_t           lo_bh_wait;

        struct request_queue *lo_queue;

        const struct lu_env  *lo_env;
        struct cl_io          lo_io;
        struct ll_dio_pages   lo_pvec;

        /* data to handle bio for lustre. */
        struct lo_request_data {
                struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
                loff_t       lrd_offsets[LLOOP_MAX_SEGMENTS];
        } lo_requests[1];
};

/*
 * Loop flags
 */
enum {
        LO_FLAGS_READ_ONLY = 1,
};

static int lloop_major;
#define MAX_LOOP_DEFAULT 16
static int max_loop = MAX_LOOP_DEFAULT;
static struct lloop_device *loop_dev;
static struct gendisk **disks;
static cfs_mutex_t lloop_mutex;
static void *ll_iocontrol_magic = NULL;
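
/*
 * Size of the loop device, in 512-byte sectors, derived from the backing
 * file's i_size minus lo_offset and clamped by lo_sizelimit.
 */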
static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
{
        loff_t size, offset, loopsize;

        /* Compute loopsize in bytes */
        size = i_size_read(file->f_mapping->host);
        offset = lo->lo_offset;
        loopsize = size - offset;
        if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
                loopsize = lo->lo_sizelimit;

        /*
         * Unfortunately, if we want to do I/O on the device,
         * the number of 512-byte sectors has to fit into a sector_t.
         */
        return loopsize >> 9;
}
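
/*
 * Transfer a chain of bios to/from the backing Lustre file: collect every
 * bio segment into lo_pvec as (page, file offset) pairs, then hand the
 * whole vector to ll_direct_rw_pages() under a CIT_MISC cl_io, bypassing
 * the client page cache. Returns 0 on a full transfer, otherwise a short
 * byte count or negative errno.
 */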
static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
{
        const struct lu_env  *env   = lo->lo_env;
        struct cl_io         *io    = &lo->lo_io;
        struct inode         *inode = lo->lo_backing_file->f_dentry->d_inode;
        struct cl_object     *obj   = ll_i2info(inode)->lli_clob;
        pgoff_t               offset;
        int                   ret;
        int                   i;
        int                   rw;
        obd_count             page_count = 0;
        struct bio_vec       *bvec;
        struct bio           *bio;
        ssize_t               bytes;

        struct ll_dio_pages  *pvec = &lo->lo_pvec;
        struct page         **pages = pvec->ldp_pages;
        loff_t               *offsets = pvec->ldp_offsets;

        truncate_inode_pages(inode->i_mapping, 0);

        /* initialize the IO */
        memset(io, 0, sizeof(*io));

        ret = cl_io_init(env, io, CIT_MISC, obj);
        if (ret)
                return io->ci_result;
        io->ci_lockreq = CILR_NEVER;

        LASSERT(head != NULL);
        rw = head->bi_rw;
        for (bio = head; bio != NULL; bio = bio->bi_next) {
                LASSERT(rw == bio->bi_rw);

                offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
                bio_for_each_segment(bvec, bio, i) {
                        BUG_ON(bvec->bv_offset != 0);
                        BUG_ON(bvec->bv_len != CFS_PAGE_SIZE);

                        pages[page_count] = bvec->bv_page;
                        offsets[page_count] = offset;
                        page_count++;
                        offset += bvec->bv_len;
                }
                LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
        }

        ll_stats_ops_tally(ll_i2sbi(inode),
                        (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
                        page_count);

        pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
        pvec->ldp_nr = page_count;

        /* FIXME: in ll_direct_rw_pages(), it has to allocate many cl_page{}s
         * to write those pages into OST. An even worse case is that more
         * pages would be asked to write out to swap space, and then finally
         * get here again.
         * Unfortunately this is NOT easy to fix.
         * Thoughts on solution:
         * 0. Define a reserved pool for cl_pages, which could be a list of
         *    pre-allocated cl_pages from cl_page_kmem;
         * 1. Define a new operation in cl_object_operations{}, say clo_depth,
         *    which measures how many layers this lustre object has. Generally
         *    speaking, the depth would be 2, one for llite, and one for
         *    lovsub. However, for SNS, there will be more since additional
         *    pages are needed to store parity;
         * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
         *    pool. Afterwards, the clio would allocate the pages from the
         *    reserved pool; this guarantees we needn't allocate the cl_pages
         *    from the generic cl_page slab cache.
         *    Of course, if there are NOT enough pages in the pool, we might
         *    be asked to write fewer pages at once; this purely depends on
         *    the implementation. Anyway, we should be careful to avoid
         *    deadlocks.
         */

        LOCK_INODE_MUTEX(inode);
        bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
        UNLOCK_INODE_MUTEX(inode);
        cl_io_fini(env, io);
        return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
}

/*
 * Add bio to back of pending list
 */
static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
{
        unsigned long flags;

        cfs_spin_lock_irqsave(&lo->lo_lock, flags);
        if (lo->lo_biotail) {
                lo->lo_biotail->bi_next = bio;
                lo->lo_biotail = bio;
        } else
                lo->lo_bio = lo->lo_biotail = bio;
        cfs_spin_unlock_irqrestore(&lo->lo_lock, flags);

        cfs_atomic_inc(&lo->lo_pending);
        if (cfs_waitq_active(&lo->lo_bh_wait))
                cfs_waitq_signal(&lo->lo_bh_wait);
}

/*
 * Grab first pending buffer
 */
static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
{
        struct bio *first;
        struct bio **bio;
        unsigned int count = 0;
        unsigned int page_count = 0;
        int rw;

        cfs_spin_lock_irq(&lo->lo_lock);
        first = lo->lo_bio;
        if (unlikely(first == NULL)) {
                cfs_spin_unlock_irq(&lo->lo_lock);
                return 0;
        }

        /* TODO: need to split the bio, too bad. */
        LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);

        rw = first->bi_rw;
        bio = &lo->lo_bio;
        while (*bio && (*bio)->bi_rw == rw) {
                CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt %u\n",
                       (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
                       page_count, (*bio)->bi_vcnt);
                if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
                        break;

                page_count += (*bio)->bi_vcnt;
                count++;
                bio = &(*bio)->bi_next;
        }
        if (*bio) {
                /* Some bios can't be merged. */
                lo->lo_bio = *bio;
                *bio = NULL;
        } else {
                /* Hit the end of the queue */
                lo->lo_biotail = NULL;
        }
        *req = first;
        cfs_spin_unlock_irq(&lo->lo_lock);
        return count;
}
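
/*
 * make_request_fn for the lloop queue: runs in the submitter's context,
 * so it only validates the request, queues the bio and wakes the worker
 * thread; the actual Lustre I/O happens in loop_thread().
 */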
static int loop_make_request(struct request_queue *q, struct bio *old_bio)
{
        struct lloop_device *lo = q->queuedata;
        int rw = bio_rw(old_bio);
        int inactive;

        if (!lo)
                goto err;

        CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
               (unsigned long long)old_bio->bi_sector, old_bio->bi_size);

        cfs_spin_lock_irq(&lo->lo_lock);
        inactive = (lo->lo_state != LLOOP_BOUND);
        cfs_spin_unlock_irq(&lo->lo_lock);
        if (inactive)
                goto err;

        if (rw == WRITE) {
                if (lo->lo_flags & LO_FLAGS_READ_ONLY)
                        goto err;
        } else if (rw == READA) {
                rw = READ;
        } else if (rw != READ) {
                CERROR("lloop: unknown command (%x)\n", rw);
                goto err;
        }
        loop_add_bio(lo, old_bio);
        return 0;
err:
        cfs_bio_io_error(old_bio, old_bio->bi_size);
        return 0;
}

#ifdef HAVE_REQUEST_QUEUE_UNPLUG_FN
/*
 * kick off io on the underlying address space
 */
static void loop_unplug(struct request_queue *q)
{
        struct lloop_device *lo = q->queuedata;

        clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
        blk_run_address_space(lo->lo_backing_file->f_mapping);
}
#endif
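
/*
 * Complete a chain of bios: issue the whole chain through
 * do_bio_lustrebacked() and end every bio with the resulting status.
 */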
static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
{
        int ret;

        ret = do_bio_lustrebacked(lo, bio);
        while (bio) {
                struct bio *tmp = bio->bi_next;

                bio->bi_next = NULL;
                cfs_bio_endio(bio, bio->bi_size, ret);
                bio = tmp;
        }
}

static inline int loop_active(struct lloop_device *lo)
{
        return cfs_atomic_read(&lo->lo_pending) ||
               (lo->lo_state == LLOOP_RUNDOWN);
}

/*
 * worker thread that handles reads/writes to file backed loop devices,
 * to avoid blocking in our make_request_fn.
 */
static int loop_thread(void *data)
{
        struct lloop_device *lo = data;
        struct bio *bio;
        unsigned int count;
        unsigned long times = 0;
        unsigned long total_count = 0;

        struct lu_env *env;
        int refcheck;
        int ret = 0;

        daemonize("lloop%d", lo->lo_number);

        set_user_nice(current, -20);

        lo->lo_state = LLOOP_BOUND;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                GOTO(out, ret = PTR_ERR(env));

        lo->lo_env = env;
        memset(&lo->lo_pvec, 0, sizeof(lo->lo_pvec));
        lo->lo_pvec.ldp_pages   = lo->lo_requests[0].lrd_pages;
        lo->lo_pvec.ldp_offsets = lo->lo_requests[0].lrd_offsets;

        /*
         * up sem, we are running
         */
        cfs_up(&lo->lo_sem);

        for (;;) {
                cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
                if (!cfs_atomic_read(&lo->lo_pending)) {
                        int exiting = 0;

                        cfs_spin_lock_irq(&lo->lo_lock);
                        exiting = (lo->lo_state == LLOOP_RUNDOWN);
                        cfs_spin_unlock_irq(&lo->lo_lock);
                        if (exiting)
                                break;
                }

                bio = NULL;
                count = loop_get_bio(lo, &bio);
                if (!count) {
                        CWARN("lloop(minor: %d): missing bio\n", lo->lo_number);
                        continue;
                }

                total_count += count;
                if (total_count < count) {     /* overflow */
                        total_count = count;
                        times = 1;
                } else {
                        times++;
                }
                if ((times & 127) == 0) {
                        CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
                               total_count, times, total_count / times);
                }

                LASSERT(bio != NULL);
                LASSERT(count <= cfs_atomic_read(&lo->lo_pending));
                loop_handle_bio(lo, bio);
                cfs_atomic_sub(count, &lo->lo_pending);
        }
        cl_env_put(env, &refcheck);

out:
        cfs_up(&lo->lo_sem);
        return ret;
}
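
/*
 * Bind a backing Lustre file to an unbound lloop device: size the disk,
 * switch the queue to our make_request_fn, start the worker thread and
 * wait on lo_sem until it is running. Called with lloop_mutex held.
 */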
static int loop_set_fd(struct lloop_device *lo, struct file *unused,
                       struct block_device *bdev, struct file *file)
{
        struct inode         *inode;
        struct address_space *mapping;
        int                   lo_flags = 0;
        int                   error;
        loff_t                size;

        if (!cfs_try_module_get(THIS_MODULE))
                return -ENODEV;

        error = -EBUSY;
        if (lo->lo_state != LLOOP_UNBOUND)
                goto out;

        mapping = file->f_mapping;
        inode = mapping->host;

        error = -EINVAL;
        if (!S_ISREG(inode->i_mode) || inode->i_sb->s_magic != LL_SUPER_MAGIC)
                goto out;

        if (!(file->f_mode & FMODE_WRITE))
                lo_flags |= LO_FLAGS_READ_ONLY;

        size = get_loop_size(lo, file);

        if ((loff_t)(sector_t)size != size) {
                error = -EFBIG;
                goto out;
        }

        /* remove all pages from the cache so no dirty pages are left behind */
        truncate_inode_pages(mapping, 0);

        set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

        lo->lo_blocksize = CFS_PAGE_SIZE;
        lo->lo_device = bdev;
        lo->lo_flags = lo_flags;
        lo->lo_backing_file = file;
        lo->ioctl = NULL;
        lo->lo_sizelimit = 0;
        lo->old_gfp_mask = mapping_gfp_mask(mapping);
        mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

        lo->lo_bio = lo->lo_biotail = NULL;

        /*
         * set queue make_request_fn, and add limits based on lower level
         * device
         */
        blk_queue_make_request(lo->lo_queue, loop_make_request);
        lo->lo_queue->queuedata = lo;
#ifdef HAVE_REQUEST_QUEUE_UNPLUG_FN
        lo->lo_queue->unplug_fn = loop_unplug;
#endif

        /* queue parameters */
        CLASSERT(CFS_PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
        blk_queue_logical_block_size(lo->lo_queue,
                                     (unsigned short)CFS_PAGE_SIZE);
        blk_queue_max_hw_sectors(lo->lo_queue,
                                 LLOOP_MAX_SEGMENTS << (CFS_PAGE_SHIFT - 9));
        blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);

        set_capacity(disks[lo->lo_number], size);
        bd_set_size(bdev, size << 9);

        set_blocksize(bdev, lo->lo_blocksize);

        cfs_create_thread(loop_thread, lo, CLONE_KERNEL);
        cfs_down(&lo->lo_sem);
        return 0;

out:
        /* This is safe: open() is still holding a reference. */
        cfs_module_put(THIS_MODULE);
        return error;
}
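
/*
 * Unbind the backing file from a bound lloop device: signal the worker
 * thread to run down, wait for it on lo_sem, then clear the device state.
 * @count is the number of expected holders; detaching fails with -EBUSY
 * while anyone else still has the device open.
 */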
static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
                       int count)
{
        struct file *filp = lo->lo_backing_file;
        int gfp = lo->old_gfp_mask;

        if (lo->lo_state != LLOOP_BOUND)
                return -ENXIO;

        if (lo->lo_refcnt > count)        /* we needed one fd for the ioctl */
                return -EBUSY;

        if (filp == NULL)
                return -EINVAL;

        cfs_spin_lock_irq(&lo->lo_lock);
        lo->lo_state = LLOOP_RUNDOWN;
        cfs_spin_unlock_irq(&lo->lo_lock);
        cfs_waitq_signal(&lo->lo_bh_wait);

        cfs_down(&lo->lo_sem);
        lo->lo_backing_file = NULL;
        lo->ioctl = NULL;
        lo->lo_device = NULL;
        lo->lo_offset = 0;
        lo->lo_sizelimit = 0;
        lo->lo_flags = 0;
        ll_invalidate_bdev(bdev, 0);
        set_capacity(disks[lo->lo_number], 0);
        bd_set_size(bdev, 0);
        mapping_set_gfp_mask(filp->f_mapping, gfp);
        lo->lo_state = LLOOP_UNBOUND;
        fput(filp);
        /* This is safe: open() is still holding a reference. */
        cfs_module_put(THIS_MODULE);
        return 0;
}
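
/* open/release take the per-device control mutex and track lo_refcnt;
 * the prototypes differ with the kernel's block_device_operations API. */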
#ifdef HAVE_BLKDEV_PUT_2ARGS
static int lo_open(struct block_device *bdev, fmode_t mode)
{
        struct lloop_device *lo = bdev->bd_disk->private_data;
#else
static int lo_open(struct inode *inode, struct file *file)
{
        struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
#endif

        cfs_mutex_lock(&lo->lo_ctl_mutex);
        lo->lo_refcnt++;
        cfs_mutex_unlock(&lo->lo_ctl_mutex);

        return 0;
}

#ifdef HAVE_BLKDEV_PUT_2ARGS
static int lo_release(struct gendisk *disk, fmode_t mode)
{
        struct lloop_device *lo = disk->private_data;
#else
static int lo_release(struct inode *inode, struct file *file)
{
        struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
#endif

        cfs_mutex_lock(&lo->lo_ctl_mutex);
        lo->lo_refcnt--;
        cfs_mutex_unlock(&lo->lo_ctl_mutex);

        return 0;
}

/* lloop device node's ioctl function. */
#ifdef HAVE_BLKDEV_PUT_2ARGS
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
                    unsigned int cmd, unsigned long arg)
{
        struct lloop_device *lo = bdev->bd_disk->private_data;
        struct inode *inode = NULL;
        int err = 0;
#else
static int lo_ioctl(struct inode *inode, struct file *unused,
                    unsigned int cmd, unsigned long arg)
{
        struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
        struct block_device *bdev = inode->i_bdev;
        int err = 0;
#endif

        cfs_mutex_lock(&lloop_mutex);
        switch (cmd) {
        case LL_IOC_LLOOP_DETACH: {
                err = loop_clr_fd(lo, bdev, 2);
                if (err == 0)
                        ll_blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
                break;
        }

        case LL_IOC_LLOOP_INFO: {
                struct lu_fid fid;

                LASSERT(lo->lo_backing_file != NULL);
                if (inode == NULL)
                        inode = lo->lo_backing_file->f_dentry->d_inode;
                if (lo->lo_state == LLOOP_BOUND)
                        fid = ll_i2info(inode)->lli_fid;
                else
                        fid_zero(&fid);

                if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid)))
                        err = -EFAULT;
                break;
        }

        default:
                err = -EINVAL;
                break;
        }
        cfs_mutex_unlock(&lloop_mutex);

        return err;
}

static struct block_device_operations lo_fops = {
        .owner   = THIS_MODULE,
        .open    = lo_open,
        .release = lo_release,
        .ioctl   = lo_ioctl,
};

/* dynamic iocontrol callback.
 * This callback is registered in lloop_init() and will be called by
 * llite when one of the registered commands arrives on a regular file
 * descriptor.
 *
 * This is a llite regular file ioctl function. It takes the responsibility
 * of attaching or detaching a file to/from an lloop device by device number.
 */
static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
                                   unsigned int cmd, unsigned long arg,
                                   void *magic, int *rcp)
{
        struct lloop_device *lo = NULL;
        struct block_device *bdev = NULL;
        int err = 0;
        dev_t dev;

        if (magic != ll_iocontrol_magic)
                return LLIOC_CONT;

        if (disks == NULL)
                GOTO(out1, err = -ENODEV);

        CWARN("Enter lloop_ioctl\n");

        cfs_mutex_lock(&lloop_mutex);
        switch (cmd) {
        case LL_IOC_LLOOP_ATTACH: {
                struct lloop_device *lo_free = NULL;
                int i;

                for (i = 0; i < max_loop; i++, lo = NULL) {
                        lo = &loop_dev[i];
                        if (lo->lo_state == LLOOP_UNBOUND) {
                                if (!lo_free)
                                        lo_free = lo;
                                continue;
                        }
                        if (lo->lo_backing_file->f_dentry->d_inode ==
                            file->f_dentry->d_inode)
                                break;
                }
                if (lo || !lo_free)
                        GOTO(out, err = -EBUSY);

                lo = lo_free;
                dev = MKDEV(lloop_major, lo->lo_number);

                /* quit if the used pointer is writable */
                if (put_user((long)old_encode_dev(dev), (long *)arg))
                        GOTO(out, err = -EFAULT);

                bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
                if (IS_ERR(bdev))
                        GOTO(out, err = PTR_ERR(bdev));

                get_file(file);
                err = loop_set_fd(lo, NULL, bdev, file);
                if (err) {
                        fput(file);
                        ll_blkdev_put(bdev, 0);
                }

                break;
        }

        case LL_IOC_LLOOP_DETACH_BYDEV: {
                int minor;

                dev = old_decode_dev(arg);
                if (MAJOR(dev) != lloop_major)
                        GOTO(out, err = -EINVAL);

                minor = MINOR(dev);
                if (minor > max_loop - 1)
                        GOTO(out, err = -EINVAL);

                lo = &loop_dev[minor];
                if (lo->lo_state != LLOOP_BOUND)
                        GOTO(out, err = -EINVAL);

                bdev = lo->lo_device;
                err = loop_clr_fd(lo, bdev, 1);
                if (err == 0)
                        ll_blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */

                break;
        }

        default:
                err = -EINVAL;
                break;
        }

out:
        cfs_mutex_unlock(&lloop_mutex);
out1:
        *rcp = err;
        return LLIOC_STOP;
}
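
/*
 * A minimal userspace sketch of the attach/detach protocol handled above
 * (hypothetical helper, not part of this file): the caller opens a regular
 * Lustre file and passes a pointer to a long that receives the old-style
 * encoded device number of the lloop minor that was bound.
 *
 *     int fd = open("/mnt/lustre/backing_file", O_RDWR);
 *     long devno;
 *
 *     if (ioctl(fd, LL_IOC_LLOOP_ATTACH, &devno) == 0) {
 *             // devno holds the old_encode_dev()-style device number
 *             ...
 *             ioctl(fd, LL_IOC_LLOOP_DETACH_BYDEV, devno);
 *     }
 */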
static int __init lloop_init(void)
{
        int i;
        unsigned int cmdlist[] = {
                LL_IOC_LLOOP_ATTACH,
                LL_IOC_LLOOP_DETACH_BYDEV,
        };

        if (max_loop < 1 || max_loop > 256) {
                max_loop = MAX_LOOP_DEFAULT;
                CWARN("lloop: invalid max_loop (must be between"
                      " 1 and 256), using default (%u)\n", max_loop);
        }

        lloop_major = register_blkdev(0, "lloop");
        if (lloop_major < 0)
                return -EIO;

        CDEBUG(D_CONFIG, "registered lloop major %d with %u minors\n",
               lloop_major, max_loop);

        ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist);
        if (ll_iocontrol_magic == NULL)
                goto out_mem1;

        OBD_ALLOC_WAIT(loop_dev, max_loop * sizeof(*loop_dev));
        if (!loop_dev)
                goto out_mem1;

        OBD_ALLOC_WAIT(disks, max_loop * sizeof(*disks));
        if (!disks)
                goto out_mem2;

        for (i = 0; i < max_loop; i++) {
                disks[i] = alloc_disk(1);
                if (!disks[i])
                        goto out_mem3;
        }

        cfs_mutex_init(&lloop_mutex);

        for (i = 0; i < max_loop; i++) {
                struct lloop_device *lo = &loop_dev[i];
                struct gendisk *disk = disks[i];

                lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
                if (!lo->lo_queue)
                        goto out_mem4;

                cfs_mutex_init(&lo->lo_ctl_mutex);
                cfs_sema_init(&lo->lo_sem, 0);
                cfs_waitq_init(&lo->lo_bh_wait);
                lo->lo_number = i;
                cfs_spin_lock_init(&lo->lo_lock);
                disk->major = lloop_major;
                disk->first_minor = i;
                disk->fops = &lo_fops;
                sprintf(disk->disk_name, "lloop%d", i);
                disk->private_data = lo;
                disk->queue = lo->lo_queue;
        }

        /* We cannot fail after we call this, so another loop! */
        for (i = 0; i < max_loop; i++)
                add_disk(disks[i]);

        return 0;

out_mem4:
        while (i--)
                blk_cleanup_queue(loop_dev[i].lo_queue);
        i = max_loop;
out_mem3:
        while (i--)
                put_disk(disks[i]);
        OBD_FREE(disks, max_loop * sizeof(*disks));
out_mem2:
        OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));
out_mem1:
        unregister_blkdev(lloop_major, "lloop");
        ll_iocontrol_unregister(ll_iocontrol_magic);
        CERROR("lloop: ran out of memory\n");
        return -ENOMEM;
}
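
/*
 * Module teardown: drop the iocontrol registration, remove every gendisk
 * and queue, then release the device arrays and the block major.
 */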
static void lloop_exit(void)
{
        int i;

        ll_iocontrol_unregister(ll_iocontrol_magic);
        for (i = 0; i < max_loop; i++) {
                del_gendisk(disks[i]);
                blk_cleanup_queue(loop_dev[i].lo_queue);
                put_disk(disks[i]);
        }
        if (ll_unregister_blkdev(lloop_major, "lloop"))
                CWARN("lloop: cannot unregister blkdev\n");
        else
                CDEBUG(D_CONFIG, "unregistered lloop major %d\n", lloop_major);

        OBD_FREE(disks, max_loop * sizeof(*disks));
        OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));
}

module_init(lloop_init);
module_exit(lloop_exit);

CFS_MODULE_PARM(max_loop, "i", int, 0444, "maximum number of lloop devices");
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre virtual block device");
MODULE_LICENSE("GPL");