4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/lvfs/fsfilt_ext3.c
38 * Author: Andreas Dilger <adilger@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_FILTER
43 #include <linux/init.h>
44 #include <linux/module.h>
46 #include <linux/slab.h>
47 #include <linux/pagemap.h>
48 #include <ext4/ext4.h>
49 #include <ext4/ext4_jbd2.h>
50 #include <linux/version.h>
51 #include <linux/bitops.h>
52 #include <linux/quota.h>
54 #include <libcfs/libcfs.h>
55 #include <lustre_fsfilt.h>
57 #include <linux/lustre_compat25.h>
58 #include <linux/lprocfs_status.h>
60 #include <ext4/ext4_extents.h>
62 /* for kernels 2.6.18 and later */
/* Compatibility shims: this file is written against "ext3"-style names;
 * on modern kernels the backing implementation is ext4/jbd2, so these
 * macros forward the old names to the new entry points. */
63 #define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS(sb)
/* Insert a new extent into the inode's extent tree (ext4 name differs). */
65 #define fsfilt_ext3_ext_insert_extent(handle, inode, path, newext, flag) \
66 ext3_ext_insert_extent(handle, inode, path, newext, flag)
/* Drop mballoc per-inode preallocations (renamed upstream). */
68 #define ext3_mb_discard_inode_preallocations(inode) \
69 ext3_discard_preallocations(inode)
/* Journal commit helpers map straight onto jbd2. */
71 #define fsfilt_log_start_commit(journal, tid) jbd2_log_start_commit(journal, tid)
72 #define fsfilt_log_wait_commit(journal, tid) jbd2_log_wait_commit(journal, tid)
/* Slab cache for struct fsfilt_cb_data; created in fsfilt_ext3_init(),
 * destroyed in fsfilt_ext3_exit(). */
74 static cfs_mem_cache_t *fcb_cache;
/* Per-transaction journal-commit callback record.  NOTE(review): the
 * closing brace of this struct is not visible in this extract; cb_jcb
 * must stay the first member (the jbd2 callback machinery casts the
 * embedded entry back to this struct). */
76 struct fsfilt_cb_data {
77 struct ext4_journal_cb_entry cb_jcb; /* private data - MUST BE FIRST */
78 fsfilt_cb_t cb_func; /* MDS/OBD completion function */
79 struct obd_device *cb_obd; /* MDS/OBD completion device */
80 __u64 cb_last_rcvd; /* MDS/OST last committed operation */
81 void *cb_data; /* MDS/OST completion function data */
/* Return the filesystem volume label straight from the on-disk ext3
 * superblock.  The pointer aliases superblock memory; callers must not
 * free it. */
84 static char *fsfilt_ext3_get_label(struct super_block *sb)
86 return EXT3_SB(sb)->s_es->s_volume_name;
89 /* kernel has ext4_blocks_for_truncate since linux-3.1.1 */
90 #ifdef HAVE_BLOCKS_FOR_TRUNCATE
91 # include <ext4/truncate.h>
/* Fallback for older kernels: estimate the journal credits needed to
 * truncate @inode.  i_blocks is in 512-byte sectors, so shift by
 * (blocksize_bits - 9) to convert to fs blocks; the estimate is capped
 * at EXT4_MAX_TRANS_DATA so one transaction always fits the journal. */
93 static inline unsigned long ext4_blocks_for_truncate(struct inode *inode)
97 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
100 if (needed > EXT4_MAX_TRANS_DATA)
101 needed = EXT4_MAX_TRANS_DATA;
102 return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
107 * We don't currently need any additional blocks for rmdir and
108 * unlink transactions because we are storing the OST oa_id inside
109 * the inode (which we will be changing anyways as part of this
/* Start a journal transaction sized for operation @op on @inode.
 * Credits start at the single-data estimate and grow per-op (UNLINK adds
 * delete + per-stripe log credits; CANCEL_UNLINK sizes for llog bitmap
 * updates plus inode deletion/truncate).  If the caller already holds a
 * handle (current->journal_info set) the existing one is reused.
 * Returns the handle_t, or an ERR_PTR on failure (logged below). */
112 static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
115 /* For updates to the last received file */
116 int nblocks = FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
120 if (current->journal_info) {
121 CDEBUG(D_INODE, "increasing refcount on %p\n",
122 current->journal_info);
127 case FSFILT_OP_UNLINK:
128 /* delete one file + create/update logs for each stripe */
129 nblocks += EXT3_DELETE_TRANS_BLOCKS(inode->i_sb);
130 nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
131 FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
133 case FSFILT_OP_CANCEL_UNLINK:
136 /* blocks for log header bitmap update OR
137 * blocks for catalog header bitmap update + unlink of logs +
138 * blocks for delete the inode (include blocks truncating). */
139 nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
140 EXT3_DELETE_TRANS_BLOCKS(inode->i_sb) +
141 ext4_blocks_for_truncate(inode) + 3;
143 default: CERROR("unknown transaction start op %d\n", op);
147 LASSERT(current->journal_info == desc_private);
148 journal = EXT3_SB(inode->i_sb)->s_journal;
/* Never ask for more credits than one transaction can hold; clamp and
 * warn rather than fail. */
149 if (nblocks > journal->j_max_transaction_buffers) {
150 CWARN("too many credits %d for op %ux%u using %d instead\n",
151 nblocks, op, logs, journal->j_max_transaction_buffers);
152 nblocks = journal->j_max_transaction_buffers;
156 LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
157 handle = ext3_journal_start(inode, nblocks);
160 LASSERT(current->journal_info == handle);
162 CERROR("error starting handle for op %u (%u credits): rc %ld\n",
163 op, nblocks, PTR_ERR(handle));
/* Stop the journal handle started by fsfilt_ext3_start().  h_sync is
 * forced on so the transaction commits synchronously — recovery depends
 * on the data being on stable storage.  NOTE(review): force_sync is
 * accepted but no conditional use of it is visible in this extract. */
167 static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
170 handle_t *handle = h;
172 LASSERT(current->journal_info == handle);
174 handle->h_sync = 1; /* recovery likes this */
176 rc = ext3_journal_stop(handle);
181 #ifndef EXT3_EXTENTS_FL
182 #define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
/* Extent-tree compatibility layer: the "base" object for extent walks is
 * just the inode on ext4, and the ext3_* names below forward to the
 * ext4 extent helpers. */
186 #define EXT_ASSERT(cond) BUG_ON(!(cond))
189 #define EXT_GENERATION(inode) (EXT4_I(inode)->i_ext_generation)
190 #define ext3_ext_base inode
191 #define ext3_ext_base2inode(inode) (inode)
192 #define EXT_DEPTH(inode) ext_depth(inode)
193 #define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
194 ext3_ext_walk_space(inode, block, num, cb, cbdata);
/* NOTE(review): this line is a fragment of struct bpointers (the output
 * block array used by the extent-walk callback); the rest of the struct
 * is not visible in this extract. */
197 unsigned long *blocks;
/* Pick a goal physical block for allocating logical @block:
 * 1) just past the nearest existing extent at this tree depth;
 * 2) else the block holding the index node itself;
 * 3) else the inode's block group start plus a pid-derived "colour"
 *    offset, to spread concurrent allocators across the group. */
205 static long ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
206 unsigned long block, int *aflags)
208 struct ext3_inode_info *ei = EXT3_I(inode);
209 unsigned long bg_start;
210 unsigned long colour;
214 struct ext3_extent *ex;
215 depth = path->p_depth;
217 /* try to predict block placement */
218 if ((ex = path[depth].p_ext))
219 return ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));
221 /* it looks index is empty
222 * try to find starting from index itself */
223 if (path[depth].p_bh)
224 return path[depth].p_bh->b_blocknr;
227 /* OK. use inode's group */
228 bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
229 le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
230 colour = (current->pid % 16) *
231 (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
232 return bg_start + colour + block;
/* Forget any cached metadata buffers for @blocknr on @sb's block device
 * (old buffer_head aliases from before the block was reallocated). */
235 #define ll_unmap_underlying_metadata(sb, blocknr) \
236 unmap_underlying_metadata((sb)->s_bdev, blocknr)
238 #ifndef EXT3_MB_HINT_GROUP_ALLOC
/* Old-kernel variant: allocate *count blocks near the goal computed by
 * ext3_ext_find_goal(); result pblock, error via *err. */
239 static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
240 struct ext3_ext_path *path, unsigned long block,
241 unsigned long *count, int *err)
243 unsigned long pblock, goal;
245 struct inode *inode = ext3_ext_base2inode(base);
247 goal = ext3_ext_find_goal(inode, path, block, &aflags);
248 aflags |= 2; /* block have been already reserved */
249 pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
/* New-kernel variant: same contract, but fills an allocation_request so
 * mballoc can see the left/right neighbour extents as hints.
 * NOTE(review): the #else separating the two variants is not visible in
 * this extract. */
254 static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
255 struct ext3_ext_path *path, unsigned long block,
256 unsigned long *count, int *err)
258 struct inode *inode = ext3_ext_base2inode(base);
259 struct ext3_allocation_request ar;
260 unsigned long pblock;
263 /* find neighbour allocated blocks */
265 *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
269 *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
273 /* allocate new block */
274 ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
278 ar.flags = EXT3_MB_HINT_DATA;
279 pblock = ext3_mb_new_blocks(handle, &ar, err);
/* Callback invoked by ext3_ext_walk_space() for each extent/hole in the
 * requested range.  For an existing extent (or create == 0) it just
 * copies the physical block numbers into bp->blocks; for a hole with
 * create set it starts its own journal handle, allocates blocks via
 * new_blocks(), inserts the new extent under i_data_sem, and retries if
 * the tree generation changed underneath it.  On insert failure the
 * freshly allocated blocks are freed again.  NOTE(review): several
 * lines (variable declarations, some branch bodies, return paths) are
 * missing from this extract. */
285 static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
286 struct ext3_ext_path *path,
287 struct ext3_ext_cache *cex,
288 #ifdef HAVE_EXT_PREPARE_CB_EXTENT
289 struct ext3_extent *ex,
293 struct bpointers *bp = cbdata;
294 struct inode *inode = ext3_ext_base2inode(base);
295 struct ext3_extent nex;
296 unsigned long pblock;
302 if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
307 if (bp->create == 0) {
309 if (cex->ec_block < bp->start)
310 i = bp->start - cex->ec_block;
311 if (i >= cex->ec_len)
312 CERROR("nothing to do?! i = %d, e_num = %u\n",
314 for (; i < cex->ec_len && bp->num; i++) {
/* Hole + create: remember the tree generation before dropping into our
 * own transaction so we can detect concurrent tree changes. */
326 tgen = EXT_GENERATION(base);
327 count = ext3_ext_calc_credits_for_insert(base, path);
329 handle = ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
330 if (IS_ERR(handle)) {
331 return PTR_ERR(handle);
334 if (tgen != EXT_GENERATION(base)) {
335 /* the tree has changed. so path can be invalid at moment */
336 ext3_journal_stop(handle);
340 /* In 2.6.32 kernel, ext4_ext_walk_space()'s callback func is not
341 * protected by i_data_sem as whole. so we patch it to store
342 * generation to path and now verify the tree hasn't changed */
343 down_write((&EXT4_I(inode)->i_data_sem));
345 /* validate extent, make sure the extent tree does not changed */
346 if (EXT_GENERATION(base) != path[0].p_generation) {
347 /* cex is invalid, try again */
348 up_write(&EXT4_I(inode)->i_data_sem);
349 ext3_journal_stop(handle);
354 pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
357 EXT_ASSERT(count <= cex->ec_len);
359 /* insert new extent */
360 nex.ee_block = cpu_to_le32(cex->ec_block);
361 ext3_ext_store_pblock(&nex, pblock);
362 nex.ee_len = cpu_to_le16(count);
363 err = fsfilt_ext3_ext_insert_extent(handle, base, path, &nex, 0);
365 /* free data blocks we just allocated */
366 /* not a good idea to call discard here directly,
367 * but otherwise we'd need to call it every free() */
368 #ifdef EXT3_MB_HINT_GROUP_ALLOC
369 ext3_mb_discard_inode_preallocations(inode);
371 ext3_free_blocks(handle, inode, ext_pblock(&nex),
372 cpu_to_le16(nex.ee_len), 0);
377 * Putting len of the actual extent we just inserted,
378 * we are asking ext3_ext_walk_space() to continue
379 * scaning after that block
381 cex->ec_len = le16_to_cpu(nex.ee_len);
382 cex->ec_start = ext_pblock(&nex);
383 BUG_ON(le16_to_cpu(nex.ee_len) == 0);
384 BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
387 up_write((&EXT4_I(inode)->i_data_sem));
388 ext3_journal_stop(handle);
393 CERROR("hmm. why do we find this extent?\n");
394 CERROR("initial space: %lu:%u\n",
395 bp->start, bp->init_num);
396 CERROR("current extent: %u/%u/%llu %d\n",
397 cex->ec_block, cex->ec_len,
398 (unsigned long long)cex->ec_start,
/* Copy the (possibly just-allocated) physical blocks out to the caller's
 * array, skipping any lead-in before bp->start. */
402 if (cex->ec_block < bp->start)
403 i = bp->start - cex->ec_block;
404 if (i >= cex->ec_len)
405 CERROR("nothing to do?! i = %d, e_num = %u\n",
407 for (; i < cex->ec_len && bp->num; i++) {
408 *(bp->blocks) = cex->ec_start + i;
409 if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
413 /* unmap any possible underlying metadata from
414 * the block device mapping. bug 6998. */
415 ll_unmap_underlying_metadata(inode->i_sb,
/* Map @num logical blocks starting at @block of @inode to physical
 * blocks (allocating holes when @create), by walking the extent tree
 * with ext3_ext_new_extent_cb() filling the bp.blocks array.  The extent
 * cache is invalidated afterwards since the callback may have changed
 * the tree. */
427 int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
428 unsigned long num, unsigned long *blocks,
429 int *created, int create)
431 struct ext3_ext_base *base = inode;
435 CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
436 block, block + num - 1, (unsigned) inode->i_ino);
439 bp.created = created;
441 bp.init_num = bp.num = num;
444 err = fsfilt_ext3_ext_walk_space(base, block, num,
445 ext3_ext_new_extent_cb, &bp);
446 ext3_ext_invalidate_cache(base);
/* Map an array of @pages (sorted by index) for an extent-based inode.
 * Runs of pages with consecutive indices are coalesced into one
 * fsfilt_map_nblocks() call; blocks/created advance by
 * blocks_per_page * run-length between runs, and a final call flushes
 * the last run. */
451 int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
452 int pages, unsigned long *blocks,
453 int *created, int create)
455 int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
457 struct page *fp = NULL;
460 CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
461 inode->i_ino, pages, (*page)->index);
463 /* pages are sorted already. so, we just have to find
464 * contig. space and process them properly */
467 /* start new extent */
472 } else if (fp->index + clen == (*page)->index) {
473 /* continue the extent */
480 /* process found extent */
481 rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
482 clen * blocks_per_page, blocks,
487 /* look for next extent */
489 blocks += blocks_per_page * clen;
490 created += blocks_per_page * clen;
494 rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
495 clen * blocks_per_page, blocks,
501 extern int ext3_map_inode_page(struct inode *inode, struct page *page,
502 unsigned long *blocks, int *created, int create);
/* Map @pages for a block-mapped (non-extent) inode: one
 * ext3_map_inode_page() call per page, advancing the blocks/created
 * cursors by blocks_per_page each iteration.  Errors are logged with
 * the failing inode/block. */
503 int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
504 int pages, unsigned long *blocks,
505 int *created, int create)
507 int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
511 for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
512 rc = ext3_map_inode_page(inode, *page, b, cr, create);
514 CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
515 inode->i_ino, *b, *cr, create, rc);
519 b += blocks_per_page;
520 cr += blocks_per_page;
/* Front-end dispatcher: extent-mapped inodes (EXT3_EXTENTS_FL) go to the
 * extent path, block-mapped inodes go to the bitmap path under
 * @optional_mutex (if supplied) since that path is not self-serializing. */
525 int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
526 int pages, unsigned long *blocks,
527 int *created, int create,
528 struct mutex *optional_mutex)
532 if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
533 rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
534 blocks, created, create);
537 if (optional_mutex != NULL)
538 mutex_lock(optional_mutex);
539 rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
541 if (optional_mutex != NULL)
542 mutex_unlock(optional_mutex);
/* Read up to @size bytes at *@offs from @inode via ext3_bread(),
 * block by block.  The request is clamped to i_size under i_lock so we
 * never read past EOF; a zero-length (at/past EOF) request is handled
 * separately.  NOTE(review): the loop structure and the final update of
 * *offs/return value are not visible in this extract. */
547 int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
550 struct buffer_head *bh;
551 int err, blocksize, csize, boffs, osize = size;
553 /* prevent reading after eof */
554 spin_lock(&inode->i_lock);
555 if (i_size_read(inode) < *offs + size) {
556 size = i_size_read(inode) - *offs;
557 spin_unlock(&inode->i_lock);
559 CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
560 i_size_read(inode), *offs);
562 } else if (size == 0) {
566 spin_unlock(&inode->i_lock);
569 blocksize = 1 << inode->i_blkbits;
/* Per-block copy: csize is the chunk within the current block. */
572 block = *offs >> inode->i_blkbits;
573 boffs = *offs & (blocksize - 1);
574 csize = min(blocksize - boffs, size);
575 bh = ext3_bread(NULL, inode, block, 0, &err);
577 CERROR("can't read block: %d\n", err);
581 memcpy(buf, bh->b_data + boffs, csize);
590 EXPORT_SYMBOL(fsfilt_ext3_read);
/* fs_read_record hook: thin wrapper reading from the file's inode via
 * fsfilt_ext3_read(). */
592 static int fsfilt_ext3_read_record(struct file * file, void *buf,
593 int size, loff_t *offs)
596 rc = fsfilt_ext3_read(file->f_dentry->d_inode, buf, size, offs);
/* Write @bufsize bytes at *@offs to @inode inside an existing journal
 * @handle: for each block, bread-with-create, take journal write access,
 * memcpy the chunk in, and mark the buffer as dirty metadata.  After the
 * loop, i_size and ext3 i_disksize are grown under i_lock if the write
 * extended the file, and the inode is marked dirty only when in-core
 * size actually grew past the original size. */
602 int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
603 loff_t *offs, handle_t *handle)
605 struct buffer_head *bh = NULL;
606 loff_t old_size = i_size_read(inode), offset = *offs;
607 loff_t new_size = i_size_read(inode);
609 int err = 0, blocksize = 1 << inode->i_blkbits, size, boffs;
611 while (bufsize > 0) {
615 block = offset >> inode->i_blkbits;
616 boffs = offset & (blocksize - 1);
617 size = min(blocksize - boffs, bufsize);
618 bh = ext3_bread(handle, inode, block, 1, &err);
620 CERROR("can't read/create block: %d\n", err);
624 err = ext3_journal_get_write_access(handle, bh);
626 CERROR("journal_get_write_access() returned error %d\n",
630 LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
631 memcpy(bh->b_data + boffs, buf, size);
632 err = ext3_journal_dirty_metadata(handle, bh);
634 CERROR("journal_dirty_metadata() returned error %d\n",
/* Track the furthest byte written so sizes can be fixed up once below. */
638 if (offset + size > new_size)
639 new_size = offset + size;
647 /* correct in-core and on-disk sizes */
648 if (new_size > i_size_read(inode)) {
649 spin_lock(&inode->i_lock);
650 if (new_size > i_size_read(inode))
651 i_size_write(inode, new_size);
652 if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
653 EXT3_I(inode)->i_disksize = i_size_read(inode);
654 if (i_size_read(inode) > old_size) {
655 spin_unlock(&inode->i_lock);
656 mark_inode_dirty(inode);
658 spin_unlock(&inode->i_lock);
666 EXPORT_SYMBOL(fsfilt_ext3_write_handle);
/* fs_write_record hook: size a transaction for the spanned block count
 * (credits = blocks * DATA_TRANS_BLOCKS + 2 for the size update), start
 * it, delegate the copy to fsfilt_ext3_write_handle(), and optionally
 * force a synchronous commit (h_sync) before stopping the handle. */
668 static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
669 loff_t *offs, int force_sync)
671 struct inode *inode = file->f_dentry->d_inode;
673 int err, block_count = 0, blocksize;
675 /* Determine how many transaction credits are needed */
676 blocksize = 1 << inode->i_blkbits;
677 block_count = (*offs & (blocksize - 1)) + bufsize;
678 block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
680 handle = ext3_journal_start(inode,
681 block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
682 if (IS_ERR(handle)) {
683 CERROR("can't start transaction for %d blocks (%d bytes)\n",
684 block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
686 return PTR_ERR(handle);
689 err = fsfilt_ext3_write_handle(inode, buf, bufsize, offs, handle);
691 if (!err && force_sync)
692 handle->h_sync = 1; /* recovery likes this */
694 ext3_journal_stop(handle);
/* fs_setup hook: sanity-check the mounted filesystem.  A journal is
 * mandatory (error out without one); PDIROPS is enabled when supported,
 * and a missing dir_index feature only draws a warning. */
699 static int fsfilt_ext3_setup(struct super_block *sb)
701 if (!EXT3_HAS_COMPAT_FEATURE(sb,
702 EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
703 CERROR("ext3 mounted without journal\n");
708 CWARN("Enabling PDIROPS\n");
709 set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
710 sb->s_flags |= S_PDIROPS;
712 if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
713 CWARN("filesystem doesn't have dir_index feature enabled\n");
/* Operation table registered with the fsfilt layer; each member points
 * at the corresponding handler defined above. */
716 static struct fsfilt_operations fsfilt_ext3_ops = {
718 .fs_owner = THIS_MODULE,
719 .fs_getlabel = fsfilt_ext3_get_label,
720 .fs_start = fsfilt_ext3_start,
721 .fs_commit = fsfilt_ext3_commit,
722 .fs_map_inode_pages = fsfilt_ext3_map_inode_pages,
723 .fs_write_record = fsfilt_ext3_write_record,
724 .fs_read_record = fsfilt_ext3_read_record,
725 .fs_setup = fsfilt_ext3_setup,
/* Module init: create the callback slab cache, then register the ops
 * table; on registration failure the cache is torn down again. */
728 static int __init fsfilt_ext3_init(void)
732 fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb",
733 sizeof(struct fsfilt_cb_data), 0, 0);
735 CERROR("error allocating fsfilt journal callback cache\n");
736 GOTO(out, rc = -ENOMEM);
739 rc = fsfilt_register_ops(&fsfilt_ext3_ops);
/* Unwind the cache if registration failed. */
742 int err = cfs_mem_cache_destroy(fcb_cache);
743 LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
/* Module exit: unregister the ops table and destroy the callback cache;
 * a non-empty cache at this point is a bug (asserted). */
749 static void __exit fsfilt_ext3_exit(void)
753 fsfilt_unregister_ops(&fsfilt_ext3_ops);
754 rc = cfs_mem_cache_destroy(fcb_cache);
755 LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
758 module_init(fsfilt_ext3_init);
759 module_exit(fsfilt_ext3_exit);
761 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
762 MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
763 MODULE_LICENSE("GPL");