/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lustre/lib/fsfilt_ext3.c
 * Lustre filesystem abstraction routines
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 * Author: Andreas Dilger <adilger@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_FILTER

#include <linux/init.h>
#include <linux/module.h>
#include <linux/jbd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/version.h>
#include <linux/bitops.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/ext3_xattr.h>
#else
#include <ext3/xattr.h>
#endif

#include <libcfs/kp30.h>
#include <linux/lustre_fsfilt.h>
#include <linux/obd.h>
#include <linux/obd_class.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#include <linux/ext3_extents.h>
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
# define lock_24kernel()        lock_kernel()
# define unlock_24kernel()      unlock_kernel()
#else
# define lock_24kernel()        do {} while (0)
# define unlock_24kernel()      do {} while (0)
#endif

static kmem_cache_t *fcb_cache;

struct fsfilt_cb_data {
        struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};

#ifndef EXT3_XATTR_INDEX_TRUSTED        /* temporary until we hit l28 kernel */
#define EXT3_XATTR_INDEX_TRUSTED        4
#endif
#define XATTR_LUSTRE_MDS_LOV_EA         "lov"

/*
 * We don't currently need any additional blocks for rmdir and
 * unlink transactions because we are storing the OST oa_id inside
 * the inode (which we will be changing anyways as part of this
 * same patch).
 */
static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
                               int logs)
{
        /* For updates to the last received file */
        int nblocks = EXT3_SINGLEDATA_TRANS_BLOCKS;
        journal_t *journal;
        void *handle;

        if (current->journal_info) {
                CDEBUG(D_INODE, "increasing refcount on %p\n",
                       current->journal_info);
                goto journal_start;
        }

        switch(op) {
        case FSFILT_OP_RMDIR:
        case FSFILT_OP_UNLINK:
                /* delete one file + create/update logs for each stripe */
                nblocks += EXT3_DELETE_TRANS_BLOCKS;
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        case FSFILT_OP_RENAME:
                /* modify additional directory */
                nblocks += EXT3_SINGLEDATA_TRANS_BLOCKS;
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        case FSFILT_OP_SYMLINK:
                /* additional block + block bitmap + GDT for long symlink */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_CREATE: {
#if defined(EXT3_EXTENTS_FL) && defined(EXT3_INDEX_FL)
                static int warned;
                if (!warned) {
                        if (!test_opt(inode->i_sb, EXTENTS)) {
                                warned = 1;
                        } else if (((EXT3_I(inode)->i_flags &
                              cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL)) ==
                              cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL))) {
                                CWARN("extent-mapped directory found - contact "
                                      "CFS: support@clusterfs.com\n");
                                warned = 1;
                        }
                }
#endif
                /* create/update logs for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        }
        case FSFILT_OP_MKDIR:
        case FSFILT_OP_MKNOD:
                /* modify one inode + block bitmap + GDT */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_LINK:
                /* modify parent directory */
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        EXT3_DATA_TRANS_BLOCKS;
                break;
        case FSFILT_OP_SETATTR:
                /* Setattr on inode */
                nblocks += 1;
                break;
        case FSFILT_OP_CANCEL_UNLINK:
                /* blocks for log header bitmap update OR
                 * blocks for catalog header bitmap update + unlink of logs */
                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
                        EXT3_DELETE_TRANS_BLOCKS * logs;
                break;
        default: CERROR("unknown transaction start op %d\n", op);
                LBUG();
        }

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(inode->i_sb)->s_journal;
        if (nblocks > journal->j_max_transaction_buffers) {
                CERROR("too many credits %d for op %ux%u using %d instead\n",
                       nblocks, op, logs, journal->j_max_transaction_buffers);
                nblocks = journal->j_max_transaction_buffers;
        }

 journal_start:
        LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
        handle = journal_start(EXT3_JOURNAL(inode), nblocks);
        if (!IS_ERR(handle))
                LASSERT(current->journal_info == handle);
        else
                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
                       op, nblocks, PTR_ERR(handle));
        return handle;
}
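
/*
 * Illustrative sketch (not part of the original file): how a caller pairs
 * fs_start/fs_commit around a metadata update.  The function and variable
 * names here are hypothetical placeholders, shown only to make the
 * journal_info refcount contract above concrete.
 */
#if 0
static int example_setattr_transaction(struct dentry *dentry,
                                       struct iattr *iattr)
{
        void *handle;
        int rc;

        /* reserve credits for one setattr plus one llog update */
        handle = fsfilt_ext3_start(dentry->d_inode, FSFILT_OP_SETATTR,
                                   NULL, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        rc = fsfilt_ext3_setattr(dentry, handle, iattr, 0);

        /* force_sync = 1 makes journal_stop wait for the commit */
        fsfilt_ext3_commit(dentry->d_inode, handle, 1);
        return rc;
}
#endif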

/*
 * Calculate the number of buffer credits needed to write multiple pages in
 * a single ext3 transaction.  No, this shouldn't be here, but as yet ext3
 * doesn't have a nice API for calculating this sort of thing in advance.
 *
 * See comment above ext3_writepage_trans_blocks for details.  We assume
 * no data journaling is being done, but it does allow for all of the pages
 * being non-contiguous.  If we are guaranteed contiguous pages we could
 * reduce the number of (d)indirect blocks a lot.
 *
 * With N blocks per page and P pages, for each inode we have at most:
 * N*P indirect
 * min(N*P, blocksize/4 + 1) dindirect blocks
 * niocount tindirect
 *
 * For the entire filesystem, we have at most:
 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
 * objcount inode blocks
 * 1 superblock
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 *
 * 1 EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
 */
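
/*
 * Worked example (illustrative numbers only, not from the original): with
 * 4096-byte blocks and 4096-byte pages, N = 1 block per page.  For one
 * object (objcount = 1) covering P = 16 non-contiguous pages, the per-inode
 * worst case above is 16 indirect plus min(16, 4096/4 + 1) = 16 dindirect
 * blocks, plus the bitmap and group descriptor blocks derived from them,
 * one inode block, one superblock, and the quota and last_rcvd credits.
 */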
static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
                                      int niocount, struct niobuf_local *nb)
{
        struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
        __u64 next_indir;
        const int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
        int nbitmaps = 0, ngdblocks;
        int needed = objcount + 1; /* inodes + superblock */
        int i, j;

        for (i = 0, j = 0; i < objcount; i++, fso++) {
                /* two or more dindirect blocks in case we cross boundary */
                int ndind = (long)((nb[j + fso->fso_bufcnt - 1].offset -
                                    nb[j].offset) >>
                                   sb->s_blocksize_bits) /
                        (EXT3_ADDR_PER_BLOCK(sb) * EXT3_ADDR_PER_BLOCK(sb));
                nbitmaps += min(fso->fso_bufcnt, ndind > 0 ? ndind : 2);

                /* leaf, indirect, tindirect blocks for first block */
                nbitmaps += blockpp + 2;

                j += fso->fso_bufcnt;
        }

        next_indir = nb[0].offset +
                (EXT3_ADDR_PER_BLOCK(sb) << sb->s_blocksize_bits);
        for (i = 1; i < niocount; i++) {
                if (nb[i].offset >= next_indir) {
                        nbitmaps++;     /* additional indirect */
                        next_indir = nb[i].offset +
                                (EXT3_ADDR_PER_BLOCK(sb)<<sb->s_blocksize_bits);
                } else if (nb[i].offset != nb[i - 1].offset + sb->s_blocksize) {
                        nbitmaps++;     /* additional indirect */
                }
                nbitmaps += blockpp;    /* each leaf in different group? */
        }

        ngdblocks = nbitmaps;
        if (nbitmaps > EXT3_SB(sb)->s_groups_count)
                nbitmaps = EXT3_SB(sb)->s_groups_count;
        if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
                ngdblocks = EXT3_SB(sb)->s_gdb_count;

        needed += nbitmaps + ngdblocks;

        /* last_rcvd update */
        needed += EXT3_DATA_TRANS_BLOCKS;

#if defined(CONFIG_QUOTA)
        /* We assume that there will be 1 bit set in s_dquot.flags for each
         * quota file that is active.  This is at least true for now.
         */
        needed += hweight32(sb_any_quota_enabled(sb)) *
                EXT3_SINGLEDATA_TRANS_BLOCKS;
#endif

        return needed;
}

/* We have to start a huge journal transaction here to hold all of the
 * metadata for the pages being written here.  This is necessitated by
 * the fact that we do lots of prepare_write operations before we do
 * any of the matching commit_write operations, so even if we split
 * up to use "smaller" transactions none of them could complete until
 * all of them were opened.  By having a single journal transaction,
 * we eliminate duplicate reservations for common blocks like the
 * superblock and group descriptors or bitmaps.
 *
 * We will start the transaction here, but each prepare_write will
 * add a refcount to the transaction, and each commit_write will
 * remove a refcount.  The transaction will be closed when all of
 * the pages have been written.
 */
static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
                                   int niocount, struct niobuf_local *nb,
                                   void *desc_private, int logs)
{
        journal_t *journal;
        handle_t *handle;
        int needed;

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
        needed = fsfilt_ext3_credits_needed(objcount, fso, niocount, nb);

        /* The number of blocks we could _possibly_ dirty can be very large.
         * We reduce our request if it is absurd (and we couldn't get that
         * many credits for a single handle anyways).
         *
         * At some point we have to limit the size of I/Os sent at one time,
         * increase the size of the journal, or we have to calculate the
         * actual journal requirements more carefully by checking all of
         * the blocks instead of being maximally pessimistic.  It remains to
         * be seen if this is a real problem or not.
         */
        if (needed > journal->j_max_transaction_buffers) {
                CERROR("want too many journal credits (%d) using %d instead\n",
                       needed, journal->j_max_transaction_buffers);
                needed = journal->j_max_transaction_buffers;
        }

        LASSERTF(needed > 0, "can't start %d credit transaction\n", needed);

        handle = journal_start(journal, needed);
        if (IS_ERR(handle)) {
                CERROR("can't get handle for %d credits: rc = %ld\n", needed,
                       PTR_ERR(handle));
        } else {
                LASSERT(handle->h_buffer_credits >= needed);
                LASSERT(current->journal_info == handle);
        }

        return handle;
}

static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
{
        int rc;
        handle_t *handle = h;

        LASSERT(current->journal_info == handle);
        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */

        rc = journal_stop(handle);

        return rc;
}

static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
                                    void **wait_handle)
{
        unsigned long tid;
        transaction_t *transaction;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        unsigned long rtid;
#endif
        handle_t *handle = h;
        journal_t *journal;
        int rc;

        LASSERT(current->journal_info == handle);

        transaction = handle->h_transaction;
        journal = transaction->t_journal;
        tid = transaction->t_tid;
        /* we don't want to be blocked */
        handle->h_sync = 0;
        rc = journal_stop(handle);
        if (rc) {
                CERROR("error while stopping transaction: %d\n", rc);
                return rc;
        }

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        rtid = log_start_commit(journal, transaction);
        if (rtid != tid)
                CERROR("strange race: %lu != %lu\n",
                       (unsigned long) tid, (unsigned long) rtid);
#else
        log_start_commit(journal, transaction->t_tid);
#endif

        *wait_handle = (void *) tid;
        CDEBUG(D_INODE, "commit async: %lu\n", (unsigned long) tid);
        return 0;
}

static int fsfilt_ext3_commit_wait(struct inode *inode, void *h)
{
        tid_t tid = (tid_t)(long)h;

        CDEBUG(D_INODE, "commit wait: %lu\n", (unsigned long) tid);
        if (is_journal_aborted(EXT3_JOURNAL(inode)))
                return -EIO;

        log_wait_commit(EXT3_JOURNAL(inode), tid);

        if (is_journal_aborted(EXT3_JOURNAL(inode)))
                return -EIO;
        return 0;
}
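
/*
 * Illustrative sketch (not in the original): the async commit handshake.
 * fs_commit_async() stops the handle and returns an opaque wait handle
 * (the transaction tid); fs_commit_wait() blocks until that tid commits.
 * The example function below is a hypothetical caller.
 */
#if 0
static int example_async_commit(struct inode *inode, void *handle)
{
        void *wait_handle;
        int rc;

        rc = fsfilt_ext3_commit_async(inode, handle, &wait_handle);
        if (rc)
                return rc;

        /* ... reply to the client here, before the commit hits disk ... */

        return fsfilt_ext3_commit_wait(inode, wait_handle);
}
#endif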

static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
                               struct iattr *iattr, int do_trunc)
{
        struct inode *inode = dentry->d_inode;
        int rc;

        /* A _really_ horrible hack to avoid removing the data stored
         * in the block pointers; this is really the "small" stripe MD data.
         * We can avoid further hackery by virtue of the MDS file size being
         * zero all the time (which doesn't invoke block truncate at unlink
         * time), so we assert we never change the MDS file size from zero. */
        if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
                /* ATTR_SIZE would invoke truncate: clear it */
                iattr->ia_valid &= ~ATTR_SIZE;
                EXT3_I(inode)->i_disksize = inode->i_size = iattr->ia_size;

                /* make sure _something_ gets set - so new inode
                 * goes to disk (probably won't work over XFS) */
                if (!(iattr->ia_valid & (ATTR_MODE | ATTR_MTIME | ATTR_CTIME))) {
                        iattr->ia_valid |= ATTR_MODE;
                        iattr->ia_mode = inode->i_mode;
                }
        }

        /* Don't allow setattr to change file type */
        iattr->ia_mode = (inode->i_mode & S_IFMT) | (iattr->ia_mode & ~S_IFMT);

        /* We set these flags on the client, but have already checked perms
         * so don't confuse inode_change_ok. */
        iattr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);

        if (inode->i_op->setattr) {
                rc = inode->i_op->setattr(dentry, iattr);
        } else {
                rc = inode_change_ok(inode, iattr);
                if (!rc)
                        rc = inode_setattr(inode, iattr);
        }

        return rc;
}

static int fsfilt_ext3_iocontrol(struct inode *inode, struct file *file,
                                 unsigned int cmd, unsigned long arg)
{
        int rc = -ENOTTY;

        if (inode->i_fop->ioctl)
                rc = inode->i_fop->ioctl(inode, file, cmd, arg);

        return rc;
}

static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
                              void *lmm, int lmm_size)
{
        int rc;

        LASSERT(down_trylock(&inode->i_sem) != 0);

        if (EXT3_I(inode)->i_file_acl /* || large inode EA flag */)
                CWARN("setting EA on %lu/%u again... interesting\n",
                      inode->i_ino, inode->i_generation);

        rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
                                   XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size, 0);
        if (rc)
                CERROR("error adding MD data to inode %lu: rc = %d\n",
                       inode->i_ino, rc);
        return rc;
}

/* Must be called with i_sem held */
static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size)
{
        int rc;

        LASSERT(down_trylock(&inode->i_sem) != 0);

        rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
                            XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size);

        /* This gives us the MD size */
        if (lmm == NULL)
                return (rc == -ENODATA) ? 0 : rc;

        if (rc < 0) {
                CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
                       EXT3_XATTR_INDEX_TRUSTED, XATTR_LUSTRE_MDS_LOV_EA,
                       inode->i_ino, rc);
                memset(lmm, 0, lmm_size);
                return (rc == -ENODATA) ? 0 : rc;
        }

        return rc;
}
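
/*
 * Illustrative sketch (not in the original): callers can probe the EA size
 * by passing lmm == NULL, then fetch the data into a buffer of that size.
 * OBD_ALLOC/OBD_FREE are the usual Lustre allocation wrappers; i_sem must
 * be held around both calls, as the LASSERT above checks.  The function
 * name below is a hypothetical placeholder.
 */
#if 0
static int example_get_lov_ea(struct inode *inode)
{
        void *lmm;
        int lmm_size, rc;

        lmm_size = fsfilt_ext3_get_md(inode, NULL, 0);
        if (lmm_size <= 0)
                return lmm_size;

        OBD_ALLOC(lmm, lmm_size);
        if (lmm == NULL)
                return -ENOMEM;

        rc = fsfilt_ext3_get_md(inode, lmm, lmm_size);
        /* ... use the striping data ... */
        OBD_FREE(lmm, lmm_size);
        return rc;
}
#endif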

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct bio *bio)
{
        submit_bio(rw, bio);
        return 0;
}
#else
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct kiobuf *bio)
{
        int rc, blocks_per_page;

        rc = brw_kiovec(rw, 1, &bio, inode->i_dev,
                        KIOBUF_GET_BLOCKS(bio), 1 << inode->i_blkbits);

        blocks_per_page = PAGE_SIZE >> inode->i_blkbits;

        if (rc != (1 << inode->i_blkbits) * bio->nr_pages * blocks_per_page) {
                CERROR("short write?  expected %d, wrote %d\n",
                       (1 << inode->i_blkbits) * bio->nr_pages *
                       blocks_per_page, rc);
        }

        return rc;
}
#endif

static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
                                    loff_t *off)
{
        struct inode *inode = file->f_dentry->d_inode;
        int rc = 0;

        if (S_ISREG(inode->i_mode))
                rc = file->f_op->read(file, buf, count, off);
        else {
                const int blkbits = inode->i_sb->s_blocksize_bits;
                const int blksize = inode->i_sb->s_blocksize;

                CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
                       count, inode->i_ino, *off);
                while (count > 0) {
                        struct buffer_head *bh;

                        bh = NULL;
                        if (*off < inode->i_size) {
                                int err = 0;

                                bh = ext3_bread(NULL, inode, *off >> blkbits,
                                                0, &err);
                                CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);
                                if (bh) {
                                        memcpy(buf, bh->b_data, blksize);
                                        brelse(bh);
                                } else if (err) {
                                        /* XXX in theory we should just fake
                                         * this buffer and continue like ext3,
                                         * especially if this is a partial read
                                         */
                                        CERROR("error read dir %lu+%llu: %d\n",
                                               inode->i_ino, *off, err);
                                        return err;
                                }
                        } else {
                                struct ext3_dir_entry_2 *fake = (void *)buf;

                                CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
                                memset(fake, 0, sizeof(*fake));
                                fake->rec_len = cpu_to_le16(blksize);
                        }

                        buf += blksize;
                        *off += blksize;
                        rc += blksize;
                        count -= blksize;
                }
        }

        return rc;
}

static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
{
        struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;

        fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);

        OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
}

static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
                                      void *handle, fsfilt_cb_t cb_func,
                                      void *cb_data)
{
        struct fsfilt_cb_data *fcb;

        OBD_SLAB_ALLOC(fcb, fcb_cache, GFP_NOFS, sizeof *fcb);
        if (fcb == NULL)
                return -ENOMEM;

        fcb->cb_func = cb_func;
        fcb->cb_obd = obd;
        fcb->cb_last_rcvd = last_rcvd;
        fcb->cb_data = cb_data;

        CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
        journal_callback_set(handle, fsfilt_ext3_cb_func,
                             (struct journal_callback *)fcb);

        return 0;
}

/*
 * We need to hack the return value for the free inode counts because
 * the current EA code requires one filesystem block per inode with EAs,
 * so it is possible to run out of blocks before we run out of inodes.
 *
 * This can be removed when the ext3 EA code is fixed.
 */
static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
{
        struct kstatfs sfs;
        int rc;

        memset(&sfs, 0, sizeof(sfs));

        rc = sb->s_op->statfs(sb, &sfs);

        if (!rc && sfs.f_bfree < sfs.f_ffree) {
                sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
                sfs.f_ffree = sfs.f_bfree;
        }

        statfs_pack(osfs, &sfs);
        return rc;
}
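
/*
 * Worked example (illustrative numbers only): with f_files = 1000,
 * f_ffree = 800 and only f_bfree = 100 blocks left, the adjustment above
 * reports f_files = (1000 - 800) + 100 = 300 and f_ffree = 100, so the
 * free inode count never exceeds the blocks available for their EAs.
 */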

static int fsfilt_ext3_sync(struct super_block *sb)
{
        return ext3_force_commit(sb);
}

#if defined(EXT3_MULTIBLOCK_ALLOCATOR) && (!defined(EXT3_EXT_CACHE_NO) || defined(EXT_CACHE_MARK))
#warning "kernel code has old extents/mballoc patch, disabling"
#undef EXT3_MULTIBLOCK_ALLOCATOR
#endif

#ifndef EXT3_EXTENTS_FL
#define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
#endif

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#define ext3_up_truncate_sem(inode)    up_write(&EXT3_I(inode)->truncate_sem)
#define ext3_down_truncate_sem(inode)  down_write(&EXT3_I(inode)->truncate_sem)
#else
#define ext3_up_truncate_sem(inode)    up(&EXT3_I(inode)->truncate_sem)
#define ext3_down_truncate_sem(inode)  down(&EXT3_I(inode)->truncate_sem)
#endif

#include <linux/lustre_version.h>
#if EXT3_EXT_MAGIC == 0xf301
#define ee_start e_start
#define ee_block e_block
#define ee_len   e_num
#endif
#ifndef EXT3_BB_MAX_BLOCKS
#define ext3_mb_new_blocks(handle, inode, goal, count, aflags, err) \
        ext3_new_blocks(handle, inode, count, goal, err)
#endif

struct bpointers {
        unsigned long *blocks;
        int *created;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
                              unsigned long block, int *aflags)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext3_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext)) {
#if 0
                        /* This prefers to eat into a contiguous extent
                         * rather than find an extent that the whole
                         * request will fit into.  This can fragment data
                         * block allocation and prevents our lovely 1M I/Os
                         * from reaching the disk intact. */
                        if (ex->ee_block + ex->ee_len == block)
                                *aflags |= 1;
#endif
                        return ex->ee_start + (block - ex->ee_block);
                }

                /* it looks like the index is empty;
                 * try to find starting from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
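
/*
 * Worked example (illustrative numbers only): with 32768 blocks per group,
 * s_first_data_block = 1, an inode in block group 2 and current->pid % 16
 * == 5, the fallback goal above is bg_start = 2 * 32768 + 1 = 65537 plus
 * colour = 5 * (32768 / 16) = 10240, i.e. 75777 + block.
 */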

static int ext3_ext_new_extent_cb(struct ext3_extents_tree *tree,
                                  struct ext3_ext_path *path,
                                  struct ext3_ext_cache *cex)
{
        struct inode *inode = tree->inode;
        struct bpointers *bp = tree->private;
        struct ext3_extent nex;
        int count, err, goal;
        unsigned long pblock;
        unsigned long tgen;
        loff_t new_i_size;
        handle_t *handle;
        int i, aflags = 0;

        i = EXT_DEPTH(tree);
        EXT_ASSERT(i == path->p_depth);
        EXT_ASSERT(path[i].p_hdr);

        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->created) = 0;
                        bp->created++;
                        *(bp->blocks) = cex->ec_start + i;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
                return EXT_CONTINUE;
        }

        tgen = EXT_GENERATION(tree);
        count = ext3_ext_calc_credits_for_insert(tree, path);
        ext3_up_truncate_sem(inode);

        handle = journal_start(EXT3_JOURNAL(inode), count+EXT3_ALLOC_NEEDED+1);
        if (IS_ERR(handle)) {
                ext3_down_truncate_sem(inode);
                return PTR_ERR(handle);
        }

        ext3_down_truncate_sem(inode);
        if (tgen != EXT_GENERATION(tree)) {
                /* the tree has changed, so the path can be invalid at moment */
                journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        goal = ext3_ext_find_goal(inode, path, cex->ec_block, &aflags);
        aflags |= 2; /* blocks have already been reserved */
        pblock = ext3_mb_new_blocks(handle, inode, goal, &count, aflags, &err);
        if (!pblock)
                goto out;
        EXT_ASSERT(count <= cex->ec_len);

        /* insert new extent */
        nex.ee_block = cex->ec_block;
        nex.ee_start = pblock;
        nex.ee_len = count;
        err = ext3_ext_insert_extent(handle, tree, path, &nex);
        if (err)
                goto out;

        /*
         * Putting the len of the actual extent we just inserted,
         * we are asking ext3_ext_walk_space() to continue
         * scanning after that block
         */
        cex->ec_len = nex.ee_len;
        cex->ec_start = nex.ee_start;
        BUG_ON(nex.ee_len == 0);
        BUG_ON(nex.ee_block != cex->ec_block);

        /* correct the on-disk inode size */
        if (nex.ee_len > 0) {
                new_i_size = (loff_t) nex.ee_block + nex.ee_len;
                new_i_size = new_i_size << inode->i_blkbits;
                if (new_i_size > EXT3_I(inode)->i_disksize) {
                        EXT3_I(inode)->i_disksize = new_i_size;
                        err = ext3_mark_inode_dirty(handle, inode);
                }
        }

out:
        journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                               bp->start, bp->init_num);
                        CERROR("current extent: %u/%u/%u %d\n",
                               cex->ec_block, cex->ec_len,
                               cex->ec_start, cex->ec_type);
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
                                *(bp->created) = 0;
                        else
                                *(bp->created) = 1;
                        bp->created++;
                        *(bp->blocks) = cex->ec_start + i;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
                       unsigned long num, unsigned long *blocks,
                       int *created, int create)
{
        struct ext3_extents_tree tree;
        struct bpointers bp;
        int err;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               block, block + num, (unsigned) inode->i_ino);

        ext3_init_tree_desc(&tree, inode);
        tree.private = &bp;
        bp.blocks = blocks;
        bp.created = created;
        bp.start = block;
        bp.init_num = bp.num = num;
        bp.create = create;

        ext3_down_truncate_sem(inode);
        err = ext3_ext_walk_space(&tree, block, num, ext3_ext_new_extent_cb);
        ext3_ext_invalidate_cache(&tree);
        ext3_up_truncate_sem(inode);

        return err;
}

int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int *created, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
               inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
                created += blocks_per_page * clen;
        }

        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
cleanup:
        return rc;
}
#endif /* EXT3_MULTIBLOCK_ALLOCATOR */
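
/*
 * Illustrative note (not in the original): for sorted page indices
 * 0, 1, 2, 7, 8 the loop above issues two fsfilt_map_nblocks() calls,
 * one for the contiguous run [0..2] and one for [7..8], advancing the
 * blocks/created arrays by blocks_per_page * clen between runs.
 */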

extern int ext3_map_inode_page(struct inode *inode, struct page *page,
                               unsigned long *blocks, int *created, int create);
int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
                                   int pages, unsigned long *blocks,
                                   int *created, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        unsigned long *b;
        int rc = 0, i, *cr;

        for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
                rc = ext3_map_inode_page(inode, *page, b, cr, create);
                if (rc) {
                        CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
                               inode->i_ino, *b, *cr, create, rc);
                        break;
                }

                b += blocks_per_page;
                cr += blocks_per_page;
        }
        return rc;
}

int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
                                int pages, unsigned long *blocks,
                                int *created, int create,
                                struct semaphore *optional_sem)
{
        int rc;

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
        if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
                rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
                                                     blocks, created, create);
                return rc;
        }
#endif
        if (optional_sem != NULL)
                down(optional_sem);
        rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
                                            created, create);
        if (optional_sem != NULL)
                up(optional_sem);

        return rc;
}

extern int ext3_prep_san_write(struct inode *inode, long *blocks,
                               int nblocks, loff_t newsize);
static int fsfilt_ext3_prep_san_write(struct inode *inode, long *blocks,
                                      int nblocks, loff_t newsize)
{
        return ext3_prep_san_write(inode, blocks, nblocks, newsize);
}

static int fsfilt_ext3_read_record(struct file *file, void *buf,
                                   int size, loff_t *offs)
{
        struct inode *inode = file->f_dentry->d_inode;
        unsigned long block;
        struct buffer_head *bh;
        int err, blocksize, csize, boffs;

        /* prevent reading after eof */
        if (inode->i_size < *offs + size) {
                size = inode->i_size - *offs;
                if (size < 0) {
                        CERROR("size %llu is too short for read %u@%llu\n",
                               inode->i_size, size, *offs);
                        return -EIO;
                } else if (size == 0) {
                        return 0;
                }
        }

        blocksize = 1 << inode->i_blkbits;

        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ext3_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("can't read block: %d\n", err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return 0;
}
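
/*
 * Worked example (illustrative numbers only): reading 6000 bytes at
 * *offs = 5000 with 4096-byte blocks gives block = 1, boffs = 904 and
 * csize = min(4096 - 904, 6000) = 3192 on the first pass, then
 * block = 2, boffs = 0, csize = 2808 on the second.
 */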

static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
                                    loff_t *offs, int force_sync)
{
        struct buffer_head *bh = NULL;
        unsigned long block;
        struct inode *inode = file->f_dentry->d_inode;
        loff_t old_size = inode->i_size, offset = *offs;
        loff_t new_size = inode->i_size;
        journal_t *journal;
        handle_t *handle;
        int err = 0, block_count = 0, blocksize, size, boffs;

        /* Determine how many transaction credits are needed */
        blocksize = 1 << inode->i_blkbits;
        block_count = (*offs & (blocksize - 1)) + bufsize;
        block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
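
        /* Worked example (illustrative numbers only): writing bufsize = 8192
         * at *offs = 5000 with 4096-byte blocks gives
         * block_count = (904 + 8192 + 4095) >> 12 = 3 blocks, so we ask for
         * 3 * EXT3_DATA_TRANS_BLOCKS + 2 credits below. */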

        journal = EXT3_SB(inode->i_sb)->s_journal;
        handle = journal_start(journal,
                               block_count * EXT3_DATA_TRANS_BLOCKS + 2);
        if (IS_ERR(handle)) {
                CERROR("can't start transaction\n");
                return PTR_ERR(handle);
        }

        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                bh = ext3_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("can't read/create block: %d\n", err);
                        goto out;
                }

                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        goto out;
                }
                LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err) {
                        CERROR("journal_dirty_metadata() returned error %d\n",
                               err);
                        goto out;
                }
                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                buf += size;
                bufsize -= size;
        }

        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */
out:
        if (bh)
                brelse(bh);

        /* correct in-core and on-disk sizes */
        if (new_size > inode->i_size) {
                lock_kernel();
                if (new_size > inode->i_size)
                        inode->i_size = new_size;
                if (inode->i_size > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = inode->i_size;
                if (inode->i_size > old_size)
                        mark_inode_dirty(inode);
                unlock_kernel();
        }

        journal_stop(handle);

        if (err == 0)
                *offs = offset;
        return err;
}

static int fsfilt_ext3_setup(struct super_block *sb)
{
#if 0
        EXT3_SB(sb)->dx_lock = fsfilt_ext3_dx_lock;
        EXT3_SB(sb)->dx_unlock = fsfilt_ext3_dx_unlock;
#endif
#ifdef S_PDIROPS
        CWARN("Enabling PDIROPS\n");
        set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
        sb->s_flags |= S_PDIROPS;
#endif
        return 0;
}

/* If fso is NULL, op is a FSFILT operation, otherwise op is the number of fso
   objects.  Logs is the number of logfiles to update. */
static int fsfilt_ext3_get_op_len(int op, struct fsfilt_objinfo *fso, int logs)
{
        if (fso == NULL) {
                switch(op) {
                case FSFILT_OP_CREATE:
                        /* directory leaf, index & indirect & EA */
                        return 4 + 3 * logs;
                case FSFILT_OP_UNLINK:
                        return 1;
                }
        } else {
                int i;
                int needed = 0;
                struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
                int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
                int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
                for (i = 0; i < op; i++, fso++) {
                        int nblocks = fso->fso_bufcnt * blockpp;
                        int ndindirect = min(nblocks, addrpp + 1);
                        int nindir = nblocks + ndindirect + 1;

                        needed += nindir;
                }
                return needed + 3 * logs;
        }

        return 0;
}
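
/*
 * Worked example (illustrative numbers only): for one object (op = 1) with
 * fso_bufcnt = 16 pages on a filesystem with 4096-byte blocks and pages,
 * blockpp = 1, nblocks = 16 and addrpp = 1024, so ndindirect =
 * min(16, 1025) = 16, nindir = 16 + 16 + 1 = 33, and the estimate above
 * is 33 + 3 * logs blocks.
 */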

static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_start               = fsfilt_ext3_start,
        .fs_brw_start           = fsfilt_ext3_brw_start,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_commit_async        = fsfilt_ext3_commit_async,
        .fs_commit_wait         = fsfilt_ext3_commit_wait,
        .fs_setattr             = fsfilt_ext3_setattr,
        .fs_iocontrol           = fsfilt_ext3_iocontrol,
        .fs_set_md              = fsfilt_ext3_set_md,
        .fs_get_md              = fsfilt_ext3_get_md,
        .fs_readpage            = fsfilt_ext3_readpage,
        .fs_add_journal_cb      = fsfilt_ext3_add_journal_cb,
        .fs_statfs              = fsfilt_ext3_statfs,
        .fs_sync                = fsfilt_ext3_sync,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_prep_san_write      = fsfilt_ext3_prep_san_write,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
        .fs_send_bio            = fsfilt_ext3_send_bio,
        .fs_get_op_len          = fsfilt_ext3_get_op_len,
};

static int __init fsfilt_ext3_init(void)
{
        int rc;

        fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
                                      sizeof(struct fsfilt_cb_data), 0,
                                      0, NULL, NULL);
        if (!fcb_cache) {
                CERROR("error allocating fsfilt journal callback cache\n");
                GOTO(out, rc = -ENOMEM);
        }

        rc = fsfilt_register_ops(&fsfilt_ext3_ops);

        if (rc)
                kmem_cache_destroy(fcb_cache);
out:
        return rc;
}

static void __exit fsfilt_ext3_exit(void)
{
        fsfilt_unregister_ops(&fsfilt_ext3_ops);
        LASSERT(kmem_cache_destroy(fcb_cache) == 0);
}

module_init(fsfilt_ext3_init);
module_exit(fsfilt_ext3_exit);

MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
MODULE_LICENSE("GPL");