/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lustre/lib/fsfilt_ext3.c
 * Lustre filesystem abstraction routines
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 * Author: Andreas Dilger <adilger@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_FILTER

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/version.h>
#include <linux/bitops.h>
#include <linux/quota.h>
#include <linux/quotaio_v1.h>
#include <linux/quotaio_v2.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/ext3_xattr.h>
#else
#include <ext3/xattr.h>
#endif

#include <libcfs/kp30.h>
#include <linux/lustre_fsfilt.h>
#include <linux/obd.h>
#include <linux/obd_class.h>
#include <linux/lustre_quota.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#include <linux/ext3_extents.h>
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
# define lock_24kernel()        lock_kernel()
# define unlock_24kernel()      unlock_kernel()
#else
# define lock_24kernel()        do {} while (0)
# define unlock_24kernel()      do {} while (0)
#endif
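/* Slab cache for struct fsfilt_cb_data: allocated per journal commit
 * callback in fsfilt_ext3_add_journal_cb() below and freed in
 * fsfilt_ext3_cb_func() once jbd has invoked the callback. */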
static kmem_cache_t *fcb_cache;

struct fsfilt_cb_data {
        struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};

#ifndef EXT3_XATTR_INDEX_TRUSTED        /* temporary until we hit l28 kernel */
#define EXT3_XATTR_INDEX_TRUSTED        4
#endif
#define XATTR_LUSTRE_MDS_LOV_EA         "lov"
/*
 * We don't currently need any additional blocks for rmdir and
 * unlink transactions because we are storing the OST oa_id inside
 * the inode (which we will be changing anyways as part of this
 * transaction).
 */
static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
                               int logs)
{
        /* For updates to the last received file */
        int nblocks = EXT3_SINGLEDATA_TRANS_BLOCKS;
        journal_t *journal;
        handle_t *handle;

        if (current->journal_info) {
                CDEBUG(D_INODE, "increasing refcount on %p\n",
                       current->journal_info);
                goto journal_start;
        }

        switch(op) {
        case FSFILT_OP_RMDIR:
        case FSFILT_OP_UNLINK:
                /* delete one file + create/update logs for each stripe */
                nblocks += EXT3_DELETE_TRANS_BLOCKS;
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        case FSFILT_OP_RENAME:
                /* modify additional directory */
                nblocks += EXT3_SINGLEDATA_TRANS_BLOCKS;
                /* no break */
        case FSFILT_OP_SYMLINK:
                /* additional block + block bitmap + GDT for long symlink */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_CREATE: {
#if defined(EXT3_EXTENTS_FL) && defined(EXT3_INDEX_FL)
                static int warned;
                if (!warned) {
                        if (!test_opt(inode->i_sb, EXTENTS)) {
                                warned = 1;
                        } else if (((EXT3_I(inode)->i_flags &
                              cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL)) ==
                              cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL))) {
                                CWARN("extent-mapped directory found - contact "
                                      "CFS: support@clusterfs.com\n");
                                warned = 1;
                        }
                }
#endif
                /* no break */
        }
        case FSFILT_OP_MKDIR:
        case FSFILT_OP_MKNOD:
                /* modify one inode + block bitmap + GDT */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_LINK:
                /* modify parent directory */
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        EXT3_DATA_TRANS_BLOCKS;
                /* create/update logs for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        case FSFILT_OP_SETATTR:
                /* Setattr on inode */
                nblocks += 1;
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        EXT3_DATA_TRANS_BLOCKS;
                /* quota chown log for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        case FSFILT_OP_CANCEL_UNLINK:
                /* blocks for log header bitmap update OR
                 * blocks for catalog header bitmap update + unlink of logs */
                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
                        EXT3_DELETE_TRANS_BLOCKS * logs;
                break;
        default: CERROR("unknown transaction start op %d\n", op);
                LBUG();
        }

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(inode->i_sb)->s_journal;
        if (nblocks > journal->j_max_transaction_buffers) {
                CERROR("too many credits %d for op %ux%u using %d instead\n",
                       nblocks, op, logs, journal->j_max_transaction_buffers);
                nblocks = journal->j_max_transaction_buffers;
        }

 journal_start:
        LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
        lock_24kernel();
        handle = journal_start(EXT3_JOURNAL(inode), nblocks);
        unlock_24kernel();

        if (!IS_ERR(handle))
                LASSERT(current->journal_info == handle);
        else
                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
                       op, nblocks, PTR_ERR(handle));
        return handle;
}
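/*
 * Usage sketch (illustrative only, not from this file): callers reach
 * fsfilt_ext3_start()/fsfilt_ext3_commit() through the lustre_fsfilt.h
 * wrappers, roughly:
 *
 *      handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR, oti);
 *      if (IS_ERR(handle))
 *              RETURN(PTR_ERR(handle));
 *      ...modify the inode under this handle...
 *      rc = fsfilt_commit(obd, inode, handle, force_sync);
 *
 * The exact wrapper signatures are defined in lustre_fsfilt.h; the
 * arguments above are an assumption for illustration.
 */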
/*
 * Calculate the number of buffer credits needed to write multiple pages in
 * a single ext3 transaction.  No, this shouldn't be here, but as yet ext3
 * doesn't have a nice API for calculating this sort of thing in advance.
 *
 * See comment above ext3_writepage_trans_blocks for details.  We assume
 * no data journaling is being done, but it does allow for all of the pages
 * being non-contiguous.  If we are guaranteed contiguous pages we could
 * reduce the number of (d)indirect blocks a lot.
 *
 * With N blocks per page and P pages, for each inode we have at most:
 * N*P indirect blocks
 * min(N*P, blocksize/4 + 1) dindirect blocks
 * niocount tindirect blocks
 *
 * For the entire filesystem, we have at most:
 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
 * objcount inode blocks
 * 1 superblock
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 *
 * 1 EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
 */
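/*
 * Worked example (illustrative, assuming 4096-byte blocks and 4096-byte
 * pages so blockpp = 1): one object of 64 contiguous pages.  The per-object
 * loop below adds min(64, 2) = 2 (the ndind > 0 ? ndind : 2 clamp) plus
 * blockpp + 2 = 3 for the first block; the per-niobuf loop adds blockpp = 1
 * for each of the remaining 63 pages and crosses no 1024-block indirect
 * boundary, giving nbitmaps = 68.  ngdblocks = 68 before both are clamped
 * to s_groups_count/s_gdb_count, so needed = 2 + 68 + 68 +
 * EXT3_DATA_TRANS_BLOCKS, plus quota credits when CONFIG_QUOTA is set.
 */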
static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
                                      int niocount, struct niobuf_local *nb)
{
        struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
        __u64 next_indir;
        const int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
        int nbitmaps = 0, ngdblocks;
        int needed = objcount + 1; /* inodes + superblock */
        int i, j;

        for (i = 0, j = 0; i < objcount; i++, fso++) {
                /* two or more dindirect blocks in case we cross boundary */
                int ndind = (long)((nb[j + fso->fso_bufcnt - 1].offset -
                                    nb[j].offset) >>
                                   sb->s_blocksize_bits) /
                        (EXT3_ADDR_PER_BLOCK(sb) * EXT3_ADDR_PER_BLOCK(sb));
                nbitmaps += min(fso->fso_bufcnt, ndind > 0 ? ndind : 2);

                /* leaf, indirect, tindirect blocks for first block */
                nbitmaps += blockpp + 2;

                j += fso->fso_bufcnt;
        }

        next_indir = nb[0].offset +
                (EXT3_ADDR_PER_BLOCK(sb) << sb->s_blocksize_bits);
        for (i = 1; i < niocount; i++) {
                if (nb[i].offset >= next_indir) {
                        nbitmaps++;     /* additional indirect */
                        next_indir = nb[i].offset +
                                (EXT3_ADDR_PER_BLOCK(sb)<<sb->s_blocksize_bits);
                } else if (nb[i].offset != nb[i - 1].offset + sb->s_blocksize) {
                        nbitmaps++;     /* additional indirect */
                }
                nbitmaps += blockpp;    /* each leaf in different group? */
        }

        ngdblocks = nbitmaps;
        if (nbitmaps > EXT3_SB(sb)->s_groups_count)
                nbitmaps = EXT3_SB(sb)->s_groups_count;
        if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
                ngdblocks = EXT3_SB(sb)->s_gdb_count;

        needed += nbitmaps + ngdblocks;

        /* last_rcvd update */
        needed += EXT3_DATA_TRANS_BLOCKS;

#if defined(CONFIG_QUOTA)
        /* We assume that there will be 1 bit set in s_dquot.flags for each
         * quota file that is active.  This is at least true for now.
         */
        needed += hweight32(sb_any_quota_enabled(sb)) *
                EXT3_SINGLEDATA_TRANS_BLOCKS;
#endif

        return needed;
}
/* We have to start a huge journal transaction here to hold all of the
 * metadata for the pages being written here.  This is necessitated by
 * the fact that we do lots of prepare_write operations before we do
 * any of the matching commit_write operations, so even if we split
 * up to use "smaller" transactions none of them could complete until
 * all of them were opened.  By having a single journal transaction,
 * we eliminate duplicate reservations for common blocks like the
 * superblock and group descriptors or bitmaps.
 *
 * We will start the transaction here, but each prepare_write will
 * add a refcount to the transaction, and each commit_write will
 * remove a refcount.  The transaction will be closed when all of
 * the pages have been written.
 */
static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
                                   int niocount, struct niobuf_local *nb,
                                   void *desc_private, int logs)
{
        journal_t *journal;
        handle_t *handle;
        int needed;
        ENTRY;

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
        needed = fsfilt_ext3_credits_needed(objcount, fso, niocount, nb);

        /* The number of blocks we could _possibly_ dirty can be very large.
         * We reduce our request if it is absurd (and we couldn't get that
         * many credits for a single handle anyways).
         *
         * At some point we have to limit the size of I/Os sent at one time,
         * increase the size of the journal, or we have to calculate the
         * actual journal requirements more carefully by checking all of
         * the blocks instead of being maximally pessimistic.  It remains to
         * be seen if this is a real problem or not.
         */
        if (needed > journal->j_max_transaction_buffers) {
                CERROR("want too many journal credits (%d) using %d instead\n",
                       needed, journal->j_max_transaction_buffers);
                needed = journal->j_max_transaction_buffers;
        }

        LASSERTF(needed > 0, "can't start %d credit transaction\n", needed);
        lock_24kernel();
        handle = journal_start(journal, needed);
        unlock_24kernel();
        if (IS_ERR(handle)) {
                CERROR("can't get handle for %d credits: rc = %ld\n", needed,
                       PTR_ERR(handle));
        } else {
                LASSERT(handle->h_buffer_credits >= needed);
                LASSERT(current->journal_info == handle);
        }

        return handle;
}
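/* Stop the handle from fsfilt_ext3_start()/fsfilt_ext3_brw_start(); with
 * force_sync set, h_sync makes journal_stop() wait for the transaction
 * to commit to disk before returning. */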
static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
{
        int rc;
        handle_t *handle = h;

        LASSERT(current->journal_info == handle);
        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */

        lock_24kernel();
        rc = journal_stop(handle);
        unlock_24kernel();

        return rc;
}
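/* Stop the handle without blocking on the commit: kick off the commit of
 * its transaction and return the tid as an opaque cookie for a later
 * fsfilt_ext3_commit_wait(). */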
static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
                                    void **wait_handle)
{
        unsigned long tid;
        transaction_t *transaction;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        unsigned long rtid;
#endif
        handle_t *handle = h;
        journal_t *journal;
        int rc;

        LASSERT(current->journal_info == handle);

        lock_24kernel();
        transaction = handle->h_transaction;
        journal = transaction->t_journal;
        tid = transaction->t_tid;
        /* we don't want to be blocked */
        handle->h_sync = 0;
        rc = journal_stop(handle);
        if (rc) {
                CERROR("error while stopping transaction: %d\n", rc);
                unlock_24kernel();
                return rc;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        rtid = log_start_commit(journal, transaction);
        if (rtid != tid)
                CERROR("strange race: %lu != %lu\n",
                       (unsigned long) tid, (unsigned long) rtid);
#else
        log_start_commit(journal, transaction->t_tid);
#endif
        unlock_24kernel();

        *wait_handle = (void *) tid;
        CDEBUG(D_INODE, "commit async: %lu\n", (unsigned long) tid);
        return 0;
}
static int fsfilt_ext3_commit_wait(struct inode *inode, void *h)
{
        journal_t *journal = EXT3_JOURNAL(inode);
        tid_t tid = (tid_t)(long)h;

        CDEBUG(D_INODE, "commit wait: %lu\n", (unsigned long) tid);
        if (unlikely(is_journal_aborted(journal)))
                return -EIO;

        log_wait_commit(EXT3_JOURNAL(inode), tid);

        if (unlikely(is_journal_aborted(journal)))
                return -EIO;
        return 0;
}
static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
                               struct iattr *iattr, int do_trunc)
{
        struct inode *inode = dentry->d_inode;
        int rc;

        lock_kernel();

        /* A _really_ horrible hack to avoid removing the data stored
         * in the block pointers; this is really the "small" stripe MD data.
         * We can avoid further hackery by virtue of the MDS file size being
         * zero all the time (which doesn't invoke block truncate at unlink
         * time), so we assert we never change the MDS file size from zero. */
        if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
                /* ATTR_SIZE would invoke truncate: clear it */
                iattr->ia_valid &= ~ATTR_SIZE;
                EXT3_I(inode)->i_disksize = inode->i_size = iattr->ia_size;

                /* make sure _something_ gets set - so new inode
                 * goes to disk (probably won't work over XFS) */
                if (!(iattr->ia_valid & (ATTR_MODE | ATTR_MTIME | ATTR_CTIME))){
                        iattr->ia_valid |= ATTR_MODE;
                        iattr->ia_mode = inode->i_mode;
                }
        }

        /* Don't allow setattr to change file type */
        iattr->ia_mode = (inode->i_mode & S_IFMT)|(iattr->ia_mode & ~S_IFMT);

        /* We set these flags on the client, but have already checked perms
         * so don't confuse inode_change_ok. */
        iattr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);

        if (inode->i_op->setattr) {
                rc = inode->i_op->setattr(dentry, iattr);
        } else {
                rc = inode_change_ok(inode, iattr);
                if (!rc)
                        rc = inode_setattr(inode, iattr);
        }

        unlock_kernel();

        return rc;
}
static int fsfilt_ext3_iocontrol(struct inode * inode, struct file *file,
                                 unsigned int cmd, unsigned long arg)
{
        int rc = 0;
        ENTRY;

        if (inode->i_fop->ioctl)
                rc = inode->i_fop->ioctl(inode, file, cmd, arg);
        else
                RETURN(-ENOTTY);

        RETURN(rc);
}
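/* Store the stripe metadata for an inode in the "trusted.lov" EA under an
 * existing journal handle.  The caller must hold i_sem (checked by the
 * LASSERT below). */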
static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
                              void *lmm, int lmm_size)
{
        int rc;

        LASSERT(down_trylock(&inode->i_sem) != 0);

        if (EXT3_I(inode)->i_file_acl /* || large inode EA flag */)
                CWARN("setting EA on %lu/%u again... interesting\n",
                      inode->i_ino, inode->i_generation);

        lock_24kernel();
        rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
                                   XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size, 0);
        unlock_24kernel();

        if (rc)
                CERROR("error adding MD data to inode %lu: rc = %d\n",
                       inode->i_ino, rc);
        return rc;
}
/* Must be called with i_sem held */
static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size)
{
        int rc;

        LASSERT(down_trylock(&inode->i_sem) != 0);

        lock_24kernel();
        rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
                            XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size);
        unlock_24kernel();

        /* This gives us the MD size */
        if (lmm == NULL)
                return (rc == -ENODATA) ? 0 : rc;

        if (rc < 0) {
                CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
                       EXT3_XATTR_INDEX_TRUSTED, XATTR_LUSTRE_MDS_LOV_EA,
                       inode->i_ino, rc);
                memset(lmm, 0, lmm_size);
                return (rc == -ENODATA) ? 0 : rc;
        }

        return rc;
}
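/* fs_send_bio has two variants: on 2.6 kernels it submits a prepared
 * struct bio, on 2.4 kernels it pushes a kiobuf through brw_kiovec(). */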
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct bio *bio)
{
        submit_bio(rw, bio);
        return 0;
}
#else
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct kiobuf *bio)
{
        int rc, blk_per_page;

        rc = brw_kiovec(rw, 1, &bio, inode->i_dev,
                        KIOBUF_GET_BLOCKS(bio), 1 << inode->i_blkbits);
        /*
         * brw_kiovec() returns number of bytes actually written.  If an
         * error occurred after something was written, the error code is
         * returned through kiobuf->errno.  (See bug 6854.)
         */

        blk_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;

        if (rc != (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page) {
                CERROR("short write?  expected %d, wrote %d (%d)\n",
                       (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page,
                       rc, bio->errno);
        }

        if (bio->errno != 0) {
                CERROR("IO error. Wrote %d of %d (%d)\n",
                       rc,
                       (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page,
                       bio->errno);
                rc = bio->errno;
        }

        return rc;
}
#endif
static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
                                    loff_t *off)
{
        struct inode *inode = file->f_dentry->d_inode;
        int rc = 0;

        if (S_ISREG(inode->i_mode))
                rc = file->f_op->read(file, buf, count, off);
        else {
                const int blkbits = inode->i_sb->s_blocksize_bits;
                const int blksize = inode->i_sb->s_blocksize;

                CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
                       count, inode->i_ino, *off);
                while (count > 0) {
                        struct buffer_head *bh;

                        bh = NULL;
                        if (*off < inode->i_size) {
                                int err = 0;

                                bh = ext3_bread(NULL, inode, *off >> blkbits,
                                                0, &err);

                                CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);

                                if (bh) {
                                        memcpy(buf, bh->b_data, blksize);
                                        brelse(bh);
                                } else if (err) {
                                        /* XXX in theory we should just fake
                                         * this buffer and continue like ext3,
                                         * especially if this is a partial read
                                         */
                                        CERROR("error read dir %lu+%llu: %d\n",
                                               inode->i_ino, *off, err);
                                        RETURN(err);
                                }
                        }
                        if (!bh) {
                                struct ext3_dir_entry_2 *fake = (void *)buf;

                                CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
                                memset(fake, 0, sizeof(*fake));
                                fake->rec_len = cpu_to_le32(blksize);
                        }
                        count -= blksize;
                        buf += blksize;
                        *off += blksize;
                        rc += blksize;
                }
        }

        return rc;
}
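/* Invoked by jbd once the transaction carrying this callback has committed
 * to disk; forwards the notification (and any error) to the registered
 * MDS/OST function, then frees the callback data. */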
static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
{
        struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;

        fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);

        OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
}

static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
                                      void *handle, fsfilt_cb_t cb_func,
                                      void *cb_data)
{
        struct fsfilt_cb_data *fcb;

        OBD_SLAB_ALLOC(fcb, fcb_cache, GFP_NOFS, sizeof *fcb);
        if (fcb == NULL)
                RETURN(-ENOMEM);

        fcb->cb_func = cb_func;
        fcb->cb_obd = obd;
        fcb->cb_last_rcvd = last_rcvd;
        fcb->cb_data = cb_data;

        CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
        lock_24kernel();
        journal_callback_set(handle, fsfilt_ext3_cb_func,
                             (struct journal_callback *)fcb);
        unlock_24kernel();

        return 0;
}
/*
 * We need to hack the return value for the free inode counts because
 * the current EA code requires one filesystem block per inode with EAs,
 * so it is possible to run out of blocks before we run out of inodes.
 *
 * This can be removed when the ext3 EA code is fixed.
 */
static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
{
        struct kstatfs sfs;
        int rc;

        memset(&sfs, 0, sizeof(sfs));

        rc = sb->s_op->statfs(sb, &sfs);

        if (!rc && sfs.f_bfree < sfs.f_ffree) {
                sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
                sfs.f_ffree = sfs.f_bfree;
        }

        statfs_pack(osfs, &sfs);
        return rc;
}
static int fsfilt_ext3_sync(struct super_block *sb)
{
        return ext3_force_commit(sb);
}
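/* The code below provides block mapping on top of the CFS extents and
 * mballoc ext3 patches when present; otherwise only the block-mapped
 * ext3_map_inode_page() path further down is used. */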
#if defined(EXT3_MULTIBLOCK_ALLOCATOR) && (!defined(EXT3_EXT_CACHE_NO) || defined(EXT_CACHE_MARK))
#warning "kernel code has old extents/mballoc patch, disabling"
#undef EXT3_MULTIBLOCK_ALLOCATOR
#endif

#ifndef EXT3_EXTENTS_FL
#define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
#endif

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#define ext3_up_truncate_sem(inode)   up_write(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode) down_write(&EXT3_I(inode)->truncate_sem);
#else
#define ext3_up_truncate_sem(inode)   up(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode) down(&EXT3_I(inode)->truncate_sem);
#endif

#include <linux/lustre_version.h>
#if EXT3_EXT_MAGIC == 0xf301
#define ee_start e_start
#define ee_block e_block
#define ee_len   e_num
#endif

#ifndef EXT3_BB_MAX_BLOCKS
#define ext3_mb_new_blocks(handle, inode, goal, count, aflags, err) \
        ext3_new_blocks(handle, inode, count, goal, err)
#endif

struct bpointers {
        unsigned long *blocks;
        int *created;
        unsigned long start;
        int num;
        int init_num;
        int create;
};
static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
                              unsigned long block, int *aflags)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext3_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext)) {
                        /* This prefers to eat into a contiguous extent
                         * rather than find an extent that the whole
                         * request will fit into.  This can fragment data
                         * block allocation and prevents our lovely 1M I/Os
                         * from reaching the disk intact. */
                        if (ex->ee_block + ex->ee_len == block)
                                *aflags |= 1;
                        return ex->ee_start + (block - ex->ee_block);
                }

                /* it looks like the index is empty;
                 * try to find a starting block from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
static int ext3_ext_new_extent_cb(struct ext3_extents_tree *tree,
                                  struct ext3_ext_path *path,
                                  struct ext3_ext_cache *cex)
{
        struct inode *inode = tree->inode;
        struct bpointers *bp = tree->private;
        struct ext3_extent nex;
        int count, err, goal;
        unsigned long pblock;
        unsigned long tgen;
        loff_t new_i_size;
        handle_t *handle;
        int i, aflags = 0;

        i = EXT_DEPTH(tree);
        EXT_ASSERT(i == path->p_depth);
        EXT_ASSERT(path[i].p_hdr);

        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->created) = 0;
                        bp->created++;
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = EXT_GENERATION(tree);
        count = ext3_ext_calc_credits_for_insert(tree, path);
        ext3_up_truncate_sem(inode);

        lock_24kernel();
        handle = journal_start(EXT3_JOURNAL(inode), count+EXT3_ALLOC_NEEDED+1);
        unlock_24kernel();
        if (IS_ERR(handle)) {
                ext3_down_truncate_sem(inode);
                return PTR_ERR(handle);
        }

        ext3_down_truncate_sem(inode);
        if (tgen != EXT_GENERATION(tree)) {
                /* the tree has changed. so path can be invalid at moment */
                lock_24kernel();
                journal_stop(handle);
                unlock_24kernel();
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        goal = ext3_ext_find_goal(inode, path, cex->ec_block, &aflags);
        aflags |= 2; /* blocks have already been reserved */
        pblock = ext3_mb_new_blocks(handle, inode, goal, &count, aflags, &err);
        if (!pblock)
                goto out;
        EXT_ASSERT(count <= cex->ec_len);

        /* insert new extent */
        nex.ee_block = cex->ec_block;
        nex.ee_start = pblock;
        nex.ee_len = count;
        err = ext3_ext_insert_extent(handle, tree, path, &nex);
        if (err)
                goto out;

        /*
         * Putting len of the actual extent we just inserted,
         * we are asking ext3_ext_walk_space() to continue
         * scanning after that block
         */
        cex->ec_len = nex.ee_len;
        cex->ec_start = nex.ee_start;
        BUG_ON(nex.ee_len == 0);
        BUG_ON(nex.ee_block != cex->ec_block);

        /* correct on-disk inode size */
        if (nex.ee_len > 0) {
                new_i_size = (loff_t) nex.ee_block + nex.ee_len;
                new_i_size = new_i_size << inode->i_blkbits;
                if (new_i_size > EXT3_I(inode)->i_disksize) {
                        EXT3_I(inode)->i_disksize = new_i_size;
                        err = ext3_mark_inode_dirty(handle, inode);
                }
        }

out:
        lock_24kernel();
        journal_stop(handle);
        unlock_24kernel();
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                               bp->start, bp->init_num);
                        CERROR("current extent: %u/%u/%u %d\n",
                               cex->ec_block, cex->ec_len,
                               cex->ec_start, cex->ec_type);
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
                                *(bp->created) = 0;
                        else
                                *(bp->created) = 1;
                        bp->created++;
                        *(bp->blocks) = cex->ec_start + i;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }

        return err;
}
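/* Map blocks [block, block + num) of @inode to physical blocks via the
 * extents tree, allocating new blocks when create is set; results are
 * written through the blocks[]/created[] cursors in struct bpointers. */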
int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
                       unsigned long num, unsigned long *blocks,
                       int *created, int create)
{
        struct ext3_extents_tree tree;
        struct bpointers bp;
        int err;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               block, block + num, (unsigned) inode->i_ino);

        ext3_init_tree_desc(&tree, inode);
        tree.private = &bp;
        bp.blocks = blocks;
        bp.created = created;
        bp.start = block;
        bp.init_num = bp.num = num;
        bp.create = create;

        ext3_down_truncate_sem(inode);
        err = ext3_ext_walk_space(&tree, block, num, ext3_ext_new_extent_cb);
        ext3_ext_invalidate_cache(&tree);
        ext3_up_truncate_sem(inode);

        return err;
}
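/* Map a whole array of pages in one pass.  Each contiguous run of page
 * indices becomes one fsfilt_map_nblocks() call, so a well-formed 1M I/O
 * is mapped (and, with mballoc, allocated) as a single extent request. */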
int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int *created, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
               inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
                created += blocks_per_page * clen;
        }

        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
cleanup:
        return rc;
}
extern int ext3_map_inode_page(struct inode *inode, struct page *page,
                               unsigned long *blocks, int *created, int create);
int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
                                   int pages, unsigned long *blocks,
                                   int *created, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        unsigned long *b;
        int rc = 0, i, *cr;

        for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
                rc = ext3_map_inode_page(inode, *page, b, cr, create);
                if (rc) {
                        CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
                               inode->i_ino, *b, *cr, create, rc);
                        break;
                }

                b += blocks_per_page;
                cr += blocks_per_page;
        }
        return rc;
}
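/* Entry point for fs_map_inode_pages: extent-mapped inodes take the
 * multiblock-allocator path above; block-mapped inodes fall back to
 * ext3_map_inode_page(), serialized by the caller's optional semaphore. */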
int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
                                int pages, unsigned long *blocks,
                                int *created, int create,
                                struct semaphore *optional_sem)
{
        int rc;
#ifdef EXT3_MULTIBLOCK_ALLOCATOR
        if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
                rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
                                                     blocks, created, create);
                return rc;
        }
#endif
        if (optional_sem != NULL)
                down(optional_sem);
        rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
                                            created, create);
        if (optional_sem != NULL)
                up(optional_sem);

        return rc;
}
extern int ext3_prep_san_write(struct inode *inode, long *blocks,
                               int nblocks, loff_t newsize);
static int fsfilt_ext3_prep_san_write(struct inode *inode, long *blocks,
                                      int nblocks, loff_t newsize)
{
        return ext3_prep_san_write(inode, blocks, nblocks, newsize);
}
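/* Read a record from a llog file one filesystem block at a time using
 * ext3_bread(), bounded by i_size; advances *offs past the bytes read. */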
static int fsfilt_ext3_read_record(struct file * file, void *buf,
                                   int size, loff_t *offs)
{
        struct inode *inode = file->f_dentry->d_inode;
        unsigned long block;
        struct buffer_head *bh;
        int err, blocksize, csize, boffs;

        /* prevent reading after eof */
        lock_kernel();
        if (inode->i_size < *offs + size) {
                size = inode->i_size - *offs;
                unlock_kernel();
                if (size < 0) {
                        CERROR("size %llu is too short for read %u@%llu\n",
                               inode->i_size, size, *offs);
                        return -EIO;
                } else if (size == 0) {
                        return 0;
                }
        } else {
                unlock_kernel();
        }

        blocksize = 1 << inode->i_blkbits;

        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ext3_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("can't read block: %d\n", err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return 0;
}
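/* Journaled counterpart of fsfilt_ext3_read_record(): reserves
 * EXT3_DATA_TRANS_BLOCKS credits per touched block, copies the data in
 * through the buffer cache, updates i_size/i_disksize, and (with
 * force_sync) commits synchronously. */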
static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
                                    loff_t *offs, int force_sync)
{
        struct buffer_head *bh = NULL;
        unsigned long block;
        struct inode *inode = file->f_dentry->d_inode;
        loff_t old_size = inode->i_size, offset = *offs;
        loff_t new_size = inode->i_size;
        journal_t *journal;
        handle_t *handle;
        int err, block_count = 0, blocksize, size, boffs;

        /* Determine how many transaction credits are needed */
        blocksize = 1 << inode->i_blkbits;
        block_count = (*offs & (blocksize - 1)) + bufsize;
        block_count = (block_count + blocksize - 1) >> inode->i_blkbits;

        journal = EXT3_SB(inode->i_sb)->s_journal;
        lock_24kernel();
        handle = journal_start(journal,
                               block_count * EXT3_DATA_TRANS_BLOCKS + 2);
        unlock_24kernel();
        if (IS_ERR(handle)) {
                CERROR("can't start transaction\n");
                return PTR_ERR(handle);
        }

        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                bh = ext3_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("can't read/create block: %d\n", err);
                        goto out;
                }

                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        goto out;
                }
                LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err) {
                        CERROR("journal_dirty_metadata() returned error %d\n",
                               err);
                        goto out;
                }
                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                bufsize -= size;
                buf += size;
        }

        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */
out:
        if (bh)
                brelse(bh);

        /* correct in-core and on-disk sizes */
        if (new_size > inode->i_size) {
                lock_kernel();
                if (new_size > inode->i_size)
                        inode->i_size = new_size;
                if (inode->i_size > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = inode->i_size;
                if (inode->i_size > old_size)
                        mark_inode_dirty(inode);
                unlock_kernel();
        }

        lock_24kernel();
        journal_stop(handle);
        unlock_24kernel();

        if (err == 0)
                *offs = offset;
        return err;
}
static int fsfilt_ext3_setup(struct super_block *sb)
{
#if 0
        EXT3_SB(sb)->dx_lock = fsfilt_ext3_dx_lock;
        EXT3_SB(sb)->dx_unlock = fsfilt_ext3_dx_unlock;
#endif
#ifdef S_PDIROPS
        CWARN("Enabling PDIROPS\n");
        set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
        sb->s_flags |= S_PDIROPS;
#endif
        return 0;
}
/* If fso is NULL, op is FSFILT operation, otherwise op is number of fso
   objects. Logs is number of logfiles to update */
static int fsfilt_ext3_get_op_len(int op, struct fsfilt_objinfo *fso, int logs)
{
        if (!fso) {
                switch(op) {
                case FSFILT_OP_CREATE:
                        /* directory leaf, index & indirect & EA */
                        return 4 + 3 * logs;
                case FSFILT_OP_UNLINK:
                        return 3 * logs;
                }
        } else {
                int i;
                int needed = 0;
                struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
                int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
                int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
                for (i = 0; i < op; i++, fso++) {
                        int nblocks = fso->fso_bufcnt * blockpp;
                        int ndindirect = min(nblocks, addrpp + 1);
                        int nindir = nblocks + ndindirect + 1;

                        needed += nindir;
                }
                return needed + 3 * logs;
        }

        return 0;
}
static inline struct ext3_group_desc *
get_group_desc(struct super_block *sb, int group)
{
        unsigned long desc_block, desc;
        struct ext3_group_desc *gdp;

        desc_block = group / EXT3_DESC_PER_BLOCK(sb);
        desc = group % EXT3_DESC_PER_BLOCK(sb);
        gdp = (struct ext3_group_desc *)
                EXT3_SB(sb)->s_group_desc[desc_block]->b_data;

        return gdp + desc;
}

static inline struct buffer_head *
read_inode_bitmap(struct super_block *sb, unsigned long group)
{
        struct ext3_group_desc *desc;
        struct buffer_head *bh;

        desc = get_group_desc(sb, group);
        bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));

        return bh;
}

static inline struct inode *ext3_iget_inuse(struct super_block *sb,
                                            struct buffer_head *bitmap_bh,
                                            int index, unsigned long ino)
{
        struct inode *inode = NULL;

        if (ext3_test_bit(index, bitmap_bh->b_data))
                inode = iget(sb, ino);

        return inode;
}
#ifdef HAVE_QUOTA_SUPPORT
# include "fsfilt_ext3_quota.h"
#endif
static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_start               = fsfilt_ext3_start,
        .fs_brw_start           = fsfilt_ext3_brw_start,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_commit_async        = fsfilt_ext3_commit_async,
        .fs_commit_wait         = fsfilt_ext3_commit_wait,
        .fs_setattr             = fsfilt_ext3_setattr,
        .fs_iocontrol           = fsfilt_ext3_iocontrol,
        .fs_set_md              = fsfilt_ext3_set_md,
        .fs_get_md              = fsfilt_ext3_get_md,
        .fs_readpage            = fsfilt_ext3_readpage,
        .fs_add_journal_cb      = fsfilt_ext3_add_journal_cb,
        .fs_statfs              = fsfilt_ext3_statfs,
        .fs_sync                = fsfilt_ext3_sync,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_prep_san_write      = fsfilt_ext3_prep_san_write,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
        .fs_send_bio            = fsfilt_ext3_send_bio,
        .fs_get_op_len          = fsfilt_ext3_get_op_len,
#ifdef HAVE_QUOTA_SUPPORT
        .fs_quotactl            = fsfilt_ext3_quotactl,
        .fs_quotacheck          = fsfilt_ext3_quotacheck,
        .fs_quotainfo           = fsfilt_ext3_quotainfo,
        .fs_dquot               = fsfilt_ext3_dquot,
#endif
};
static int __init fsfilt_ext3_init(void)
{
        int rc;

        fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
                                      sizeof(struct fsfilt_cb_data), 0,
                                      0, NULL, NULL);
        if (!fcb_cache) {
                CERROR("error allocating fsfilt journal callback cache\n");
                GOTO(out, rc = -ENOMEM);
        }

        rc = fsfilt_register_ops(&fsfilt_ext3_ops);

        if (rc)
                kmem_cache_destroy(fcb_cache);
out:
        return rc;
}

static void __exit fsfilt_ext3_exit(void)
{
        fsfilt_unregister_ops(&fsfilt_ext3_ops);
        LASSERT(kmem_cache_destroy(fcb_cache) == 0);
}
module_init(fsfilt_ext3_init);
module_exit(fsfilt_ext3_exit);

MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
MODULE_LICENSE("GPL");