/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lustre/lib/fsfilt_ext3.c
 * Lustre filesystem abstraction routines
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 * Author: Andreas Dilger <adilger@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_FILTER

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/ext3_xattr.h>
#else
#include <ext3/xattr.h>
#endif

#include <linux/kp30.h>
#include <linux/lustre_fsfilt.h>
#include <linux/obd.h>
#include <linux/obd_class.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#include <linux/ext3_extents.h>
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
# define lock_24kernel()        lock_kernel()
# define unlock_24kernel()      unlock_kernel()
#else
# define lock_24kernel()        do {} while (0)
# define unlock_24kernel()      do {} while (0)
#endif
static kmem_cache_t *fcb_cache;
static atomic_t fcb_cache_count = ATOMIC_INIT(0);

struct fsfilt_cb_data {
        struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};

#ifndef EXT3_XATTR_INDEX_TRUSTED        /* temporary until we hit l28 kernel */
#define EXT3_XATTR_INDEX_TRUSTED        4
#endif
#define XATTR_LUSTRE_MDS_LOV_EA         "lov"

#define EXT3_XATTR_INDEX_LUSTRE         5                         /* old */
#define XATTR_LUSTRE_MDS_OBJID          "system.lustre_mds_objid" /* old */
/*
 * We don't currently need any additional blocks for rmdir and
 * unlink transactions because we are storing the OST oa_id inside
 * the inode (which we will be changing anyway as part of this
 * transaction).
 */
static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
                               int logs)
{
        /* For updates to the last received file */
        int nblocks = EXT3_SINGLEDATA_TRANS_BLOCKS;
        journal_t *journal;
        void *handle;

        if (current->journal_info) {
                CDEBUG(D_INODE, "increasing refcount on %p\n",
                       current->journal_info);
                goto journal_start;
        }

        switch(op) {
        case FSFILT_OP_RMDIR:
        case FSFILT_OP_UNLINK:
                /* delete one file + create/update logs for each stripe */
                nblocks += EXT3_DELETE_TRANS_BLOCKS;
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        case FSFILT_OP_RENAME:
                /* modify additional directory */
                nblocks += EXT3_SINGLEDATA_TRANS_BLOCKS;
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                /* no break */
        case FSFILT_OP_SYMLINK:
                /* additional block + block bitmap + GDT for long symlink */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_CREATE:
                /* create/update logs for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                /* no break */
        case FSFILT_OP_MKDIR:
        case FSFILT_OP_MKNOD:
                /* modify one inode + block bitmap + GDT */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_LINK:
                /* modify parent directory */
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        EXT3_DATA_TRANS_BLOCKS;
                break;
        case FSFILT_OP_SETATTR:
                /* Setattr on inode */
                nblocks += 1;
                break;
        case FSFILT_OP_CANCEL_UNLINK:
                /* blocks for log header bitmap update OR
                 * blocks for catalog header bitmap update + unlink of logs */
                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
                        EXT3_DELETE_TRANS_BLOCKS * logs;
                break;
        default: CERROR("unknown transaction start op %d\n", op);
                LBUG();
        }
        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(inode->i_sb)->s_journal;
        if (nblocks > journal->j_max_transaction_buffers) {
                CERROR("too many credits %d for op %ux%u using %d instead\n",
                       nblocks, op, logs, journal->j_max_transaction_buffers);
                nblocks = journal->j_max_transaction_buffers;
        }

 journal_start:
        LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
        lock_24kernel();
        handle = journal_start(EXT3_JOURNAL(inode), nblocks);
        unlock_24kernel();
        if (!IS_ERR(handle))
                LASSERT(current->journal_info == handle);
        else
                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
                       op, nblocks, PTR_ERR(handle));
        return handle;
}
/*
 * Calculate the number of buffer credits needed to write multiple pages in
 * a single ext3 transaction.  No, this shouldn't be here, but as yet ext3
 * doesn't have a nice API for calculating this sort of thing in advance.
 *
 * See comment above ext3_writepage_trans_blocks for details.  We assume
 * no data journaling is being done, but it does allow for all of the pages
 * being non-contiguous.  If we are guaranteed contiguous pages we could
 * reduce the number of (d)indirect blocks a lot.
 *
 * With N blocks per page and P pages, for each inode we have at most:
 * N*P indirect blocks
 * min(N*P, blocksize/4 + 1) dindirect blocks
 *
 * For the entire filesystem, we have at most:
 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
 * objcount inode blocks
 * 1 superblock
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 *
 * 1 EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
 */
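/*
 * Worked example (illustrative numbers only, not taken from a real run):
 * with 4kB blocks and 4kB pages, N = 1 block per page.  For a single
 * object (objcount = 1) of P = 16 scattered pages, the per-inode terms
 * above give at most N*P = 16 indirect blocks and
 * min(N*P, 4096/4 + 1) = 16 dindirect blocks.  The function below then
 * adds objcount + 1 for the inode and superblock, the bitmap and group
 * descriptor counts (capped by s_groups_count and s_gdb_count), and
 * EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
 */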
static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
                                      int niocount, struct niobuf_local *nb)
{
        struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
        __u64 next_indir;
        const int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
        int nbitmaps = 0, ngdblocks;
        int needed = objcount + 1; /* inodes + superblock */
        int i, j;

        for (i = 0, j = 0; i < objcount; i++, fso++) {
                /* two or more dindirect blocks in case we cross boundary */
                int ndind = (long)((nb[j + fso->fso_bufcnt - 1].offset -
                                    nb[j].offset) >>
                                   sb->s_blocksize_bits) /
                        (EXT3_ADDR_PER_BLOCK(sb) * EXT3_ADDR_PER_BLOCK(sb));
                nbitmaps += min(fso->fso_bufcnt, ndind > 0 ? ndind : 2);

                /* leaf, indirect, tindirect blocks for first block */
                nbitmaps += blockpp + 2;

                j += fso->fso_bufcnt;
        }

        next_indir = nb[0].offset +
                (EXT3_ADDR_PER_BLOCK(sb) << sb->s_blocksize_bits);
        for (i = 1; i < niocount; i++) {
                if (nb[i].offset >= next_indir) {
                        nbitmaps++;     /* additional indirect */
                        next_indir = nb[i].offset +
                                (EXT3_ADDR_PER_BLOCK(sb)<<sb->s_blocksize_bits);
                } else if (nb[i].offset != nb[i - 1].offset + sb->s_blocksize) {
                        nbitmaps++;     /* additional indirect */
                }
                nbitmaps += blockpp;    /* each leaf in different group? */
        }

        ngdblocks = nbitmaps;
        if (nbitmaps > EXT3_SB(sb)->s_groups_count)
                nbitmaps = EXT3_SB(sb)->s_groups_count;
        if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
                ngdblocks = EXT3_SB(sb)->s_gdb_count;

        needed += nbitmaps + ngdblocks;

        /* last_rcvd update */
        needed += EXT3_DATA_TRANS_BLOCKS;

#if defined(CONFIG_QUOTA) && !defined(__x86_64__) /* XXX */
        /* We assume that there will be 1 bit set in s_dquot.flags for each
         * quota file that is active.  This is at least true for now.
         */
        needed += hweight32(sb_any_quota_enabled(sb)) *
                EXT3_SINGLEDATA_TRANS_BLOCKS;
#endif

        return needed;
}
/* We have to start a huge journal transaction here to hold all of the
 * metadata for the pages being written here.  This is necessitated by
 * the fact that we do lots of prepare_write operations before we do
 * any of the matching commit_write operations, so even if we split
 * up to use "smaller" transactions none of them could complete until
 * all of them were opened.  By having a single journal transaction,
 * we eliminate duplicate reservations for common blocks like the
 * superblock and group descriptors or bitmaps.
 *
 * We will start the transaction here, but each prepare_write will
 * add a refcount to the transaction, and each commit_write will
 * remove a refcount.  The transaction will be closed when all of
 * the pages have been written.
 */
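/*
 * Sketch of the resulting lifecycle (illustrative only; it restates the
 * comment above rather than quoting any actual caller):
 *
 *      handle = fsfilt_ext3_brw_start(...);    one big reservation
 *      for each page:
 *              prepare_write(...)              takes a ref on the handle
 *              commit_write(...)               drops that ref
 *      fsfilt_ext3_commit(...);                last ref closes the handle
 */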
static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
                                   int niocount, struct niobuf_local *nb,
                                   void *desc_private, int logs)
{
        journal_t *journal;
        handle_t *handle;
        int needed;
        ENTRY;
        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
        needed = fsfilt_ext3_credits_needed(objcount, fso, niocount, nb);

        /* The number of blocks we could _possibly_ dirty can be very large.
         * We reduce our request if it is absurd (and we couldn't get that
         * many credits for a single handle anyway).
         *
         * At some point we have to limit the size of I/Os sent at one time,
         * increase the size of the journal, or we have to calculate the
         * actual journal requirements more carefully by checking all of
         * the blocks instead of being maximally pessimistic.  It remains to
         * be seen if this is a real problem or not.
         */
        if (needed > journal->j_max_transaction_buffers) {
                CERROR("want too many journal credits (%d) using %d instead\n",
                       needed, journal->j_max_transaction_buffers);
                needed = journal->j_max_transaction_buffers;
        }

        LASSERTF(needed > 0, "can't start %d credit transaction\n", needed);
        lock_24kernel();
        handle = journal_start(journal, needed);
        unlock_24kernel();
        if (IS_ERR(handle)) {
                CERROR("can't get handle for %d credits: rc = %ld\n", needed,
                       PTR_ERR(handle));
        } else {
                LASSERT(handle->h_buffer_credits >= needed);
                LASSERT(current->journal_info == handle);
        }

        RETURN(handle);
}
static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
{
        int rc;
        handle_t *handle = h;

        LASSERT(current->journal_info == handle);
        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */

        lock_24kernel();
        rc = journal_stop(handle);
        unlock_24kernel();

        return rc;
}
static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
                                    void **wait_handle)
{
        unsigned long tid;
        transaction_t *transaction;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        unsigned long rtid;
#endif
        handle_t *handle = h;
        journal_t *journal;
        int rc;

        LASSERT(current->journal_info == handle);

        lock_24kernel();
        transaction = handle->h_transaction;
        journal = transaction->t_journal;
        tid = transaction->t_tid;
        /* we don't want to be blocked */
        handle->h_sync = 0;
        rc = journal_stop(handle);
        if (rc) {
                CERROR("error while stopping transaction: %d\n", rc);
                return rc;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        rtid = log_start_commit(journal, transaction);
        if (rtid != tid)
                CERROR("strange race: %lu != %lu\n",
                       (unsigned long) tid, (unsigned long) rtid);
#else
        log_start_commit(journal, transaction->t_tid);
#endif
        unlock_24kernel();

        *wait_handle = (void *) tid;
        CDEBUG(D_INODE, "commit async: %lu\n", (unsigned long) tid);
        return 0;
}
static int fsfilt_ext3_commit_wait(struct inode *inode, void *h)
{
        tid_t tid = (tid_t)(long)h;

        CDEBUG(D_INODE, "commit wait: %lu\n", (unsigned long) tid);
        if (is_journal_aborted(EXT3_JOURNAL(inode)))
                return -EIO;

        log_wait_commit(EXT3_JOURNAL(inode), tid);

        return 0;
}
static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
                               struct iattr *iattr, int do_trunc)
{
        struct inode *inode = dentry->d_inode;
        int rc;

        lock_kernel();

        /* A _really_ horrible hack to avoid removing the data stored
         * in the block pointers; this is really the "small" stripe MD data.
         * We can avoid further hackery by virtue of the MDS file size being
         * zero all the time (which doesn't invoke block truncate at unlink
         * time), so we assert we never change the MDS file size from zero. */
        if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
                /* ATTR_SIZE would invoke truncate: clear it */
                iattr->ia_valid &= ~ATTR_SIZE;
                EXT3_I(inode)->i_disksize = inode->i_size = iattr->ia_size;

                /* make sure _something_ gets set - so the new inode
                 * goes to disk (probably won't work over XFS) */
                if (!(iattr->ia_valid & (ATTR_MODE | ATTR_MTIME | ATTR_CTIME))){
                        iattr->ia_valid |= ATTR_MODE;
                        iattr->ia_mode = inode->i_mode;
                }
        }

        /* Don't allow setattr to change file type */
        iattr->ia_mode = (inode->i_mode & S_IFMT)|(iattr->ia_mode & ~S_IFMT);

        /* We set these flags on the client, but have already checked perms
         * so don't confuse inode_change_ok. */
        iattr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);

        if (inode->i_op->setattr) {
                rc = inode->i_op->setattr(dentry, iattr);
        } else {
                rc = inode_change_ok(inode, iattr);
                if (!rc)
                        rc = inode_setattr(inode, iattr);
        }

        unlock_kernel();

        return rc;
}
static int fsfilt_ext3_iocontrol(struct inode * inode, struct file *file,
                                 unsigned int cmd, unsigned long arg)
{
        int rc = -ENOTTY;
        ENTRY;

        if (inode->i_fop->ioctl)
                rc = inode->i_fop->ioctl(inode, file, cmd, arg);

        RETURN(rc);
}
static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
                              void *lmm, int lmm_size)
{
        int rc, old_ea = 0;

        LASSERT(down_trylock(&inode->i_sem) != 0);

#ifdef INLINE_EA  /* can go away before 1.0 - just for testing bug 2097 now */
        /* Nasty hack city - store stripe MD data in the block pointers if
         * it will fit, because putting it in an EA currently kills the MDS
         * performance.  We'll fix this with "fast EAs" in the future.
         */
        if (inode->i_blocks == 0 && lmm_size <= sizeof(EXT3_I(inode)->i_data) -
                                        sizeof(EXT3_I(inode)->i_data[0])) {
                unsigned old_size = EXT3_I(inode)->i_data[0];
                if (old_size != 0) {
                        LASSERT(old_size < sizeof(EXT3_I(inode)->i_data));
                        CERROR("setting EA on %lu/%u again... interesting\n",
                               inode->i_ino, inode->i_generation);
                }

                EXT3_I(inode)->i_data[0] = cpu_to_le32(lmm_size);
                memcpy(&EXT3_I(inode)->i_data[1], lmm, lmm_size);
                mark_inode_dirty(inode);
                return 0;
        }
#endif
#ifdef OLD_EA
        /* keep this when we get rid of OLD_EA (too noisy during conversion) */
        if (EXT3_I(inode)->i_file_acl /* || large inode EA flag */) {
                CWARN("setting EA on %lu/%u again... interesting\n",
                      inode->i_ino, inode->i_generation);
                old_ea = 1;
        }

        /* this can go away before 1.0.  For bug 2097 testing only. */
        rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_LUSTRE,
                                   XATTR_LUSTRE_MDS_OBJID, lmm, lmm_size, 0);
#else
        rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
                                   XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size, 0);
#endif

        /* This tries to delete the old-format LOV EA, but only as long as we
         * have successfully saved the new-format LOV EA (we can always try
         * the conversion again the next time the file is accessed).  It is
         * possible (although unlikely) that the new-format LOV EA couldn't be
         * saved because it ran out of space, but we would need a file striped
         * over at least 123 OSTs before the two EAs filled a 4kB block.
         *
         * This can be removed when all filesystems have converted to the
         * new EA format, but otherwise adds little if any overhead.  If we
         * wanted backward compatibility for existing files, we could keep
         * the old EA around for a while but we'd have to clean it up later. */
        if (rc >= 0 && old_ea) {
                int err = ext3_xattr_set_handle(handle, inode,
                                                EXT3_XATTR_INDEX_LUSTRE,
                                                XATTR_LUSTRE_MDS_OBJID,
                                                NULL, 0, 0);
                if (err)
                        CERROR("error deleting old LOV EA on %lu/%u: rc %d\n",
                               inode->i_ino, inode->i_generation, err);
        }

        if (rc)
                CERROR("error adding MD data to inode %lu: rc = %d\n",
                       inode->i_ino, rc);
        return rc;
}
/* Must be called with i_sem held */
static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size)
{
        int rc;

        LASSERT(down_trylock(&inode->i_sem) != 0);

        /* Keep support for reading "inline EAs" until we convert
         * users over to new format entirely.  See bug 841/2097. */
        if (inode->i_blocks == 0 && EXT3_I(inode)->i_data[0]) {
                unsigned size = le32_to_cpu(EXT3_I(inode)->i_data[0]);
                void *handle;

                LASSERT(size < sizeof(EXT3_I(inode)->i_data));
                if (lmm) {
                        if (size > lmm_size) {
                                CERROR("inline EA on %lu/%u bad size %u > %u\n",
                                       inode->i_ino, inode->i_generation,
                                       size, lmm_size);
                                return -ERANGE;
                        }
                        memcpy(lmm, &EXT3_I(inode)->i_data[1], size);
                }

#ifndef INLINE_EA
                /* migrate LOV EA data to external block - keep same format */
                CWARN("DEBUG: migrate inline EA for inode %lu/%u to block\n",
                      inode->i_ino, inode->i_generation);

                handle = journal_start(EXT3_JOURNAL(inode),
                                       EXT3_XATTR_TRANS_BLOCKS);
                if (!IS_ERR(handle)) {
                        int err;
                        rc = fsfilt_ext3_set_md(inode, handle,
                                                &EXT3_I(inode)->i_data[1],size);
                        if (rc == 0) {
                                memset(EXT3_I(inode)->i_data, 0,
                                       sizeof(EXT3_I(inode)->i_data));
                                mark_inode_dirty(inode);
                        }
                        err = journal_stop(handle);
                        if (!rc)
                                rc = err;
                } else
                        rc = PTR_ERR(handle);
#endif
                return size;
        }

        rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
                            XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size);
        /* try old EA type if new one failed - MDS will convert it for us */
        if (rc == -ENODATA) {
                CDEBUG(D_INFO,"failed new LOV EA %d/%s from inode %lu: rc %d\n",
                       EXT3_XATTR_INDEX_TRUSTED, XATTR_LUSTRE_MDS_LOV_EA,
                       inode->i_ino, rc);

                rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_LUSTRE,
                                    XATTR_LUSTRE_MDS_OBJID, lmm, lmm_size);
        }

        /* This gives us the MD size */
        if (lmm == NULL)
                return (rc == -ENODATA) ? 0 : rc;

        if (rc < 0) {
                CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
                       EXT3_XATTR_INDEX_LUSTRE, XATTR_LUSTRE_MDS_OBJID,
                       inode->i_ino, rc);
                memset(lmm, 0, lmm_size);
                return (rc == -ENODATA) ? 0 : rc;
        }

        return rc;
}
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct bio *bio)
{
        submit_bio(rw, bio);
        return 0;
}
#else
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct kiobuf *bio)
{
        int rc, blocks_per_page;

        rc = brw_kiovec(rw, 1, &bio, inode->i_dev,
                        bio->blocks, 1 << inode->i_blkbits);

        blocks_per_page = PAGE_SIZE >> inode->i_blkbits;

        if (rc != (1 << inode->i_blkbits) * bio->nr_pages * blocks_per_page) {
                CERROR("short write?  expected %d, wrote %d\n",
                       (1 << inode->i_blkbits) * bio->nr_pages *
                       blocks_per_page, rc);
        }

        return rc;
}
#endif
static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
                                    loff_t *off)
{
        struct inode *inode = file->f_dentry->d_inode;
        int rc = 0;

        if (S_ISREG(inode->i_mode))
                rc = file->f_op->read(file, buf, count, off);
        else {
                const int blkbits = inode->i_sb->s_blocksize_bits;
                const int blksize = inode->i_sb->s_blocksize;

                CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
                       count, inode->i_ino, *off);
                while (count > 0) {
                        struct buffer_head *bh;

                        bh = NULL;
                        if (*off < inode->i_size) {
                                int err = 0;

                                bh = ext3_bread(NULL, inode, *off >> blkbits,
                                                0, &err);

                                CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);

                                if (bh) {
                                        memcpy(buf, bh->b_data, blksize);
                                        brelse(bh);
                                } else if (err) {
                                        /* XXX in theory we should just fake
                                         * this buffer and continue like ext3,
                                         * especially if this is a partial read
                                         */
                                        CERROR("error read dir %lu+%llu: %d\n",
                                               inode->i_ino, *off, err);
                                        RETURN(err);
                                }
                        } else {
                                struct ext3_dir_entry_2 *fake = (void *)buf;

                                CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
                                memset(fake, 0, sizeof(*fake));
                                fake->rec_len = cpu_to_le32(blksize);
                        }
                        count -= blksize;
                        buf += blksize;
                        *off += blksize;
                        rc += blksize;
                }
        }

        return rc;
}
static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
{
        struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;

        fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);

        OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
        atomic_dec(&fcb_cache_count);
}

static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
                                      void *handle, fsfilt_cb_t cb_func,
                                      void *cb_data)
{
        struct fsfilt_cb_data *fcb;

        OBD_SLAB_ALLOC(fcb, fcb_cache, GFP_NOFS, sizeof *fcb);
        if (fcb == NULL)
                RETURN(-ENOMEM);

        atomic_inc(&fcb_cache_count);
        fcb->cb_func = cb_func;
        fcb->cb_obd = obd;
        fcb->cb_last_rcvd = last_rcvd;
        fcb->cb_data = cb_data;

        CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
        lock_kernel();
        journal_callback_set(handle, fsfilt_ext3_cb_func,
                             (struct journal_callback *)fcb);
        unlock_kernel();

        return 0;
}
/*
 * We need to hack the return value for the free inode counts because
 * the current EA code requires one filesystem block per inode with EAs,
 * so it is possible to run out of blocks before we run out of inodes.
 *
 * This can be removed when the ext3 EA code is fixed.
 */
static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
{
        struct kstatfs sfs;
        int rc;

        memset(&sfs, 0, sizeof(sfs));

        rc = sb->s_op->statfs(sb, &sfs);

        if (!rc && sfs.f_bfree < sfs.f_ffree) {
                sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
                sfs.f_ffree = sfs.f_bfree;
        }

        statfs_pack(osfs, &sfs);
        return rc;
}
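/*
 * Worked example of the adjustment above (invented numbers): if statfs
 * reports f_files = 1000 total inodes, f_ffree = 800 free inodes but only
 * f_bfree = 100 free blocks, then at most 100 more EA-bearing inodes can
 * actually be created, so we report f_files = (1000 - 800) + 100 = 300
 * and f_ffree = 100 instead.
 */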
static int fsfilt_ext3_sync(struct super_block *sb)
{
        return ext3_force_commit(sb);
}
#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#define ext3_up_truncate_sem(inode)  up_write(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode)  down_write(&EXT3_I(inode)->truncate_sem);
#else
#define ext3_up_truncate_sem(inode)  up(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode)  down(&EXT3_I(inode)->truncate_sem);
#endif

#include <linux/lustre_version.h>
#if EXT3_EXT_MAGIC == 0xf301
#define ee_start e_start
#define ee_block e_block
#define ee_len   e_num
#endif
#ifndef EXT3_BB_MAX_BLOCKS
#define ext3_mb_new_blocks(handle, inode, goal, count, aflags, err) \
        ext3_new_blocks(handle, inode, count, goal, err)
#endif

struct bpointers {
        unsigned long *blocks;
        int *created;
        unsigned long start;
        int num;
        int init_num;
        int create;
};
static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
                              unsigned long block, int *aflags)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext3_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext)) {
                        /* This prefers to eat into a contiguous extent
                         * rather than find an extent that the whole
                         * request will fit into.  This can fragment data
                         * block allocation and prevents our lovely 1M I/Os
                         * from reaching the disk intact. */
                        if (ex->ee_block + ex->ee_len == block)
                                *aflags |= 1;
                        return ex->ee_start + (block - ex->ee_block);
                }

                /* it looks like the index is empty;
                 * try to find a start from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
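/*
 * Example of the fallback heuristic above (illustrative numbers): for a
 * process with pid 4103 writing logical block 7 of an inode in a group
 * with 32768 blocks per group, colour = (4103 % 16) * (32768 / 16) =
 * 7 * 2048 = 14336, so the allocation goal lands 14336 blocks into the
 * inode's group plus the logical block offset, spreading concurrent
 * writers across the group.
 */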
static int ext3_ext_new_extent_cb(struct ext3_extents_tree *tree,
                                  struct ext3_ext_path *path,
                                  struct ext3_extent *newex, int exist)
{
        struct inode *inode = tree->inode;
        struct bpointers *bp = tree->private;
        int count, err, goal;
        unsigned long pblock;
        unsigned long tgen;
        loff_t new_i_size;
        handle_t *handle;
        int i, aflags = 0;

        i = EXT_DEPTH(tree);
        EXT_ASSERT(i == path->p_depth);
        EXT_ASSERT(path[i].p_hdr);

        if (exist) {
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (newex->ee_block < bp->start)
                        i = bp->start - newex->ee_block;
                if (i >= newex->ee_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, newex->ee_len);
                for (; i < newex->ee_len && bp->num; i++) {
                        *(bp->created) = 0;
                        bp->created++;
                        *(bp->blocks) = newex->ee_start + i;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
                return EXT_CONTINUE;
        }

        tgen = EXT_GENERATION(tree);
        count = ext3_ext_calc_credits_for_insert(tree, path);
        ext3_up_truncate_sem(inode);

        lock_24kernel();
        handle = journal_start(EXT3_JOURNAL(inode), count + EXT3_ALLOC_NEEDED + 1);
        unlock_24kernel();
        if (IS_ERR(handle)) {
                ext3_down_truncate_sem(inode);
                return PTR_ERR(handle);
        }

        if (tgen != EXT_GENERATION(tree)) {
                /* the tree has changed, so the path can be invalid now */
                lock_24kernel();
                journal_stop(handle);
                unlock_24kernel();
                ext3_down_truncate_sem(inode);
                return EXT_REPEAT;
        }

        ext3_down_truncate_sem(inode);
        count = newex->ee_len;
        goal = ext3_ext_find_goal(inode, path, newex->ee_block, &aflags);
        aflags |= 2; /* blocks have already been reserved */
        pblock = ext3_mb_new_blocks(handle, inode, goal, &count, aflags, &err);
        if (!pblock)
                goto out;
        EXT_ASSERT(count <= newex->ee_len);

        /* insert new extent */
        newex->ee_start = pblock;
        newex->ee_len = count;
        err = ext3_ext_insert_extent(handle, tree, path, newex);
        if (err)
                goto out;

        /* correct on-disk inode size */
        if (newex->ee_len > 0) {
                new_i_size = (loff_t) newex->ee_block + newex->ee_len;
                new_i_size = new_i_size << inode->i_blkbits;
                if (new_i_size > EXT3_I(inode)->i_disksize) {
                        EXT3_I(inode)->i_disksize = new_i_size;
                        err = ext3_mark_inode_dirty(handle, inode);
                }
        }
out:
        lock_24kernel();
        journal_stop(handle);
        unlock_24kernel();
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                               bp->start, bp->init_num);
                        CERROR("current extent: %u/%u/%u %d\n",
                               newex->ee_block, newex->ee_len,
                               newex->ee_start, exist);
                }
                i = 0;
                if (newex->ee_block < bp->start)
                        i = bp->start - newex->ee_block;
                if (i >= newex->ee_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, newex->ee_len);
                for (; i < newex->ee_len && bp->num; i++) {
                        *(bp->created) = (exist == 0 ? 1 : 0);
                        bp->created++;
                        *(bp->blocks) = newex->ee_start + i;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}
int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
                       unsigned long num, unsigned long *blocks,
                       int *created, int create)
{
        struct ext3_extents_tree tree;
        struct bpointers bp;
        int err;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               block, block + num, (unsigned) inode->i_ino);

        ext3_init_tree_desc(&tree, inode);
        tree.private = &bp;
        bp.blocks = blocks;
        bp.created = created;
        bp.start = block;
        bp.init_num = bp.num = num;
        bp.create = create;

        ext3_down_truncate_sem(inode);
        err = ext3_ext_walk_space(&tree, block, num, ext3_ext_new_extent_cb);
        ext3_ext_invalidate_cache(&tree);
        ext3_up_truncate_sem(inode);

        return err;
}
int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int *created, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
               inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
                created += blocks_per_page * clen;
        }

        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
cleanup:
        return rc;
}
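/*
 * Example of the extent grouping above (hypothetical page list): pages
 * with indices {3, 4, 5, 9, 10} are split into two contiguous runs,
 * [3..5] and [9..10], and fsfilt_map_nblocks() is called once per run
 * with fp->index * blocks_per_page as the starting block and
 * clen * blocks_per_page as the length.
 */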
extern int ext3_map_inode_page(struct inode *inode, struct page *page,
                               unsigned long *blocks, int *created, int create);
int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
                                   int pages, unsigned long *blocks,
                                   int *created, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        unsigned long *b;
        int rc = 0, i, *cr;

        for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
                rc = ext3_map_inode_page(inode, *page, b, cr, create);
                if (rc) {
                        CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
                               inode->i_ino, *b, *cr, create, rc);
                        break;
                }

                b += blocks_per_page;
                cr += blocks_per_page;
        }
        return rc;
}
int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
                                int pages, unsigned long *blocks,
                                int *created, int create,
                                struct semaphore *optional_sem)
{
        int rc;
#ifdef EXT3_MULTIBLOCK_ALLOCATOR
        if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
                rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
                                                     blocks, created, create);
                return rc;
        }
#endif
        if (optional_sem != NULL)
                down(optional_sem);
        rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
                                            created, create);
        if (optional_sem != NULL)
                up(optional_sem);

        return rc;
}
extern int ext3_prep_san_write(struct inode *inode, long *blocks,
                               int nblocks, loff_t newsize);
static int fsfilt_ext3_prep_san_write(struct inode *inode, long *blocks,
                                      int nblocks, loff_t newsize)
{
        return ext3_prep_san_write(inode, blocks, nblocks, newsize);
}
static int fsfilt_ext3_read_record(struct file * file, void *buf,
                                   int size, loff_t *offs)
{
        struct inode *inode = file->f_dentry->d_inode;
        unsigned long block;
        struct buffer_head *bh;
        int err, blocksize, csize, boffs;

        /* prevent reading after eof */
        lock_kernel();
        if (inode->i_size < *offs + size) {
                size = inode->i_size - *offs;
                unlock_kernel();
                if (size < 0) {
                        CERROR("size %llu is too short for read %u@%llu\n",
                               inode->i_size, size, *offs);
                        return -EIO;
                } else if (size == 0) {
                        return 0;
                }
        } else {
                unlock_kernel();
        }

        blocksize = 1 << inode->i_blkbits;

        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ext3_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("can't read block: %d\n", err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return 0;
}
static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
                                    loff_t *offs, int force_sync)
{
        struct buffer_head *bh = NULL;
        unsigned long block;
        struct inode *inode = file->f_dentry->d_inode;
        loff_t old_size = inode->i_size, offset = *offs;
        loff_t new_size = inode->i_size;
        journal_t *journal;
        handle_t *handle;
        int err, block_count = 0, blocksize, size, boffs;

        /* Determine how many transaction credits are needed */
        blocksize = 1 << inode->i_blkbits;
        block_count = (*offs & (blocksize - 1)) + bufsize;
        block_count = (block_count + blocksize - 1) >> inode->i_blkbits;

        journal = EXT3_SB(inode->i_sb)->s_journal;
        lock_24kernel();
        handle = journal_start(journal,
                               block_count * EXT3_DATA_TRANS_BLOCKS + 2);
        unlock_24kernel();
        if (IS_ERR(handle)) {
                CERROR("can't start transaction\n");
                return PTR_ERR(handle);
        }

        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                bh = ext3_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("can't read/create block: %d\n", err);
                        goto out;
                }

                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        goto out;
                }
                LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err) {
                        CERROR("journal_dirty_metadata() returned error %d\n",
                               err);
                        goto out;
                }
                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                buf += size;
                bufsize -= size;
        }

        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */
out:
        if (bh)
                brelse(bh);

        /* correct in-core and on-disk sizes */
        if (new_size > inode->i_size) {
                lock_kernel();
                if (new_size > inode->i_size)
                        inode->i_size = new_size;
                if (inode->i_size > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = inode->i_size;
                if (inode->i_size > old_size)
                        mark_inode_dirty(inode);
                unlock_kernel();
        }

        lock_24kernel();
        journal_stop(handle);
        unlock_24kernel();

        if (err == 0)
                *offs = offset;
        return err;
}
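/*
 * Credit math above, worked through (illustrative numbers): with 4kB
 * blocks (blocksize = 4096), writing bufsize = 6000 bytes at *offs = 1000
 * gives block_count = (1000 + 6000 + 4095) >> 12 = 2 blocks, so the
 * handle is started with 2 * EXT3_DATA_TRANS_BLOCKS + 2 credits - one
 * data-transaction reservation per touched block plus slack for the
 * inode update.
 */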
static int fsfilt_ext3_setup(struct super_block *sb)
{
#if 0
        EXT3_SB(sb)->dx_lock = fsfilt_ext3_dx_lock;
        EXT3_SB(sb)->dx_unlock = fsfilt_ext3_dx_unlock;
#endif
#ifdef S_PDIROPS
        CWARN("Enabling PDIROPS\n");
        set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
        sb->s_flags |= S_PDIROPS;
#endif
        return 0;
}
/* If fso is NULL, op is a FSFILT operation, otherwise op is the number of fso
   objects.  Logs is the number of logfiles to update. */
static int fsfilt_ext3_get_op_len(int op, struct fsfilt_objinfo *fso, int logs)
{
        if (fso == NULL) {
                switch(op) {
                case FSFILT_OP_CREATE:
                        /* directory leaf, index & indirect & EA */
                        return 4 + 3 * logs;
                case FSFILT_OP_UNLINK:
                        return 3 * logs;
                }
        } else {
                int i;
                int needed = 0;
                struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
                int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
                int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
                for (i = 0; i < op; i++, fso++) {
                        int nblocks = fso->fso_bufcnt * blockpp;
                        int ndindirect = min(nblocks, addrpp + 1);
                        int nindir = nblocks + ndindirect + 1;

                        needed += nindir;
                }
                return needed + 3 * logs;
        }

        return 0;
}
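/*
 * Usage sketch (hypothetical values): fsfilt_ext3_get_op_len(FSFILT_OP_CREATE,
 * NULL, 2) returns 4 + 3 * 2 = 10 blocks, while passing a non-NULL fso
 * array with op = 3 sums the worst-case indirect-block needs of those
 * three objects and adds 3 * logs on top.
 */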
static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_start               = fsfilt_ext3_start,
        .fs_brw_start           = fsfilt_ext3_brw_start,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_commit_async        = fsfilt_ext3_commit_async,
        .fs_commit_wait         = fsfilt_ext3_commit_wait,
        .fs_setattr             = fsfilt_ext3_setattr,
        .fs_iocontrol           = fsfilt_ext3_iocontrol,
        .fs_set_md              = fsfilt_ext3_set_md,
        .fs_get_md              = fsfilt_ext3_get_md,
        .fs_readpage            = fsfilt_ext3_readpage,
        .fs_add_journal_cb      = fsfilt_ext3_add_journal_cb,
        .fs_statfs              = fsfilt_ext3_statfs,
        .fs_sync                = fsfilt_ext3_sync,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_prep_san_write      = fsfilt_ext3_prep_san_write,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
        .fs_send_bio            = fsfilt_ext3_send_bio,
        .fs_get_op_len          = fsfilt_ext3_get_op_len,
};
static int __init fsfilt_ext3_init(void)
{
        int rc;

        fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
                                      sizeof(struct fsfilt_cb_data), 0,
                                      0, NULL, NULL);
        if (!fcb_cache) {
                CERROR("error allocating fsfilt journal callback cache\n");
                GOTO(out, rc = -ENOMEM);
        }

        rc = fsfilt_register_ops(&fsfilt_ext3_ops);

        if (rc)
                kmem_cache_destroy(fcb_cache);
out:
        return rc;
}
static void __exit fsfilt_ext3_exit(void)
{
        fsfilt_unregister_ops(&fsfilt_ext3_ops);
        LASSERTF(kmem_cache_destroy(fcb_cache) == 0,
                 "can't free fsfilt callback cache: count %d\n",
                 atomic_read(&fcb_cache_count));
}

module_init(fsfilt_ext3_init);
module_exit(fsfilt_ext3_exit);

MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
MODULE_LICENSE("GPL");