/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lustre/lib/fsfilt_ext3.c
 * Lustre filesystem abstraction routines
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 * Author: Andreas Dilger <adilger@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_FILTER

#include <linux/init.h>
#include <linux/module.h>
#include <linux/jbd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/version.h>
#include <linux/bitops.h>
#include <linux/quota.h>
#include <linux/quotaio_v1.h>
#include <linux/quotaio_v2.h>
#include <linux/parser.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/ext3_xattr.h>
#else
#include <ext3/xattr.h>
#endif
#include <libcfs/kp30.h>
#include <lustre_fsfilt.h>
#include <lustre_quota.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif
#include <linux/lustre_compat25.h>
#include <linux/lprocfs_status.h>
#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#include <linux/ext3_extents.h>
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
#define FSFILT_DATA_TRANS_BLOCKS(sb)      EXT3_DATA_TRANS_BLOCKS
#define FSFILT_DELETE_TRANS_BLOCKS(sb)    EXT3_DELETE_TRANS_BLOCKS
#else
#define FSFILT_DATA_TRANS_BLOCKS(sb)      EXT3_DATA_TRANS_BLOCKS(sb)
#define FSFILT_DELETE_TRANS_BLOCKS(sb)    EXT3_DELETE_TRANS_BLOCKS(sb)
#endif
#ifdef EXT3_SINGLEDATA_TRANS_BLOCKS_HAS_SB
/* for kernels 2.6.18 and later */
#define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS(sb)
#else
#define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#define fsfilt_ext3_journal_start(inode, nblocks) \
                                journal_start(EXT3_JOURNAL(inode), nblocks)
#define fsfilt_ext3_journal_stop(handle)          journal_stop(handle)
#else
#define fsfilt_ext3_journal_start(inode, nblocks) ext3_journal_start(inode, nblocks)
#define fsfilt_ext3_journal_stop(handle)          ext3_journal_stop(handle)
#endif
static cfs_mem_cache_t *fcb_cache;

struct fsfilt_cb_data {
        struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};
#ifndef EXT3_XATTR_INDEX_TRUSTED        /* temporary until we hit l28 kernel */
#define EXT3_XATTR_INDEX_TRUSTED        4
#endif
static char *fsfilt_ext3_get_label(struct super_block *sb)
{
        return EXT3_SB(sb)->s_es->s_volume_name;
}
static int fsfilt_ext3_set_label(struct super_block *sb, char *label)
{
        /* see e.g. fsfilt_ext3_write_record() */
        journal_t *journal;
        handle_t *handle;
        int err;

        journal = EXT3_SB(sb)->s_journal;
        handle = journal_start(journal, 1);
        if (IS_ERR(handle)) {
                CERROR("can't start transaction\n");
                return(PTR_ERR(handle));
        }

        err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
        if (err)
                goto out;

        memcpy(EXT3_SB(sb)->s_es->s_volume_name, label,
               sizeof(EXT3_SB(sb)->s_es->s_volume_name));

        err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);

out:
        journal_stop(handle);

        return err;
}
static char *fsfilt_ext3_uuid(struct super_block *sb)
{
        return EXT3_SB(sb)->s_es->s_uuid;
}
#ifdef HAVE_DISK_INODE_VERSION
/*
 * Get the 64-bit version for an inode.
 */
static __u64 fsfilt_ext3_get_version(struct inode *inode)
{
        return EXT3_I(inode)->i_fs_version;
}

/*
 * Set the 64-bit version and return the old version.
 */
static __u64 fsfilt_ext3_set_version(struct inode *inode, __u64 new_version)
{
        __u64 old_version = EXT3_I(inode)->i_fs_version;

        (EXT3_I(inode))->i_fs_version = new_version;
        return old_version;
}
#endif
/*
 * We don't currently need any additional blocks for rmdir and
 * unlink transactions because we are storing the OST oa_id inside
 * the inode (which we will be changing anyways as part of this
 * transaction).
 */
static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
                               int logs)
{
        /* For updates to the last received file */
        int nblocks = FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
        journal_t *journal;
        void *handle;

        if (current->journal_info) {
                CDEBUG(D_INODE, "increasing refcount on %p\n",
                       current->journal_info);
                goto journal_start;
        }

        switch(op) {
        case FSFILT_OP_RMDIR:
        case FSFILT_OP_UNLINK:
                /* delete one file + create/update logs for each stripe */
                nblocks += FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb);
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
                break;
        case FSFILT_OP_RENAME:
                /* modify additional directory */
                nblocks += FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
                /* no break */
        case FSFILT_OP_SYMLINK:
                /* additional block + block bitmap + GDT for long symlink */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_CREATE: {
#if defined(EXT3_EXTENTS_FL) && defined(EXT3_INDEX_FL)
                static int warned;
                if (!test_opt(inode->i_sb, EXTENTS)) {
                        warned = 1;
                } else if (((EXT3_I(inode)->i_flags &
                             cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL)) ==
                            cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL))) {
                        CWARN("extent-mapped directory found - contact "
                              "CFS: support@clusterfs.com\n");
                        warned = 1;
                }
#endif
                /* no break */
        }
        case FSFILT_OP_MKDIR:
        case FSFILT_OP_MKNOD:
                /* modify one inode + block bitmap + GDT */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_LINK:
                /* modify parent directory */
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
                /* create/update logs for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
                break;
        case FSFILT_OP_SETATTR:
                /* Setattr on inode */
                nblocks += 1;
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
                /* quota chown log for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
                break;
        case FSFILT_OP_CANCEL_UNLINK:
                /* blocks for log header bitmap update OR
                 * blocks for catalog header bitmap update + unlink of logs */
                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
                        FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb) * logs;
                break;
        case FSFILT_OP_JOIN:
                /* delete 2 file(file + array id) + create 1 file (array id)
                 * create/update logs for each stripe */
                nblocks += 2 * FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb);

                /*create array log for head file*/
                nblocks += 3;
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb));
                /*update head file array */
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
                break;
        default:
                CERROR("unknown transaction start op %d\n", op);
                LBUG();
        }

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(inode->i_sb)->s_journal;
        if (nblocks > journal->j_max_transaction_buffers) {
                CWARN("too many credits %d for op %ux%u using %d instead\n",
                      nblocks, op, logs, journal->j_max_transaction_buffers);
                nblocks = journal->j_max_transaction_buffers;
        }

journal_start:
        LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
        handle = fsfilt_ext3_journal_start(inode, nblocks);

        if (!IS_ERR(handle))
                LASSERT(current->journal_info == handle);
        else
                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
                       op, nblocks, PTR_ERR(handle));
        return handle;
}
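/*
 * Illustrative usage sketch (not part of the original source): an MDS
 * unlink of a file striped over two OSTs might look like
 *
 *      void *handle = fsfilt_ext3_start(inode, FSFILT_OP_UNLINK, NULL, 2);
 *      if (!IS_ERR(handle)) {
 *              ... update the inode and the per-stripe llog records ...
 *              fsfilt_ext3_commit(inode, handle, 0);
 *      }
 *
 * where nblocks = SINGLEDATA + DELETE + 2 * (INDEX_EXTRA + SINGLEDATA)
 * credits, clamped to journal->j_max_transaction_buffers.
 */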
/*
 * Calculate the number of buffer credits needed to write multiple pages in
 * a single ext3 transaction.  No, this shouldn't be here, but as yet ext3
 * doesn't have a nice API for calculating this sort of thing in advance.
 *
 * See comment above ext3_writepage_trans_blocks for details.  We assume
 * no data journaling is being done, but it does allow for all of the pages
 * being non-contiguous.  If we are guaranteed contiguous pages we could
 * reduce the number of (d)indirect blocks a lot.
 *
 * With N blocks per page and P pages, for each inode we have at most:
 * N*P indirect blocks
 * min(N*P, blocksize/4 + 1) dindirect blocks
 * 1 tindirect block
 *
 * For the entire filesystem, we have at most:
 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
 * objcount inode blocks
 * 1 superblock
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 * 1 EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
 */
static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
                                      int niocount, struct niobuf_local *nb)
{
        struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
        __u64 next_indir;
        const int blockpp = 1 << (CFS_PAGE_SHIFT - sb->s_blocksize_bits);
        int nbitmaps = 0, ngdblocks;
        int needed = objcount + 1; /* inodes + superblock */
        int i, j;

        for (i = 0, j = 0; i < objcount; i++, fso++) {
                /* two or more dindirect blocks in case we cross boundary */
                int ndind = (long)((nb[j + fso->fso_bufcnt - 1].offset -
                                    nb[j].offset) >>
                                   sb->s_blocksize_bits) /
                        (EXT3_ADDR_PER_BLOCK(sb) * EXT3_ADDR_PER_BLOCK(sb));
                nbitmaps += min(fso->fso_bufcnt, ndind > 0 ? ndind : 2);

                /* leaf, indirect, tindirect blocks for first block */
                nbitmaps += blockpp + 2;

                j += fso->fso_bufcnt;
        }

        next_indir = nb[0].offset +
                (EXT3_ADDR_PER_BLOCK(sb) << sb->s_blocksize_bits);
        for (i = 1; i < niocount; i++) {
                if (nb[i].offset >= next_indir) {
                        nbitmaps++;     /* additional indirect */
                        next_indir = nb[i].offset +
                                (EXT3_ADDR_PER_BLOCK(sb)<<sb->s_blocksize_bits);
                } else if (nb[i].offset != nb[i - 1].offset + sb->s_blocksize) {
                        nbitmaps++;     /* additional indirect */
                }
                nbitmaps += blockpp;    /* each leaf in different group? */
        }

        ngdblocks = nbitmaps;
        if (nbitmaps > EXT3_SB(sb)->s_groups_count)
                nbitmaps = EXT3_SB(sb)->s_groups_count;
        if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
                ngdblocks = EXT3_SB(sb)->s_gdb_count;

        needed += nbitmaps + ngdblocks;

        /* last_rcvd update */
        needed += FSFILT_DATA_TRANS_BLOCKS(sb);

#if defined(CONFIG_QUOTA)
        /* We assume that there will be 1 bit set in s_dquot.flags for each
         * quota file that is active.  This is at least true for now.
         */
        needed += hweight32(sb_any_quota_enabled(sb)) *
                FSFILT_SINGLEDATA_TRANS_BLOCKS(sb);
#endif

        return needed;
}
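/*
 * Worked example (illustrative, not from the original source): a single
 * object (objcount = 1) writing 16 contiguous 4KB pages on a 4KB-block
 * filesystem (blockpp = 1, niocount = 16).  The per-object loop adds
 * min(16, 2) + (1 + 2) = 5 to nbitmaps; the per-page loop adds one leaf
 * per remaining page (15) and no extra indirects since the pages are
 * contiguous, so nbitmaps = ngdblocks = 20 (each then clamped to the
 * group/gdb counts), and needed = 2 + 20 + 20 +
 * FSFILT_DATA_TRANS_BLOCKS(sb), plus any quota-file credits.
 */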
/* We have to start a huge journal transaction here to hold all of the
 * metadata for the pages being written here.  This is necessitated by
 * the fact that we do lots of prepare_write operations before we do
 * any of the matching commit_write operations, so even if we split
 * up to use "smaller" transactions none of them could complete until
 * all of them were opened.  By having a single journal transaction,
 * we eliminate duplicate reservations for common blocks like the
 * superblock and group descriptors or bitmaps.
 *
 * We will start the transaction here, but each prepare_write will
 * add a refcount to the transaction, and each commit_write will
 * remove a refcount.  The transaction will be closed when all of
 * the pages have been written.
 */
static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
                                   int niocount, struct niobuf_local *nb,
                                   void *desc_private, int logs)
{
        journal_t *journal;
        handle_t *handle;
        int needed;

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
        needed = fsfilt_ext3_credits_needed(objcount, fso, niocount, nb);

        /* The number of blocks we could _possibly_ dirty can be very large.
         * We reduce our request if it is absurd (and we couldn't get that
         * many credits for a single handle anyways).
         *
         * At some point we have to limit the size of I/Os sent at one time,
         * increase the size of the journal, or we have to calculate the
         * actual journal requirements more carefully by checking all of
         * the blocks instead of being maximally pessimistic.  It remains to
         * be seen if this is a real problem or not.
         */
        if (needed > journal->j_max_transaction_buffers) {
                CERROR("want too many journal credits (%d) using %d instead\n",
                       needed, journal->j_max_transaction_buffers);
                needed = journal->j_max_transaction_buffers;
        }

        LASSERTF(needed > 0, "can't start %d credit transaction\n", needed);
        handle = fsfilt_ext3_journal_start(fso->fso_dentry->d_inode, needed);
        if (IS_ERR(handle)) {
                CERROR("can't get handle for %d credits: rc = %ld\n", needed,
                       PTR_ERR(handle));
        } else {
                LASSERT(handle->h_buffer_credits >= needed);
                LASSERT(current->journal_info == handle);
        }

        return handle;
}
static int fsfilt_ext3_extend(struct inode *inode, unsigned int nblocks, void *h)
{
        handle_t *handle = h;

        /* fsfilt_extend called with nblocks = 0 for testing in special cases */
        if (nblocks == 0) {
                handle->h_buffer_credits = 0;
                CWARN("setting credits of handle %p to zero by request\n", h);
        }

        if (handle->h_buffer_credits > nblocks)
                return 0;
        if (journal_extend(handle, nblocks) == 0)
                return 0;

        ext3_mark_inode_dirty(handle, inode);
        return journal_restart(handle, nblocks);
}
static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
{
        int rc;
        handle_t *handle = h;

        LASSERT(current->journal_info == handle);
        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */

        rc = fsfilt_ext3_journal_stop(handle);

        return rc;
}
static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
                                    void **wait_handle)
{
        unsigned long tid;
        transaction_t *transaction;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        unsigned long rtid;
#endif
        handle_t *handle = h;
        journal_t *journal;
        int rc;

        LASSERT(current->journal_info == handle);

        transaction = handle->h_transaction;
        journal = transaction->t_journal;
        tid = transaction->t_tid;
        /* we don't want to be blocked */
        handle->h_sync = 0;
        rc = fsfilt_ext3_journal_stop(handle);
        if (rc) {
                CERROR("error while stopping transaction: %d\n", rc);
                return rc;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        rtid = log_start_commit(journal, transaction);
        if (rtid != tid)
                CERROR("strange race: %lu != %lu\n",
                       (unsigned long) tid, (unsigned long) rtid);
#else
        log_start_commit(journal, tid);
#endif

        *wait_handle = (void *) tid;
        CDEBUG(D_INODE, "commit async: %lu\n", (unsigned long) tid);
        return 0;
}
static int fsfilt_ext3_commit_wait(struct inode *inode, void *h)
{
        journal_t *journal = EXT3_JOURNAL(inode);
        tid_t tid = (tid_t)(long)h;

        CDEBUG(D_INODE, "commit wait: %lu\n", (unsigned long) tid);
        if (unlikely(is_journal_aborted(journal)))
                return -EIO;

        log_wait_commit(EXT3_JOURNAL(inode), tid);

        if (unlikely(is_journal_aborted(journal)))
                return -EIO;
        return 0;
}
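/*
 * Illustrative call sequence (not from the original source): a caller that
 * wants a non-blocking commit pairs the two functions above, e.g.
 *
 *      void *wh;
 *      rc = fsfilt_ext3_commit_async(inode, handle, &wh);
 *      ... do other work while the journal commits ...
 *      if (rc == 0)
 *              rc = fsfilt_ext3_commit_wait(inode, wh);
 *
 * The opaque wait handle is just the transaction tid cast to a pointer.
 */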
static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
                               struct iattr *iattr, int do_trunc)
{
        struct inode *inode = dentry->d_inode;
        int rc = 0;

        /* Avoid marking the inode dirty on the superblock list unnecessarily.
         * We are already writing the inode to disk as part of this
         * transaction and want to avoid a lot of extra inode writeout
         * later on. b=9828 */
        if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
                /* ATTR_SIZE would invoke truncate: clear it */
                iattr->ia_valid &= ~ATTR_SIZE;
                EXT3_I(inode)->i_disksize = iattr->ia_size;
                i_size_write(inode, iattr->ia_size);

                if (iattr->ia_valid & ATTR_UID)
                        inode->i_uid = iattr->ia_uid;
                if (iattr->ia_valid & ATTR_GID)
                        inode->i_gid = iattr->ia_gid;
                if (iattr->ia_valid & ATTR_ATIME)
                        inode->i_atime = iattr->ia_atime;
                if (iattr->ia_valid & ATTR_MTIME)
                        inode->i_mtime = iattr->ia_mtime;
                if (iattr->ia_valid & ATTR_CTIME)
                        inode->i_ctime = iattr->ia_ctime;
                if (iattr->ia_valid & ATTR_MODE) {
                        inode->i_mode = iattr->ia_mode;

                        if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                                inode->i_mode &= ~S_ISGID;
                }

                inode->i_sb->s_op->dirty_inode(inode);

                goto out;
        }

        /* Don't allow setattr to change file type */
        if (iattr->ia_valid & ATTR_MODE)
                iattr->ia_mode = (inode->i_mode & S_IFMT) |
                                 (iattr->ia_mode & ~S_IFMT);

        /* We set these flags on the client, but have already checked perms
         * so don't confuse inode_change_ok. */
        iattr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);

        if (inode->i_op->setattr) {
                rc = inode->i_op->setattr(dentry, iattr);
        } else {
                rc = inode_change_ok(inode, iattr);
                if (!rc)
                        rc = inode_setattr(inode, iattr);
        }

out:
        return rc;
}
static int fsfilt_ext3_iocontrol(struct inode *inode, struct file *file,
                                 unsigned int cmd, unsigned long arg)
{
        int rc = 0;

        /* FIXME: Can't do this because of nested transaction deadlock */
        if (cmd == EXT3_IOC_SETFLAGS && (*(int *)arg) & EXT3_JOURNAL_DATA_FL) {
                CERROR("can't set data journal flag on file\n");
                return -EPERM;
        }

        if (inode->i_fop->ioctl)
                rc = inode->i_fop->ioctl(inode, file, cmd, arg);
        else
                rc = -ENOTTY;

        return rc;
}
static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
                              void *lmm, int lmm_size, const char *name)
{
        int rc;

        LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);

        rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
                                   name, lmm, lmm_size, 0);

        if (rc && rc != -EROFS)
                CERROR("error adding MD data to inode %lu: rc = %d\n",
                       inode->i_ino, rc);
        return rc;
}
/* Must be called with i_mutex held */
static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size,
                              const char *name)
{
        int rc;

        LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);

        rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
                            name, lmm, lmm_size);

        /* This gives us the MD size */
        if (lmm == NULL)
                return (rc == -ENODATA) ? 0 : rc;

        if (rc < 0) {
                CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
                       EXT3_XATTR_INDEX_TRUSTED, name,
                       inode->i_ino, rc);
                memset(lmm, 0, lmm_size);
                return (rc == -ENODATA) ? 0 : rc;
        }

        return rc;
}
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct bio *bio)
{
        submit_bio(rw, bio);
        return 0;
}
#else
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct kiobuf *bio)
{
        int rc, blk_per_page;

        rc = brw_kiovec(rw, 1, &bio, inode->i_dev,
                        KIOBUF_GET_BLOCKS(bio), 1 << inode->i_blkbits);
        /*
         * brw_kiovec() returns number of bytes actually written. If error
         * occurred after something was written, error code is returned though
         * kiobuf->errno. (See bug 6854.)
         */

        blk_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;

        if (rc != (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page) {
                CERROR("short write?  expected %d, wrote %d (%d)\n",
                       (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page,
                       rc, bio->errno);
        }

        if (bio->errno != 0) {
                CERROR("IO error. Wrote %d of %d (%d)\n",
                       rc,
                       (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page,
                       bio->errno);
                rc = bio->errno;
        }

        return rc;
}
#endif
static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
                                    loff_t *off)
{
        struct inode *inode = file->f_dentry->d_inode;
        int rc = 0;

        if (S_ISREG(inode->i_mode))
                rc = file->f_op->read(file, buf, count, off);
        else {
                const int blkbits = inode->i_sb->s_blocksize_bits;
                const int blksize = inode->i_sb->s_blocksize;

                CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
                       count, inode->i_ino, *off);
                while (count > 0) {
                        struct buffer_head *bh;
                        int err = 0;

                        if (*off < i_size_read(inode)) {
                                bh = ext3_bread(NULL, inode, *off >> blkbits,
                                                0, &err);

                                CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);

                                if (bh) {
                                        memcpy(buf, bh->b_data, blksize);
                                        brelse(bh);
                                } else if (err) {
                                        /* XXX in theory we should just fake
                                         * this buffer and continue like ext3,
                                         * especially if this is a partial read
                                         */
                                        CERROR("error read dir %lu+%llu: %d\n",
                                               inode->i_ino, *off, err);
                                        return err;
                                }
                        } else {
                                struct ext3_dir_entry_2 *fake = (void *)buf;

                                CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
                                memset(fake, 0, sizeof(*fake));
                                fake->rec_len = cpu_to_le16(blksize);
                        }
                        count -= blksize;
                        buf += blksize;
                        *off += blksize;
                        rc += blksize;
                }
        }

        return rc;
}
static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
{
        struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;

        fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);

        OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
}
static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
                                      void *handle, fsfilt_cb_t cb_func,
                                      void *cb_data)
{
        struct fsfilt_cb_data *fcb;

        OBD_SLAB_ALLOC(fcb, fcb_cache, CFS_ALLOC_IO, sizeof *fcb);
        if (fcb == NULL)
                return -ENOMEM;

        fcb->cb_func = cb_func;
        fcb->cb_obd = obd;
        fcb->cb_last_rcvd = last_rcvd;
        fcb->cb_data = cb_data;

        CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
        journal_callback_set(handle, fsfilt_ext3_cb_func,
                             (struct journal_callback *)fcb);

        return 0;
}
/*
 * We need to hack the return value for the free inode counts because
 * the current EA code requires one filesystem block per inode with EAs,
 * so it is possible to run out of blocks before we run out of inodes.
 *
 * This can be removed when the ext3 EA code is fixed.
 */
static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
{
        struct kstatfs sfs;
        int rc;

        memset(&sfs, 0, sizeof(sfs));
        rc = ll_do_statfs(sb, &sfs);

        if (!rc && sfs.f_bfree < sfs.f_ffree) {
                sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
                sfs.f_ffree = sfs.f_bfree;
        }

        statfs_pack(osfs, &sfs);
        return rc;
}
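/*
 * Illustrative example (not from the original source): with f_files = 5000,
 * f_ffree = 1000 free inodes but only f_bfree = 100 free blocks, the
 * adjustment reports f_files = (5000 - 1000) + 100 = 4100 and
 * f_ffree = 100, i.e. never more free inodes than there are free blocks
 * to back their EAs.
 */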
static int fsfilt_ext3_sync(struct super_block *sb)
{
        return ext3_force_commit(sb);
}
#if defined(EXT3_MULTIBLOCK_ALLOCATOR) && (!defined(EXT3_EXT_CACHE_NO) || defined(EXT_CACHE_MARK))
#warning "kernel code has old extents/mballoc patch, disabling"
#undef EXT3_MULTIBLOCK_ALLOCATOR
#endif

#ifndef EXT3_EXTENTS_FL
#define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
#endif
#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#define ext3_up_truncate_sem(inode)  up_write(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode)  down_write(&EXT3_I(inode)->truncate_sem);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
#define ext3_up_truncate_sem(inode)  up(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode)  down(&EXT3_I(inode)->truncate_sem);
#else
#define ext3_up_truncate_sem(inode)  mutex_unlock(&EXT3_I(inode)->truncate_mutex);
#define ext3_down_truncate_sem(inode)  mutex_lock(&EXT3_I(inode)->truncate_mutex);
#endif

#ifndef EXT_ASSERT
#define EXT_ASSERT(cond)  BUG_ON(!(cond))
#endif
#ifdef EXT3_EXT_HAS_NO_TREE
/* for kernels 2.6.18 and later */
#define ext3_ext_base                   inode
#define ext3_ext_base2inode(inode)      (inode)
#define EXT_DEPTH(inode)                ext_depth(inode)
#define EXT_GENERATION(inode)           ext_generation(inode)
#define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
                        ext3_ext_walk_space(inode, block, num, cb, cbdata);
#else
#define ext3_ext_base                   ext3_extents_tree
#define ext3_ext_base2inode(tree)       (tree->inode)
#define fsfilt_ext3_ext_walk_space(tree, block, num, cb, cbdata) \
                        ext3_ext_walk_space(tree, block, num, cb);
#endif
#include <linux/lustre_version.h>
#if EXT3_EXT_MAGIC == 0xf301
#define ee_start e_start
#define ee_block e_block
#define ee_len   e_num
#endif
#ifndef EXT3_BB_MAX_BLOCKS
#define ext3_mb_new_blocks(handle, inode, goal, count, aflags, err) \
        ext3_new_blocks(handle, inode, count, goal, err)
#endif

struct bpointers {
        unsigned long *blocks;
        int *created;
        unsigned long start;
        unsigned long num;
        unsigned long init_num;
        int create;
};
static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
                              unsigned long block, int *aflags)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext3_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext)) {
                        /* This prefers to eat into a contiguous extent
                         * rather than find an extent that the whole
                         * request will fit into.  This can fragment data
                         * block allocation and prevents our lovely 1M I/Os
                         * from reaching the disk intact. */
                        if (ex->ee_block + ex->ee_len == block)
                                *aflags |= 1;
                        return ex->ee_start + (block - ex->ee_block);
                }

                /* it looks like the index is empty;
                 * try to find starting from index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/locks.h>
static void ll_unmap_underlying_metadata(struct super_block *sb,
                                         unsigned long blocknr)
{
        struct buffer_head *old_bh;

        old_bh = get_hash_table(sb->s_dev, blocknr, sb->s_blocksize);
        if (old_bh) {
                mark_buffer_clean(old_bh);
                wait_on_buffer(old_bh);
                clear_bit(BH_Req, &old_bh->b_state);
                __brelse(old_bh);
        }
}
#else
#define ll_unmap_underlying_metadata(sb, blocknr) \
        unmap_underlying_metadata((sb)->s_bdev, blocknr)
#endif
#ifndef EXT3_MB_HINT_GROUP_ALLOC
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        unsigned long pblock, goal;
        int aflags = 0;
        struct inode *inode = ext3_ext_base2inode(base);

        goal = ext3_ext_find_goal(inode, path, block, &aflags);
        aflags |= 2; /* block has already been reserved */
        pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
        return pblock;
}
#else
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_allocation_request ar;
        unsigned long pblock;
        int aflags = 0;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = EXT3_MB_HINT_DATA;
        pblock = ext3_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}
#endif
#ifdef EXT3_EXT_HAS_NO_TREE
static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
                                  struct ext3_ext_path *path,
                                  struct ext3_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                  struct ext3_extent *ex,
#endif
                                  void *cbdata)
{
        struct bpointers *bp = cbdata;
#else
static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
                                  struct ext3_ext_path *path,
                                  struct ext3_ext_cache *cex
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                  , struct ext3_extent *ex
#endif
                                  )
{
        struct bpointers *bp = base->private;
#endif
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_extent nex;
        unsigned long pblock;
        unsigned long tgen;
        unsigned long count;
        handle_t *handle;
        int err, i;

        i = EXT_DEPTH(base);
        EXT_ASSERT(i == path->p_depth);
        EXT_ASSERT(path[i].p_hdr);

        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->created) = 0;
                        bp->created++;
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = EXT_GENERATION(base);
        count = ext3_ext_calc_credits_for_insert(base, path);
        ext3_up_truncate_sem(inode);

        handle = fsfilt_ext3_journal_start(inode, count + EXT3_ALLOC_NEEDED + 1);
        if (IS_ERR(handle)) {
                ext3_down_truncate_sem(inode);
                return PTR_ERR(handle);
        }

        ext3_down_truncate_sem(inode);
        if (tgen != EXT_GENERATION(base)) {
                /* the tree has changed. so path can be invalid at moment */
                fsfilt_ext3_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        EXT_ASSERT(count <= cex->ec_len);

        /* insert new extent */
        nex.ee_block = cex->ec_block;
        nex.ee_start = pblock;
        nex.ee_len = count;
        err = ext3_ext_insert_extent(handle, base, path, &nex);
        if (err) {
                CERROR("can't insert extent: %d\n", err);
                /* XXX: export ext3_free_blocks() */
                /*ext3_free_blocks(handle, inode, nex.ee_start, nex.ee_len, 0);*/
                goto out;
        }

        /*
         * Putting len of the actual extent we just inserted,
         * we are asking ext3_ext_walk_space() to continue
         * scanning after that block
         */
        cex->ec_len = nex.ee_len;
        cex->ec_start = nex.ee_start;
        BUG_ON(nex.ee_len == 0);
        BUG_ON(nex.ee_block != cex->ec_block);

out:
        fsfilt_ext3_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                               bp->start, bp->init_num);
                        CERROR("current extent: %u/%u/%u %d\n",
                               cex->ec_block, cex->ec_len,
                               cex->ec_start, cex->ec_type);
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                                *(bp->created) = 0;
                        } else {
                                *(bp->created) = 1;
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
                                ll_unmap_underlying_metadata(inode->i_sb,
                                                             *(bp->blocks));
                        }
                        bp->created++;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}
int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
                       unsigned long num, unsigned long *blocks,
                       int *created, int create)
{
#ifdef EXT3_EXT_HAS_NO_TREE
        struct ext3_ext_base *base = inode;
#else
        struct ext3_extents_tree tree;
        struct ext3_ext_base *base = &tree;
#endif
        struct bpointers bp;
        int err;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               block, block + num - 1, (unsigned) inode->i_ino);

#ifndef EXT3_EXT_HAS_NO_TREE
        ext3_init_tree_desc(base, inode);
        tree.private = &bp;
#endif
        bp.blocks = blocks;
        bp.created = created;
        bp.start = block;
        bp.init_num = bp.num = num;
        bp.create = create;

        ext3_down_truncate_sem(inode);
        err = fsfilt_ext3_ext_walk_space(base, block, num,
                                         ext3_ext_new_extent_cb, &bp);
        ext3_ext_invalidate_cache(base);
        ext3_up_truncate_sem(inode);

        return err;
}
int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int *created, int create)
{
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
               inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
                created += blocks_per_page * clen;
        }

        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
cleanup:
        return rc;
}
#endif /* EXT3_MULTIBLOCK_ALLOCATOR */
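/*
 * Illustrative example (not from the original source): with 4KB pages and
 * 4KB blocks (blocks_per_page == 1), pages with indices {0,1,2,7,8} form
 * two contiguous runs, so fsfilt_map_nblocks() is called twice: once for
 * blocks 0-2 (clen = 3) and once for blocks 7-8 (clen = 2), with the
 * blocks/created cursors advanced by 3 entries in between.
 */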
extern int ext3_map_inode_page(struct inode *inode, struct page *page,
                               unsigned long *blocks, int *created, int create);
int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
                                   int pages, unsigned long *blocks,
                                   int *created, int create)
{
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        unsigned long *b;
        int rc = 0, i;
        int *cr;

        for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
                rc = ext3_map_inode_page(inode, *page, b, cr, create);
                if (rc) {
                        CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
                               inode->i_ino, *b, *cr, create, rc);
                        break;
                }

                b += blocks_per_page;
                cr += blocks_per_page;
        }
        return rc;
}
int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
                                int pages, unsigned long *blocks,
                                int *created, int create,
                                struct semaphore *optional_sem)
{
        int rc;
#ifdef EXT3_MULTIBLOCK_ALLOCATOR
        if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
                rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
                                                     blocks, created, create);
                return rc;
        }
#endif
        if (optional_sem != NULL)
                down(optional_sem);
        rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
                                            created, create);
        if (optional_sem != NULL)
                up(optional_sem);

        return rc;
}
static int fsfilt_ext3_read_record(struct file * file, void *buf,
                                   int size, loff_t *offs)
{
        struct inode *inode = file->f_dentry->d_inode;
        unsigned long block;
        struct buffer_head *bh;
        int err, blocksize, csize, boffs;

        /* prevent reading after eof */
        lock_kernel();
        if (i_size_read(inode) < *offs + size) {
                size = i_size_read(inode) - *offs;
                unlock_kernel();
                if (size < 0) {
                        CERROR("size %llu is too short for read %u@%llu\n",
                               i_size_read(inode), size, *offs);
                        return -EIO;
                } else if (size == 0) {
                        return 0;
                }
        } else {
                unlock_kernel();
        }

        blocksize = 1 << inode->i_blkbits;

        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ext3_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("can't read block: %d\n", err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return 0;
}
static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
                                    loff_t *offs, int force_sync)
{
        struct buffer_head *bh = NULL;
        unsigned long block;
        struct inode *inode = file->f_dentry->d_inode;
        loff_t old_size = i_size_read(inode), offset = *offs;
        loff_t new_size = i_size_read(inode);
        handle_t *handle;
        int err = 0, block_count = 0, blocksize, size, boffs;

        /* Determine how many transaction credits are needed */
        blocksize = 1 << inode->i_blkbits;
        block_count = (*offs & (blocksize - 1)) + bufsize;
        block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
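        /*
         * Illustrative example (not from the original source): writing 100
         * bytes at offset 4090 with 4KB blocks spans a block boundary:
         * block_count = ((4090 & 4095) + 100 + 4095) >> 12 = 2, so credits
         * are reserved for two data blocks plus the extra 2 in the
         * journal_start call below.
         */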
        handle = fsfilt_ext3_journal_start(inode,
                        block_count * FSFILT_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
        if (IS_ERR(handle)) {
                CERROR("can't start transaction for %d blocks (%d bytes)\n",
                       block_count * FSFILT_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
                       bufsize);
                return PTR_ERR(handle);
        }

        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                bh = ext3_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("can't read/create block: %d\n", err);
                        goto out;
                }

                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        goto out;
                }
                LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err) {
                        CERROR("journal_dirty_metadata() returned error %d\n",
                               err);
                        goto out;
                }
                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                buf += size;
                bufsize -= size;
        }

        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */
        if (bh)
                brelse(bh);

        /* correct in-core and on-disk sizes */
        if (new_size > i_size_read(inode)) {
                lock_kernel();
                if (new_size > i_size_read(inode))
                        i_size_write(inode, new_size);
                if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = i_size_read(inode);
                if (i_size_read(inode) > old_size)
                        mark_inode_dirty(inode);
                unlock_kernel();
        }

out:
        fsfilt_ext3_journal_stop(handle);
        if (!err)
                *offs = offset;
        return err;
}
static int fsfilt_ext3_setup(struct super_block *sb)
{
        struct ext3_sb_info *sbi = EXT3_SB(sb);
#ifdef S_PDIROPS
        sbi->dx_lock = fsfilt_ext3_dx_lock;
        sbi->dx_unlock = fsfilt_ext3_dx_unlock;

        CWARN("Enabling PDIROPS\n");
        set_opt(sbi->s_mount_opt, PDIROPS);
        sb->s_flags |= S_PDIROPS;
#endif
        if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
                CWARN("filesystem doesn't have dir_index feature enabled\n");
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,6)) && HAVE_QUOTA_SUPPORT
        /* enable journaled quota support */
        /* kfreed in ext3_put_super() */
        sbi->s_qf_names[USRQUOTA] = kstrdup("lquota.user", GFP_KERNEL);
        if (!sbi->s_qf_names[USRQUOTA])
                return -ENOMEM;
        sbi->s_qf_names[GRPQUOTA] = kstrdup("lquota.group", GFP_KERNEL);
        if (!sbi->s_qf_names[GRPQUOTA]) {
                kfree(sbi->s_qf_names[USRQUOTA]);
                sbi->s_qf_names[USRQUOTA] = NULL;
                return -ENOMEM;
        }

        sbi->s_jquota_fmt = QFMT_VFS_V0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13))
        set_opt(sbi->s_mount_opt, QUOTA);
#endif
#endif

        return 0;
}
/* If fso is NULL, op is FSFILT operation, otherwise op is number of fso
   objects. Logs is number of logfiles to update */
static int fsfilt_ext3_get_op_len(int op, struct fsfilt_objinfo *fso, int logs)
{
        if (fso == NULL) {
                switch(op) {
                case FSFILT_OP_CREATE:
                        /* directory leaf, index & indirect & EA*/
                        return 4 + 3 * logs;
                case FSFILT_OP_UNLINK:
                        return 3 * logs;
                }
        } else {
                int i;
                int needed = 0;
                struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
                int blockpp = 1 << (CFS_PAGE_SHIFT - sb->s_blocksize_bits);
                int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
                for (i = 0; i < op; i++, fso++) {
                        int nblocks = fso->fso_bufcnt * blockpp;
                        int ndindirect = min(nblocks, addrpp + 1);
                        int nindir = nblocks + ndindirect + 1;

                        needed += nindir;
                }
                return needed + 3 * logs;
        }

        return 0;
}
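/*
 * Illustrative example (not from the original source): with fso == NULL,
 * op == FSFILT_OP_CREATE and logs == 2 this returns 4 + 3 * 2 = 10
 * blocks: directory leaf, index, indirect and EA, plus three per log
 * file to update.
 */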
static const char *op_quotafile[] = { "lquota.user", "lquota.group" };

#define DQINFO_COPY(out, in)                    \
do {                                            \
        Q_COPY(out, in, dqi_bgrace);            \
        Q_COPY(out, in, dqi_igrace);            \
        Q_COPY(out, in, dqi_flags);             \
        Q_COPY(out, in, dqi_valid);             \
} while (0)

#define DQBLK_COPY(out, in)                     \
do {                                            \
        Q_COPY(out, in, dqb_bhardlimit);        \
        Q_COPY(out, in, dqb_bsoftlimit);        \
        Q_COPY(out, in, dqb_curspace);          \
        Q_COPY(out, in, dqb_ihardlimit);        \
        Q_COPY(out, in, dqb_isoftlimit);        \
        Q_COPY(out, in, dqb_curinodes);         \
        Q_COPY(out, in, dqb_btime);             \
        Q_COPY(out, in, dqb_itime);             \
        Q_COPY(out, in, dqb_valid);             \
} while (0)
static int fsfilt_ext3_quotactl(struct super_block *sb,
                                struct obd_quotactl *oqc)
{
        int i, rc = 0, error = 0;
        struct quotactl_ops *qcop;
        struct if_dqinfo *info;
        struct if_dqblk *dqblk;
        ENTRY;

        if (!sb->s_qcop)
                RETURN(-ENOSYS);

        OBD_ALLOC_PTR(info);
        if (!info)
                RETURN(-ENOMEM);
        OBD_ALLOC_PTR(dqblk);
        if (!dqblk) {
                OBD_FREE_PTR(info);
                RETURN(-ENOMEM);
        }

        DQINFO_COPY(info, &oqc->qc_dqinfo);
        DQBLK_COPY(dqblk, &oqc->qc_dqblk);

        qcop = sb->s_qcop;
        if (oqc->qc_cmd == Q_QUOTAON || oqc->qc_cmd == Q_QUOTAOFF) {
                for (i = 0; i < MAXQUOTAS; i++) {
                        if (!Q_TYPESET(oqc, i))
                                continue;

                        if (oqc->qc_cmd == Q_QUOTAON) {
                                if (!qcop->quota_on)
                                        GOTO(out, rc = -ENOSYS);
                                rc = qcop->quota_on(sb, i, oqc->qc_id,
                                                    (char *)op_quotafile[i]);
                        } else if (oqc->qc_cmd == Q_QUOTAOFF) {
                                if (!qcop->quota_off)
                                        GOTO(out, rc = -ENOSYS);
                                rc = qcop->quota_off(sb, i);
                        }

                        if (rc == -EBUSY)
                                error = rc;
                        else if (rc)
                                GOTO(out, rc);
                }
                GOTO(out, rc ?: error);
        }

        switch (oqc->qc_cmd) {
        case Q_GETOINFO:
        case Q_GETINFO:
                if (!qcop->get_info)
                        GOTO(out, rc = -ENOSYS);
                rc = qcop->get_info(sb, oqc->qc_type, info);
                break;
        case Q_SETQUOTA:
        case Q_INITQUOTA:
                if (!qcop->set_dqblk)
                        GOTO(out, rc = -ENOSYS);
                rc = qcop->set_dqblk(sb, oqc->qc_type, oqc->qc_id, dqblk);
                break;
        case Q_GETOQUOTA:
        case Q_GETQUOTA:
                if (!qcop->get_dqblk)
                        GOTO(out, rc = -ENOSYS);
                rc = qcop->get_dqblk(sb, oqc->qc_type, oqc->qc_id, dqblk);
                if (!rc)
                        dqblk->dqb_valid = QIF_LIMITS | QIF_USAGE;
                break;
        case Q_SYNC:
                if (!sb->s_qcop->quota_sync)
                        GOTO(out, rc = -ENOSYS);
                qcop->quota_sync(sb, oqc->qc_type);
                break;
        default:
                CERROR("unsupported quotactl command: %d\n", oqc->qc_cmd);
                GOTO(out, rc = -ENOSYS);
        }

        DQINFO_COPY(&oqc->qc_dqinfo, info);
        DQBLK_COPY(&oqc->qc_dqblk, dqblk);

out:
        OBD_FREE_PTR(info);
        OBD_FREE_PTR(dqblk);

        if (rc)
                CDEBUG(D_QUOTA, "quotactl command %#x, id %u, type %d "
                       "failed: %d\n",
                       oqc->qc_cmd, oqc->qc_id, oqc->qc_type, rc);
        RETURN(rc);
}
struct chk_dqblk {
        struct hlist_node       dqb_hash;       /* quotacheck hash */
        struct list_head        dqb_list;       /* in list also */
        qid_t                   dqb_id;         /* uid/gid */
        short                   dqb_type;       /* USRQUOTA/GRPQUOTA */
        qsize_t                 dqb_bhardlimit; /* block hard limit */
        qsize_t                 dqb_bsoftlimit; /* block soft limit */
        qsize_t                 dqb_curspace;   /* current space */
        qsize_t                 dqb_ihardlimit; /* inode hard limit */
        qsize_t                 dqb_isoftlimit; /* inode soft limit */
        qsize_t                 dqb_curinodes;  /* current inodes */
        __u64                   dqb_btime;      /* block grace time */
        __u64                   dqb_itime;      /* inode grace time */
        __u32                   dqb_valid;      /* flag for above fields */
};
static inline unsigned int chkquot_hash(qid_t id, int type)
                                        __attribute__((__const__));

static inline unsigned int chkquot_hash(qid_t id, int type)
{
        return (id * (MAXQUOTAS - type)) % NR_DQHASH;
}
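/*
 * Illustrative example (not from the original source): with MAXQUOTAS == 2,
 * a user id (type 0) hashes to (id * 2) % NR_DQHASH and a group id
 * (type 1) to (id * 1) % NR_DQHASH, so uid 100 and gid 200 both land in
 * bucket 200 % NR_DQHASH.
 */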
static inline struct chk_dqblk *
find_chkquot(struct hlist_head *head, qid_t id, int type)
{
        struct hlist_node *node;
        struct chk_dqblk *cdqb;

        hlist_for_each(node, head) {
                cdqb = hlist_entry(node, struct chk_dqblk, dqb_hash);
                if (cdqb->dqb_id == id && cdqb->dqb_type == type)
                        return cdqb;
        }

        return NULL;
}
static struct chk_dqblk *alloc_chkquot(qid_t id, int type)
{
        struct chk_dqblk *cdqb;

        OBD_ALLOC_PTR(cdqb);
        if (cdqb) {
                INIT_HLIST_NODE(&cdqb->dqb_hash);
                INIT_LIST_HEAD(&cdqb->dqb_list);
                cdqb->dqb_id = id;
                cdqb->dqb_type = type;
        }

        return cdqb;
}
static struct chk_dqblk *
cqget(struct super_block *sb, struct hlist_head *hash, struct list_head *list,
      qid_t id, int type, int first_check)
{
        struct hlist_head *head = hash + chkquot_hash(id, type);
        struct if_dqblk dqb;
        struct chk_dqblk *cdqb;
        int rc;

        cdqb = find_chkquot(head, id, type);
        if (cdqb)
                return cdqb;

        cdqb = alloc_chkquot(id, type);
        if (!cdqb)
                return NULL;

        if (!first_check) {
                rc = sb->s_qcop->get_dqblk(sb, type, id, &dqb);
                if (rc) {
                        CERROR("get_dqblk of id %u, type %d failed: %d\n",
                               id, type, rc);
                } else {
                        DQBLK_COPY(cdqb, &dqb);
                        cdqb->dqb_curspace = 0;
                        cdqb->dqb_curinodes = 0;
                }
        }

        hlist_add_head(&cdqb->dqb_hash, head);
        list_add_tail(&cdqb->dqb_list, list);

        return cdqb;
}
static inline int quota_onoff(struct super_block *sb, int cmd, int type)
{
        struct obd_quotactl *oqctl;
        int rc;

        OBD_ALLOC_PTR(oqctl);
        if (!oqctl)
                return -ENOMEM;

        oqctl->qc_cmd = cmd;
        oqctl->qc_id = QFMT_LDISKFS;
        oqctl->qc_type = type;
        rc = fsfilt_ext3_quotactl(sb, oqctl);

        OBD_FREE_PTR(oqctl);
        return rc;
}
static inline int read_old_dqinfo(struct super_block *sb, int type,
                                  struct if_dqinfo *dqinfo)
{
        struct obd_quotactl *oqctl;
        int rc;

        OBD_ALLOC_PTR(oqctl);
        if (!oqctl)
                return -ENOMEM;

        oqctl->qc_cmd = Q_GETINFO;
        oqctl->qc_type = type;
        rc = fsfilt_ext3_quotactl(sb, oqctl);
        if (!rc)
                ((struct obd_dqinfo *)dqinfo)[type] = oqctl->qc_dqinfo;

        OBD_FREE_PTR(oqctl);
        return rc;
}
static inline struct ext3_group_desc *
get_group_desc(struct super_block *sb, int group)
{
        unsigned long desc_block, desc;
        struct ext3_group_desc *gdp;

        desc_block = group / EXT3_DESC_PER_BLOCK(sb);
        desc = group % EXT3_DESC_PER_BLOCK(sb);
        gdp = (struct ext3_group_desc *)
              EXT3_SB(sb)->s_group_desc[desc_block]->b_data;

        return gdp + desc;
}
static inline struct buffer_head *
read_inode_bitmap(struct super_block *sb, unsigned long group)
{
        struct ext3_group_desc *desc;
        struct buffer_head *bh;

        desc = get_group_desc(sb, group);
        bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));

        return bh;
}
static inline struct inode *ext3_iget_inuse(struct super_block *sb,
                                            struct buffer_head *bitmap_bh,
                                            int index, unsigned long ino)
{
        struct inode *inode = NULL;

        if (ext3_test_bit(index, bitmap_bh->b_data))
                inode = iget(sb, ino);

        return inode;
}
struct qchk_ctxt {
        struct hlist_head       qckt_hash[NR_DQHASH];        /* quotacheck hash */
        struct list_head        qckt_list;                   /* quotacheck list */
        int                     qckt_first_check[MAXQUOTAS]; /* 1 if no old quotafile */
        struct if_dqinfo        qckt_dqinfo[MAXQUOTAS];      /* old dqinfo */
};
static int add_inode_quota(struct inode *inode, struct qchk_ctxt *qctxt,
                           struct obd_quotactl *oqc)
{
        struct chk_dqblk *cdqb[MAXQUOTAS] = { NULL, };
        loff_t size = 0;
        qid_t qid[MAXQUOTAS];
        int cnt, i, rc = 0;

        if (!inode)
                return -EIO;

        qid[USRQUOTA] = inode->i_uid;
        qid[GRPQUOTA] = inode->i_gid;

        if (S_ISDIR(inode->i_mode) ||
            S_ISREG(inode->i_mode) ||
            S_ISLNK(inode->i_mode))
                size = inode_get_bytes(inode);

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!Q_TYPESET(oqc, cnt))
                        continue;

                cdqb[cnt] = cqget(inode->i_sb, qctxt->qckt_hash,
                                  &qctxt->qckt_list, qid[cnt], cnt,
                                  qctxt->qckt_first_check[cnt]);
                if (!cdqb[cnt]) {
                        rc = -ENOMEM;
                        break;
                }

                cdqb[cnt]->dqb_curspace += size;
                cdqb[cnt]->dqb_curinodes++;
        }

        if (rc) {
                for (i = 0; i < cnt; i++) {
                        if (!Q_TYPESET(oqc, i))
                                continue;

                        cdqb[i]->dqb_curspace -= size;
                        cdqb[i]->dqb_curinodes--;
                }
        }

        return rc;
}
static int v2_write_dqheader(struct file *f, int type)
{
        static const __u32 quota_magics[] = V2_INITQMAGICS;
        static const __u32 quota_versions[] = V2_INITQVERSIONS;
        struct v2_disk_dqheader dqhead;
        loff_t offset = 0;

        CLASSERT(ARRAY_SIZE(quota_magics) == ARRAY_SIZE(quota_versions));
        LASSERT(0 <= type && type < ARRAY_SIZE(quota_magics));

        dqhead.dqh_magic = cpu_to_le32(quota_magics[type]);
        dqhead.dqh_version = cpu_to_le32(quota_versions[type]);

        return cfs_user_write(f, (char *)&dqhead, sizeof(dqhead), &offset);
}
/* write dqinfo struct in a new quota file */
static int v2_write_dqinfo(struct file *f, int type, struct if_dqinfo *info)
{
        struct v2_disk_dqinfo dqinfo;
        __u32 blocks = V2_DQTREEOFF + 1;
        loff_t offset = V2_DQINFOOFF;

        if (info) {
                dqinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
                dqinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
                dqinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK &
                                               ~DQF_INFO_DIRTY);
        } else {
                dqinfo.dqi_bgrace = cpu_to_le32(MAX_DQ_TIME);
                dqinfo.dqi_igrace = cpu_to_le32(MAX_IQ_TIME);
                dqinfo.dqi_flags = 0;
        }

        dqinfo.dqi_blocks = cpu_to_le32(blocks);
        dqinfo.dqi_free_blk = 0;
        dqinfo.dqi_free_entry = 0;

        return cfs_user_write(f, (char *)&dqinfo, sizeof(dqinfo), &offset);
}
static int create_new_quota_files(struct qchk_ctxt *qctxt,
                                  struct obd_quotactl *oqc)
{
        int i, rc = 0;
        ENTRY;

        for (i = 0; i < MAXQUOTAS; i++) {
                struct if_dqinfo *info = qctxt->qckt_first_check[i] ?
                                         NULL : &qctxt->qckt_dqinfo[i];
                struct file *file;

                if (!Q_TYPESET(oqc, i))
                        continue;

                file = filp_open(op_quotafile[i], O_RDWR | O_CREAT | O_TRUNC,
                                 0644);
                if (IS_ERR(file)) {
                        rc = PTR_ERR(file);
                        CERROR("can't create %s file: rc = %d\n",
                               op_quotafile[i], rc);
                        GOTO(out, rc);
                }

                if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
                        CERROR("file %s is not regular\n", op_quotafile[i]);
                        filp_close(file, 0);
                        GOTO(out, rc = -EINVAL);
                }

                rc = v2_write_dqheader(file, i);
                if (rc) {
                        filp_close(file, 0);
                        GOTO(out, rc);
                }

                rc = v2_write_dqinfo(file, i, info);
                filp_close(file, 0);
                if (rc)
                        GOTO(out, rc);
        }

out:
        RETURN(rc);
}
static int commit_chkquot(struct super_block *sb, struct qchk_ctxt *qctxt,
                          struct chk_dqblk *cdqb)
{
        struct obd_quotactl *oqc;
        long now;
        int rc;
        ENTRY;

        OBD_ALLOC_PTR(oqc);
        if (!oqc)
                RETURN(-ENOMEM);

        now = CURRENT_SECONDS;

        if (cdqb->dqb_bsoftlimit &&
            toqb(cdqb->dqb_curspace) >= cdqb->dqb_bsoftlimit &&
            !cdqb->dqb_btime)
                cdqb->dqb_btime =
                        now + qctxt->qckt_dqinfo[cdqb->dqb_type].dqi_bgrace;

        if (cdqb->dqb_isoftlimit &&
            cdqb->dqb_curinodes >= cdqb->dqb_isoftlimit &&
            !cdqb->dqb_itime)
                cdqb->dqb_itime =
                        now + qctxt->qckt_dqinfo[cdqb->dqb_type].dqi_igrace;

        cdqb->dqb_valid = QIF_ALL;

        oqc->qc_cmd = Q_SETQUOTA;
        oqc->qc_type = cdqb->dqb_type;
        oqc->qc_id = cdqb->dqb_id;
        DQBLK_COPY(&oqc->qc_dqblk, cdqb);

        rc = fsfilt_ext3_quotactl(sb, oqc);

        OBD_FREE_PTR(oqc);
        RETURN(rc);
}
static int prune_chkquots(struct super_block *sb,
                          struct qchk_ctxt *qctxt, int error)
{
        struct chk_dqblk *cdqb, *tmp;
        int rc;

        list_for_each_entry_safe(cdqb, tmp, &qctxt->qckt_list, dqb_list) {
                if (!error) {
                        rc = commit_chkquot(sb, qctxt, cdqb);
                        if (rc)
                                error = rc;
                }
                hlist_del_init(&cdqb->dqb_hash);
                list_del(&cdqb->dqb_list);
                OBD_FREE_PTR(cdqb);
        }

        return error;
}
static int fsfilt_ext3_quotacheck(struct super_block *sb,
                                  struct obd_quotactl *oqc)
{
        struct ext3_sb_info *sbi = EXT3_SB(sb);
        int i, group;
        struct qchk_ctxt *qctxt;
        struct buffer_head *bitmap_bh = NULL;
        unsigned long ino;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        /* turn on quota and read dqinfo if existed */
        OBD_ALLOC_PTR(qctxt);
        if (!qctxt) {
                oqc->qc_stat = -ENOMEM;
                RETURN(-ENOMEM);
        }

        for (i = 0; i < NR_DQHASH; i++)
                INIT_HLIST_HEAD(&qctxt->qckt_hash[i]);
        INIT_LIST_HEAD(&qctxt->qckt_list);

        for (i = 0; i < MAXQUOTAS; i++) {
                if (!Q_TYPESET(oqc, i))
                        continue;

                rc = quota_onoff(sb, Q_QUOTAON, i);
                if (!rc || rc == -EBUSY) {
                        rc = read_old_dqinfo(sb, i, qctxt->qckt_dqinfo);
                        if (rc)
                                GOTO(out, rc);
                } else if (rc == -ENOENT) {
                        qctxt->qckt_first_check[i] = 1;
                } else if (rc) {
                        GOTO(out, rc);
                }
        }

        /* check quota and update in hash */
        for (group = 0; group < sbi->s_groups_count; group++) {
                ino = group * sbi->s_inodes_per_group + 1;
                bitmap_bh = read_inode_bitmap(sb, group);
                if (!bitmap_bh) {
                        CERROR("read_inode_bitmap group %d failed\n", group);
                        GOTO(out, rc = -EIO);
                }

                for (i = 0; i < sbi->s_inodes_per_group; i++, ino++) {
                        if (ino < sbi->s_first_ino)
                                continue;

                        inode = ext3_iget_inuse(sb, bitmap_bh, i, ino);
                        rc = add_inode_quota(inode, qctxt, oqc);
                        iput(inode);
                        if (rc) {
                                brelse(bitmap_bh);
                                GOTO(out, rc);
                        }
                }

                brelse(bitmap_bh);
        }

        /* read old quota limits from old quota file. (only for the user
         * has limits but hasn't file) */
#ifdef HAVE_QUOTA_SUPPORT
        for (i = 0; i < MAXQUOTAS; i++) {
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;

                if (!Q_TYPESET(oqc, i))
                        continue;

                if (qctxt->qckt_first_check[i])
                        continue;

                LASSERT(sb_dqopt(sb)->files[i] != NULL);
                INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = lustre_get_qids(sb_dqopt(sb)->files[i], NULL, i, &id_list);
#else
                rc = lustre_get_qids(NULL, sb_dqopt(sb)->files[i], i, &id_list);
#endif
                if (rc)
                        CERROR("read old limits failed. (rc:%d)\n", rc);

                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);

                        if (!rc)
                                cqget(sb, qctxt->qckt_hash, &qctxt->qckt_list,
                                      dqid->di_id, i,
                                      qctxt->qckt_first_check[i]);
                        kfree(dqid);
                }
        }
#endif
        /* turn off quota cause we are to dump chk_dqblk to files */
        quota_onoff(sb, Q_QUOTAOFF, oqc->qc_type);

        rc = create_new_quota_files(qctxt, oqc);
        if (rc)
                GOTO(out, rc);

        /* we use vfs functions to set dqblk, so turn quota on */
        rc = quota_onoff(sb, Q_QUOTAON, oqc->qc_type);
out:
        /* dump and free chk_dqblk */
        rc = prune_chkquots(sb, qctxt, rc);
        OBD_FREE_PTR(qctxt);

        /* turn off quota, `lfs quotacheck` will turn on when all
         * nodes quotacheck finish. */
        quota_onoff(sb, Q_QUOTAOFF, oqc->qc_type);

        oqc->qc_stat = rc;
        if (rc)
                CERROR("quotacheck failed: rc = %d\n", rc);

        RETURN(rc);
}
#ifdef HAVE_QUOTA_SUPPORT
static int fsfilt_ext3_quotainfo(struct lustre_quota_info *lqi, int type,
                                 int cmd)
{
        int rc = 0;
        ENTRY;

        if (lqi->qi_files[type] == NULL) {
                CERROR("operate qinfo before it's enabled!\n");
                RETURN(-EIO);
        }

        switch (cmd) {
        case QFILE_CHK:
                rc = lustre_check_quota_file(lqi, type);
                break;
        case QFILE_RD_INFO:
                rc = lustre_read_quota_info(lqi, type);
                break;
        case QFILE_WR_INFO:
                rc = lustre_write_quota_info(lqi, type);
                break;
        case QFILE_INIT_INFO:
                rc = lustre_init_quota_info(lqi, type);
                break;
        case QFILE_CONVERT:
                rc = lustre_quota_convert(lqi, type);
                break;
        default:
                CERROR("Unsupported admin quota file cmd %d\n", cmd);
                LBUG();
                break;
        }
        RETURN(rc);
}
static int fsfilt_ext3_qids(struct file *file, struct inode *inode, int type,
                            struct list_head *list)
{
        return lustre_get_qids(file, inode, type, list);
}
static int fsfilt_ext3_dquot(struct lustre_dquot *dquot, int cmd)
{
        int rc = 0;
        ENTRY;

        if (dquot->dq_info->qi_files[dquot->dq_type] == NULL) {
                CERROR("operate dquot before it's enabled!\n");
                RETURN(-EIO);
        }

        switch (cmd) {
        case QFILE_RD_DQUOT:
                rc = lustre_read_dquot(dquot);
                break;
        case QFILE_WR_DQUOT:
                if (dquot->dq_dqb.dqb_ihardlimit ||
                    dquot->dq_dqb.dqb_isoftlimit ||
                    dquot->dq_dqb.dqb_bhardlimit ||
                    dquot->dq_dqb.dqb_bsoftlimit)
                        clear_bit(DQ_FAKE_B, &dquot->dq_flags);
                else
                        set_bit(DQ_FAKE_B, &dquot->dq_flags);

                rc = lustre_commit_dquot(dquot);
                break;
        default:
                CERROR("Unsupported admin quota file cmd %d\n", cmd);
                LBUG();
                break;
        }
        RETURN(rc);
}
#endif
static lvfs_sbdev_type fsfilt_ext3_journal_sbdev(struct super_block *sb)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        return (EXT3_SB(sb)->journal_bdev);
#else
        return kdev_t_to_nr(EXT3_SB(sb)->s_journal->j_dev);
#endif
}
static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_getlabel            = fsfilt_ext3_get_label,
        .fs_setlabel            = fsfilt_ext3_set_label,
        .fs_uuid                = fsfilt_ext3_uuid,
        .fs_start               = fsfilt_ext3_start,
        .fs_brw_start           = fsfilt_ext3_brw_start,
        .fs_extend              = fsfilt_ext3_extend,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_commit_async        = fsfilt_ext3_commit_async,
        .fs_commit_wait         = fsfilt_ext3_commit_wait,
        .fs_setattr             = fsfilt_ext3_setattr,
        .fs_iocontrol           = fsfilt_ext3_iocontrol,
        .fs_set_md              = fsfilt_ext3_set_md,
        .fs_get_md              = fsfilt_ext3_get_md,
        .fs_readpage            = fsfilt_ext3_readpage,
        .fs_add_journal_cb      = fsfilt_ext3_add_journal_cb,
        .fs_statfs              = fsfilt_ext3_statfs,
        .fs_sync                = fsfilt_ext3_sync,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
        .fs_send_bio            = fsfilt_ext3_send_bio,
        .fs_get_op_len          = fsfilt_ext3_get_op_len,
        .fs_quotactl            = fsfilt_ext3_quotactl,
        .fs_quotacheck          = fsfilt_ext3_quotacheck,
#ifdef HAVE_DISK_INODE_VERSION
        .fs_get_version         = fsfilt_ext3_get_version,
        .fs_set_version         = fsfilt_ext3_set_version,
#endif
#ifdef HAVE_QUOTA_SUPPORT
        .fs_quotainfo           = fsfilt_ext3_quotainfo,
        .fs_qids                = fsfilt_ext3_qids,
        .fs_dquot               = fsfilt_ext3_dquot,
#endif
        .fs_journal_sbdev       = fsfilt_ext3_journal_sbdev,
};
static int __init fsfilt_ext3_init(void)
{
        int rc;

        fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb",
                                         sizeof(struct fsfilt_cb_data), 0, 0);
        if (!fcb_cache) {
                CERROR("error allocating fsfilt journal callback cache\n");
                GOTO(out, rc = -ENOMEM);
        }

        rc = fsfilt_register_ops(&fsfilt_ext3_ops);

        if (rc) {
                int err = cfs_mem_cache_destroy(fcb_cache);
                LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
        }
out:
        return rc;
}
static void __exit fsfilt_ext3_exit(void)
{
        int rc;

        fsfilt_unregister_ops(&fsfilt_ext3_ops);
        rc = cfs_mem_cache_destroy(fcb_cache);
        LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
}
module_init(fsfilt_ext3_init);
module_exit(fsfilt_ext3_exit);

MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
MODULE_LICENSE("GPL");