/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lustre/lib/fsfilt_ext3.c
 * Lustre filesystem abstraction routines
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 * Author: Andreas Dilger <adilger@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_FILTER

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/version.h>
#include <linux/bitops.h>
#include <linux/quota.h>
#include <linux/quotaio_v1.h>
#include <linux/quotaio_v2.h>
#include <linux/parser.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/ext3_xattr.h>
#else
#include <ext3/xattr.h>
#endif

#include <libcfs/kp30.h>
#include <lustre_fsfilt.h>
#include <obd.h>
#include <lustre_quota.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/iobuf.h>
#endif
#include <linux/lustre_compat25.h>
#include <linux/lprocfs_status.h>

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#include <linux/ext3_extents.h>
#endif

#include "lustre_quota_fmt.h"

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
#define FSFILT_DATA_TRANS_BLOCKS(sb)      EXT3_DATA_TRANS_BLOCKS
#define FSFILT_DELETE_TRANS_BLOCKS(sb)    EXT3_DELETE_TRANS_BLOCKS
#else
#define FSFILT_DATA_TRANS_BLOCKS(sb)      EXT3_DATA_TRANS_BLOCKS(sb)
#define FSFILT_DELETE_TRANS_BLOCKS(sb)    EXT3_DELETE_TRANS_BLOCKS(sb)
#endif

#ifdef EXT3_SINGLEDATA_TRANS_BLOCKS_HAS_SB
/* for kernels 2.6.18 and later */
#define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS(sb)
#else
#define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#define fsfilt_ext3_journal_start(inode, nblocks) \
                                journal_start(EXT3_JOURNAL(inode), nblocks)
#define fsfilt_ext3_journal_stop(handle)          journal_stop(handle)
#else
#define fsfilt_ext3_journal_start(inode, nblocks) ext3_journal_start(inode, nblocks)
#define fsfilt_ext3_journal_stop(handle)          ext3_journal_stop(handle)
#endif

static cfs_mem_cache_t *fcb_cache;

struct fsfilt_cb_data {
        struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};

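/* Note: cb_jcb must stay the first member so that the journal_callback
 * pointer jbd hands back at commit time can simply be cast back to a
 * struct fsfilt_cb_data, as fsfilt_ext3_cb_func() below relies on. */
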
#ifndef EXT3_XATTR_INDEX_TRUSTED        /* temporary until we hit l28 kernel */
#define EXT3_XATTR_INDEX_TRUSTED        4
#endif

static char *fsfilt_ext3_get_label(struct super_block *sb)
{
        return EXT3_SB(sb)->s_es->s_volume_name;
}

static int fsfilt_ext3_set_label(struct super_block *sb, char *label)
{
        /* see e.g. fsfilt_ext3_write_record() */
        journal_t *journal;
        handle_t *handle;
        int err;

        journal = EXT3_SB(sb)->s_journal;
        handle = journal_start(journal, 1);
        if (IS_ERR(handle)) {
                CERROR("can't start transaction\n");
                return(PTR_ERR(handle));
        }

        err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
        if (err)
                goto out;

        memcpy(EXT3_SB(sb)->s_es->s_volume_name, label,
               sizeof(EXT3_SB(sb)->s_es->s_volume_name));

        err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);

out:
        journal_stop(handle);

        return(err);
}

static char *fsfilt_ext3_uuid(struct super_block *sb)
{
        return EXT3_SB(sb)->s_es->s_uuid;
}

#ifdef HAVE_DISK_INODE_VERSION
/*
 * Get the 64-bit version for an inode.
 */
static __u64 fsfilt_ext3_get_version(struct inode *inode)
{
        return EXT3_I(inode)->i_fs_version;
}

/*
 * Set the 64-bit version and return the old version.
 */
static __u64 fsfilt_ext3_set_version(struct inode *inode, __u64 new_version)
{
        __u64 old_version = EXT3_I(inode)->i_fs_version;

        (EXT3_I(inode))->i_fs_version = new_version;
        return old_version;
}
#endif

/*
 * We don't currently need any additional blocks for rmdir and
 * unlink transactions because we are storing the OST oa_id inside
 * the inode (which we will be changing anyways as part of this
 * transaction).
 */
static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
                               int logs)
{
        /* For updates to the last received file */
        int nblocks = FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
        journal_t *journal;
        void *handle;

        if (current->journal_info) {
                CDEBUG(D_INODE, "increasing refcount on %p\n",
                       current->journal_info);
                goto journal_start;
        }

        switch(op) {
        case FSFILT_OP_RMDIR:
        case FSFILT_OP_UNLINK:
                /* delete one file + create/update logs for each stripe */
                nblocks += FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb);
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
                break;
        case FSFILT_OP_RENAME:
                /* modify additional directory */
                nblocks += FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
                /* no break */
        case FSFILT_OP_SYMLINK:
                /* additional block + block bitmap + GDT for long symlink */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_CREATE: {
#if defined(EXT3_EXTENTS_FL) && defined(EXT3_INDEX_FL)
                static int warned;
                if (!warned) {
                        if (!test_opt(inode->i_sb, EXTENTS)) {
                                warned = 1;
                        } else if (((EXT3_I(inode)->i_flags &
                              cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL)) ==
                              cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL))) {
                                CWARN("extent-mapped directory found - contact "
                                      "CFS: support@clusterfs.com\n");
                                warned = 1;
                        }
                }
#endif
                /* no break */
        }
        case FSFILT_OP_MKDIR:
        case FSFILT_OP_MKNOD:
                /* modify one inode + block bitmap + GDT */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_LINK:
                /* modify parent directory */
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
                /* create/update logs for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
                break;
        case FSFILT_OP_SETATTR:
                /* Setattr on inode */
                nblocks += 1;
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
                /* quota chown log for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
                break;
        case FSFILT_OP_CANCEL_UNLINK:
                /* blocks for log header bitmap update OR
                 * blocks for catalog header bitmap update + unlink of logs */
                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
                        FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb) * logs;
                break;
        case FSFILT_OP_JOIN:
                /* delete 2 file(file + array id) + create 1 file (array id)
                 * create/update logs for each stripe */
                nblocks += 2 * FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb);

                /*create array log for head file*/
                nblocks += 3;
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb));
                /*update head file array */
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
                break;
        default: CERROR("unknown transaction start op %d\n", op);
                LBUG();
        }

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(inode->i_sb)->s_journal;
        if (nblocks > journal->j_max_transaction_buffers) {
                CWARN("too many credits %d for op %ux%u using %d instead\n",
                      nblocks, op, logs, journal->j_max_transaction_buffers);
                nblocks = journal->j_max_transaction_buffers;
        }

 journal_start:
        LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
        handle = fsfilt_ext3_journal_start(inode, nblocks);

        if (!IS_ERR(handle))
                LASSERT(current->journal_info == handle);
        else
                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
                       op, nblocks, PTR_ERR(handle));
        return handle;
}

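/*
 * Illustrative sketch of a typical caller (hypothetical code, shown only
 * for clarity): journaled updates are bracketed by fsfilt_ext3_start()
 * and fsfilt_ext3_commit():
 *
 *      void *handle = fsfilt_ext3_start(inode, FSFILT_OP_SETATTR, NULL, 0);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      ...modify the inode under this handle...
 *      rc = fsfilt_ext3_commit(inode, handle, force_sync);
 */
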
/*
 * Calculate the number of buffer credits needed to write multiple pages in
 * a single ext3 transaction.  No, this shouldn't be here, but as yet ext3
 * doesn't have a nice API for calculating this sort of thing in advance.
 *
 * See comment above ext3_writepage_trans_blocks for details.  We assume
 * no data journaling is being done, but it does allow for all of the pages
 * being non-contiguous.  If we are guaranteed contiguous pages we could
 * reduce the number of (d)indirect blocks a lot.
 *
 * With N blocks per page and P pages, for each inode we have at most:
 * N*P indirect blocks
 * min(N*P, blocksize/4 + 1) dindirect blocks
 * niocount tindirect blocks
 *
 * For the entire filesystem, we have at most:
 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
 * objcount inode blocks
 * 1 superblock
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 *
 * 1 EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
 */
static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
                                      int niocount, struct niobuf_local *nb)
{
        struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
        __u64 next_indir;
        const int blockpp = 1 << (CFS_PAGE_SHIFT - sb->s_blocksize_bits);
        int nbitmaps = 0, ngdblocks;
        int needed = objcount + 1; /* inodes + superblock */
        int i, j;

        for (i = 0, j = 0; i < objcount; i++, fso++) {
                /* two or more dindirect blocks in case we cross boundary */
                int ndind = (long)((nb[j + fso->fso_bufcnt - 1].offset -
                                    nb[j].offset) >>
                                   sb->s_blocksize_bits) /
                        (EXT3_ADDR_PER_BLOCK(sb) * EXT3_ADDR_PER_BLOCK(sb));
                nbitmaps += min(fso->fso_bufcnt, ndind > 0 ? ndind : 2);

                /* leaf, indirect, tindirect blocks for first block */
                nbitmaps += blockpp + 2;

                j += fso->fso_bufcnt;
        }

        next_indir = nb[0].offset +
                (EXT3_ADDR_PER_BLOCK(sb) << sb->s_blocksize_bits);
        for (i = 1; i < niocount; i++) {
                if (nb[i].offset >= next_indir) {
                        nbitmaps++;     /* additional indirect */
                        next_indir = nb[i].offset +
                                (EXT3_ADDR_PER_BLOCK(sb)<<sb->s_blocksize_bits);
                } else if (nb[i].offset != nb[i - 1].offset + sb->s_blocksize) {
                        nbitmaps++;     /* additional indirect */
                }
                nbitmaps += blockpp;    /* each leaf in different group? */
        }

        ngdblocks = nbitmaps;
        if (nbitmaps > EXT3_SB(sb)->s_groups_count)
                nbitmaps = EXT3_SB(sb)->s_groups_count;
        if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
                ngdblocks = EXT3_SB(sb)->s_gdb_count;

        needed += nbitmaps + ngdblocks;

        /* last_rcvd update */
        needed += FSFILT_DATA_TRANS_BLOCKS(sb);

#if defined(CONFIG_QUOTA)
        /* We assume that there will be 1 bit set in s_dquot.flags for each
         * quota file that is active.  This is at least true for now.
         */
        needed += hweight32(sb_any_quota_enabled(sb)) *
                FSFILT_SINGLEDATA_TRANS_BLOCKS(sb);
#endif

        return needed;
}

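/*
 * Worked example (illustrative numbers, following the code above): one
 * object writing 16 contiguous 4kB pages on a 4kB-block filesystem gives
 * blockpp = 1 and needed = 2 (inode + superblock).  The per-object loop
 * adds min(16, 2) = 2 dindirect bitmaps plus blockpp + 2 = 3 for the first
 * block; the per-buffer loop then adds 15 more leaves, so nbitmaps = 20.
 * Before the ngroups/gdblocks caps this yields 2 + 20 + 20 +
 * FSFILT_DATA_TRANS_BLOCKS credits, plus quota blocks under CONFIG_QUOTA.
 */
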
/* We have to start a huge journal transaction here to hold all of the
 * metadata for the pages being written here.  This is necessitated by
 * the fact that we do lots of prepare_write operations before we do
 * any of the matching commit_write operations, so even if we split
 * up to use "smaller" transactions none of them could complete until
 * all of them were opened.  By having a single journal transaction,
 * we eliminate duplicate reservations for common blocks like the
 * superblock and group descriptors or bitmaps.
 *
 * We will start the transaction here, but each prepare_write will
 * add a refcount to the transaction, and each commit_write will
 * remove a refcount.  The transaction will be closed when all of
 * the pages have been written.
 */
static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
                                   int niocount, struct niobuf_local *nb,
                                   void *desc_private, int logs)
{
        journal_t *journal;
        handle_t *handle;
        int needed;

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
        needed = fsfilt_ext3_credits_needed(objcount, fso, niocount, nb);

        /* The number of blocks we could _possibly_ dirty can be very large.
         * We reduce our request if it is absurd (and we couldn't get that
         * many credits for a single handle anyways).
         *
         * At some point we have to limit the size of I/Os sent at one time,
         * increase the size of the journal, or we have to calculate the
         * actual journal requirements more carefully by checking all of
         * the blocks instead of being maximally pessimistic.  It remains to
         * be seen if this is a real problem or not.
         */
        if (needed > journal->j_max_transaction_buffers) {
                CERROR("want too many journal credits (%d) using %d instead\n",
                       needed, journal->j_max_transaction_buffers);
                needed = journal->j_max_transaction_buffers;
        }

        LASSERTF(needed > 0, "can't start %d credit transaction\n", needed);
        handle = fsfilt_ext3_journal_start(fso->fso_dentry->d_inode, needed);
        if (IS_ERR(handle)) {
                CERROR("can't get handle for %d credits: rc = %ld\n", needed,
                       PTR_ERR(handle));
        } else {
                LASSERT(handle->h_buffer_credits >= needed);
                LASSERT(current->journal_info == handle);
        }

        return handle;
}

static int fsfilt_ext3_extend(struct inode *inode, unsigned int nblocks, void *h)
{
        handle_t *handle = h;

        /* fsfilt_extend called with nblocks = 0 for testing in special cases */
        if (nblocks == 0) {
                handle->h_buffer_credits = 0;
                CWARN("setting credits of handle %p to zero by request\n", h);
        }

        if (handle->h_buffer_credits > nblocks)
                return 0;
        if (journal_extend(handle, nblocks) == 0)
                return 0;

        ext3_mark_inode_dirty(handle, inode);
        return journal_restart(handle, nblocks);
}

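/* Note: this is the usual jbd extend-or-restart idiom.  journal_extend()
 * returns 0 only when the running transaction still has room to grow the
 * handle in place; otherwise the inode is marked dirty under the old handle
 * and journal_restart() trades it for a fresh handle with nblocks credits,
 * possibly in a new transaction. */
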
static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
{
        int rc;
        handle_t *handle = h;

        LASSERT(current->journal_info == handle);
        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */

        rc = fsfilt_ext3_journal_stop(handle);

        return rc;
}

static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
                                    void **wait_handle)
{
        unsigned long tid;
        transaction_t *transaction;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        unsigned long rtid;
#endif
        handle_t *handle = h;
        journal_t *journal;
        int rc;

        LASSERT(current->journal_info == handle);

        transaction = handle->h_transaction;
        journal = transaction->t_journal;
        tid = transaction->t_tid;
        /* we don't want to be blocked */
        handle->h_sync = 0;
        rc = fsfilt_ext3_journal_stop(handle);
        if (rc) {
                CERROR("error while stopping transaction: %d\n", rc);
                return rc;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        rtid = log_start_commit(journal, transaction);
        if (rtid != tid)
                CERROR("strange race: %lu != %lu\n",
                       (unsigned long) tid, (unsigned long) rtid);
#else
        log_start_commit(journal, tid);
#endif

        *wait_handle = (void *) tid;
        CDEBUG(D_INODE, "commit async: %lu\n", (unsigned long) tid);
        return 0;
}

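/* Note: the transaction id is smuggled through *wait_handle as an opaque
 * cookie; fsfilt_ext3_commit_wait() below casts it back to a tid_t and
 * passes it to log_wait_commit(). */
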
static int fsfilt_ext3_commit_wait(struct inode *inode, void *h)
{
        journal_t *journal = EXT3_JOURNAL(inode);
        tid_t tid = (tid_t)(long)h;

        CDEBUG(D_INODE, "commit wait: %lu\n", (unsigned long) tid);
        if (unlikely(is_journal_aborted(journal)))
                return -EIO;

        log_wait_commit(EXT3_JOURNAL(inode), tid);

        if (unlikely(is_journal_aborted(journal)))
                return -EIO;
        return 0;
}

static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
                               struct iattr *iattr, int do_trunc)
{
        struct inode *inode = dentry->d_inode;
        int rc = 0;

        /* Avoid marking the inode dirty on the superblock list unnecessarily.
         * We are already writing the inode to disk as part of this
         * transaction and want to avoid a lot of extra inode writeout
         * later on. b=9828 */
        if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
                /* ATTR_SIZE would invoke truncate: clear it */
                iattr->ia_valid &= ~ATTR_SIZE;
                EXT3_I(inode)->i_disksize = iattr->ia_size;
                i_size_write(inode, iattr->ia_size);

                if (iattr->ia_valid & ATTR_UID)
                        inode->i_uid = iattr->ia_uid;
                if (iattr->ia_valid & ATTR_GID)
                        inode->i_gid = iattr->ia_gid;
                if (iattr->ia_valid & ATTR_ATIME)
                        inode->i_atime = iattr->ia_atime;
                if (iattr->ia_valid & ATTR_MTIME)
                        inode->i_mtime = iattr->ia_mtime;
                if (iattr->ia_valid & ATTR_CTIME)
                        inode->i_ctime = iattr->ia_ctime;
                if (iattr->ia_valid & ATTR_MODE) {
                        inode->i_mode = iattr->ia_mode;

                        if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                                inode->i_mode &= ~S_ISGID;
                }

                inode->i_sb->s_op->dirty_inode(inode);

                goto out;
        }

        /* Don't allow setattr to change file type */
        if (iattr->ia_valid & ATTR_MODE)
                iattr->ia_mode = (inode->i_mode & S_IFMT) |
                                 (iattr->ia_mode & ~S_IFMT);

        /* We set these flags on the client, but have already checked perms
         * so don't confuse inode_change_ok. */
        iattr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);

        if (inode->i_op->setattr) {
                rc = inode->i_op->setattr(dentry, iattr);
        } else {
                rc = inode_change_ok(inode, iattr);
                if (!rc)
                        rc = inode_setattr(inode, iattr);
        }

out:
        return rc;
}

static int fsfilt_ext3_iocontrol(struct inode *inode, struct file *file,
                                 unsigned int cmd, unsigned long arg)
{
        int rc = 0;

        /* FIXME: Can't do this because of nested transaction deadlock */
        if (cmd == EXT3_IOC_SETFLAGS && (*(int *)arg) & EXT3_JOURNAL_DATA_FL) {
                CERROR("can't set data journal flag on file\n");
                return -EPERM;
        }

        if (inode->i_fop->ioctl)
                rc = inode->i_fop->ioctl(inode, file, cmd, arg);
        else
                return -ENOTTY;

        return rc;
}

static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
                              void *lmm, int lmm_size, const char *name)
{
        int rc;

        LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);

        rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
                                   name, lmm, lmm_size, 0);

        if (rc && rc != -EROFS)
                CERROR("error adding MD data to inode %lu: rc = %d\n",
                       inode->i_ino, rc);
        return rc;
}

/* Must be called with i_mutex held */
static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size,
                              const char *name)
{
        int rc;

        LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);

        rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
                            name, lmm, lmm_size);

        /* This gives us the MD size */
        if (lmm == NULL)
                return (rc == -ENODATA) ? 0 : rc;

        if (rc < 0) {
                CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
                       EXT3_XATTR_INDEX_TRUSTED, name,
                       inode->i_ino, rc);
                memset(lmm, 0, lmm_size);
                return (rc == -ENODATA) ? 0 : rc;
        }

        return rc;
}

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct bio *bio)
{
        submit_bio(rw, bio);
        return 0;
}
#else
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct kiobuf *bio)
{
        int rc, blk_per_page;

        rc = brw_kiovec(rw, 1, &bio, inode->i_dev,
                        KIOBUF_GET_BLOCKS(bio), 1 << inode->i_blkbits);
        /*
         * brw_kiovec() returns the number of bytes actually written.  If an
         * error occurred after something was written, the error code is
         * returned through kiobuf->errno.  (See bug 6854.)
         */

        blk_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;

        if (rc != (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page) {
                CERROR("short write?  expected %d, wrote %d (%d)\n",
                       (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page,
                       rc, bio->errno);
        }

        if (bio->errno != 0) {
                CERROR("IO error. Wrote %d of %d (%d)\n",
                       rc,
                       (1 << inode->i_blkbits) * bio->nr_pages * blk_per_page,
                       bio->errno);
                rc = bio->errno;
        }

        return rc;
}
#endif

static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
                                    loff_t *off)
{
        struct inode *inode = file->f_dentry->d_inode;
        int rc = 0;

        if (S_ISREG(inode->i_mode))
                rc = file->f_op->read(file, buf, count, off);
        else {
                const int blkbits = inode->i_sb->s_blocksize_bits;
                const int blksize = inode->i_sb->s_blocksize;

                CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
                       count, inode->i_ino, *off);
                while (count > 0) {
                        struct buffer_head *bh;

                        bh = NULL;
                        if (*off < i_size_read(inode)) {
                                int err = 0;

                                bh = ext3_bread(NULL, inode, *off >> blkbits,
                                                0, &err);

                                CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);

                                if (bh) {
                                        memcpy(buf, bh->b_data, blksize);
                                        brelse(bh);
                                } else if (err) {
                                        /* XXX in theory we should just fake
                                         * this buffer and continue like ext3,
                                         * especially if this is a partial read
                                         */
                                        CERROR("error read dir %lu+%llu: %d\n",
                                               inode->i_ino, *off, err);
                                        return err;
                                }
                        } else {
                                struct ext3_dir_entry_2 *fake = (void *)buf;

                                CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
                                memset(fake, 0, sizeof(*fake));
                                fake->rec_len = cpu_to_le16(blksize);
                        }
                        count -= blksize;
                        buf += blksize;
                        *off += blksize;
                        rc += blksize;
                }
        }

        return rc;
}

static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
{
        struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;

        fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);

        OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
}

static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
                                      void *handle, fsfilt_cb_t cb_func,
                                      void *cb_data)
{
        struct fsfilt_cb_data *fcb;

        OBD_SLAB_ALLOC(fcb, fcb_cache, CFS_ALLOC_IO, sizeof *fcb);
        if (fcb == NULL)
                return -ENOMEM;

        fcb->cb_func = cb_func;
        fcb->cb_obd = obd;
        fcb->cb_last_rcvd = last_rcvd;
        fcb->cb_data = cb_data;

        CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
        journal_callback_set(handle, fsfilt_ext3_cb_func,
                             (struct journal_callback *)fcb);

        return 0;
}

/*
 * We need to hack the return value for the free inode counts because
 * the current EA code requires one filesystem block per inode with EAs,
 * so it is possible to run out of blocks before we run out of inodes.
 *
 * This can be removed when the ext3 EA code is fixed.
 */
static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
{
        struct kstatfs sfs;
        int rc;

        memset(&sfs, 0, sizeof(sfs));
        rc = ll_do_statfs(sb, &sfs);

        if (!rc && sfs.f_bfree < sfs.f_ffree) {
                sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
                sfs.f_ffree = sfs.f_bfree;
        }

        statfs_pack(osfs, &sfs);
        return rc;
}

static int fsfilt_ext3_sync(struct super_block *sb)
{
        return ext3_force_commit(sb);
}

#if defined(EXT3_MULTIBLOCK_ALLOCATOR) && (!defined(EXT3_EXT_CACHE_NO) || defined(EXT_CACHE_MARK))
#warning "kernel code has old extents/mballoc patch, disabling"
#undef EXT3_MULTIBLOCK_ALLOCATOR
#endif
#ifndef EXT3_EXTENTS_FL
#define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
#endif

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#define ext3_up_truncate_sem(inode)   up_write(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode) down_write(&EXT3_I(inode)->truncate_sem);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
#define ext3_up_truncate_sem(inode)   up(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode) down(&EXT3_I(inode)->truncate_sem);
#else
#define ext3_up_truncate_sem(inode)   mutex_unlock(&EXT3_I(inode)->truncate_mutex);
#define ext3_down_truncate_sem(inode) mutex_lock(&EXT3_I(inode)->truncate_mutex);
#endif

#ifndef EXT_ASSERT
#define EXT_ASSERT(cond)  BUG_ON(!(cond))
#endif

#ifdef EXT3_EXT_HAS_NO_TREE
/* for kernels 2.6.18 and later */
#define ext3_ext_base                   inode
#define ext3_ext_base2inode(inode)      (inode)
#define EXT_DEPTH(inode)                ext_depth(inode)
#define EXT_GENERATION(inode)           ext_generation(inode)
#define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
                        ext3_ext_walk_space(inode, block, num, cb, cbdata);
#else
#define ext3_ext_base                   ext3_extents_tree
#define ext3_ext_base2inode(tree)       (tree->inode)
#define fsfilt_ext3_ext_walk_space(tree, block, num, cb, cbdata) \
                        ext3_ext_walk_space(tree, block, num, cb);
#endif

#include <linux/lustre_version.h>
#if EXT3_EXT_MAGIC == 0xf301
#define ee_start e_start
#define ee_block e_block
#define ee_len   e_num
#endif
#ifndef EXT3_BB_MAX_BLOCKS
#define ext3_mb_new_blocks(handle, inode, goal, count, aflags, err) \
        ext3_new_blocks(handle, inode, count, goal, err)
#endif

struct bpointers {
        unsigned long *blocks;
        int *created;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
                              unsigned long block, int *aflags)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext3_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext)) {
                        /* This prefers to eat into a contiguous extent
                         * rather than find an extent that the whole
                         * request will fit into.  This can fragment data
                         * block allocation and prevents our lovely 1M I/Os
                         * from reaching the disk intact. */
                        if (ex->ee_block + ex->ee_len == block)
                                *aflags |= 1;
                        return ex->ee_start + (block - ex->ee_block);
                }

                /* it looks like the index is empty;
                 * try to find starting from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/locks.h>
static void ll_unmap_underlying_metadata(struct super_block *sb,
                                         unsigned long blocknr)
{
        struct buffer_head *old_bh;

        old_bh = get_hash_table(sb->s_dev, blocknr, sb->s_blocksize);
        if (old_bh) {
                mark_buffer_clean(old_bh);
                wait_on_buffer(old_bh);
                clear_bit(BH_Req, &old_bh->b_state);
                __brelse(old_bh);
        }
}
#else
#define ll_unmap_underlying_metadata(sb, blocknr) \
        unmap_underlying_metadata((sb)->s_bdev, blocknr)
#endif

#ifndef EXT3_MB_HINT_GROUP_ALLOC
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        unsigned long pblock, goal;
        int aflags = 0;
        struct inode *inode = ext3_ext_base2inode(base);

        goal = ext3_ext_find_goal(inode, path, block, &aflags);
        aflags |= 2; /* block has already been reserved */
        pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
        return pblock;
}
#else
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = EXT3_MB_HINT_DATA;
        pblock = ext3_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}
#endif

#ifdef EXT3_EXT_HAS_NO_TREE
static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
                                  struct ext3_ext_path *path,
                                  struct ext3_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                  struct ext3_extent *ex,
#endif
                                  void *cbdata)
{
        struct bpointers *bp = cbdata;
#else
static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
                                  struct ext3_ext_path *path,
                                  struct ext3_ext_cache *cex
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                  , struct ext3_extent *ex
#endif
                                  )
{
        struct bpointers *bp = base->private;
#endif
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_extent nex;
        unsigned long pblock;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

        i = EXT_DEPTH(base);
        EXT_ASSERT(i == path->p_depth);
        EXT_ASSERT(path[i].p_hdr);

        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->created) = 0;
                        bp->created++;
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = EXT_GENERATION(base);
        count = ext3_ext_calc_credits_for_insert(base, path);
        ext3_up_truncate_sem(inode);

        handle = fsfilt_ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
        if (IS_ERR(handle)) {
                ext3_down_truncate_sem(inode);
                return PTR_ERR(handle);
        }

        ext3_down_truncate_sem(inode);
        if (tgen != EXT_GENERATION(base)) {
                /* the tree has changed, so the path can be invalid now */
                fsfilt_ext3_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        EXT_ASSERT(count <= cex->ec_len);

        /* insert new extent */
        nex.ee_block = cex->ec_block;
        nex.ee_start = pblock;
        nex.ee_len = count;
        err = ext3_ext_insert_extent(handle, base, path, &nex);
        if (err) {
                CERROR("can't insert extent: %d\n", err);
                /* XXX: export ext3_free_blocks() */
                /*ext3_free_blocks(handle, inode, nex.ee_start, nex.ee_len, 0);*/
                goto out;
        }

        /*
         * Putting the length of the extent we actually inserted asks
         * ext3_ext_walk_space() to continue scanning after that block.
         */
        cex->ec_len = nex.ee_len;
        cex->ec_start = nex.ee_start;
        BUG_ON(nex.ee_len == 0);
        BUG_ON(nex.ee_block != cex->ec_block);

out:
        fsfilt_ext3_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                               bp->start, bp->init_num);
                        CERROR("current extent: %u/%u/%u %d\n",
                               cex->ec_block, cex->ec_len,
                               cex->ec_start, cex->ec_type);
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                                *(bp->created) = 0;
                        } else {
                                *(bp->created) = 1;
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
                                ll_unmap_underlying_metadata(inode->i_sb,
                                                             *(bp->blocks));
                        }
                        bp->created++;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }

        return err;
}

int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
                       unsigned long num, unsigned long *blocks,
                       int *created, int create)
{
#ifdef EXT3_EXT_HAS_NO_TREE
        struct ext3_ext_base *base = inode;
#else
        struct ext3_extents_tree tree;
        struct ext3_ext_base *base = &tree;
#endif
        struct bpointers bp;
        int err;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               block, block + num - 1, (unsigned) inode->i_ino);

#ifndef EXT3_EXT_HAS_NO_TREE
        ext3_init_tree_desc(base, inode);
        tree.private = &bp;
#endif
        bp.blocks = blocks;
        bp.created = created;
        bp.start = block;
        bp.init_num = bp.num = num;
        bp.create = create;

        ext3_down_truncate_sem(inode);
        err = fsfilt_ext3_ext_walk_space(base, block, num,
                                         ext3_ext_new_extent_cb, &bp);
        ext3_ext_invalidate_cache(base);
        ext3_up_truncate_sem(inode);

        return err;
}

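/* Note: blocks[] and created[] are parallel arrays with one entry per
 * filesystem block in [block, block + num).  The extent callback above
 * fills created[i] with 0 for blocks that were already mapped and 1 for
 * freshly allocated ones (which also get any stale underlying metadata
 * unmapped from the block device). */
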
int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int *created, int create)
{
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0, clen = 0;
        struct page *fp = NULL;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
               inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
                if (rc)
                        return rc;

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
                created += blocks_per_page * clen;
        }

        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
        return rc;
}

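/* Note: because the incoming page array is sorted, the loop above batches
 * each maximal run of adjacent pages into a single fsfilt_map_nblocks()
 * call, letting the multi-block allocator place the whole run as one
 * extent instead of allocating block by block. */
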
extern int ext3_map_inode_page(struct inode *inode, struct page *page,
                               unsigned long *blocks, int *created, int create);
int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
                                   int pages, unsigned long *blocks,
                                   int *created, int create)
{
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        unsigned long *b;
        int rc = 0, i, *cr;

        for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
                rc = ext3_map_inode_page(inode, *page, b, cr, create);
                if (rc) {
                        CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
                               inode->i_ino, *b, *cr, create, rc);
                        break;
                }

                b += blocks_per_page;
                cr += blocks_per_page;
        }
        return rc;
}

int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
                                int pages, unsigned long *blocks,
                                int *created, int create,
                                struct semaphore *optional_sem)
{
        int rc;
#ifdef EXT3_MULTIBLOCK_ALLOCATOR
        if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
                rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
                                                     blocks, created, create);
                return rc;
        }
#endif
        if (optional_sem != NULL)
                down(optional_sem);
        rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
                                            created, create);
        if (optional_sem != NULL)
                up(optional_sem);

        return rc;
}

static int fsfilt_ext3_read_record(struct file *file, void *buf,
                                   int size, loff_t *offs)
{
        struct inode *inode = file->f_dentry->d_inode;
        unsigned long block;
        struct buffer_head *bh;
        int err, blocksize, csize, boffs;

        /* prevent reading after eof */
        if (i_size_read(inode) < *offs + size) {
                size = i_size_read(inode) - *offs;
                if (size < 0) {
                        CERROR("size %llu is too short for read %u@%llu\n",
                               i_size_read(inode), size, *offs);
                        return -EIO;
                } else if (size == 0) {
                        return 0;
                }
        }

        blocksize = 1 << inode->i_blkbits;

        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ext3_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("can't read block: %d\n", err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return 0;
}

static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
                                    loff_t *offs, int force_sync)
{
        struct buffer_head *bh = NULL;
        unsigned long block;
        struct inode *inode = file->f_dentry->d_inode;
        loff_t old_size = i_size_read(inode), offset = *offs;
        loff_t new_size = i_size_read(inode);
        handle_t *handle;
        int err = 0, block_count = 0, blocksize, size, boffs;

        /* Determine how many transaction credits are needed */
        blocksize = 1 << inode->i_blkbits;
        block_count = (*offs & (blocksize - 1)) + bufsize;
        block_count = (block_count + blocksize - 1) >> inode->i_blkbits;

        handle = fsfilt_ext3_journal_start(inode,
                        block_count * FSFILT_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
        if (IS_ERR(handle)) {
                CERROR("can't start transaction for %d blocks (%d bytes)\n",
                       block_count * FSFILT_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
                       bufsize);
                return PTR_ERR(handle);
        }

        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                bh = ext3_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("can't read/create block: %d\n", err);
                        goto out;
                }

                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        goto out;
                }
                LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err) {
                        CERROR("journal_dirty_metadata() returned error %d\n",
                               err);
                        goto out;
                }
                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                bufsize -= size;
                buf += size;
        }

        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */
out:
        if (bh)
                brelse(bh);

        /* correct in-core and on-disk sizes */
        if (new_size > i_size_read(inode)) {
                if (new_size > i_size_read(inode))
                        i_size_write(inode, new_size);
                if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = i_size_read(inode);
                if (i_size_read(inode) > old_size)
                        mark_inode_dirty(inode);
        }

        fsfilt_ext3_journal_stop(handle);

        if (err == 0)
                *offs = offset;
        return err;
}

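/* Note: the credit estimate above charges FSFILT_DATA_TRANS_BLOCKS per
 * block touched plus 2 extra.  For example, a 100-byte record starting 50
 * bytes before a 4kB block boundary spans two blocks:
 * (*offs & (blocksize - 1)) + bufsize = 4046 + 100 rounds up to
 * block_count = 2. */
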
static int fsfilt_ext3_setup(struct super_block *sb)
{
        struct ext3_sb_info *sbi = EXT3_SB(sb);
#if 0
        sbi->dx_lock = fsfilt_ext3_dx_lock;
        sbi->dx_unlock = fsfilt_ext3_dx_unlock;
#endif
#ifdef S_PDIROPS
        CWARN("Enabling PDIROPS\n");
        set_opt(sbi->s_mount_opt, PDIROPS);
        sb->s_flags |= S_PDIROPS;
#endif
        if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
                CWARN("filesystem doesn't have dir_index feature enabled\n");
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,6)) && HAVE_QUOTA_SUPPORT
        /* enable journaled quota support */
        /* kfreed in ext3_put_super() */
        sbi->s_qf_names[USRQUOTA] = kstrdup("lquota.user.reserved", GFP_KERNEL);
        if (!sbi->s_qf_names[USRQUOTA])
                return -ENOMEM;
        sbi->s_qf_names[GRPQUOTA] = kstrdup("lquota.group.reserved", GFP_KERNEL);
        if (!sbi->s_qf_names[GRPQUOTA]) {
                kfree(sbi->s_qf_names[USRQUOTA]);
                sbi->s_qf_names[USRQUOTA] = NULL;
                return -ENOMEM;
        }
        sbi->s_jquota_fmt = QFMT_VFS_V0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13))
        set_opt(sbi->s_mount_opt, QUOTA);
#endif
#endif
        return 0;
}

/* If fso is NULL, op is an FSFILT operation, otherwise op is the number of fso
   objects.  logs is the number of logfiles to update. */
static int fsfilt_ext3_get_op_len(int op, struct fsfilt_objinfo *fso, int logs)
{
        if (!fso) {
                switch(op) {
                case FSFILT_OP_CREATE:
                        /* directory leaf, index & indirect & EA*/
                        return 4 + 3 * logs;
                case FSFILT_OP_UNLINK:
                        return 3 * logs;
                }
        } else {
                int i;
                int needed = 0;
                struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
                int blockpp = 1 << (CFS_PAGE_SHIFT - sb->s_blocksize_bits);
                int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
                for (i = 0; i < op; i++, fso++) {
                        int nblocks = fso->fso_bufcnt * blockpp;
                        int ndindirect = min(nblocks, addrpp + 1);
                        int nindir = nblocks + ndindirect + 1;

                        needed += nindir;
                }
                return needed + 3 * logs;
        }

        return 0;
}

#define Q_COPY(out, in, member) (out)->member = (in)->member

#define DQINFO_COPY(out, in)                    \
do {                                            \
        Q_COPY(out, in, dqi_bgrace);            \
        Q_COPY(out, in, dqi_igrace);            \
        Q_COPY(out, in, dqi_flags);             \
        Q_COPY(out, in, dqi_valid);             \
} while (0)

#define DQBLK_COPY(out, in)                     \
do {                                            \
        Q_COPY(out, in, dqb_bhardlimit);        \
        Q_COPY(out, in, dqb_bsoftlimit);        \
        Q_COPY(out, in, dqb_curspace);          \
        Q_COPY(out, in, dqb_ihardlimit);        \
        Q_COPY(out, in, dqb_isoftlimit);        \
        Q_COPY(out, in, dqb_curinodes);         \
        Q_COPY(out, in, dqb_btime);             \
        Q_COPY(out, in, dqb_itime);             \
        Q_COPY(out, in, dqb_valid);             \
} while (0)

static int fsfilt_ext3_quotactl(struct super_block *sb,
                                struct obd_quotactl *oqc)
{
        int i, rc = 0, error = 0;
        struct quotactl_ops *qcop;
        struct if_dqinfo *info;
        struct if_dqblk *dqblk;

        if (!sb->s_qcop)
                return -ENOSYS;

        OBD_ALLOC_PTR(info);
        if (!info)
                return -ENOMEM;
        OBD_ALLOC_PTR(dqblk);
        if (!dqblk) {
                OBD_FREE_PTR(info);
                return -ENOMEM;
        }

        DQINFO_COPY(info, &oqc->qc_dqinfo);
        DQBLK_COPY(dqblk, &oqc->qc_dqblk);

        qcop = sb->s_qcop;
        if (oqc->qc_cmd == Q_QUOTAON || oqc->qc_cmd == Q_QUOTAOFF) {
                for (i = 0; i < MAXQUOTAS; i++) {
                        if (!Q_TYPESET(oqc, i))
                                continue;

                        if (oqc->qc_cmd == Q_QUOTAON) {
                                lustre_quota_version_t qfmt = oqc->qc_id;
                                char *name[][MAXQUOTAS] = LUSTRE_OPQFILES_NAMES;

                                if (!qcop->quota_on)
                                        GOTO(out, rc = -ENOSYS);

                                rc = qcop->quota_on(sb, i, QFMT_VFS_V0,
                                                    name[qfmt][i]);
                                if (rc == -ENOENT || rc == -EINVAL) {
                                        rc = lustre_slave_quota_convert(qfmt, i);
                                        if (!rc)
                                                rc = qcop->quota_on(sb, i,
                                                                QFMT_VFS_V0,
                                                                name[qfmt][i]);
                                        else if (rc == -ESTALE)
                                                rc = -ENOENT;
                                }
                        } else if (oqc->qc_cmd == Q_QUOTAOFF) {
                                if (!qcop->quota_off)
                                        GOTO(out, rc = -ENOSYS);
                                rc = qcop->quota_off(sb, i);
                        }

                        if (rc == -EBUSY)
                                error = rc;
                        else if (rc)
                                GOTO(out, rc);
                }
                GOTO(out, rc ?: error);
        }

        switch (oqc->qc_cmd) {
        case Q_GETOINFO:
        case Q_GETINFO:
                if (!qcop->get_info)
                        GOTO(out, rc = -ENOSYS);
                rc = qcop->get_info(sb, oqc->qc_type, info);
                break;
        case Q_SETQUOTA:
        case Q_INITQUOTA:
                if (!qcop->set_dqblk)
                        GOTO(out, rc = -ENOSYS);
                rc = qcop->set_dqblk(sb, oqc->qc_type, oqc->qc_id, dqblk);
                break;
        case Q_GETOQUOTA:
        case Q_GETQUOTA:
                if (!qcop->get_dqblk)
                        GOTO(out, rc = -ENOSYS);
                rc = qcop->get_dqblk(sb, oqc->qc_type, oqc->qc_id, dqblk);
                if (!rc)
                        dqblk->dqb_valid = QIF_LIMITS | QIF_USAGE;
                break;
        case Q_SYNC:
                if (!sb->s_qcop->quota_sync)
                        GOTO(out, rc = -ENOSYS);
                qcop->quota_sync(sb, oqc->qc_type);
                break;
        case Q_FINVALIDATE:
                CDEBUG(D_WARNING, "invalidating operational quota files\n");
                for (i = 0; i < MAXQUOTAS; i++) {
                        struct file *fp;
                        lustre_quota_version_t qfmt = oqc->qc_id;
                        char *name[][MAXQUOTAS] = LUSTRE_OPQFILES_NAMES;

                        if (!Q_TYPESET(oqc, i))
                                continue;

                        fp = filp_open(name[qfmt][i],
                                       O_CREAT | O_TRUNC | O_RDWR, 0644);
                        if (IS_ERR(fp)) {
                                rc = PTR_ERR(fp);
                                CERROR("error invalidating operational quota file"
                                       " %s (rc:%d)\n", name[qfmt][i], rc);
                        } else {
                                filp_close(fp, 0);
                        }
                }
                break;
        default:
                CERROR("unsupported quotactl command: %d\n", oqc->qc_cmd);
                LBUG();
        }
out:
        DQINFO_COPY(&oqc->qc_dqinfo, info);
        DQBLK_COPY(&oqc->qc_dqblk, dqblk);

        OBD_FREE_PTR(info);
        OBD_FREE_PTR(dqblk);

        if (rc)
                CDEBUG(D_QUOTA, "quotactl command %#x, id %u, type %d "
                                "failed: %d\n",
                       oqc->qc_cmd, oqc->qc_id, oqc->qc_type, rc);
        return rc;
}

struct chk_dqblk{
        struct hlist_node       dqb_hash;       /* quotacheck hash */
        struct list_head        dqb_list;       /* in list also */
        qid_t                   dqb_id;         /* uid/gid */
        short                   dqb_type;       /* USRQUOTA/GRPQUOTA */
        qsize_t                 dqb_bhardlimit; /* block hard limit */
        qsize_t                 dqb_bsoftlimit; /* block soft limit */
        qsize_t                 dqb_curspace;   /* current space */
        qsize_t                 dqb_ihardlimit; /* inode hard limit */
        qsize_t                 dqb_isoftlimit; /* inode soft limit */
        qsize_t                 dqb_curinodes;  /* current inodes */
        __u64                   dqb_btime;      /* block grace time */
        __u64                   dqb_itime;      /* inode grace time */
        __u32                   dqb_valid;      /* flag for above fields */
};

static inline unsigned int chkquot_hash(qid_t id, int type)
                                        __attribute__((__const__));

static inline unsigned int chkquot_hash(qid_t id, int type)
{
        return (id * (MAXQUOTAS - type)) % NR_DQHASH;
}

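/* Note: with MAXQUOTAS == 2 this hashes uids (type 0) as
 * (2 * id) % NR_DQHASH and gids (type 1) as id % NR_DQHASH, folding the
 * quota type into the bucket choice cheaply. */
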
static inline struct chk_dqblk *
find_chkquot(struct hlist_head *head, qid_t id, int type)
{
        struct hlist_node *node;
        struct chk_dqblk *cdqb;

        hlist_for_each(node, head) {
                cdqb = hlist_entry(node, struct chk_dqblk, dqb_hash);
                if (cdqb->dqb_id == id && cdqb->dqb_type == type)
                        return cdqb;
        }

        return NULL;
}

static struct chk_dqblk *alloc_chkquot(qid_t id, int type)
{
        struct chk_dqblk *cdqb;

        OBD_ALLOC_PTR(cdqb);
        if (cdqb) {
                INIT_HLIST_NODE(&cdqb->dqb_hash);
                INIT_LIST_HEAD(&cdqb->dqb_list);
                cdqb->dqb_id = id;
                cdqb->dqb_type = type;
        }

        return cdqb;
}

static struct chk_dqblk *
cqget(struct super_block *sb, struct hlist_head *hash, struct list_head *list,
      qid_t id, int type, int first_check)
{
        struct hlist_head *head = hash + chkquot_hash(id, type);
        struct if_dqblk dqb;
        struct chk_dqblk *cdqb;
        int rc;

        cdqb = find_chkquot(head, id, type);
        if (cdqb)
                return cdqb;

        cdqb = alloc_chkquot(id, type);
        if (!cdqb)
                return NULL;

        if (!first_check) {
                rc = sb->s_qcop->get_dqblk(sb, type, id, &dqb);
                if (rc) {
                        CERROR("get_dqblk of id %u, type %d failed: %d\n",
                               id, type, rc);
                } else {
                        DQBLK_COPY(cdqb, &dqb);
                        cdqb->dqb_curspace = 0;
                        cdqb->dqb_curinodes = 0;
                }
        }

        hlist_add_head(&cdqb->dqb_hash, head);
        list_add_tail(&cdqb->dqb_list, list);

        return cdqb;
}

static inline int quota_onoff(struct super_block *sb, int cmd, int type, int qfmt)
{
        struct obd_quotactl *oqctl;
        int rc;

        OBD_ALLOC_PTR(oqctl);
        if (!oqctl)
                return -ENOMEM;

        oqctl->qc_cmd = cmd;
        oqctl->qc_id = qfmt;
        oqctl->qc_type = type;
        rc = fsfilt_ext3_quotactl(sb, oqctl);

        OBD_FREE_PTR(oqctl);
        return rc;
}

static inline int read_old_dqinfo(struct super_block *sb, int type,
                                  struct if_dqinfo *dqinfo)
{
        struct obd_quotactl *oqctl;
        int rc;

        OBD_ALLOC_PTR(oqctl);
        if (!oqctl)
                return -ENOMEM;

        oqctl->qc_cmd = Q_GETINFO;
        oqctl->qc_type = type;
        rc = fsfilt_ext3_quotactl(sb, oqctl);
        if (!rc)
                ((struct obd_dqinfo *)dqinfo)[type] = oqctl->qc_dqinfo;

        OBD_FREE_PTR(oqctl);
        return rc;
}

static inline struct ext3_group_desc *
get_group_desc(struct super_block *sb, int group)
{
        unsigned long desc_block, desc;
        struct ext3_group_desc *gdp;

        desc_block = group / EXT3_DESC_PER_BLOCK(sb);
        desc = group % EXT3_DESC_PER_BLOCK(sb);
        gdp = (struct ext3_group_desc *)
              EXT3_SB(sb)->s_group_desc[desc_block]->b_data;

        return gdp + desc;
}

static inline struct buffer_head *
read_inode_bitmap(struct super_block *sb, unsigned long group)
{
        struct ext3_group_desc *desc;
        struct buffer_head *bh;

        desc = get_group_desc(sb, group);
        bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));

        return bh;
}

static inline struct inode *ext3_iget_inuse(struct super_block *sb,
                                            struct buffer_head *bitmap_bh,
                                            int index, unsigned long ino)
{
        struct inode *inode = NULL;

        if (ext3_test_bit(index, bitmap_bh->b_data))
                inode = iget(sb, ino);

        return inode;
}

struct qchk_ctxt {
        struct hlist_head  qckt_hash[NR_DQHASH];        /* quotacheck hash */
        struct list_head   qckt_list;                   /* quotacheck list */
        int                qckt_first_check[MAXQUOTAS]; /* 1 if no old quotafile */
        struct if_dqinfo   qckt_dqinfo[MAXQUOTAS];      /* old dqinfo */
};

static int add_inode_quota(struct inode *inode, struct qchk_ctxt *qctxt,
                           struct obd_quotactl *oqc)
{
        struct chk_dqblk *cdqb[MAXQUOTAS] = { NULL, };
        loff_t size = 0;
        qid_t qid[MAXQUOTAS];
        int cnt, i, rc = 0;

        qid[USRQUOTA] = inode->i_uid;
        qid[GRPQUOTA] = inode->i_gid;

        if (S_ISDIR(inode->i_mode) ||
            S_ISREG(inode->i_mode) ||
            S_ISLNK(inode->i_mode))
                size = inode_get_bytes(inode);

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!Q_TYPESET(oqc, cnt))
                        continue;

                cdqb[cnt] = cqget(inode->i_sb, qctxt->qckt_hash,
                                  &qctxt->qckt_list, qid[cnt], cnt,
                                  qctxt->qckt_first_check[cnt]);
                if (!cdqb[cnt]) {
                        rc = -ENOMEM;
                        break;
                }

                cdqb[cnt]->dqb_curspace += size;
                cdqb[cnt]->dqb_curinodes++;
        }

        if (rc) {
                for (i = 0; i < cnt; i++) {
                        if (!Q_TYPESET(oqc, i))
                                continue;
                        if (!cdqb[i])
                                continue;
                        cdqb[i]->dqb_curspace -= size;
                        cdqb[i]->dqb_curinodes--;
                }
        }

        return rc;
}

static int v2_write_dqheader(struct file *f, int type)
{
        static const __u32 quota_magics[] = V2_INITQMAGICS;
#ifdef HAVE_QUOTA64
        static const __u32 quota_versions[] = V2_INITQVERSIONS_R0;
#else
        static const __u32 quota_versions[] = V2_INITQVERSIONS;
#endif
        struct v2_disk_dqheader dqhead;
        loff_t offset = 0;

        CLASSERT(ARRAY_SIZE(quota_magics) == ARRAY_SIZE(quota_versions));
        LASSERT(0 <= type && type < ARRAY_SIZE(quota_magics));

        dqhead.dqh_magic = cpu_to_le32(quota_magics[type]);
        dqhead.dqh_version = cpu_to_le32(quota_versions[type]);

        return cfs_user_write(f, (char *)&dqhead, sizeof(dqhead), &offset);
}

/* write dqinfo struct in a new quota file */
static int v2_write_dqinfo(struct file *f, int type, struct if_dqinfo *info)
{
        struct v2_disk_dqinfo dqinfo;
        __u32 blocks = V2_DQTREEOFF + 1;
        loff_t offset = V2_DQINFOOFF;

        if (info) {
                dqinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
                dqinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
                dqinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK &
                                               ~DQF_INFO_DIRTY);
        } else {
                dqinfo.dqi_bgrace = cpu_to_le32(MAX_DQ_TIME);
                dqinfo.dqi_igrace = cpu_to_le32(MAX_IQ_TIME);
                dqinfo.dqi_flags = 0;
        }

        dqinfo.dqi_blocks = cpu_to_le32(blocks);
        dqinfo.dqi_free_blk = 0;
        dqinfo.dqi_free_entry = 0;

        return cfs_user_write(f, (char *)&dqinfo, sizeof(dqinfo), &offset);
}

#ifdef HAVE_QUOTA64
static int v3_write_dqheader(struct file *f, int type)
{
        static const __u32 quota_magics[] = V2_INITQMAGICS;
        static const __u32 quota_versions[] = V2_INITQVERSIONS_R1;
        struct v2_disk_dqheader dqhead;
        loff_t offset = 0;

        CLASSERT(ARRAY_SIZE(quota_magics) == ARRAY_SIZE(quota_versions));
        LASSERT(0 <= type && type < ARRAY_SIZE(quota_magics));

        dqhead.dqh_magic = cpu_to_le32(quota_magics[type]);
        dqhead.dqh_version = cpu_to_le32(quota_versions[type]);

        return cfs_user_write(f, (char *)&dqhead, sizeof(dqhead), &offset);
}

/* write dqinfo struct in a new quota file */
static int v3_write_dqinfo(struct file *f, int type, struct if_dqinfo *info)
{
        return v2_write_dqinfo(f, type, info);
}
#endif

static int create_new_quota_files(struct qchk_ctxt *qctxt,
                                  struct obd_quotactl *oqc)
{
        int i, rc = 0;

        for (i = 0; i < MAXQUOTAS; i++) {
                struct if_dqinfo *info = qctxt->qckt_first_check[i] ?
                                         NULL : &qctxt->qckt_dqinfo[i];
                struct file *file;
                const char *name[][MAXQUOTAS] = LUSTRE_OPQFILES_NAMES;
                int (*write_dqheader)(struct file *, int);
                int (*write_dqinfo)(struct file *, int, struct if_dqinfo *);

                if (!Q_TYPESET(oqc, i))
                        continue;

                file = filp_open(name[oqc->qc_id][i],
                                 O_RDWR | O_CREAT | O_TRUNC, 0644);
                if (IS_ERR(file)) {
                        rc = PTR_ERR(file);
                        CERROR("can't create %s file: rc = %d\n",
                               name[oqc->qc_id][i], rc);
                        GOTO(out, rc);
                }

                if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
                        CERROR("file %s is not regular\n", name[oqc->qc_id][i]);
                        filp_close(file, 0);
                        GOTO(out, rc = -EINVAL);
                }

                DQUOT_DROP(file->f_dentry->d_inode);

                switch (oqc->qc_id) {
                case LUSTRE_QUOTA_V1 : write_dqheader = v2_write_dqheader;
                        write_dqinfo = v2_write_dqinfo;
                        break;
#ifdef HAVE_QUOTA64
                case LUSTRE_QUOTA_V2 : write_dqheader = v3_write_dqheader;
                        write_dqinfo = v3_write_dqinfo;
                        break;
#endif
                default : CERROR("unknown quota format!\n");
                        LBUG();
                }

                rc = (*write_dqheader)(file, i);
                if (rc) {
                        filp_close(file, 0);
                        GOTO(out, rc);
                }

                rc = (*write_dqinfo)(file, i, info);
                filp_close(file, 0);
                if (rc)
                        GOTO(out, rc);
        }

out:
        return rc;
}

static int commit_chkquot(struct super_block *sb, struct qchk_ctxt *qctxt,
                          struct chk_dqblk *cdqb)
{
        struct obd_quotactl *oqc;
        long now;
        int rc;

        OBD_ALLOC_PTR(oqc);
        if (!oqc)
                return -ENOMEM;

        now = CURRENT_SECONDS;

        if (cdqb->dqb_bsoftlimit &&
            toqb(cdqb->dqb_curspace) >= cdqb->dqb_bsoftlimit &&
            !cdqb->dqb_btime)
                cdqb->dqb_btime =
                        now + qctxt->qckt_dqinfo[cdqb->dqb_type].dqi_bgrace;

        if (cdqb->dqb_isoftlimit &&
            cdqb->dqb_curinodes >= cdqb->dqb_isoftlimit &&
            !cdqb->dqb_itime)
                cdqb->dqb_itime =
                        now + qctxt->qckt_dqinfo[cdqb->dqb_type].dqi_igrace;

        cdqb->dqb_valid = QIF_ALL;

        oqc->qc_cmd = Q_SETQUOTA;
        oqc->qc_type = cdqb->dqb_type;
        oqc->qc_id = cdqb->dqb_id;
        DQBLK_COPY(&oqc->qc_dqblk, cdqb);

        rc = fsfilt_ext3_quotactl(sb, oqc);

        OBD_FREE_PTR(oqc);
        return rc;
}

static int prune_chkquots(struct super_block *sb,
                          struct qchk_ctxt *qctxt, int error)
{
        struct chk_dqblk *cdqb, *tmp;
        int rc;

        list_for_each_entry_safe(cdqb, tmp, &qctxt->qckt_list, dqb_list) {
                if (!error) {
                        rc = commit_chkquot(sb, qctxt, cdqb);
                        if (rc)
                                error = rc;
                }
                hlist_del_init(&cdqb->dqb_hash);
                list_del(&cdqb->dqb_list);
                OBD_FREE_PTR(cdqb);
        }

        return error;
}

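/* Note: commit_chkquot() above also starts the grace clock: when the
 * recomputed usage already exceeds a soft limit and no btime/itime is set,
 * it stamps now + the per-type grace period, mirroring what the kernel
 * would do on the first over-quota operation. */
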
static int fsfilt_ext3_quotacheck(struct super_block *sb,
                                  struct obd_quotactl *oqc)
{
        struct ext3_sb_info *sbi = EXT3_SB(sb);
        int i, group;
        struct qchk_ctxt *qctxt;
        struct buffer_head *bitmap_bh = NULL;
        unsigned long ino;
        struct inode *inode;
        int rc = 0;

        /* turn on quota and read dqinfo if existed */
        OBD_ALLOC_PTR(qctxt);
        if (!qctxt) {
                oqc->qc_stat = -ENOMEM;
                return -ENOMEM;
        }

        for (i = 0; i < NR_DQHASH; i++)
                INIT_HLIST_HEAD(&qctxt->qckt_hash[i]);
        INIT_LIST_HEAD(&qctxt->qckt_list);

        for (i = 0; i < MAXQUOTAS; i++) {
                if (!Q_TYPESET(oqc, i))
                        continue;

                rc = quota_onoff(sb, Q_QUOTAON, i, oqc->qc_id);
                if (!rc || rc == -EBUSY) {
                        rc = read_old_dqinfo(sb, i, qctxt->qckt_dqinfo);
                        if (rc)
                                GOTO(out, rc);
                } else if (rc == -ENOENT || rc == -EINVAL || rc == -EEXIST) {
                        qctxt->qckt_first_check[i] = 1;
                } else if (rc) {
                        GOTO(out, rc);
                }
        }

        /* check quota and update in hash */
        for (group = 0; group < sbi->s_groups_count; group++) {
                ino = group * sbi->s_inodes_per_group + 1;
                bitmap_bh = read_inode_bitmap(sb, group);
                if (!bitmap_bh) {
                        CERROR("read_inode_bitmap group %d failed", group);
                        GOTO(out, rc = -EIO);
                }

                for (i = 0; i < sbi->s_inodes_per_group; i++, ino++) {
                        if (ino < sbi->s_first_ino)
                                continue;

                        inode = ext3_iget_inuse(sb, bitmap_bh, i, ino);
                        rc = add_inode_quota(inode, qctxt, oqc);
                        iput(inode);
                        if (rc) {
                                brelse(bitmap_bh);
                                GOTO(out, rc);
                        }
                }

                brelse(bitmap_bh);
        }

        /* read old quota limits from the old quota file (only for users that
         * have limits but no entry in the new file yet) */
#ifdef HAVE_QUOTA_SUPPORT
        for (i = 0; i < MAXQUOTAS; i++) {
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;

                if (!Q_TYPESET(oqc, i))
                        continue;

                if (qctxt->qckt_first_check[i])
                        continue;

                LASSERT(sb_dqopt(sb)->files[i] != NULL);
                INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = lustre_get_qids(sb_dqopt(sb)->files[i], NULL, i, &id_list);
#else
                rc = lustre_get_qids(NULL, sb_dqopt(sb)->files[i], i, &id_list);
#endif
                if (rc)
                        CERROR("read old limits failed. (rc:%d)\n", rc);

                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);

                        if (!rc)
                                cqget(sb, qctxt->qckt_hash, &qctxt->qckt_list,
                                      dqid->di_id, i,
                                      qctxt->qckt_first_check[i]);
                        kfree(dqid);
                }
        }
#endif
        /* turn off quota because we are about to dump chk_dqblk to files */
        quota_onoff(sb, Q_QUOTAOFF, oqc->qc_type, oqc->qc_id);

        rc = create_new_quota_files(qctxt, oqc);
        if (rc)
                GOTO(out, rc);

        /* we use vfs functions to set dqblk, so turn quota on */
        rc = quota_onoff(sb, Q_QUOTAON, oqc->qc_type, oqc->qc_id);
out:
        /* dump and free chk_dqblk */
        rc = prune_chkquots(sb, qctxt, rc);
        OBD_FREE_PTR(qctxt);

        /* turn off quota; `lfs quotacheck` will turn it back on when all
         * nodes have finished quotacheck. */
        quota_onoff(sb, Q_QUOTAOFF, oqc->qc_type, oqc->qc_id);

        oqc->qc_stat = rc;
        if (rc)
                CERROR("quotacheck failed: rc = %d\n", rc);

        return rc;
}

#ifdef HAVE_QUOTA_SUPPORT
static int fsfilt_ext3_quotainfo(struct lustre_quota_info *lqi, int type,
                                 int cmd)
{
        int rc = 0;

        if (lqi->qi_files[type] == NULL) {
                CERROR("operate qinfo before it's enabled!\n");
                return -EIO;
        }

        switch (cmd) {
        case QFILE_CHK:
                rc = lustre_check_quota_file(lqi, type);
                break;
        case QFILE_RD_INFO:
                rc = lustre_read_quota_info(lqi, type);
                break;
        case QFILE_WR_INFO:
                rc = lustre_write_quota_info(lqi, type);
                break;
        case QFILE_INIT_INFO:
                rc = lustre_init_quota_info(lqi, type);
                break;
        case QFILE_CONVERT:
                rc = lustre_quota_convert(lqi, type);
                break;
        default:
                rc = -ENOTSUPP;
                CERROR("Unsupported admin quota file cmd %d\n"
                       "Are lquota.ko and fsfilt_ldiskfs.ko modules in sync?\n",
                       cmd);
                break;
        }
        return rc;
}

static int fsfilt_ext3_qids(struct file *file, struct inode *inode, int type,
                            struct list_head *list)
{
        return lustre_get_qids(file, inode, type, list);
}

static int fsfilt_ext3_dquot(struct lustre_dquot *dquot, int cmd)
{
        int rc = 0;

        if (dquot->dq_info->qi_files[dquot->dq_type] == NULL) {
                CERROR("operate dquot before it's enabled!\n");
                return -EIO;
        }

        switch (cmd) {
        case QFILE_RD_DQUOT:
                rc = lustre_read_dquot(dquot);
                break;
        case QFILE_WR_DQUOT:
                if (dquot->dq_dqb.dqb_ihardlimit ||
                    dquot->dq_dqb.dqb_isoftlimit ||
                    dquot->dq_dqb.dqb_bhardlimit ||
                    dquot->dq_dqb.dqb_bsoftlimit)
                        clear_bit(DQ_FAKE_B, &dquot->dq_flags);
                else
                        set_bit(DQ_FAKE_B, &dquot->dq_flags);

                rc = lustre_commit_dquot(dquot);
                if (rc >= 0)
                        rc = 0;
                break;
        default:
                CERROR("Unsupported admin quota file cmd %d\n", cmd);
                LBUG();
                break;
        }
        return rc;
}
#endif

static lvfs_sbdev_type fsfilt_ext3_journal_sbdev(struct super_block *sb)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        return (EXT3_SB(sb)->journal_bdev);
#else
        return kdev_t_to_nr(EXT3_SB(sb)->s_journal->j_dev);
#endif
}

static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_getlabel            = fsfilt_ext3_get_label,
        .fs_setlabel            = fsfilt_ext3_set_label,
        .fs_uuid                = fsfilt_ext3_uuid,
        .fs_start               = fsfilt_ext3_start,
        .fs_brw_start           = fsfilt_ext3_brw_start,
        .fs_extend              = fsfilt_ext3_extend,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_commit_async        = fsfilt_ext3_commit_async,
        .fs_commit_wait         = fsfilt_ext3_commit_wait,
        .fs_setattr             = fsfilt_ext3_setattr,
        .fs_iocontrol           = fsfilt_ext3_iocontrol,
        .fs_set_md              = fsfilt_ext3_set_md,
        .fs_get_md              = fsfilt_ext3_get_md,
        .fs_readpage            = fsfilt_ext3_readpage,
        .fs_add_journal_cb      = fsfilt_ext3_add_journal_cb,
        .fs_statfs              = fsfilt_ext3_statfs,
        .fs_sync                = fsfilt_ext3_sync,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
        .fs_send_bio            = fsfilt_ext3_send_bio,
        .fs_get_op_len          = fsfilt_ext3_get_op_len,
        .fs_quotactl            = fsfilt_ext3_quotactl,
        .fs_quotacheck          = fsfilt_ext3_quotacheck,
#ifdef HAVE_DISK_INODE_VERSION
        .fs_get_version         = fsfilt_ext3_get_version,
        .fs_set_version         = fsfilt_ext3_set_version,
#endif
#ifdef HAVE_QUOTA_SUPPORT
        .fs_quotainfo           = fsfilt_ext3_quotainfo,
        .fs_qids                = fsfilt_ext3_qids,
        .fs_dquot               = fsfilt_ext3_dquot,
#endif
        .fs_journal_sbdev       = fsfilt_ext3_journal_sbdev,
};

static int __init fsfilt_ext3_init(void)
{
        int rc;

        fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb",
                                         sizeof(struct fsfilt_cb_data), 0, 0);
        if (!fcb_cache) {
                CERROR("error allocating fsfilt journal callback cache\n");
                GOTO(out, rc = -ENOMEM);
        }

        rc = fsfilt_register_ops(&fsfilt_ext3_ops);

        if (rc) {
                int err = cfs_mem_cache_destroy(fcb_cache);
                LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
        }
out:
        return rc;
}

static void __exit fsfilt_ext3_exit(void)
{
        int rc;

        fsfilt_unregister_ops(&fsfilt_ext3_ops);
        rc = cfs_mem_cache_destroy(fcb_cache);
        LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
}

module_init(fsfilt_ext3_init);
module_exit(fsfilt_ext3_exit);

MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
MODULE_LICENSE("GPL");