/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  lustre/lib/fsfilt_ext3.c
 *  Lustre filesystem abstraction routines
 *
 *  Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Andreas Dilger <adilger@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_FILTER

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/version.h>
#include <linux/bitops.h>
#include <linux/quota.h>
#include <linux/quotaio_v1.h>
#include <linux/quotaio_v2.h>
#include <ext3/xattr.h>

#include <libcfs/kp30.h>
#include <lustre_fsfilt.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_quota.h>
#include <linux/lustre_compat25.h>
#include <linux/lprocfs_status.h>

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#include <linux/ext3_extents.h>
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
#define FSFILT_DATA_TRANS_BLOCKS(sb)      EXT3_DATA_TRANS_BLOCKS
#define FSFILT_DELETE_TRANS_BLOCKS(sb)    EXT3_DELETE_TRANS_BLOCKS
#else
#define FSFILT_DATA_TRANS_BLOCKS(sb)      EXT3_DATA_TRANS_BLOCKS(sb)
#define FSFILT_DELETE_TRANS_BLOCKS(sb)    EXT3_DELETE_TRANS_BLOCKS(sb)
#endif

#define fsfilt_ext3_journal_start(inode, nblocks) ext3_journal_start(inode, nblocks)
#define fsfilt_ext3_journal_stop(handle)          ext3_journal_stop(handle)

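/* slab cache for the fsfilt_cb_data commit callback descriptors that
 * fsfilt_ext3_add_journal_cb() registers with jbd below */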
static cfs_mem_cache_t *fcb_cache;

struct fsfilt_cb_data {
        struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};

#ifndef EXT3_XATTR_INDEX_TRUSTED        /* temporary until we hit l28 kernel */
#define EXT3_XATTR_INDEX_TRUSTED        4
#endif

static char *fsfilt_ext3_get_label(struct super_block *sb)
{
        return EXT3_SB(sb)->s_es->s_volume_name;
}

static int fsfilt_ext3_set_label(struct super_block *sb, char *label)
{
        /* see e.g. fsfilt_ext3_write_record() */
        journal_t *journal;
        handle_t *handle;
        int err;

        journal = EXT3_SB(sb)->s_journal;
        handle = journal_start(journal, 1);
        if (IS_ERR(handle)) {
                CERROR("can't start transaction\n");
                return(PTR_ERR(handle));
        }

        err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
        if (err)
                goto out;

        memcpy(EXT3_SB(sb)->s_es->s_volume_name, label,
               sizeof(EXT3_SB(sb)->s_es->s_volume_name));

        err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
out:
        journal_stop(handle);
        return(err);
}

static char *fsfilt_ext3_uuid(struct super_block *sb)
{
        return EXT3_SB(sb)->s_es->s_uuid;
}

#ifdef HAVE_DISK_INODE_VERSION
/*
 * Get the 64-bit version for an inode.
 */
static __u64 fsfilt_ext3_get_version(struct inode *inode)
{
        return EXT3_I(inode)->i_fs_version;
}

/*
 * Set the 64-bit version and return the old version.
 */
static __u64 fsfilt_ext3_set_version(struct inode *inode, __u64 new_version)
{
        __u64 old_version = EXT3_I(inode)->i_fs_version;

        (EXT3_I(inode))->i_fs_version = new_version;
        return old_version;
}
#endif

/*
 * We don't currently need any additional blocks for rmdir and
 * unlink transactions because we are storing the OST oa_id inside
 * the inode (which we will be changing anyways as part of this
 * transaction).
 */
static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
                               int logs)
{
        /* For updates to the last received file */
        int nblocks = EXT3_SINGLEDATA_TRANS_BLOCKS;
        journal_t *journal;
        void *handle;

        if (current->journal_info) {
                CDEBUG(D_INODE, "increasing refcount on %p\n",
                       current->journal_info);
                goto journal_start;
        }

        switch(op) {
        case FSFILT_OP_RMDIR:
        case FSFILT_OP_UNLINK:
                /* delete one file + create/update logs for each stripe */
                nblocks += FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb);
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        case FSFILT_OP_RENAME:
                /* modify additional directory */
                nblocks += EXT3_SINGLEDATA_TRANS_BLOCKS;
                /* no break */
        case FSFILT_OP_SYMLINK:
                /* additional block + block bitmap + GDT for long symlink */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_CREATE: {
#if defined(EXT3_EXTENTS_FL) && defined(EXT3_INDEX_FL)
                if (!test_opt(inode->i_sb, EXTENTS)) {
                        ;
                } else if (((EXT3_I(inode)->i_flags &
                             cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL)) ==
                            cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL))) {
                        CWARN("extent-mapped directory found - contact "
                              "CFS: support@clusterfs.com\n");
                }
#endif
        }
                /* no break */
        case FSFILT_OP_MKDIR:
        case FSFILT_OP_MKNOD:
                /* modify one inode + block bitmap + GDT */
                nblocks += 4;
                /* no break */
        case FSFILT_OP_LINK:
                /* modify parent directory */
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
                /* create/update logs for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        case FSFILT_OP_SETATTR:
                /* Setattr on inode */
                nblocks += 1;
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
                /* quota chown log for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        case FSFILT_OP_CANCEL_UNLINK:
                /* blocks for log header bitmap update OR
                 * blocks for catalog header bitmap update + unlink of logs */
                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
                        FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb) * logs;
                break;
        case FSFILT_OP_JOIN:
                /* delete 2 files (file + array id) + create 1 file (array id)
                 * create/update logs for each stripe */
                nblocks += 2 * FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb);

                /* create array log for head file */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS);
                /* update head file array */
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
                break;
        default:
                CERROR("unknown transaction start op %d\n", op);
                LBUG();
        }

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(inode->i_sb)->s_journal;
        if (nblocks > journal->j_max_transaction_buffers) {
                CWARN("too many credits %d for op %ux%u using %d instead\n",
                      nblocks, op, logs, journal->j_max_transaction_buffers);
                nblocks = journal->j_max_transaction_buffers;
        }

 journal_start:
        LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
        handle = fsfilt_ext3_journal_start(inode, nblocks);

        if (!IS_ERR(handle))
                LASSERT(current->journal_info == handle);
        else
                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
                       op, nblocks, PTR_ERR(handle));
        return handle;
}

/*
 * Calculate the number of buffer credits needed to write multiple pages in
 * a single ext3 transaction.  No, this shouldn't be here, but as yet ext3
 * doesn't have a nice API for calculating this sort of thing in advance.
 *
 * See comment above ext3_writepage_trans_blocks for details.  We assume
 * no data journaling is being done, but it does allow for all of the pages
 * being non-contiguous.  If we are guaranteed contiguous pages we could
 * reduce the number of (d)indirect blocks a lot.
 *
 * With N blocks per page and P pages, for each inode we have at most:
 * N*P indirect
 * min(N*P, blocksize/4 + 1) dindirect blocks
 * niocount tindirect
 *
 * For the entire filesystem, we have at most:
 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
 * objcount inode blocks
 * 1 superblock
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 * 1 EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
 */
static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
                                      int niocount, struct niobuf_local *nb)
{
        struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
        __u64 next_indir;
        const int blockpp = 1 << (CFS_PAGE_SHIFT - sb->s_blocksize_bits);
        int nbitmaps = 0, ngdblocks;
        int needed = objcount + 1; /* inodes + superblock */
        int i, j;

        for (i = 0, j = 0; i < objcount; i++, fso++) {
                /* two or more dindirect blocks in case we cross a boundary */
                int ndind = (long)((nb[j + fso->fso_bufcnt - 1].offset -
                                    nb[j].offset) >>
                                   sb->s_blocksize_bits) /
                        (EXT3_ADDR_PER_BLOCK(sb) * EXT3_ADDR_PER_BLOCK(sb));
                nbitmaps += min(fso->fso_bufcnt, ndind > 0 ? ndind : 2);

                /* leaf, indirect, tindirect blocks for first block */
                nbitmaps += blockpp + 2;

                j += fso->fso_bufcnt;
        }

        next_indir = nb[0].offset +
                (EXT3_ADDR_PER_BLOCK(sb) << sb->s_blocksize_bits);
        for (i = 1; i < niocount; i++) {
                if (nb[i].offset >= next_indir) {
                        nbitmaps++;     /* additional indirect */
                        next_indir = nb[i].offset +
                                (EXT3_ADDR_PER_BLOCK(sb)<<sb->s_blocksize_bits);
                } else if (nb[i].offset != nb[i - 1].offset + sb->s_blocksize) {
                        nbitmaps++;     /* additional indirect */
                }
                nbitmaps += blockpp;    /* each leaf in different group? */
        }

        ngdblocks = nbitmaps;
        if (nbitmaps > EXT3_SB(sb)->s_groups_count)
                nbitmaps = EXT3_SB(sb)->s_groups_count;
        if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
                ngdblocks = EXT3_SB(sb)->s_gdb_count;

        needed += nbitmaps + ngdblocks;

        /* last_rcvd update */
        needed += FSFILT_DATA_TRANS_BLOCKS(sb);

#if defined(CONFIG_QUOTA)
        /* We assume that there will be 1 bit set in s_dquot.flags for each
         * quota file that is active.  This is at least true for now.
         */
        needed += hweight32(sb_any_quota_enabled(sb)) *
                EXT3_SINGLEDATA_TRANS_BLOCKS;
#endif

        return needed;
}

/* We have to start a huge journal transaction here to hold all of the
 * metadata for the pages being written here.  This is necessitated by
 * the fact that we do lots of prepare_write operations before we do
 * any of the matching commit_write operations, so even if we split
 * up to use "smaller" transactions none of them could complete until
 * all of them were opened.  By having a single journal transaction,
 * we eliminate duplicate reservations for common blocks like the
 * superblock and group descriptors or bitmaps.
 *
 * We will start the transaction here, but each prepare_write will
 * add a refcount to the transaction, and each commit_write will
 * remove a refcount.  The transaction will be closed when all of
 * the pages have been written.
 */
static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
                                   int niocount, struct niobuf_local *nb,
                                   void *desc_private, int logs)
{
        journal_t *journal;
        handle_t *handle;
        int needed;

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
        needed = fsfilt_ext3_credits_needed(objcount, fso, niocount, nb);

        /* The number of blocks we could _possibly_ dirty can be very large.
         * We reduce our request if it is absurd (and we couldn't get that
         * many credits for a single handle anyways).
         *
         * At some point we have to limit the size of I/Os sent at one time,
         * increase the size of the journal, or we have to calculate the
         * actual journal requirements more carefully by checking all of
         * the blocks instead of being maximally pessimistic.  It remains to
         * be seen if this is a real problem or not.
         */
        if (needed > journal->j_max_transaction_buffers) {
                CERROR("want too many journal credits (%d) using %d instead\n",
                       needed, journal->j_max_transaction_buffers);
                needed = journal->j_max_transaction_buffers;
        }

        LASSERTF(needed > 0, "can't start %d credit transaction\n", needed);

        handle = fsfilt_ext3_journal_start(fso->fso_dentry->d_inode, needed);
        if (IS_ERR(handle)) {
                CERROR("can't get handle for %d credits: rc = %ld\n", needed,
                       PTR_ERR(handle));
        } else {
                LASSERT(handle->h_buffer_credits >= needed);
                LASSERT(current->journal_info == handle);
        }

        return handle;
}

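/* add credits to an already-open handle; if jbd cannot extend the running
 * transaction in place, flush the inode state and restart the handle */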
static int fsfilt_ext3_extend(struct inode *inode, unsigned int nblocks, void *h)
{
        handle_t *handle = h;

        /* fsfilt_extend called with nblocks = 0 for testing in special cases */
        if (nblocks == 0) {
                handle->h_buffer_credits = 0;
                CWARN("setting credits of handle %p to zero by request\n", h);
        }

        if (handle->h_buffer_credits > nblocks)
                return 0;
        if (journal_extend(handle, nblocks) == 0)
                return 0;

        ext3_mark_inode_dirty(handle, inode);
        return journal_restart(handle, nblocks);
}

static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
{
        int rc;
        handle_t *handle = h;

        LASSERT(current->journal_info == handle);
        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */

        rc = fsfilt_ext3_journal_stop(handle);

        return rc;
}

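/* stop the handle and kick off an asynchronous commit of its transaction;
 * the target tid is passed back through wait_handle for a later
 * fsfilt_ext3_commit_wait() */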
static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
                                    void **wait_handle)
{
        unsigned long tid;
        transaction_t *transaction;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        unsigned long rtid;
#endif
        handle_t *handle = h;
        journal_t *journal;
        int rc;

        LASSERT(current->journal_info == handle);

        transaction = handle->h_transaction;
        journal = transaction->t_journal;
        tid = transaction->t_tid;
        /* we don't want to be blocked */
        handle->h_sync = 0;
        rc = fsfilt_ext3_journal_stop(handle);
        if (rc) {
                CERROR("error while stopping transaction: %d\n", rc);
                return rc;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        rtid = log_start_commit(journal, transaction);
        if (rtid != tid)
                CERROR("strange race: %lu != %lu\n",
                       (unsigned long) tid, (unsigned long) rtid);
#else
        log_start_commit(journal, tid);
#endif

        *wait_handle = (void *) tid;
        CDEBUG(D_INODE, "commit async: %lu\n", (unsigned long) tid);
        return 0;
}

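/* wait for the transaction identified by the commit_async cookie to
 * commit, returning -EIO if the journal has aborted */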
static int fsfilt_ext3_commit_wait(struct inode *inode, void *h)
{
        journal_t *journal = EXT3_JOURNAL(inode);
        tid_t tid = (tid_t)(long)h;

        CDEBUG(D_INODE, "commit wait: %lu\n", (unsigned long) tid);
        if (unlikely(is_journal_aborted(journal)))
                return -EIO;

        log_wait_commit(EXT3_JOURNAL(inode), tid);

        if (unlikely(is_journal_aborted(journal)))
                return -EIO;
        return 0;
}

static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
                               struct iattr *iattr, int do_trunc)
{
        struct inode *inode = dentry->d_inode;
        int rc = 0;

        /* Avoid marking the inode dirty on the superblock list unnecessarily.
         * We are already writing the inode to disk as part of this
         * transaction and want to avoid a lot of extra inode writeout
         * later on. b=9828 */
        if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
                /* ATTR_SIZE would invoke truncate: clear it */
                iattr->ia_valid &= ~ATTR_SIZE;
                EXT3_I(inode)->i_disksize = inode->i_size = iattr->ia_size;

                if (iattr->ia_valid & ATTR_UID)
                        inode->i_uid = iattr->ia_uid;
                if (iattr->ia_valid & ATTR_GID)
                        inode->i_gid = iattr->ia_gid;
                if (iattr->ia_valid & ATTR_ATIME)
                        inode->i_atime = iattr->ia_atime;
                if (iattr->ia_valid & ATTR_MTIME)
                        inode->i_mtime = iattr->ia_mtime;
                if (iattr->ia_valid & ATTR_CTIME)
                        inode->i_ctime = iattr->ia_ctime;
                if (iattr->ia_valid & ATTR_MODE) {
                        inode->i_mode = iattr->ia_mode;

                        if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                                inode->i_mode &= ~S_ISGID;
                }

                inode->i_sb->s_op->dirty_inode(inode);

                goto out;
        }

        /* Don't allow setattr to change file type */
        if (iattr->ia_valid & ATTR_MODE)
                iattr->ia_mode = (inode->i_mode & S_IFMT) |
                                 (iattr->ia_mode & ~S_IFMT);

        /* We set these flags on the client, but have already checked perms
         * so don't confuse inode_change_ok. */
        iattr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);

        if (inode->i_op->setattr) {
                rc = inode->i_op->setattr(dentry, iattr);
        } else {
                rc = inode_change_ok(inode, iattr);
                if (!rc)
                        rc = inode_setattr(inode, iattr);
        }

 out:
        return rc;
}

static int fsfilt_ext3_iocontrol(struct inode *inode, struct file *file,
                                 unsigned int cmd, unsigned long arg)
{
        int rc = 0;

        /* FIXME: Can't do this because of nested transaction deadlock */
        if (cmd == EXT3_IOC_SETFLAGS && (*(int *)arg) & EXT3_JOURNAL_DATA_FL) {
                CERROR("can't set data journal flag on file\n");
                return -EPERM;
        }

        if (inode->i_fop->ioctl)
                rc = inode->i_fop->ioctl(inode, file, cmd, arg);
        else
                rc = -ENOTTY;

        return rc;
}

static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
                              void *lmm, int lmm_size, const char *name)
{
        int rc;

        LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);

        rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
                                   name, lmm, lmm_size, 0);

        if (rc)
                CERROR("error adding MD data to inode %lu: rc = %d\n",
                       inode->i_ino, rc);
        return rc;
}

/* Must be called with i_mutex held */
static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size,
                              const char *name)
{
        int rc;

        LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);

        rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
                            name, lmm, lmm_size);

        /* This gives us the MD size */
        if (lmm == NULL)
                return (rc == -ENODATA) ? 0 : rc;

        if (rc < 0) {
                CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
                       EXT3_XATTR_INDEX_TRUSTED, name,
                       inode->i_ino, rc);
                memset(lmm, 0, lmm_size);
                return (rc == -ENODATA) ? 0 : rc;
        }

        return rc;
}

static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct bio *bio)
{
        submit_bio(rw, bio);
        return 0;
}

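/* regular files go through f_op->read(); directory reads bypass the page
 * cache and copy raw blocks via ext3_bread(), faking an empty dirent
 * block once *off passes i_size */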
static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
                                    loff_t *off)
{
        struct inode *inode = file->f_dentry->d_inode;
        int rc = 0;

        if (S_ISREG(inode->i_mode))
                rc = file->f_op->read(file, buf, count, off);
        else {
                const int blkbits = inode->i_sb->s_blocksize_bits;
                const int blksize = inode->i_sb->s_blocksize;

                CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
                       count, inode->i_ino, *off);
                while (count > 0) {
                        struct buffer_head *bh;

                        bh = NULL;
                        if (*off < inode->i_size) {
                                int err = 0;

                                bh = ext3_bread(NULL, inode, *off >> blkbits,
                                                0, &err);
                                CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);
                                if (bh) {
                                        memcpy(buf, bh->b_data, blksize);
                                        brelse(bh);
                                } else if (err) {
                                        /* XXX in theory we should just fake
                                         * this buffer and continue like ext3,
                                         * especially if this is a partial read
                                         */
                                        CERROR("error read dir %lu+%llu: %d\n",
                                               inode->i_ino, *off, err);
                                        return err;
                                }
                        } else {
                                struct ext3_dir_entry_2 *fake = (void *)buf;

                                CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
                                memset(fake, 0, sizeof(*fake));
                                fake->rec_len = cpu_to_le16(blksize);
                        }
                        count -= blksize;
                        buf += blksize;
                        *off += blksize;
                        rc += blksize;
                }
        }

        return rc;
}

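/* jbd invokes this when the transaction carrying the registered operation
 * commits: forward the notification to the MDS/OST completion function
 * and free the descriptor */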
static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
{
        struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;

        fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);

        OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
}

static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
                                      void *handle, fsfilt_cb_t cb_func,
                                      void *cb_data)
{
        struct fsfilt_cb_data *fcb;

        OBD_SLAB_ALLOC(fcb, fcb_cache, CFS_ALLOC_IO, sizeof *fcb);
        if (fcb == NULL)
                return -ENOMEM;

        fcb->cb_func = cb_func;
        fcb->cb_obd = obd;
        fcb->cb_last_rcvd = last_rcvd;
        fcb->cb_data = cb_data;

        CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
        journal_callback_set(handle, fsfilt_ext3_cb_func,
                             (struct journal_callback *)fcb);

        return 0;
}

/*
 * We need to hack the return value for the free inode counts because
 * the current EA code requires one filesystem block per inode with EAs,
 * so it is possible to run out of blocks before we run out of inodes.
 *
 * This can be removed when the ext3 EA code is fixed.
 */
static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
{
        struct kstatfs sfs;
        int rc;

        memset(&sfs, 0, sizeof(sfs));
        rc = ll_do_statfs(sb, &sfs);

        if (!rc && sfs.f_bfree < sfs.f_ffree) {
                sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
                sfs.f_ffree = sfs.f_bfree;
        }

        statfs_pack(osfs, &sfs);
        return rc;
}

static int fsfilt_ext3_sync(struct super_block *sb)
{
        return ext3_force_commit(sb);
}

#if defined(EXT3_MULTIBLOCK_ALLOCATOR) && (!defined(EXT3_EXT_CACHE_NO) || defined(EXT_CACHE_MARK))
#warning "kernel code has old extents/mballoc patch, disabling"
#undef EXT3_MULTIBLOCK_ALLOCATOR
#endif
#ifndef EXT3_EXTENTS_FL
#define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
#endif

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
#define ext3_up_truncate_sem(inode)   up(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode) down(&EXT3_I(inode)->truncate_sem);
#else
#define ext3_up_truncate_sem(inode)   mutex_unlock(&EXT3_I(inode)->truncate_mutex);
#define ext3_down_truncate_sem(inode) mutex_lock(&EXT3_I(inode)->truncate_mutex);
#endif

#include <linux/lustre_version.h>
#if EXT3_EXT_MAGIC == 0xf301
#define ee_start e_start
#define ee_block e_block
#define ee_len   e_num
#endif
#ifndef EXT3_BB_MAX_BLOCKS
#define ext3_mb_new_blocks(handle, inode, goal, count, aflags, err) \
        ext3_new_blocks(handle, inode, count, goal, err)
#endif

struct bpointers {
        unsigned long *blocks;
        int *created;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
                              unsigned long block, int *aflags)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext3_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext)) {
#if 0
                        /* This prefers to eat into a contiguous extent
                         * rather than find an extent that the whole
                         * request will fit into.  This can fragment data
                         * block allocation and prevents our lovely 1M I/Os
                         * from reaching the disk intact. */
                        if (ex->ee_block + ex->ee_len == block)
                                *aflags |= 1;
#endif
                        return ex->ee_start + (block - ex->ee_block);
                }

                /* it looks like the index is empty;
                 * try to find the goal starting from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}

#define ll_unmap_underlying_metadata(sb, blocknr) \
        unmap_underlying_metadata((sb)->s_bdev, blocknr)

#ifndef EXT3_MB_HINT_GROUP_ALLOC
static unsigned long new_blocks(handle_t *handle, struct ext3_extents_tree *tree,
                                struct ext3_ext_path *path, unsigned long block,
                                int *count, int *err)
{
        unsigned long pblock, goal;
        int aflags = 0;

        goal = ext3_ext_find_goal(tree->inode, path, block, &aflags);
        aflags |= 2; /* blocks have already been reserved */
        pblock = ext3_mb_new_blocks(handle, tree->inode, goal, count, aflags, err);
        return pblock;
}
#else
static unsigned long new_blocks(handle_t *handle, struct ext3_extents_tree *tree,
                                struct ext3_ext_path *path, unsigned long block,
                                int *count, int *err)
{
        struct ext3_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbouring allocated blocks */
        ar.lleft = block;
        *err = ext3_ext_search_left(tree, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ext3_ext_search_right(tree, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ext3_ext_find_goal(tree->inode, path, block, &aflags);
        ar.inode = tree->inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = EXT3_MB_HINT_DATA;
        pblock = ext3_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}
#endif

static int ext3_ext_new_extent_cb(struct ext3_extents_tree *tree,
                                  struct ext3_ext_path *path,
                                  struct ext3_ext_cache *cex)
{
        struct inode *inode = tree->inode;
        struct bpointers *bp = tree->private;
        struct ext3_extent nex;
        unsigned long pblock;
        unsigned long tgen;
        unsigned long count;
        handle_t *handle;
        int err, i;

        i = EXT_DEPTH(tree);
        EXT_ASSERT(i == path->p_depth);
        EXT_ASSERT(path[i].p_hdr);

        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->created) = 0;
                        bp->created++;
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = EXT_GENERATION(tree);
        count = ext3_ext_calc_credits_for_insert(tree, path);
        ext3_up_truncate_sem(inode);

        handle = fsfilt_ext3_journal_start(inode, count + EXT3_ALLOC_NEEDED + 1);
        if (IS_ERR(handle)) {
                ext3_down_truncate_sem(inode);
                return PTR_ERR(handle);
        }

        ext3_down_truncate_sem(inode);
        if (tgen != EXT_GENERATION(tree)) {
                /* the tree has changed, so the path can be invalid by now */
                fsfilt_ext3_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, tree, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        EXT_ASSERT(count <= cex->ec_len);

        /* insert new extent */
        nex.ee_block = cex->ec_block;
        nex.ee_start = pblock;
        nex.ee_len = count;
        err = ext3_ext_insert_extent(handle, tree, path, &nex);
        if (err) {
                CERROR("can't insert extent: %d\n", err);
                /* XXX: export ext3_free_blocks() */
                /*ext3_free_blocks(handle, inode, nex.ee_start, nex.ee_len, 0);*/
                goto out;
        }

        /*
         * Putting the len of the extent we actually inserted,
         * we are asking ext3_ext_walk_space() to continue
         * scanning after that block.
         */
        cex->ec_len = nex.ee_len;
        cex->ec_start = nex.ee_start;
        BUG_ON(nex.ee_len == 0);
        BUG_ON(nex.ee_block != cex->ec_block);

out:
        fsfilt_ext3_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                               bp->start, bp->init_num);
                        CERROR("current extent: %u/%u/%u %d\n",
                               cex->ec_block, cex->ec_len,
                               cex->ec_start, cex->ec_type);
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                                *(bp->created) = 0;
                        } else {
                                *(bp->created) = 1;
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
                                ll_unmap_underlying_metadata(inode->i_sb,
                                                             *(bp->blocks));
                        }
                        bp->created++;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

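/* map [block, block + num) of an extent-mapped inode to physical blocks,
 * allocating on demand; results come back through the blocks[]/created[]
 * arrays carried in struct bpointers */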
int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
                       unsigned long num, unsigned long *blocks,
                       int *created, int create)
{
        struct ext3_extents_tree tree;
        struct bpointers bp;
        int err;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               block, block + num - 1, (unsigned) inode->i_ino);

        ext3_init_tree_desc(&tree, inode);
        tree.private = &bp;
        bp.blocks = blocks;
        bp.created = created;
        bp.start = block;
        bp.init_num = bp.num = num;
        bp.create = create;

        ext3_down_truncate_sem(inode);
        err = ext3_ext_walk_space(&tree, block, num, ext3_ext_new_extent_cb);
        ext3_ext_invalidate_cache(&tree);
        ext3_up_truncate_sem(inode);

        return err;
}

int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int *created, int create)
{
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
               inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
                created += blocks_per_page * clen;
        }

        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
cleanup:
        return rc;
}
#endif /* EXT3_MULTIBLOCK_ALLOCATOR */

extern int ext3_map_inode_page(struct inode *inode, struct page *page,
                               unsigned long *blocks, int *created, int create);
int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
                                   int pages, unsigned long *blocks,
                                   int *created, int create)
{
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        unsigned long *b;
        int rc = 0, i, *cr;

        for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
                rc = ext3_map_inode_page(inode, *page, b, cr, create);
                if (rc) {
                        CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
                               inode->i_ino, *b, *cr, create, rc);
                        break;
                }

                b += blocks_per_page;
                cr += blocks_per_page;
        }
        return rc;
}

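/* dispatch page-to-block mapping: extent-mapped inodes go through the
 * extent walker above, block-mapped inodes through ext3_map_inode_page(),
 * optionally serialized by optional_sem */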
int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
                                int pages, unsigned long *blocks,
                                int *created, int create,
                                struct semaphore *optional_sem)
{
        int rc;
#ifdef EXT3_MULTIBLOCK_ALLOCATOR
        if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
                rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
                                                     blocks, created, create);
                return rc;
        }
#endif
        if (optional_sem != NULL)
                down(optional_sem);
        rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
                                            created, create);
        if (optional_sem != NULL)
                up(optional_sem);

        return rc;
}

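/* read "size" bytes at *offs straight from the filesystem via ext3_bread(),
 * clamping the read at i_size; returns the originally requested byte count
 * on success */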
int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
        unsigned long block;
        struct buffer_head *bh;
        int err, blocksize, csize, boffs, osize = size;

        /* prevent reading after eof */
        lock_kernel();
        if (inode->i_size < *offs + size) {
                size = inode->i_size - *offs;
                unlock_kernel();
                if (size < 0) {
                        CERROR("size %llu is too short for read %u@%llu\n",
                               inode->i_size, size, *offs);
                        return -EIO;
                } else if (size == 0) {
                        return 0;
                }
        } else {
                unlock_kernel();
        }

        blocksize = 1 << inode->i_blkbits;

        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ext3_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("can't read block: %d\n", err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return osize;
}
EXPORT_SYMBOL(fsfilt_ext3_read);

static int fsfilt_ext3_read_record(struct file *file, void *buf,
                                   int size, loff_t *offs)
{
        int rc;
        rc = fsfilt_ext3_read(file->f_dentry->d_inode, buf, size, offs);
        if (rc > 0)
                rc = 0;
        return rc;
}

int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
                             loff_t *offs, handle_t *handle)
{
        struct buffer_head *bh = NULL;
        loff_t old_size = inode->i_size, offset = *offs;
        loff_t new_size = inode->i_size;
        unsigned long block;
        int err = 0, blocksize = 1 << inode->i_blkbits, size, boffs;

        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                bh = ext3_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("can't read/create block: %d\n", err);
                        break;
                }

                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        break;
                }
                LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err) {
                        CERROR("journal_dirty_metadata() returned error %d\n",
                               err);
                        break;
                }
                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                buf += size;
                bufsize -= size;
        }

        if (bh)
                brelse(bh);

        /* correct in-core and on-disk sizes */
        if (new_size > inode->i_size) {
                lock_kernel();
                if (new_size > inode->i_size)
                        inode->i_size = new_size;
                if (inode->i_size > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = inode->i_size;
                if (inode->i_size > old_size)
                        mark_inode_dirty(inode);
                unlock_kernel();
        }

        if (err == 0)
                *offs = offset;
        return err;
}
EXPORT_SYMBOL(fsfilt_ext3_write_handle);

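/* journaled write of a small local file (e.g. last_rcvd): reserve enough
 * credits for every block the buffer may touch, then hand off to
 * fsfilt_ext3_write_handle() above */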
static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
                                    loff_t *offs, int force_sync)
{
        struct inode *inode = file->f_dentry->d_inode;
        handle_t *handle;
        int err, block_count = 0, blocksize;

        /* Determine how many transaction credits are needed */
        blocksize = 1 << inode->i_blkbits;
        block_count = (*offs & (blocksize - 1)) + bufsize;
        block_count = (block_count + blocksize - 1) >> inode->i_blkbits;

        handle = fsfilt_ext3_journal_start(inode,
                        block_count * FSFILT_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
        if (IS_ERR(handle)) {
                CERROR("can't start transaction for %d blocks (%d bytes)\n",
                       block_count * FSFILT_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
                       bufsize);
                return PTR_ERR(handle);
        }

        err = fsfilt_ext3_write_handle(inode, buf, bufsize, offs, handle);

        if (!err && force_sync)
                handle->h_sync = 1; /* recovery likes this */

        fsfilt_ext3_journal_stop(handle);

        return err;
}

static int fsfilt_ext3_setup(struct super_block *sb)
{
#if 0
        EXT3_SB(sb)->dx_lock = fsfilt_ext3_dx_lock;
        EXT3_SB(sb)->dx_unlock = fsfilt_ext3_dx_unlock;
#endif
#ifdef S_PDIROPS
        CWARN("Enabling PDIROPS\n");
        set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
        sb->s_flags |= S_PDIROPS;
#endif
        if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
                CWARN("filesystem doesn't have dir_index feature enabled\n");
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)) && HAVE_QUOTA_SUPPORT
        set_opt(EXT3_SB(sb)->s_mount_opt, QUOTA);
#endif
        return 0;
}

/* If fso is NULL, op is a FSFILT operation, otherwise op is the number of fso
   objects. Logs is the number of logfiles to update */
static int fsfilt_ext3_get_op_len(int op, struct fsfilt_objinfo *fso, int logs)
{
        if (!fso) {
                switch(op) {
                case FSFILT_OP_CREATE:
                        /* directory leaf, index & indirect & EA */
                        return 4 + 3 * logs;
                case FSFILT_OP_UNLINK:
                        return 3 * logs;
                }
        } else {
                int i;
                int needed = 0;
                struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
                int blockpp = 1 << (CFS_PAGE_SHIFT - sb->s_blocksize_bits);
                int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
                for (i = 0; i < op; i++, fso++) {
                        int nblocks = fso->fso_bufcnt * blockpp;
                        int ndindirect = min(nblocks, addrpp + 1);
                        int nindir = nblocks + ndindirect + 1;

                        needed += nindir;
                }
                return needed + 3 * logs;
        }

        return 0;
}

static const char *op_quotafile[] = { "lquota.user", "lquota.group" };

#define Q_COPY(out, in, member) (out)->member = (in)->member
#define DQINFO_COPY(out, in)                    \
do {                                            \
        Q_COPY(out, in, dqi_bgrace);            \
        Q_COPY(out, in, dqi_igrace);            \
        Q_COPY(out, in, dqi_flags);             \
        Q_COPY(out, in, dqi_valid);             \
} while (0)

#define DQBLK_COPY(out, in)                     \
do {                                            \
        Q_COPY(out, in, dqb_bhardlimit);        \
        Q_COPY(out, in, dqb_bsoftlimit);        \
        Q_COPY(out, in, dqb_curspace);          \
        Q_COPY(out, in, dqb_ihardlimit);        \
        Q_COPY(out, in, dqb_isoftlimit);        \
        Q_COPY(out, in, dqb_curinodes);         \
        Q_COPY(out, in, dqb_btime);             \
        Q_COPY(out, in, dqb_itime);             \
        Q_COPY(out, in, dqb_valid);             \
} while (0)

static int fsfilt_ext3_quotactl(struct super_block *sb,
                                struct obd_quotactl *oqc)
{
        int i, rc = 0, error = 0;
        struct quotactl_ops *qcop;
        struct if_dqinfo *info;
        struct if_dqblk *dqblk;
        ENTRY;

        if (!sb->s_qcop)
                RETURN(-ENOSYS);

        OBD_ALLOC_PTR(info);
        if (!info)
                RETURN(-ENOMEM);
        OBD_ALLOC_PTR(dqblk);
        if (!dqblk) {
                OBD_FREE_PTR(info);
                RETURN(-ENOMEM);
        }

        DQINFO_COPY(info, &oqc->qc_dqinfo);
        DQBLK_COPY(dqblk, &oqc->qc_dqblk);

        qcop = sb->s_qcop;
        if (oqc->qc_cmd == Q_QUOTAON || oqc->qc_cmd == Q_QUOTAOFF) {
                for (i = 0; i < MAXQUOTAS; i++) {
                        if (!Q_TYPESET(oqc, i))
                                continue;

                        if (oqc->qc_cmd == Q_QUOTAON) {
                                if (!qcop->quota_on)
                                        GOTO(out, rc = -ENOSYS);
                                rc = qcop->quota_on(sb, i, oqc->qc_id,
                                                    (char *)op_quotafile[i]);
                        } else if (oqc->qc_cmd == Q_QUOTAOFF) {
                                if (!qcop->quota_off)
                                        GOTO(out, rc = -ENOSYS);
                                rc = qcop->quota_off(sb, i);
                        }

                        if (rc == -EBUSY)
                                error = rc;
                        else if (rc)
                                GOTO(out, rc);
                }
                GOTO(out, rc ?: error);
        }

        switch (oqc->qc_cmd) {
        case Q_GETOINFO:
        case Q_GETINFO:
                if (!qcop->get_info)
                        GOTO(out, rc = -ENOSYS);
                rc = qcop->get_info(sb, oqc->qc_type, info);
                break;
        case Q_SETQUOTA:
        case Q_INITQUOTA:
                if (!qcop->set_dqblk)
                        GOTO(out, rc = -ENOSYS);
                rc = qcop->set_dqblk(sb, oqc->qc_type, oqc->qc_id, dqblk);
                break;
        case Q_GETOQUOTA:
        case Q_GETQUOTA:
                if (!qcop->get_dqblk)
                        GOTO(out, rc = -ENOSYS);
                rc = qcop->get_dqblk(sb, oqc->qc_type, oqc->qc_id, dqblk);
                break;
        case Q_SYNC:
                if (!sb->s_qcop->quota_sync)
                        GOTO(out, rc = -ENOSYS);
                qcop->quota_sync(sb, oqc->qc_type);
                break;
        default:
                CERROR("unsupported quotactl command: %d\n", oqc->qc_cmd);
                GOTO(out, rc = -ENOSYS);
        }

        DQINFO_COPY(&oqc->qc_dqinfo, info);
        DQBLK_COPY(&oqc->qc_dqblk, dqblk);

out:
        OBD_FREE_PTR(info);
        OBD_FREE_PTR(dqblk);

        if (rc)
                CDEBUG(D_QUOTA, "quotactl command %#x, id %u, type %d "
                                "failed: %d\n",
                       oqc->qc_cmd, oqc->qc_id, oqc->qc_type, rc);
        RETURN(rc);
}

struct chk_dqblk {
        struct hlist_node       dqb_hash;       /* quotacheck hash */
        struct list_head        dqb_list;       /* in list also */
        qid_t                   dqb_id;         /* uid/gid */
        short                   dqb_type;       /* USRQUOTA/GRPQUOTA */
        __u32                   dqb_bhardlimit; /* block hard limit */
        __u32                   dqb_bsoftlimit; /* block soft limit */
        qsize_t                 dqb_curspace;   /* current space */
        __u32                   dqb_ihardlimit; /* inode hard limit */
        __u32                   dqb_isoftlimit; /* inode soft limit */
        __u32                   dqb_curinodes;  /* current inodes */
        __u64                   dqb_btime;      /* block grace time */
        __u64                   dqb_itime;      /* inode grace time */
        __u32                   dqb_valid;      /* flag for above fields */
};

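/* hash an (id, type) pair into the NR_DQHASH-bucket quotacheck table */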
static inline unsigned int chkquot_hash(qid_t id, int type)
                                        __attribute__((__const__));

static inline unsigned int chkquot_hash(qid_t id, int type)
{
        return (id * (MAXQUOTAS - type)) % NR_DQHASH;
}

static inline struct chk_dqblk *
find_chkquot(struct hlist_head *head, qid_t id, int type)
{
        struct hlist_node *node;
        struct chk_dqblk *cdqb;

        hlist_for_each(node, head) {
                cdqb = hlist_entry(node, struct chk_dqblk, dqb_hash);
                if (cdqb->dqb_id == id && cdqb->dqb_type == type)
                        return cdqb;
        }

        return NULL;
}

static struct chk_dqblk *alloc_chkquot(qid_t id, int type)
{
        struct chk_dqblk *cdqb;

        OBD_ALLOC_PTR(cdqb);
        if (cdqb) {
                INIT_HLIST_NODE(&cdqb->dqb_hash);
                INIT_LIST_HEAD(&cdqb->dqb_list);
                cdqb->dqb_id = id;
                cdqb->dqb_type = type;
        }

        return cdqb;
}

static struct chk_dqblk *
cqget(struct super_block *sb, struct hlist_head *hash, struct list_head *list,
      qid_t id, int type, int first_check)
{
        struct hlist_head *head = hash + chkquot_hash(id, type);
        struct if_dqblk dqb;
        struct chk_dqblk *cdqb;
        int rc;

        cdqb = find_chkquot(head, id, type);
        if (cdqb)
                return cdqb;

        cdqb = alloc_chkquot(id, type);
        if (!cdqb)
                return NULL;

        if (!first_check) {
                rc = sb->s_qcop->get_dqblk(sb, type, id, &dqb);
                if (rc) {
                        CERROR("get_dqblk of id %u, type %d failed: %d\n",
                               id, type, rc);
                } else {
                        DQBLK_COPY(cdqb, &dqb);
                        cdqb->dqb_curspace = 0;
                        cdqb->dqb_curinodes = 0;
                }
        }

        hlist_add_head(&cdqb->dqb_hash, head);
        list_add_tail(&cdqb->dqb_list, list);

        return cdqb;
}

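/* helper to issue a single Q_QUOTAON/Q_QUOTAOFF command through
 * fsfilt_ext3_quotactl() */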
static inline int quota_onoff(struct super_block *sb, int cmd, int type)
{
        struct obd_quotactl *oqctl;
        int rc;

        OBD_ALLOC_PTR(oqctl);
        if (!oqctl)
                return -ENOMEM;

        oqctl->qc_cmd = cmd;
        oqctl->qc_id = QFMT_LDISKFS;
        oqctl->qc_type = type;
        rc = fsfilt_ext3_quotactl(sb, oqctl);

        OBD_FREE_PTR(oqctl);
        return rc;
}

static inline int read_old_dqinfo(struct super_block *sb, int type,
                                  struct if_dqinfo *dqinfo)
{
        struct obd_quotactl *oqctl;
        int rc;
        ENTRY;

        OBD_ALLOC_PTR(oqctl);
        if (!oqctl)
                RETURN(-ENOMEM);

        oqctl->qc_cmd = Q_GETINFO;
        oqctl->qc_type = type;
        rc = fsfilt_ext3_quotactl(sb, oqctl);
        if (!rc)
                ((struct obd_dqinfo *)dqinfo)[type] = oqctl->qc_dqinfo;

        OBD_FREE_PTR(oqctl);
        RETURN(rc);
}

static inline struct ext3_group_desc *
get_group_desc(struct super_block *sb, int group)
{
        unsigned long desc_block, desc;
        struct ext3_group_desc *gdp;

        desc_block = group / EXT3_DESC_PER_BLOCK(sb);
        desc = group % EXT3_DESC_PER_BLOCK(sb);
        gdp = (struct ext3_group_desc *)
              EXT3_SB(sb)->s_group_desc[desc_block]->b_data;

        return gdp + desc;
}

static inline struct buffer_head *
read_inode_bitmap(struct super_block *sb, unsigned long group)
{
        struct ext3_group_desc *desc;
        struct buffer_head *bh;

        desc = get_group_desc(sb, group);
        bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));

        return bh;
}

static inline struct inode *ext3_iget_inuse(struct super_block *sb,
                                            struct buffer_head *bitmap_bh,
                                            int index, unsigned long ino)
{
        struct inode *inode = NULL;

        if (ext3_test_bit(index, bitmap_bh->b_data))
                inode = iget(sb, ino);

        return inode;
}

struct qchk_ctxt {
        struct hlist_head       qckt_hash[NR_DQHASH];        /* quotacheck hash */
        struct list_head        qckt_list;                   /* quotacheck list */
        int                     qckt_first_check[MAXQUOTAS]; /* 1 if no old quotafile */
        struct if_dqinfo        qckt_dqinfo[MAXQUOTAS];      /* old dqinfo */
};

static int add_inode_quota(struct inode *inode, struct qchk_ctxt *qctxt,
                           struct obd_quotactl *oqc)
{
        struct chk_dqblk *cdqb[MAXQUOTAS] = { NULL, };
        loff_t size = 0;
        qid_t qid[MAXQUOTAS];
        int cnt, i, rc = 0;
        ENTRY;

        qid[USRQUOTA] = inode->i_uid;
        qid[GRPQUOTA] = inode->i_gid;

        if (S_ISDIR(inode->i_mode) ||
            S_ISREG(inode->i_mode) ||
            S_ISLNK(inode->i_mode))
                size = inode_get_bytes(inode);

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!Q_TYPESET(oqc, cnt))
                        continue;

                cdqb[cnt] = cqget(inode->i_sb, qctxt->qckt_hash,
                                  &qctxt->qckt_list, qid[cnt], cnt,
                                  qctxt->qckt_first_check[cnt]);
                if (!cdqb[cnt]) {
                        rc = -ENOMEM;
                        break;
                }

                cdqb[cnt]->dqb_curspace += size;
                cdqb[cnt]->dqb_curinodes++;
        }

        if (rc) {
                for (i = 0; i < cnt; i++) {
                        if (!Q_TYPESET(oqc, i))
                                continue;
                        cdqb[i]->dqb_curspace -= size;
                        cdqb[i]->dqb_curinodes--;
                }
        }

        RETURN(rc);
}

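/* write the magic/version header of a new version-2 quota file */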
static int v2_write_dqheader(struct file *f, int type)
{
        static const __u32 quota_magics[] = V2_INITQMAGICS;
        static const __u32 quota_versions[] = V2_INITQVERSIONS;
        struct v2_disk_dqheader dqhead;
        loff_t offset = 0;

        CLASSERT(ARRAY_SIZE(quota_magics) == ARRAY_SIZE(quota_versions));
        LASSERT(0 <= type && type < ARRAY_SIZE(quota_magics));

        dqhead.dqh_magic = cpu_to_le32(quota_magics[type]);
        dqhead.dqh_version = cpu_to_le32(quota_versions[type]);

        return cfs_user_write(f, (char *)&dqhead, sizeof(dqhead), &offset);
}

/* write dqinfo struct in a new quota file */
static int v2_write_dqinfo(struct file *f, int type, struct if_dqinfo *info)
{
        struct v2_disk_dqinfo dqinfo;
        __u32 blocks = V2_DQTREEOFF + 1;
        loff_t offset = V2_DQINFOOFF;

        if (info) {
                dqinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
                dqinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
                dqinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK &
                                               ~DQF_INFO_DIRTY);
        } else {
                dqinfo.dqi_bgrace = cpu_to_le32(MAX_DQ_TIME);
                dqinfo.dqi_igrace = cpu_to_le32(MAX_IQ_TIME);
                dqinfo.dqi_flags = 0;
        }

        dqinfo.dqi_blocks = cpu_to_le32(blocks);
        dqinfo.dqi_free_blk = 0;
        dqinfo.dqi_free_entry = 0;

        return cfs_user_write(f, (char *)&dqinfo, sizeof(dqinfo), &offset);
}

static int create_new_quota_files(struct qchk_ctxt *qctxt,
                                  struct obd_quotactl *oqc)
{
        int i, rc = 0;
        ENTRY;

        for (i = 0; i < MAXQUOTAS; i++) {
                struct if_dqinfo *info = qctxt->qckt_first_check[i] ?
                                         NULL : &qctxt->qckt_dqinfo[i];
                struct file *file;

                if (!Q_TYPESET(oqc, i))
                        continue;

                file = filp_open(op_quotafile[i], O_RDWR | O_CREAT | O_TRUNC,
                                 0644);
                if (IS_ERR(file)) {
                        rc = PTR_ERR(file);
                        CERROR("can't create %s file: rc = %d\n",
                               op_quotafile[i], rc);
                        GOTO(out, rc);
                }

                if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
                        CERROR("file %s is not regular\n", op_quotafile[i]);
                        filp_close(file, 0);
                        GOTO(out, rc = -EINVAL);
                }

                rc = v2_write_dqheader(file, i);
                if (rc) {
                        filp_close(file, 0);
                        GOTO(out, rc);
                }

                rc = v2_write_dqinfo(file, i, info);
                filp_close(file, 0);
                if (rc)
                        GOTO(out, rc);
        }

out:
        RETURN(rc);
}

static int commit_chkquot(struct super_block *sb, struct qchk_ctxt *qctxt,
                          struct chk_dqblk *cdqb)
{
        struct obd_quotactl *oqc;
        long now;
        int rc;
        ENTRY;

        OBD_ALLOC_PTR(oqc);
        if (!oqc)
                RETURN(-ENOMEM);

        now = CURRENT_SECONDS;

        if (cdqb->dqb_bsoftlimit &&
            toqb(cdqb->dqb_curspace) >= cdqb->dqb_bsoftlimit &&
            !cdqb->dqb_btime)
                cdqb->dqb_btime =
                        now + qctxt->qckt_dqinfo[cdqb->dqb_type].dqi_bgrace;

        if (cdqb->dqb_isoftlimit &&
            cdqb->dqb_curinodes >= cdqb->dqb_isoftlimit &&
            !cdqb->dqb_itime)
                cdqb->dqb_itime =
                        now + qctxt->qckt_dqinfo[cdqb->dqb_type].dqi_igrace;

        cdqb->dqb_valid = QIF_ALL;

        oqc->qc_cmd = Q_SETQUOTA;
        oqc->qc_type = cdqb->dqb_type;
        oqc->qc_id = cdqb->dqb_id;
        DQBLK_COPY(&oqc->qc_dqblk, cdqb);

        rc = fsfilt_ext3_quotactl(sb, oqc);

        OBD_FREE_PTR(oqc);
        RETURN(rc);
}

static int prune_chkquots(struct super_block *sb,
                          struct qchk_ctxt *qctxt, int error)
{
        struct chk_dqblk *cdqb, *tmp;
        int rc;

        list_for_each_entry_safe(cdqb, tmp, &qctxt->qckt_list, dqb_list) {
                if (!error) {
                        rc = commit_chkquot(sb, qctxt, cdqb);
                        if (rc)
                                error = rc;
                }
                hlist_del_init(&cdqb->dqb_hash);
                list_del(&cdqb->dqb_list);
                OBD_FREE_PTR(cdqb);
        }

        return error;
}

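/* full quotacheck: walk every allocated inode of every group, charge the
 * usage into the chk_dqblk hash, then recreate the quota files and write
 * the accumulated limits/usage back via Q_SETQUOTA */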
static int fsfilt_ext3_quotacheck(struct super_block *sb,
                                  struct obd_quotactl *oqc)
{
        struct ext3_sb_info *sbi = EXT3_SB(sb);
        int i, group;
        struct qchk_ctxt *qctxt;
        struct buffer_head *bitmap_bh = NULL;
        unsigned long ino;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        /* turn on quota and read dqinfo if it exists */
        OBD_ALLOC_PTR(qctxt);
        if (!qctxt) {
                oqc->qc_stat = -ENOMEM;
                RETURN(-ENOMEM);
        }

        for (i = 0; i < NR_DQHASH; i++)
                INIT_HLIST_HEAD(&qctxt->qckt_hash[i]);
        INIT_LIST_HEAD(&qctxt->qckt_list);

        for (i = 0; i < MAXQUOTAS; i++) {
                if (!Q_TYPESET(oqc, i))
                        continue;

                rc = quota_onoff(sb, Q_QUOTAON, i);
                if (!rc || rc == -EBUSY) {
                        rc = read_old_dqinfo(sb, i, qctxt->qckt_dqinfo);
                        if (rc)
                                GOTO(out, rc);
                } else if (rc == -ENOENT) {
                        qctxt->qckt_first_check[i] = 1;
                } else if (rc) {
                        GOTO(out, rc);
                }
        }

        /* check quota and update in hash */
        for (group = 0; group < sbi->s_groups_count; group++) {
                ino = group * sbi->s_inodes_per_group + 1;
                bitmap_bh = read_inode_bitmap(sb, group);
                if (!bitmap_bh) {
                        CERROR("read_inode_bitmap group %d failed\n", group);
                        GOTO(out, rc = -EIO);
                }

                for (i = 0; i < sbi->s_inodes_per_group; i++, ino++) {
                        if (ino < sbi->s_first_ino)
                                continue;

                        inode = ext3_iget_inuse(sb, bitmap_bh, i, ino);
                        rc = add_inode_quota(inode, qctxt, oqc);
                        iput(inode);
                        if (rc) {
                                brelse(bitmap_bh);
                                GOTO(out, rc);
                        }
                }

                brelse(bitmap_bh);
        }

        /* read old quota limits from the old quota file (needed for ids that
         * have limits set but own no inodes, so the scan above missed them) */
#ifdef HAVE_QUOTA_SUPPORT
        for (i = 0; i < MAXQUOTAS; i++) {
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;

                if (!Q_TYPESET(oqc, i))
                        continue;

                if (qctxt->qckt_first_check[i])
                        continue;

                LASSERT(sb_dqopt(sb)->files[i] != NULL);
                INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = lustre_get_qids(sb_dqopt(sb)->files[i], NULL, i, &id_list);
#else
                rc = lustre_get_qids(NULL, sb_dqopt(sb)->files[i], i, &id_list);
#endif
                if (rc)
                        CERROR("read old limits failed. (rc:%d)\n", rc);

                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);

                        if (!rc)
                                cqget(sb, qctxt->qckt_hash, &qctxt->qckt_list,
                                      dqid->di_id, i,
                                      qctxt->qckt_first_check[i]);
                        kfree(dqid);
                }
        }
#endif
        /* turn off quota because we are about to dump chk_dqblk to files */
        quota_onoff(sb, Q_QUOTAOFF, oqc->qc_type);

        rc = create_new_quota_files(qctxt, oqc);
        if (rc)
                GOTO(out, rc);

        /* we use vfs functions to set dqblk, so turn quota on */
        rc = quota_onoff(sb, Q_QUOTAON, oqc->qc_type);
out:
        /* dump and free chk_dqblk */
        rc = prune_chkquots(sb, qctxt, rc);
        OBD_FREE_PTR(qctxt);

        /* turn off quota, `lfs quotacheck` will turn it on when all
         * nodes finish quotacheck. */
        quota_onoff(sb, Q_QUOTAOFF, oqc->qc_type);

        oqc->qc_stat = rc;
        if (rc)
                CERROR("quotacheck failed: rc = %d\n", rc);

        RETURN(rc);
}

#ifdef HAVE_QUOTA_SUPPORT
static int fsfilt_ext3_quotainfo(struct lustre_quota_info *lqi, int type,
                                 int cmd)
{
        int rc = 0;
        ENTRY;

        if (lqi->qi_files[type] == NULL) {
                CERROR("operate qinfo before it's enabled!\n");
                RETURN(-EIO);
        }

        switch (cmd) {
        case QFILE_CHK:
                rc = lustre_check_quota_file(lqi, type);
                break;
        case QFILE_RD_INFO:
                rc = lustre_read_quota_info(lqi, type);
                break;
        case QFILE_WR_INFO:
                rc = lustre_write_quota_info(lqi, type);
                break;
        case QFILE_INIT_INFO:
                rc = lustre_init_quota_info(lqi, type);
                break;
        default:
                CERROR("Unsupported admin quota file cmd %d\n", cmd);
                LBUG();
                break;
        }
        RETURN(rc);
}

static int fsfilt_ext3_qids(struct file *file, struct inode *inode, int type,
                            struct list_head *list)
{
        return lustre_get_qids(file, inode, type, list);
}

static int fsfilt_ext3_dquot(struct lustre_dquot *dquot, int cmd)
{
        int rc = 0;
        ENTRY;

        if (dquot->dq_info->qi_files[dquot->dq_type] == NULL) {
                CERROR("operate dquot before it's enabled!\n");
                RETURN(-EIO);
        }

        switch (cmd) {
        case QFILE_RD_DQUOT:
                rc = lustre_read_dquot(dquot);
                break;
        case QFILE_WR_DQUOT:
                if (dquot->dq_dqb.dqb_ihardlimit ||
                    dquot->dq_dqb.dqb_isoftlimit ||
                    dquot->dq_dqb.dqb_bhardlimit ||
                    dquot->dq_dqb.dqb_bsoftlimit)
                        clear_bit(DQ_FAKE_B, &dquot->dq_flags);
                else
                        set_bit(DQ_FAKE_B, &dquot->dq_flags);

                rc = lustre_commit_dquot(dquot);
                if (rc >= 0)
                        rc = 0;
                break;
        default:
                CERROR("Unsupported admin quota file cmd %d\n", cmd);
                LBUG();
                break;
        }
        RETURN(rc);
}
#endif

lvfs_sbdev_type fsfilt_ext3_journal_sbdev(struct super_block *sb)
{
        return (EXT3_SB(sb)->journal_bdev);
}
EXPORT_SYMBOL(fsfilt_ext3_journal_sbdev);

static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_getlabel            = fsfilt_ext3_get_label,
        .fs_setlabel            = fsfilt_ext3_set_label,
        .fs_uuid                = fsfilt_ext3_uuid,
        .fs_start               = fsfilt_ext3_start,
        .fs_brw_start           = fsfilt_ext3_brw_start,
        .fs_extend              = fsfilt_ext3_extend,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_commit_async        = fsfilt_ext3_commit_async,
        .fs_commit_wait         = fsfilt_ext3_commit_wait,
        .fs_setattr             = fsfilt_ext3_setattr,
        .fs_iocontrol           = fsfilt_ext3_iocontrol,
        .fs_set_md              = fsfilt_ext3_set_md,
        .fs_get_md              = fsfilt_ext3_get_md,
        .fs_readpage            = fsfilt_ext3_readpage,
        .fs_add_journal_cb      = fsfilt_ext3_add_journal_cb,
        .fs_statfs              = fsfilt_ext3_statfs,
        .fs_sync                = fsfilt_ext3_sync,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
        .fs_send_bio            = fsfilt_ext3_send_bio,
        .fs_get_op_len          = fsfilt_ext3_get_op_len,
        .fs_quotactl            = fsfilt_ext3_quotactl,
        .fs_quotacheck          = fsfilt_ext3_quotacheck,
#ifdef HAVE_DISK_INODE_VERSION
        .fs_get_version         = fsfilt_ext3_get_version,
        .fs_set_version         = fsfilt_ext3_set_version,
#endif
#ifdef HAVE_QUOTA_SUPPORT
        .fs_quotainfo           = fsfilt_ext3_quotainfo,
        .fs_qids                = fsfilt_ext3_qids,
        .fs_dquot               = fsfilt_ext3_dquot,
#endif
        .fs_journal_sbdev       = fsfilt_ext3_journal_sbdev,
};

static int __init fsfilt_ext3_init(void)
{
        int rc;

        fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb",
                                         sizeof(struct fsfilt_cb_data), 0, 0);
        if (!fcb_cache) {
                CERROR("error allocating fsfilt journal callback cache\n");
                GOTO(out, rc = -ENOMEM);
        }

        rc = fsfilt_register_ops(&fsfilt_ext3_ops);

        if (rc) {
                int err = cfs_mem_cache_destroy(fcb_cache);
                LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
        }
out:
        return rc;
}

static void __exit fsfilt_ext3_exit(void)
{
        int rc;

        fsfilt_unregister_ops(&fsfilt_ext3_ops);
        rc = cfs_mem_cache_destroy(fcb_cache);
        LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
}

module_init(fsfilt_ext3_init);
module_exit(fsfilt_ext3_exit);

MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
MODULE_LICENSE("GPL");