1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * lustre/lib/fsfilt_ext3.c
5 * Lustre filesystem abstraction routines
7 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
8 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * This file is part of Lustre, http://www.lustre.org.
12 * Lustre is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Lustre is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Lustre; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #define DEBUG_SUBSYSTEM S_FILTER
28 #include <linux/init.h>
29 #include <linux/module.h>
31 #include <linux/jbd.h>
32 #include <linux/slab.h>
33 #include <linux/pagemap.h>
34 #include <linux/quotaops.h>
35 #include <linux/ext3_fs.h>
36 #include <linux/ext3_jbd.h>
37 #include <linux/version.h>
38 #include <linux/bitops.h>
39 #include <linux/quota.h>
40 #include <linux/quotaio_v1.h>
41 #include <linux/quotaio_v2.h>
42 #include <ext3/xattr.h>
44 #include <libcfs/kp30.h>
45 #include <lustre_fsfilt.h>
47 #include <obd_class.h>
48 #include <lustre_quota.h>
49 #include <linux/lustre_compat25.h>
50 #include <linux/lprocfs_status.h>
52 #ifdef EXT3_MULTIBLOCK_ALLOCATOR
53 #include <linux/ext3_extents.h>
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
57 #define FSFILT_DATA_TRANS_BLOCKS(sb) EXT3_DATA_TRANS_BLOCKS
58 #define FSFILT_DELETE_TRANS_BLOCKS(sb) EXT3_DELETE_TRANS_BLOCKS
59 #else
60 #define FSFILT_DATA_TRANS_BLOCKS(sb) EXT3_DATA_TRANS_BLOCKS(sb)
61 #define FSFILT_DELETE_TRANS_BLOCKS(sb) EXT3_DELETE_TRANS_BLOCKS(sb)
62 #endif
64 #define fsfilt_ext3_journal_start(inode, nblocks) ext3_journal_start(inode, nblocks)
65 #define fsfilt_ext3_journal_stop(handle) ext3_journal_stop(handle)
67 static cfs_mem_cache_t *fcb_cache;
69 struct fsfilt_cb_data {
70 struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
71 fsfilt_cb_t cb_func; /* MDS/OBD completion function */
72 struct obd_device *cb_obd; /* MDS/OBD completion device */
73 __u64 cb_last_rcvd; /* MDS/OST last committed operation */
74 void *cb_data; /* MDS/OST completion function data */
77 #ifndef EXT3_XATTR_INDEX_TRUSTED /* temporary until we hit l28 kernel */
78 #define EXT3_XATTR_INDEX_TRUSTED 4
81 static char *fsfilt_ext3_get_label(struct super_block *sb)
83 return EXT3_SB(sb)->s_es->s_volume_name;
86 static int fsfilt_ext3_set_label(struct super_block *sb, char *label)
88 /* see e.g. fsfilt_ext3_write_record() */
93 journal = EXT3_SB(sb)->s_journal;
94 handle = journal_start(journal, 1);
96 CERROR("can't start transaction\n");
97 return(PTR_ERR(handle));
100 err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
104 memcpy(EXT3_SB(sb)->s_es->s_volume_name, label,
105 sizeof(EXT3_SB(sb)->s_es->s_volume_name));
107 err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
110 journal_stop(handle);
115 static char *fsfilt_ext3_uuid(struct super_block *sb)
117 return EXT3_SB(sb)->s_es->s_uuid;
120 #ifdef HAVE_DISK_INODE_VERSION
122 * Get the 64-bit version for an inode.
124 static __u64 fsfilt_ext3_get_version(struct inode *inode)
126 return EXT3_I(inode)->i_fs_version;
130 * Set the 64-bit version and return the old version.
132 static __u64 fsfilt_ext3_set_version(struct inode *inode, __u64 new_version)
134 __u64 old_version = EXT3_I(inode)->i_fs_version;
136 (EXT3_I(inode))->i_fs_version = new_version;
143 * We don't currently need any additional blocks for rmdir and
144 * unlink transactions because we are storing the OST oa_id inside
145 * the inode (which we will be changing anyway as part of this
146 * transaction). */
148 static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
151 /* For updates to the last received file */
152 int nblocks = EXT3_SINGLEDATA_TRANS_BLOCKS;
156 if (current->journal_info) {
157 CDEBUG(D_INODE, "increasing refcount on %p\n",
158 current->journal_info);
163 case FSFILT_OP_RMDIR:
164 case FSFILT_OP_UNLINK:
165 /* delete one file + create/update logs for each stripe */
166 nblocks += FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb);
167 nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
168 EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
170 case FSFILT_OP_RENAME:
171 /* modify additional directory */
172 nblocks += EXT3_SINGLEDATA_TRANS_BLOCKS;
174 case FSFILT_OP_SYMLINK:
175 /* additional block + block bitmap + GDT for long symlink */
178 case FSFILT_OP_CREATE: {
179 #if defined(EXT3_EXTENTS_FL) && defined(EXT3_INDEX_FL)
182 if (!test_opt(inode->i_sb, EXTENTS)) {
184 } else if (((EXT3_I(inode)->i_flags &
185 cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL)) ==
186 cpu_to_le32(EXT3_EXTENTS_FL | EXT3_INDEX_FL))) {
187 CWARN("extent-mapped directory found - contact "
188 "CFS: support@clusterfs.com\n");
195 case FSFILT_OP_MKDIR:
196 case FSFILT_OP_MKNOD:
197 /* modify one inode + block bitmap + GDT */
201 /* modify parent directory */
202 nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
203 FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
204 /* create/update logs for each stripe */
205 nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
206 EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
208 case FSFILT_OP_SETATTR:
209 /* Setattr on inode */
211 nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
212 FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
213 /* quota chown log for each stripe */
214 nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
215 EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
217 case FSFILT_OP_CANCEL_UNLINK:
218 /* blocks for log header bitmap update OR
219 * blocks for catalog header bitmap update + unlink of logs */
220 nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
221 FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb) * logs;
224 /* delete 2 files (file + array id) + create 1 file (array id);
225 * create/update logs for each stripe */
226 nblocks += 2 * FSFILT_DELETE_TRANS_BLOCKS(inode->i_sb);
228 /*create array log for head file*/
230 nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
231 EXT3_SINGLEDATA_TRANS_BLOCKS);
232 /*update head file array */
233 nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
234 FSFILT_DATA_TRANS_BLOCKS(inode->i_sb);
236 default: CERROR("unknown transaction start op %d\n", op);
240 LASSERT(current->journal_info == desc_private);
241 journal = EXT3_SB(inode->i_sb)->s_journal;
242 if (nblocks > journal->j_max_transaction_buffers) {
243 CWARN("too many credits %d for op %ux%u using %d instead\n",
244 nblocks, op, logs, journal->j_max_transaction_buffers);
245 nblocks = journal->j_max_transaction_buffers;
249 LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
250 handle = fsfilt_ext3_journal_start(inode, nblocks);
253 LASSERT(current->journal_info == handle);
255 CERROR("error starting handle for op %u (%u credits): rc %ld\n",
256 op, nblocks, PTR_ERR(handle));
261 * Calculate the number of buffer credits needed to write multiple pages in
262 * a single ext3 transaction. No, this shouldn't be here, but as yet ext3
263 * doesn't have a nice API for calculating this sort of thing in advance.
265 * See comment above ext3_writepage_trans_blocks for details. We assume
266 * no data journaling is being done, but it does allow for all of the pages
267 * being non-contiguous. If we are guaranteed contiguous pages we could
268 * reduce the number of (d)indirect blocks a lot.
270 * With N blocks per page and P pages, for each inode we have at most:
272 * min(N*P, blocksize/4 + 1) dindirect blocks
275 * For the entire filesystem, we have at most:
276 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
277 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
278 * objcount inode blocks
280 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
282 * 1 EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
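/* Editorial worked example (not from the original source, numbers assumed):
 * with 4096-byte blocks and 4096-byte pages (blockpp = 1), one object and 16
 * contiguous pages, the per-object loop below adds min(16, 2) + (1 + 2) = 5
 * bitmap blocks, the per-buffer loop adds another 15, so nbitmaps is about
 * 20; ngdblocks is the same value capped at s_gdb_count, and the final
 * estimate is roughly objcount + 1 + nbitmaps + ngdblocks +
 * FSFILT_DATA_TRANS_BLOCKS(sb) plus the quota credits. */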
284 static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
285 int niocount, struct niobuf_local *nb)
287 struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
289 const int blockpp = 1 << (CFS_PAGE_SHIFT - sb->s_blocksize_bits);
290 int nbitmaps = 0, ngdblocks;
291 int needed = objcount + 1; /* inodes + superblock */
294 for (i = 0, j = 0; i < objcount; i++, fso++) {
295 /* two or more dindirect blocks in case we cross a boundary */
296 int ndind = (long)((nb[j + fso->fso_bufcnt - 1].offset -
297 nb[j].offset) >>
298 sb->s_blocksize_bits) /
299 (EXT3_ADDR_PER_BLOCK(sb) * EXT3_ADDR_PER_BLOCK(sb));
300 nbitmaps += min(fso->fso_bufcnt, ndind > 0 ? ndind : 2);
302 /* leaf, indirect, tindirect blocks for first block */
303 nbitmaps += blockpp + 2;
305 j += fso->fso_bufcnt;
308 next_indir = nb[0].offset +
309 (EXT3_ADDR_PER_BLOCK(sb) << sb->s_blocksize_bits);
310 for (i = 1; i < niocount; i++) {
311 if (nb[i].offset >= next_indir) {
312 nbitmaps++; /* additional indirect */
313 next_indir = nb[i].offset +
314 (EXT3_ADDR_PER_BLOCK(sb)<<sb->s_blocksize_bits);
315 } else if (nb[i].offset != nb[i - 1].offset + sb->s_blocksize) {
316 nbitmaps++; /* additional indirect */
318 nbitmaps += blockpp; /* each leaf in different group? */
321 ngdblocks = nbitmaps;
322 if (nbitmaps > EXT3_SB(sb)->s_groups_count)
323 nbitmaps = EXT3_SB(sb)->s_groups_count;
324 if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
325 ngdblocks = EXT3_SB(sb)->s_gdb_count;
327 needed += nbitmaps + ngdblocks;
329 /* last_rcvd update */
330 needed += FSFILT_DATA_TRANS_BLOCKS(sb);
332 #if defined(CONFIG_QUOTA)
333 /* We assume that there will be 1 bit set in s_dquot.flags for each
334 * quota file that is active. This is at least true for now.
336 needed += hweight32(sb_any_quota_enabled(sb)) *
337 EXT3_SINGLEDATA_TRANS_BLOCKS;
343 /* We have to start a huge journal transaction here to hold all of the
344 * metadata for the pages being written here. This is necessitated by
345 * the fact that we do lots of prepare_write operations before we do
346 * any of the matching commit_write operations, so even if we split
347 * up to use "smaller" transactions none of them could complete until
348 * all of them were opened. By having a single journal transaction,
349 * we eliminate duplicate reservations for common blocks like the
350 * superblock and group descriptors or bitmaps.
352 * We will start the transaction here, but each prepare_write will
353 * add a refcount to the transaction, and each commit_write will
354 * remove a refcount. The transaction will be closed when all of
355 * the pages have been written.
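/* Editorial sketch (not part of the original file) of the flow described
 * above, written as illustrative pseudo-calls rather than real call sites:
 *
 *      handle = fsfilt_ext3_brw_start(...);   // reserve credits up front
 *      for each page {
 *              prepare_write();               // nested start, ++refcount
 *              commit_write();                // nested stop,  --refcount
 *      }
 *      fsfilt_ext3_commit(..., handle, ...);  // final stop closes the handle
 */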
357 static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
358 int niocount, struct niobuf_local *nb,
359 void *desc_private, int logs)
366 LASSERT(current->journal_info == desc_private);
367 journal = EXT3_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
368 needed = fsfilt_ext3_credits_needed(objcount, fso, niocount, nb);
370 /* The number of blocks we could _possibly_ dirty can be very large.
371 * We reduce our request if it is absurd (and we couldn't get that
372 * many credits for a single handle anyway).
374 * At some point we have to limit the size of I/Os sent at one time,
375 * increase the size of the journal, or we have to calculate the
376 * actual journal requirements more carefully by checking all of
377 * the blocks instead of being maximally pessimistic. It remains to
378 * be seen if this is a real problem or not.
380 if (needed > journal->j_max_transaction_buffers) {
381 CERROR("want too many journal credits (%d) using %d instead\n",
382 needed, journal->j_max_transaction_buffers);
383 needed = journal->j_max_transaction_buffers;
386 LASSERTF(needed > 0, "can't start %d credit transaction\n", needed);
387 handle = fsfilt_ext3_journal_start(fso->fso_dentry->d_inode, needed);
388 if (IS_ERR(handle)) {
389 CERROR("can't get handle for %d credits: rc = %ld\n", needed,
392 LASSERT(handle->h_buffer_credits >= needed);
393 LASSERT(current->journal_info == handle);
399 static int fsfilt_ext3_extend(struct inode *inode, unsigned int nblocks,void *h)
401 handle_t *handle = h;
403 /* fsfilt_extend called with nblocks = 0 for testing in special cases */
405 handle->h_buffer_credits = 0;
406 CWARN("setting credits of handle %p to zero by request\n", h);
409 if (handle->h_buffer_credits > nblocks)
411 if (journal_extend(handle, nblocks) == 0)
414 ext3_mark_inode_dirty(handle, inode);
415 return journal_restart(handle, nblocks);
418 static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
421 handle_t *handle = h;
423 LASSERT(current->journal_info == handle);
425 handle->h_sync = 1; /* recovery likes this */
427 rc = fsfilt_ext3_journal_stop(handle);
432 static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
436 transaction_t *transaction;
437 handle_t *handle = h;
441 LASSERT(current->journal_info == handle);
443 transaction = handle->h_transaction;
444 journal = transaction->t_journal;
445 tid = transaction->t_tid;
446 /* we don't want to be blocked */
448 rc = fsfilt_ext3_journal_stop(handle);
450 CERROR("error while stopping transaction: %d\n", rc);
453 log_start_commit(journal, tid);
455 *wait_handle = (void *) tid;
456 CDEBUG(D_INODE, "commit async: %lu\n", (unsigned long) tid);
460 static int fsfilt_ext3_commit_wait(struct inode *inode, void *h)
462 journal_t *journal = EXT3_JOURNAL(inode);
463 tid_t tid = (tid_t)(long)h;
465 CDEBUG(D_INODE, "commit wait: %lu\n", (unsigned long) tid);
466 if (unlikely(is_journal_aborted(journal)))
469 log_wait_commit(EXT3_JOURNAL(inode), tid);
471 if (unlikely(is_journal_aborted(journal)))
476 static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
477 struct iattr *iattr, int do_trunc)
479 struct inode *inode = dentry->d_inode;
484 /* Avoid marking the inode dirty on the superblock list unnecessarily.
485 * We are already writing the inode to disk as part of this
486 * transaction and want to avoid a lot of extra inode writeout
487 * later on. b=9828 */
488 if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
489 /* ATTR_SIZE would invoke truncate: clear it */
490 iattr->ia_valid &= ~ATTR_SIZE;
491 EXT3_I(inode)->i_disksize = iattr->ia_size;
492 i_size_write(inode, iattr->ia_size);
494 if (iattr->ia_valid & ATTR_UID)
495 inode->i_uid = iattr->ia_uid;
496 if (iattr->ia_valid & ATTR_GID)
497 inode->i_gid = iattr->ia_gid;
498 if (iattr->ia_valid & ATTR_ATIME)
499 inode->i_atime = iattr->ia_atime;
500 if (iattr->ia_valid & ATTR_MTIME)
501 inode->i_mtime = iattr->ia_mtime;
502 if (iattr->ia_valid & ATTR_CTIME)
503 inode->i_ctime = iattr->ia_ctime;
504 if (iattr->ia_valid & ATTR_MODE) {
505 inode->i_mode = iattr->ia_mode;
507 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
508 inode->i_mode &= ~S_ISGID;
511 inode->i_sb->s_op->dirty_inode(inode);
516 /* Don't allow setattr to change file type */
517 if (iattr->ia_valid & ATTR_MODE)
518 iattr->ia_mode = (inode->i_mode & S_IFMT) |
519 (iattr->ia_mode & ~S_IFMT);
521 /* We set these flags on the client, but have already checked perms
522 * so don't confuse inode_change_ok. */
523 iattr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);
525 if (inode->i_op->setattr) {
526 rc = inode->i_op->setattr(dentry, iattr);
528 rc = inode_change_ok(inode, iattr);
530 rc = inode_setattr(inode, iattr);
538 static int fsfilt_ext3_iocontrol(struct inode * inode, struct file *file,
539 unsigned int cmd, unsigned long arg)
544 /* FIXME: Can't do this because of nested transaction deadlock */
545 if (cmd == EXT3_IOC_SETFLAGS && (*(int *)arg) & EXT3_JOURNAL_DATA_FL) {
546 CERROR("can't set data journal flag on file\n");
550 if (inode->i_fop->ioctl)
551 rc = inode->i_fop->ioctl(inode, file, cmd, arg);
558 static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
559 void *lmm, int lmm_size, const char *name)
563 LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);
565 rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
566 name, lmm, lmm_size, 0);
570 CERROR("error adding MD data to inode %lu: rc = %d\n",
575 /* Must be called with i_mutex held */
576 static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size,
581 LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);
583 rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
584 name, lmm, lmm_size);
586 /* This gives us the MD size */
588 return (rc == -ENODATA) ? 0 : rc;
591 CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
592 EXT3_XATTR_INDEX_TRUSTED, name,
594 memset(lmm, 0, lmm_size);
595 return (rc == -ENODATA) ? 0 : rc;
601 static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct bio *bio)
607 static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
610 struct inode *inode = file->f_dentry->d_inode;
613 if (S_ISREG(inode->i_mode))
614 rc = file->f_op->read(file, buf, count, off);
616 const int blkbits = inode->i_sb->s_blocksize_bits;
617 const int blksize = inode->i_sb->s_blocksize;
619 CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
620 count, inode->i_ino, *off);
622 struct buffer_head *bh;
625 if (*off < i_size_read(inode)) {
628 bh = ext3_bread(NULL, inode, *off >> blkbits,
631 CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);
634 memcpy(buf, bh->b_data, blksize);
637 /* XXX in theory we should just fake
638 * this buffer and continue like ext3,
639 * especially if this is a partial read
641 CERROR("error read dir %lu+%llu: %d\n",
642 inode->i_ino, *off, err);
647 struct ext3_dir_entry_2 *fake = (void *)buf;
649 CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
650 memset(fake, 0, sizeof(*fake));
651 fake->rec_len = cpu_to_le16(blksize);
663 static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
665 struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;
667 fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);
669 OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
672 static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
673 void *handle, fsfilt_cb_t cb_func,
676 struct fsfilt_cb_data *fcb;
678 OBD_SLAB_ALLOC(fcb, fcb_cache, CFS_ALLOC_IO, sizeof *fcb);
682 fcb->cb_func = cb_func;
684 fcb->cb_last_rcvd = last_rcvd;
685 fcb->cb_data = cb_data;
687 CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
688 journal_callback_set(handle, fsfilt_ext3_cb_func,
689 (struct journal_callback *)fcb);
695 * We need to hack the return value for the free inode counts because
696 * the current EA code requires one filesystem block per inode with EAs,
697 * so it is possible to run out of blocks before we run out of inodes.
699 * This can be removed when the ext3 EA code is fixed.
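/* Editorial example with assumed numbers (not from the original source): if
 * statfs returns f_bfree = 1000 but f_ffree = 50000, the adjustment below
 * reports f_files = (f_files - 50000) + 1000 and f_ffree = 1000, so we never
 * advertise more free inodes than there are free blocks to hold their EAs. */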
701 static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
706 memset(&sfs, 0, sizeof(sfs));
708 rc = ll_do_statfs(sb, &sfs);
710 if (!rc && sfs.f_bfree < sfs.f_ffree) {
711 sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
712 sfs.f_ffree = sfs.f_bfree;
715 statfs_pack(osfs, &sfs);
719 static int fsfilt_ext3_sync(struct super_block *sb)
721 return ext3_force_commit(sb);
724 #if defined(EXT3_MULTIBLOCK_ALLOCATOR) && (!defined(EXT3_EXT_CACHE_NO) || defined(EXT_CACHE_MARK))
725 #warning "kernel code has old extents/mballoc patch, disabling"
726 #undef EXT3_MULTIBLOCK_ALLOCATOR
728 #ifndef EXT3_EXTENTS_FL
729 #define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
732 #ifdef EXT3_MULTIBLOCK_ALLOCATOR
733 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
734 #define ext3_up_truncate_sem(inode) up(&EXT3_I(inode)->truncate_sem);
735 #define ext3_down_truncate_sem(inode) down(&EXT3_I(inode)->truncate_sem);
737 #define ext3_up_truncate_sem(inode) mutex_unlock(&EXT3_I(inode)->truncate_mutex);
738 #define ext3_down_truncate_sem(inode) mutex_lock(&EXT3_I(inode)->truncate_mutex);
741 #include <linux/lustre_version.h>
742 #if EXT3_EXT_MAGIC == 0xf301
743 #define ee_start e_start
744 #define ee_block e_block
747 #ifndef EXT3_BB_MAX_BLOCKS
748 #define ext3_mb_new_blocks(handle, inode, goal, count, aflags, err) \
749 ext3_new_blocks(handle, inode, count, goal, err)
753 unsigned long *blocks;
761 static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
762 unsigned long block, int *aflags)
764 struct ext3_inode_info *ei = EXT3_I(inode);
765 unsigned long bg_start;
766 unsigned long colour;
770 struct ext3_extent *ex;
771 depth = path->p_depth;
773 /* try to predict block placement */
774 if ((ex = path[depth].p_ext)) {
776 /* This prefers to eat into a contiguous extent
777 * rather than find an extent that the whole
778 * request will fit into. This can fragment data
779 * block allocation and prevents our lovely 1M I/Os
780 * from reaching the disk intact. */
781 if (ex->ee_block + ex->ee_len == block)
784 return ex->ee_start + (block - ex->ee_block);
787 /* it looks like the index is empty;
788 * try to find a goal starting from the index block itself */
789 if (path[depth].p_bh)
790 return path[depth].p_bh->b_blocknr;
793 /* OK. use inode's group */
794 bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
795 le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
796 colour = (current->pid % 16) *
797 (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
798 return bg_start + colour + block;
801 #define ll_unmap_underlying_metadata(sb, blocknr) \
802 unmap_underlying_metadata((sb)->s_bdev, blocknr)
804 #ifndef EXT3_MB_HINT_GROUP_ALLOC
805 static unsigned long new_blocks(handle_t *handle, struct ext3_extents_tree *tree,
806 struct ext3_ext_path *path, unsigned long block,
807 int *count, int *err)
809 unsigned long pblock, goal;
812 goal = ext3_ext_find_goal(tree->inode, path, block, &aflags);
813 aflags |= 2; /* blocks have already been reserved */
814 pblock = ext3_mb_new_blocks(handle, tree->inode, goal, count, aflags, err);
819 static unsigned long new_blocks(handle_t *handle, struct ext3_extents_tree *tree,
820 struct ext3_ext_path *path, unsigned long block,
821 int *count, int *err)
823 struct ext3_allocation_request ar;
824 unsigned long pblock;
827 /* find neighbour allocated blocks */
829 *err = ext3_ext_search_left(tree, path, &ar.lleft, &ar.pleft);
833 *err = ext3_ext_search_right(tree, path, &ar.lright, &ar.pright);
837 /* allocate new block */
838 ar.goal = ext3_ext_find_goal(tree->inode, path, block, &aflags);
839 ar.inode = tree->inode;
842 ar.flags = EXT3_MB_HINT_DATA;
843 pblock = ext3_mb_new_blocks(handle, &ar, err);
850 static int ext3_ext_new_extent_cb(struct ext3_extents_tree *tree,
851 struct ext3_ext_path *path,
852 struct ext3_ext_cache *cex)
854 struct inode *inode = tree->inode;
855 struct bpointers *bp = tree->private;
856 struct ext3_extent nex;
857 unsigned long pblock;
863 EXT_ASSERT(i == path->p_depth);
864 EXT_ASSERT(path[i].p_hdr);
866 if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
871 if (bp->create == 0) {
873 if (cex->ec_block < bp->start)
874 i = bp->start - cex->ec_block;
875 if (i >= cex->ec_len)
876 CERROR("nothing to do?! i = %d, e_num = %u\n",
878 for (; i < cex->ec_len && bp->num; i++) {
890 tgen = EXT_GENERATION(tree);
891 count = ext3_ext_calc_credits_for_insert(tree, path);
892 ext3_up_truncate_sem(inode);
894 handle = fsfilt_ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
895 if (IS_ERR(handle)) {
896 ext3_down_truncate_sem(inode);
897 return PTR_ERR(handle);
900 ext3_down_truncate_sem(inode);
901 if (tgen != EXT_GENERATION(tree)) {
902 /* the tree has changed, so the path may be invalid at the moment */
903 fsfilt_ext3_journal_stop(handle);
908 pblock = new_blocks(handle, tree, path, cex->ec_block, &count, &err);
911 EXT_ASSERT(count <= cex->ec_len);
913 /* insert new extent */
914 nex.ee_block = cex->ec_block;
915 nex.ee_start = pblock;
917 err = ext3_ext_insert_extent(handle, tree, path, &nex);
919 CERROR("can't insert extent: %d\n", err);
920 /* XXX: export ext3_free_blocks() */
921 /*ext3_free_blocks(handle, inode, nex.ee_start, nex.ee_len, 0);*/
926 * By recording the length of the extent we actually inserted,
927 * we are asking ext3_ext_walk_space() to continue
928 * scanning after that block
930 cex->ec_len = nex.ee_len;
931 cex->ec_start = nex.ee_start;
932 BUG_ON(nex.ee_len == 0);
933 BUG_ON(nex.ee_block != cex->ec_block);
936 fsfilt_ext3_journal_stop(handle);
941 CERROR("hmm. why do we find this extent?\n");
942 CERROR("initial space: %lu:%u\n",
943 bp->start, bp->init_num);
944 CERROR("current extent: %u/%u/%u %d\n",
945 cex->ec_block, cex->ec_len,
946 cex->ec_start, cex->ec_type);
949 if (cex->ec_block < bp->start)
950 i = bp->start - cex->ec_block;
951 if (i >= cex->ec_len)
952 CERROR("nothing to do?! i = %d, e_num = %u\n",
954 for (; i < cex->ec_len && bp->num; i++) {
955 *(bp->blocks) = cex->ec_start + i;
956 if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
960 /* unmap any possible underlying metadata from
961 * the block device mapping. bug 6998. */
962 ll_unmap_underlying_metadata(inode->i_sb,
974 int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
975 unsigned long num, unsigned long *blocks,
976 int *created, int create)
978 struct ext3_extents_tree tree;
982 CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
983 block, block + num - 1, (unsigned) inode->i_ino);
985 ext3_init_tree_desc(&tree, inode);
988 bp.created = created;
990 bp.init_num = bp.num = num;
993 ext3_down_truncate_sem(inode);
994 err = ext3_ext_walk_space(&tree, block, num, ext3_ext_new_extent_cb);
995 ext3_ext_invalidate_cache(&tree);
996 ext3_up_truncate_sem(inode);
1001 int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
1002 int pages, unsigned long *blocks,
1003 int *created, int create)
1005 int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
1007 struct page *fp = NULL;
1010 CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
1011 inode->i_ino, pages, (*page)->index);
1013 /* the pages are already sorted, so we just have to find
1014 * contiguous ranges and process them properly */
1017 /* start new extent */
1022 } else if (fp->index + clen == (*page)->index) {
1023 /* continue the extent */
1030 /* process found extent */
1031 rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
1032 clen * blocks_per_page, blocks,
1037 /* look for next extent */
1039 blocks += blocks_per_page * clen;
1040 created += blocks_per_page * clen;
1044 rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
1045 clen * blocks_per_page, blocks,
1050 #endif /* EXT3_MULTIBLOCK_ALLOCATOR */
1052 extern int ext3_map_inode_page(struct inode *inode, struct page *page,
1053 unsigned long *blocks, int *created, int create);
1054 int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
1055 int pages, unsigned long *blocks,
1056 int *created, int create)
1058 int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
1062 for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
1063 rc = ext3_map_inode_page(inode, *page, b, cr, create);
1065 CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
1066 inode->i_ino, *b, *cr, create, rc);
1070 b += blocks_per_page;
1071 cr += blocks_per_page;
1076 int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
1077 int pages, unsigned long *blocks,
1078 int *created, int create,
1079 struct semaphore *optional_sem)
1082 #ifdef EXT3_MULTIBLOCK_ALLOCATOR
1083 if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
1084 rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
1085 blocks, created, create);
1089 if (optional_sem != NULL)
1091 rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
1093 if (optional_sem != NULL)
1099 int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
1101 unsigned long block;
1102 struct buffer_head *bh;
1103 int err, blocksize, csize, boffs, osize = size;
1105 /* prevent reading after eof */
1107 if (i_size_read(inode) < *offs + size) {
1108 size = i_size_read(inode) - *offs;
1111 CERROR("size %llu is too short for read %u@%llu\n",
1112 i_size_read(inode), size, *offs);
1114 } else if (size == 0) {
1121 blocksize = 1 << inode->i_blkbits;
1124 block = *offs >> inode->i_blkbits;
1125 boffs = *offs & (blocksize - 1);
1126 csize = min(blocksize - boffs, size);
1127 bh = ext3_bread(NULL, inode, block, 0, &err);
1129 CERROR("can't read block: %d\n", err);
1133 memcpy(buf, bh->b_data + boffs, csize);
1142 EXPORT_SYMBOL(fsfilt_ext3_read);
1144 static int fsfilt_ext3_read_record(struct file * file, void *buf,
1145 int size, loff_t *offs)
1148 rc = fsfilt_ext3_read(file->f_dentry->d_inode, buf, size, offs);
1154 int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
1155 loff_t *offs, handle_t *handle)
1157 struct buffer_head *bh = NULL;
1158 loff_t old_size = i_size_read(inode), offset = *offs;
1159 loff_t new_size = i_size_read(inode);
1160 unsigned long block;
1161 int err = 0, blocksize = 1 << inode->i_blkbits, size, boffs;
1163 while (bufsize > 0) {
1167 block = offset >> inode->i_blkbits;
1168 boffs = offset & (blocksize - 1);
1169 size = min(blocksize - boffs, bufsize);
1170 bh = ext3_bread(handle, inode, block, 1, &err);
1172 CERROR("can't read/create block: %d\n", err);
1176 err = ext3_journal_get_write_access(handle, bh);
1178 CERROR("journal_get_write_access() returned error %d\n",
1182 LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
1183 memcpy(bh->b_data + boffs, buf, size);
1184 err = ext3_journal_dirty_metadata(handle, bh);
1186 CERROR("journal_dirty_metadata() returned error %d\n",
1190 if (offset + size > new_size)
1191 new_size = offset + size;
1199 /* correct in-core and on-disk sizes */
1200 if (new_size > i_size_read(inode)) {
1202 if (new_size > i_size_read(inode))
1203 i_size_write(inode, new_size);
1204 if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
1205 EXT3_I(inode)->i_disksize = i_size_read(inode);
1206 if (i_size_read(inode) > old_size)
1207 mark_inode_dirty(inode);
1215 EXPORT_SYMBOL(fsfilt_ext3_write_handle);
1217 static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
1218 loff_t *offs, int force_sync)
1220 struct inode *inode = file->f_dentry->d_inode;
1222 int err, block_count = 0, blocksize;
1224 /* Determine how many transaction credits are needed */
1225 blocksize = 1 << inode->i_blkbits;
1226 block_count = (*offs & (blocksize - 1)) + bufsize;
1227 block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
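/* Editorial example with assumed numbers: a 100-byte record at offset 4090 on
 * a 4096-byte-block filesystem straddles a block boundary, so block_count is
 * (4090 % 4096 + 100 + 4095) >> 12 = 2 and we reserve
 * 2 * FSFILT_DATA_TRANS_BLOCKS(sb) + 2 credits below. */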
1229 handle = fsfilt_ext3_journal_start(inode,
1230 block_count * FSFILT_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
1231 if (IS_ERR(handle)) {
1232 CERROR("can't start transaction for %d blocks (%d bytes)\n",
1233 block_count * FSFILT_DATA_TRANS_BLOCKS(inode->i_sb) + 2, bufsize);
1234 return PTR_ERR(handle);
1237 err = fsfilt_ext3_write_handle(inode, buf, bufsize, offs, handle);
1239 if (!err && force_sync)
1240 handle->h_sync = 1; /* recovery likes this */
1242 fsfilt_ext3_journal_stop(handle);
1247 static int fsfilt_ext3_setup(struct super_block *sb)
1250 EXT3_SB(sb)->dx_lock = fsfilt_ext3_dx_lock;
1251 EXT3_SB(sb)->dx_unlock = fsfilt_ext3_dx_unlock;
1254 CWARN("Enabling PDIROPS\n");
1255 set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
1256 sb->s_flags |= S_PDIROPS;
1258 if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
1259 CWARN("filesystem doesn't have dir_index feature enabled\n");
1260 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)) && HAVE_QUOTA_SUPPORT
1261 set_opt(EXT3_SB(sb)->s_mount_opt, QUOTA);
1266 /* If fso is NULL, op is an FSFILT operation; otherwise op is the number of
1267 fso objects. logs is the number of logfiles to update */
1268 static int fsfilt_ext3_get_op_len(int op, struct fsfilt_objinfo *fso, int logs)
1272 case FSFILT_OP_CREATE:
1273 /* directory leaf, index & indirect & EA*/
1274 return 4 + 3 * logs;
1275 case FSFILT_OP_UNLINK:
1281 struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
1282 int blockpp = 1 << (CFS_PAGE_SHIFT - sb->s_blocksize_bits);
1283 int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
1284 for (i = 0; i < op; i++, fso++) {
1285 int nblocks = fso->fso_bufcnt * blockpp;
1286 int ndindirect = min(nblocks, addrpp + 1);
1287 int nindir = nblocks + ndindirect + 1;
1291 return needed + 3 * logs;
1297 static const char *op_quotafile[] = { "lquota.user", "lquota.group" };
1299 #define DQINFO_COPY(out, in) \
1301 Q_COPY(out, in, dqi_bgrace); \
1302 Q_COPY(out, in, dqi_igrace); \
1303 Q_COPY(out, in, dqi_flags); \
1304 Q_COPY(out, in, dqi_valid); \
1307 #define DQBLK_COPY(out, in) \
1309 Q_COPY(out, in, dqb_bhardlimit); \
1310 Q_COPY(out, in, dqb_bsoftlimit); \
1311 Q_COPY(out, in, dqb_curspace); \
1312 Q_COPY(out, in, dqb_ihardlimit); \
1313 Q_COPY(out, in, dqb_isoftlimit); \
1314 Q_COPY(out, in, dqb_curinodes); \
1315 Q_COPY(out, in, dqb_btime); \
1316 Q_COPY(out, in, dqb_itime); \
1317 Q_COPY(out, in, dqb_valid); \
1322 static int fsfilt_ext3_quotactl(struct super_block *sb,
1323 struct obd_quotactl *oqc)
1325 int i, rc = 0, error = 0;
1326 struct quotactl_ops *qcop;
1327 struct if_dqinfo *info;
1328 struct if_dqblk *dqblk;
1334 OBD_ALLOC_PTR(info);
1337 OBD_ALLOC_PTR(dqblk);
1343 DQINFO_COPY(info, &oqc->qc_dqinfo);
1344 DQBLK_COPY(dqblk, &oqc->qc_dqblk);
1347 if (oqc->qc_cmd == Q_QUOTAON || oqc->qc_cmd == Q_QUOTAOFF) {
1348 for (i = 0; i < MAXQUOTAS; i++) {
1349 if (!Q_TYPESET(oqc, i))
1352 if (oqc->qc_cmd == Q_QUOTAON) {
1353 if (!qcop->quota_on)
1354 GOTO(out, rc = -ENOSYS);
1355 rc = qcop->quota_on(sb, i, oqc->qc_id,
1356 (char *)op_quotafile[i]);
1357 } else if (oqc->qc_cmd == Q_QUOTAOFF) {
1358 if (!qcop->quota_off)
1359 GOTO(out, rc = -ENOSYS);
1360 rc = qcop->quota_off(sb, i);
1368 GOTO(out, rc ?: error);
1371 switch (oqc->qc_cmd) {
1374 if (!qcop->get_info)
1375 GOTO(out, rc = -ENOSYS);
1376 rc = qcop->get_info(sb, oqc->qc_type, info);
1380 if (!qcop->set_dqblk)
1381 GOTO(out, rc = -ENOSYS);
1382 rc = qcop->set_dqblk(sb, oqc->qc_type, oqc->qc_id, dqblk);
1386 if (!qcop->get_dqblk)
1387 GOTO(out, rc = -ENOSYS);
1388 rc = qcop->get_dqblk(sb, oqc->qc_type, oqc->qc_id, dqblk);
1391 if (!sb->s_qcop->quota_sync)
1392 GOTO(out, rc = -ENOSYS);
1393 qcop->quota_sync(sb, oqc->qc_type);
1396 CERROR("unsupported quotactl command: %d", oqc->qc_cmd);
1400 DQINFO_COPY(&oqc->qc_dqinfo, info);
1401 DQBLK_COPY(&oqc->qc_dqblk, dqblk);
1404 OBD_FREE_PTR(dqblk);
1407 CDEBUG(D_QUOTA, "quotactl command %#x, id %u, type %d "
1409 oqc->qc_cmd, oqc->qc_id, oqc->qc_type, rc);
1414 struct hlist_node dqb_hash; /* quotacheck hash */
1415 struct list_head dqb_list; /* in list also */
1416 qid_t dqb_id; /* uid/gid */
1417 short dqb_type; /* USRQUOTA/GRPQUOTA */
1418 __u32 dqb_bhardlimit; /* block hard limit */
1419 __u32 dqb_bsoftlimit; /* block soft limit */
1420 qsize_t dqb_curspace; /* current space */
1421 __u32 dqb_ihardlimit; /* inode hard limit */
1422 __u32 dqb_isoftlimit; /* inode soft limit */
1423 __u32 dqb_curinodes; /* current inodes */
1424 __u64 dqb_btime; /* block grace time */
1425 __u64 dqb_itime; /* inode grace time */
1426 __u32 dqb_valid; /* flag for above fields */
1429 static inline unsigned int chkquot_hash(qid_t id, int type)
1430 __attribute__((__const__));
1432 static inline unsigned int chkquot_hash(qid_t id, int type)
1434 return (id * (MAXQUOTAS - type)) % NR_DQHASH;
1437 static inline struct chk_dqblk *
1438 find_chkquot(struct hlist_head *head, qid_t id, int type)
1440 struct hlist_node *node;
1441 struct chk_dqblk *cdqb;
1443 hlist_for_each(node, head) {
1444 cdqb = hlist_entry(node, struct chk_dqblk, dqb_hash);
1445 if (cdqb->dqb_id == id && cdqb->dqb_type == type)
1452 static struct chk_dqblk *alloc_chkquot(qid_t id, int type)
1454 struct chk_dqblk *cdqb;
1456 OBD_ALLOC_PTR(cdqb);
1458 INIT_HLIST_NODE(&cdqb->dqb_hash);
1459 INIT_LIST_HEAD(&cdqb->dqb_list);
1461 cdqb->dqb_type = type;
1467 static struct chk_dqblk *
1468 cqget(struct super_block *sb, struct hlist_head *hash, struct list_head *list,
1469 qid_t id, int type, int first_check)
1471 struct hlist_head *head = hash + chkquot_hash(id, type);
1472 struct if_dqblk dqb;
1473 struct chk_dqblk *cdqb;
1476 cdqb = find_chkquot(head, id, type);
1480 cdqb = alloc_chkquot(id, type);
1485 rc = sb->s_qcop->get_dqblk(sb, type, id, &dqb);
1487 CERROR("get_dqblk of id %u, type %d failed: %d\n",
1490 DQBLK_COPY(cdqb, &dqb);
1491 cdqb->dqb_curspace = 0;
1492 cdqb->dqb_curinodes = 0;
1496 hlist_add_head(&cdqb->dqb_hash, head);
1497 list_add_tail(&cdqb->dqb_list, list);
1502 static inline int quota_onoff(struct super_block *sb, int cmd, int type)
1504 struct obd_quotactl *oqctl;
1507 OBD_ALLOC_PTR(oqctl);
1511 oqctl->qc_cmd = cmd;
1512 oqctl->qc_id = QFMT_LDISKFS;
1513 oqctl->qc_type = type;
1514 rc = fsfilt_ext3_quotactl(sb, oqctl);
1516 OBD_FREE_PTR(oqctl);
1520 static inline int read_old_dqinfo(struct super_block *sb, int type,
1521 struct if_dqinfo *dqinfo)
1523 struct obd_quotactl *oqctl;
1527 OBD_ALLOC_PTR(oqctl);
1531 oqctl->qc_cmd = Q_GETINFO;
1532 oqctl->qc_type = type;
1533 rc = fsfilt_ext3_quotactl(sb, oqctl);
1535 ((struct obd_dqinfo *)dqinfo)[type] = oqctl->qc_dqinfo;
1537 OBD_FREE_PTR(oqctl);
1541 static inline struct ext3_group_desc *
1542 get_group_desc(struct super_block *sb, int group)
1544 unsigned long desc_block, desc;
1545 struct ext3_group_desc *gdp;
1547 desc_block = group / EXT3_DESC_PER_BLOCK(sb);
1548 desc = group % EXT3_DESC_PER_BLOCK(sb);
1549 gdp = (struct ext3_group_desc *)
1550 EXT3_SB(sb)->s_group_desc[desc_block]->b_data;
1555 static inline struct buffer_head *
1556 read_inode_bitmap(struct super_block *sb, unsigned long group)
1558 struct ext3_group_desc *desc;
1559 struct buffer_head *bh;
1561 desc = get_group_desc(sb, group);
1562 bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
1567 static inline struct inode *ext3_iget_inuse(struct super_block *sb,
1568 struct buffer_head *bitmap_bh,
1569 int index, unsigned long ino)
1571 struct inode *inode = NULL;
1573 if (ext3_test_bit(index, bitmap_bh->b_data))
1574 inode = iget(sb, ino);
1580 struct hlist_head qckt_hash[NR_DQHASH]; /* quotacheck hash */
1581 struct list_head qckt_list; /* quotacheck list */
1582 int qckt_first_check[MAXQUOTAS]; /* 1 if no old quotafile */
1583 struct if_dqinfo qckt_dqinfo[MAXQUOTAS]; /* old dqinfo */
1586 static int add_inode_quota(struct inode *inode, struct qchk_ctxt *qctxt,
1587 struct obd_quotactl *oqc)
1589 struct chk_dqblk *cdqb[MAXQUOTAS] = { NULL, };
1591 qid_t qid[MAXQUOTAS];
1597 qid[USRQUOTA] = inode->i_uid;
1598 qid[GRPQUOTA] = inode->i_gid;
1600 if (S_ISDIR(inode->i_mode) ||
1601 S_ISREG(inode->i_mode) ||
1602 S_ISLNK(inode->i_mode))
1603 size = inode_get_bytes(inode);
1605 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1606 if (!Q_TYPESET(oqc, cnt))
1609 cdqb[cnt] = cqget(inode->i_sb, qctxt->qckt_hash,
1610 &qctxt->qckt_list, qid[cnt], cnt,
1611 qctxt->qckt_first_check[cnt]);
1617 cdqb[cnt]->dqb_curspace += size;
1618 cdqb[cnt]->dqb_curinodes++;
1622 for (i = 0; i < cnt; i++) {
1623 if (!Q_TYPESET(oqc, i))
1626 cdqb[i]->dqb_curspace -= size;
1627 cdqb[i]->dqb_curinodes--;
1634 static int v2_write_dqheader(struct file *f, int type)
1636 static const __u32 quota_magics[] = V2_INITQMAGICS;
1637 static const __u32 quota_versions[] = V2_INITQVERSIONS;
1638 struct v2_disk_dqheader dqhead;
1641 CLASSERT(ARRAY_SIZE(quota_magics) == ARRAY_SIZE(quota_versions));
1642 LASSERT(0 <= type && type < ARRAY_SIZE(quota_magics));
1644 dqhead.dqh_magic = cpu_to_le32(quota_magics[type]);
1645 dqhead.dqh_version = cpu_to_le32(quota_versions[type]);
1647 return cfs_user_write(f, (char *)&dqhead, sizeof(dqhead), &offset);
1650 /* write dqinfo struct in a new quota file */
1651 static int v2_write_dqinfo(struct file *f, int type, struct if_dqinfo *info)
1653 struct v2_disk_dqinfo dqinfo;
1654 __u32 blocks = V2_DQTREEOFF + 1;
1655 loff_t offset = V2_DQINFOOFF;
1658 dqinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
1659 dqinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
1660 dqinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK &
1663 dqinfo.dqi_bgrace = cpu_to_le32(MAX_DQ_TIME);
1664 dqinfo.dqi_igrace = cpu_to_le32(MAX_IQ_TIME);
1665 dqinfo.dqi_flags = 0;
1668 dqinfo.dqi_blocks = cpu_to_le32(blocks);
1669 dqinfo.dqi_free_blk = 0;
1670 dqinfo.dqi_free_entry = 0;
1672 return cfs_user_write(f, (char *)&dqinfo, sizeof(dqinfo), &offset);
1675 static int create_new_quota_files(struct qchk_ctxt *qctxt,
1676 struct obd_quotactl *oqc)
1681 for (i = 0; i < MAXQUOTAS; i++) {
1682 struct if_dqinfo *info = qctxt->qckt_first_check[i]?
1683 NULL : &qctxt->qckt_dqinfo[i];
1686 if (!Q_TYPESET(oqc, i))
1689 file = filp_open(op_quotafile[i], O_RDWR | O_CREAT | O_TRUNC,
1693 CERROR("can't create %s file: rc = %d\n",
1694 op_quotafile[i], rc);
1698 if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
1699 CERROR("file %s is not regular", op_quotafile[i]);
1700 filp_close(file, 0);
1701 GOTO(out, rc = -EINVAL);
1704 rc = v2_write_dqheader(file, i);
1706 filp_close(file, 0);
1710 rc = v2_write_dqinfo(file, i, info);
1711 filp_close(file, 0);
1721 static int commit_chkquot(struct super_block *sb, struct qchk_ctxt *qctxt,
1722 struct chk_dqblk *cdqb)
1724 struct obd_quotactl *oqc;
1733 now = CURRENT_SECONDS;
1735 if (cdqb->dqb_bsoftlimit &&
1736 toqb(cdqb->dqb_curspace) >= cdqb->dqb_bsoftlimit &&
1739 now + qctxt->qckt_dqinfo[cdqb->dqb_type].dqi_bgrace;
1741 if (cdqb->dqb_isoftlimit &&
1742 cdqb->dqb_curinodes >= cdqb->dqb_isoftlimit &&
1745 now + qctxt->qckt_dqinfo[cdqb->dqb_type].dqi_igrace;
1747 cdqb->dqb_valid = QIF_ALL;
1749 oqc->qc_cmd = Q_SETQUOTA;
1750 oqc->qc_type = cdqb->dqb_type;
1751 oqc->qc_id = cdqb->dqb_id;
1752 DQBLK_COPY(&oqc->qc_dqblk, cdqb);
1754 rc = fsfilt_ext3_quotactl(sb, oqc);
1759 static int prune_chkquots(struct super_block *sb,
1760 struct qchk_ctxt *qctxt, int error)
1762 struct chk_dqblk *cdqb, *tmp;
1765 list_for_each_entry_safe(cdqb, tmp, &qctxt->qckt_list, dqb_list) {
1767 rc = commit_chkquot(sb, qctxt, cdqb);
1771 hlist_del_init(&cdqb->dqb_hash);
1772 list_del(&cdqb->dqb_list);
1779 static int fsfilt_ext3_quotacheck(struct super_block *sb,
1780 struct obd_quotactl *oqc)
1782 struct ext3_sb_info *sbi = EXT3_SB(sb);
1784 struct qchk_ctxt *qctxt;
1785 struct buffer_head *bitmap_bh = NULL;
1787 struct inode *inode;
1791 /* turn on quota and read the old dqinfo if it exists */
1792 OBD_ALLOC_PTR(qctxt);
1794 oqc->qc_stat = -ENOMEM;
1798 for (i = 0; i < NR_DQHASH; i++)
1799 INIT_HLIST_HEAD(&qctxt->qckt_hash[i]);
1800 INIT_LIST_HEAD(&qctxt->qckt_list);
1802 for (i = 0; i < MAXQUOTAS; i++) {
1803 if (!Q_TYPESET(oqc, i))
1806 rc = quota_onoff(sb, Q_QUOTAON, i);
1807 if (!rc || rc == -EBUSY) {
1808 rc = read_old_dqinfo(sb, i, qctxt->qckt_dqinfo);
1811 } else if (rc == -ENOENT) {
1812 qctxt->qckt_first_check[i] = 1;
1818 /* check quota and update in hash */
1819 for (group = 0; group < sbi->s_groups_count; group++) {
1820 ino = group * sbi->s_inodes_per_group + 1;
1821 bitmap_bh = read_inode_bitmap(sb, group);
1823 CERROR("read_inode_bitmap group %d failed", group);
1824 GOTO(out, rc = -EIO);
1827 for (i = 0; i < sbi->s_inodes_per_group; i++, ino++) {
1828 if (ino < sbi->s_first_ino)
1831 inode = ext3_iget_inuse(sb, bitmap_bh, i, ino);
1832 rc = add_inode_quota(inode, qctxt, oqc);
1843 /* read old quota limits from the old quota file (only for users that
1844 * have limits set but own no files) */
1845 #ifdef HAVE_QUOTA_SUPPORT
1846 for (i = 0; i < MAXQUOTAS; i++) {
1847 struct list_head id_list;
1848 struct dquot_id *dqid, *tmp;
1850 if (!Q_TYPESET(oqc, i))
1853 if (qctxt->qckt_first_check[i])
1857 LASSERT(sb_dqopt(sb)->files[i] != NULL);
1858 INIT_LIST_HEAD(&id_list);
1859 #ifndef KERNEL_SUPPORTS_QUOTA_READ
1860 rc = lustre_get_qids(sb_dqopt(sb)->files[i], NULL, i, &id_list);
1862 rc = lustre_get_qids(NULL, sb_dqopt(sb)->files[i], i, &id_list);
1865 CERROR("read old limits failed. (rc:%d)\n", rc);
1867 list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
1868 list_del_init(&dqid->di_link);
1871 cqget(sb, qctxt->qckt_hash, &qctxt->qckt_list,
1873 qctxt->qckt_first_check[i]);
1878 /* turn off quota because we are about to dump the chk_dqblk entries to the quota files */
1879 quota_onoff(sb, Q_QUOTAOFF, oqc->qc_type);
1881 rc = create_new_quota_files(qctxt, oqc);
1885 /* we use vfs functions to set dqblk, so turn quota on */
1886 rc = quota_onoff(sb, Q_QUOTAON, oqc->qc_type);
1888 /* dump and free chk_dqblk */
1889 rc = prune_chkquots(sb, qctxt, rc);
1890 OBD_FREE_PTR(qctxt);
1892 /* turn off quota; `lfs quotacheck` will turn it back on once
1893 * quotacheck has finished on all nodes. */
1894 quota_onoff(sb, Q_QUOTAOFF, oqc->qc_type);
1898 CERROR("quotacheck failed: rc = %d\n", rc);
1903 #ifdef HAVE_QUOTA_SUPPORT
1904 static int fsfilt_ext3_quotainfo(struct lustre_quota_info *lqi, int type,
1910 if (lqi->qi_files[type] == NULL) {
1911 CERROR("operate qinfo before it's enabled!\n");
1917 rc = lustre_check_quota_file(lqi, type);
1920 rc = lustre_read_quota_info(lqi, type);
1923 rc = lustre_write_quota_info(lqi, type);
1925 case QFILE_INIT_INFO:
1926 rc = lustre_init_quota_info(lqi, type);
1929 CERROR("Unsupported admin quota file cmd %d\n", cmd);
1936 static int fsfilt_ext3_qids(struct file *file, struct inode *inode, int type,
1937 struct list_head *list)
1939 return lustre_get_qids(file, inode, type, list);
1942 static int fsfilt_ext3_dquot(struct lustre_dquot *dquot, int cmd)
1947 if (dquot->dq_info->qi_files[dquot->dq_type] == NULL) {
1948 CERROR("operate dquot before it's enabled!\n");
1953 case QFILE_RD_DQUOT:
1954 rc = lustre_read_dquot(dquot);
1956 case QFILE_WR_DQUOT:
1957 if (dquot->dq_dqb.dqb_ihardlimit ||
1958 dquot->dq_dqb.dqb_isoftlimit ||
1959 dquot->dq_dqb.dqb_bhardlimit ||
1960 dquot->dq_dqb.dqb_bsoftlimit)
1961 clear_bit(DQ_FAKE_B, &dquot->dq_flags);
1963 set_bit(DQ_FAKE_B, &dquot->dq_flags);
1965 rc = lustre_commit_dquot(dquot);
1970 CERROR("Unsupported admin quota file cmd %d\n", cmd);
1978 lvfs_sbdev_type fsfilt_ext3_journal_sbdev(struct super_block *sb)
1980 return (EXT3_SB(sb)->journal_bdev);
1982 EXPORT_SYMBOL(fsfilt_ext3_journal_sbdev);
1984 static struct fsfilt_operations fsfilt_ext3_ops = {
1986 .fs_owner = THIS_MODULE,
1987 .fs_getlabel = fsfilt_ext3_get_label,
1988 .fs_setlabel = fsfilt_ext3_set_label,
1989 .fs_uuid = fsfilt_ext3_uuid,
1990 .fs_start = fsfilt_ext3_start,
1991 .fs_brw_start = fsfilt_ext3_brw_start,
1992 .fs_extend = fsfilt_ext3_extend,
1993 .fs_commit = fsfilt_ext3_commit,
1994 .fs_commit_async = fsfilt_ext3_commit_async,
1995 .fs_commit_wait = fsfilt_ext3_commit_wait,
1996 .fs_setattr = fsfilt_ext3_setattr,
1997 .fs_iocontrol = fsfilt_ext3_iocontrol,
1998 .fs_set_md = fsfilt_ext3_set_md,
1999 .fs_get_md = fsfilt_ext3_get_md,
2000 .fs_readpage = fsfilt_ext3_readpage,
2001 .fs_add_journal_cb = fsfilt_ext3_add_journal_cb,
2002 .fs_statfs = fsfilt_ext3_statfs,
2003 .fs_sync = fsfilt_ext3_sync,
2004 .fs_map_inode_pages = fsfilt_ext3_map_inode_pages,
2005 .fs_write_record = fsfilt_ext3_write_record,
2006 .fs_read_record = fsfilt_ext3_read_record,
2007 .fs_setup = fsfilt_ext3_setup,
2008 .fs_send_bio = fsfilt_ext3_send_bio,
2009 .fs_get_op_len = fsfilt_ext3_get_op_len,
2010 .fs_quotactl = fsfilt_ext3_quotactl,
2011 .fs_quotacheck = fsfilt_ext3_quotacheck,
2012 #ifdef HAVE_DISK_INODE_VERSION
2013 .fs_get_version = fsfilt_ext3_get_version,
2014 .fs_set_version = fsfilt_ext3_set_version,
2016 #ifdef HAVE_QUOTA_SUPPORT
2017 .fs_quotainfo = fsfilt_ext3_quotainfo,
2018 .fs_qids = fsfilt_ext3_qids,
2019 .fs_dquot = fsfilt_ext3_dquot,
2021 .fs_journal_sbdev = fsfilt_ext3_journal_sbdev,
2024 static int __init fsfilt_ext3_init(void)
2028 fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb",
2029 sizeof(struct fsfilt_cb_data), 0, 0);
2031 CERROR("error allocating fsfilt journal callback cache\n");
2032 GOTO(out, rc = -ENOMEM);
2035 rc = fsfilt_register_ops(&fsfilt_ext3_ops);
2038 int err = cfs_mem_cache_destroy(fcb_cache);
2039 LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
2045 static void __exit fsfilt_ext3_exit(void)
2049 fsfilt_unregister_ops(&fsfilt_ext3_ops);
2050 rc = cfs_mem_cache_destroy(fcb_cache);
2051 LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
2054 module_init(fsfilt_ext3_init);
2055 module_exit(fsfilt_ext3_exit);
2057 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2058 MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
2059 MODULE_LICENSE("GPL");