Whamcloud - gitweb
LU-1199 ldiskfs: Remove HAVE_{EXT4,JBD2}_JOURNAL_CALLBACK_{SET,ADD}
[fs/lustre-release.git] / lustre / lvfs / fsfilt_ext3.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/lvfs/fsfilt_ext3.c
37  *
38  * Author: Andreas Dilger <adilger@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_FILTER
42
43 #include <linux/init.h>
44 #include <linux/module.h>
45 #include <linux/fs.h>
46 #include <linux/slab.h>
47 #include <linux/pagemap.h>
48 #include <ext4/ext4.h>
49 #include <ext4/ext4_jbd2.h>
50 #include <linux/version.h>
51 #include <linux/bitops.h>
52 #include <linux/quota.h>
53
54 #include <libcfs/libcfs.h>
55 #include <lustre_fsfilt.h>
56 #include <obd.h>
57 #include <linux/lustre_compat25.h>
58 #include <linux/lprocfs_status.h>
59
60 #include <ext4/ext4_extents.h>
61
62 /* for kernels 2.6.18 and later */
63 #define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS(sb)
64
65 #define fsfilt_ext3_ext_insert_extent(handle, inode, path, newext, flag) \
66                ext3_ext_insert_extent(handle, inode, path, newext, flag)
67
68 #define ext3_mb_discard_inode_preallocations(inode) \
69                  ext3_discard_preallocations(inode)
70
71 #define fsfilt_log_start_commit(journal, tid) jbd2_log_start_commit(journal, tid)
72 #define fsfilt_log_wait_commit(journal, tid) jbd2_log_wait_commit(journal, tid)
73
74 static cfs_mem_cache_t *fcb_cache;
75
/*
 * Per-transaction journal-commit callback payload, allocated from
 * fcb_cache.  The embedded jbd2 callback entry lets the struct be
 * recovered from the callback pointer handed back by the journal.
 */
struct fsfilt_cb_data {
        struct ext4_journal_cb_entry cb_jcb; /* private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};
83
84 static char *fsfilt_ext3_get_label(struct super_block *sb)
85 {
86         return EXT3_SB(sb)->s_es->s_volume_name;
87 }
88
/* kernel has ext4_blocks_for_truncate since linux-3.1.1 */
#ifdef HAVE_BLOCKS_FOR_TRUNCATE
# include <ext4/truncate.h>
#else
/*
 * Estimate the journal credits needed to truncate @inode: derived from
 * the file's block count (i_blocks counts 512-byte sectors, hence the
 * shift by blocksize_bits - 9), clamped to [2, EXT4_MAX_TRANS_DATA],
 * plus the base data-transaction cost for the filesystem.
 */
static inline unsigned long ext4_blocks_for_truncate(struct inode *inode)
{
        ext4_lblk_t needed;

        /* convert 512-byte sectors to filesystem blocks */
        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
        if (needed < 2)
                needed = 2;
        if (needed > EXT4_MAX_TRANS_DATA)
                needed = EXT4_MAX_TRANS_DATA;
        return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
#endif
105
/*
 * Start a journal transaction sized for operation @op.
 *
 * We don't currently need any additional blocks for rmdir and
 * unlink transactions because we are storing the OST oa_id inside
 * the inode (which we will be changing anyways as part of this
 * transaction).
 *
 * @inode:        inode the operation applies to (supplies sb/journal)
 * @op:           FSFILT_OP_* code; only UNLINK and CANCEL_UNLINK handled,
 *                anything else LBUG()s
 * @desc_private: expected value of current->journal_info (sanity check)
 * @logs:         number of llog files touched by the operation
 *
 * Returns the handle from ext3_journal_start() - a handle_t * on
 * success or an ERR_PTR() on failure.  If this task already has a
 * running transaction it is joined instead of starting a new one.
 */
static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
                               int logs)
{
        /* For updates to the last received file */
        int nblocks = FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
        journal_t *journal;
        void *handle;

        /* nested call: reuse the transaction already on this task */
        if (current->journal_info) {
                CDEBUG(D_INODE, "increasing refcount on %p\n",
                       current->journal_info);
                goto journal_start;
        }

        switch(op) {
        case FSFILT_OP_UNLINK:
                /* delete one file + create/update logs for each stripe */
                nblocks += EXT3_DELETE_TRANS_BLOCKS(inode->i_sb);
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
                break;
        case FSFILT_OP_CANCEL_UNLINK:
                LASSERT(logs == 1);

                /* blocks for log header bitmap update OR
                 * blocks for catalog header bitmap update + unlink of logs +
                 * blocks for delete the inode (include blocks truncating). */
                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
                          EXT3_DELETE_TRANS_BLOCKS(inode->i_sb) +
                          ext4_blocks_for_truncate(inode) + 3;
                break;
        default: CERROR("unknown transaction start op %d\n", op);
                LBUG();
        }

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(inode->i_sb)->s_journal;
        /* clamp the request so it can ever fit inside one transaction */
        if (nblocks > journal->j_max_transaction_buffers) {
                CWARN("too many credits %d for op %ux%u using %d instead\n",
                       nblocks, op, logs, journal->j_max_transaction_buffers);
                nblocks = journal->j_max_transaction_buffers;
        }

 journal_start:
        LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
        handle = ext3_journal_start(inode, nblocks);

        if (!IS_ERR(handle))
                LASSERT(current->journal_info == handle);
        else
                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
                       op, nblocks, PTR_ERR(handle));
        return handle;
}
166
167 static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
168 {
169         int rc;
170         handle_t *handle = h;
171
172         LASSERT(current->journal_info == handle);
173         if (force_sync)
174                 handle->h_sync = 1; /* recovery likes this */
175
176         rc = ext3_journal_stop(handle);
177
178         return rc;
179 }
180
181 #ifndef EXT3_EXTENTS_FL
182 #define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
183 #endif
184
185 #ifndef EXT_ASSERT
186 #define EXT_ASSERT(cond)  BUG_ON(!(cond))
187 #endif
188
189 #define EXT_GENERATION(inode)           (EXT4_I(inode)->i_ext_generation)
190 #define ext3_ext_base                   inode
191 #define ext3_ext_base2inode(inode)      (inode)
192 #define EXT_DEPTH(inode)                ext_depth(inode)
193 #define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
194                         ext3_ext_walk_space(inode, block, num, cb, cbdata);
195
/*
 * Cursor shared with ext3_ext_new_extent_cb() while walking/mapping a
 * range of logical blocks; blocks/created advance as blocks are mapped.
 */
struct bpointers {
        unsigned long *blocks;  /* out: physical block numbers, 0 = hole */
        int *created;           /* out: 1 if the block was newly allocated */
        unsigned long start;    /* next logical block to map */
        int num;                /* blocks still left to map */
        int init_num;           /* original request size (diagnostics) */
        int create;             /* non-zero: allocate blocks in holes */
};
204
/*
 * Choose a goal physical block for allocating logical @block of @inode.
 * Prefers extending the nearest extent found in @path, then the block
 * holding the index node itself, and finally a colour-spread position
 * inside the inode's block group.
 *
 * NOTE(review): @aflags is never written here - presumably the caller
 * pre-initialises it; confirm before relying on it being set.
 */
static long ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
                               unsigned long block, int *aflags)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext3_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));

                /* it looks index is empty
                 * try to find starting from index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        /* per-pid colour spreads concurrent writers across the group */
        colour = (current->pid % 16) *
                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
234
235 #define ll_unmap_underlying_metadata(sb, blocknr) \
236         unmap_underlying_metadata((sb)->s_bdev, blocknr)
237
#ifndef EXT3_MB_HINT_GROUP_ALLOC
/*
 * Allocate up to *count blocks near a goal derived from @path/@block
 * using the legacy mballoc interface.  Returns the first physical
 * block, or 0 with *err set on failure; *count is updated to the
 * number actually allocated.
 */
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        unsigned long pblock, goal;
        int aflags = 0;
        struct inode *inode = ext3_ext_base2inode(base);

        goal = ext3_ext_find_goal(inode, path, block, &aflags);
        aflags |= 2; /* block have been already reserved */
        pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
        return pblock;

}
#else
/*
 * Same contract as above, but via the allocation-request interface:
 * the left/right neighbours of @block are looked up first so mballoc
 * can place the new blocks adjacent to existing data.
 */
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = EXT3_MB_HINT_DATA;
        pblock = ext3_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}
#endif
284
285 static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
286                                   struct ext3_ext_path *path,
287                                   struct ext3_ext_cache *cex,
288 #ifdef HAVE_EXT_PREPARE_CB_EXTENT
289                                    struct ext3_extent *ex,
290 #endif
291                                   void *cbdata)
292 {
293         struct bpointers *bp = cbdata;
294         struct inode *inode = ext3_ext_base2inode(base);
295         struct ext3_extent nex;
296         unsigned long pblock;
297         unsigned long tgen;
298         int err, i;
299         unsigned long count;
300         handle_t *handle;
301
302         if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
303                 err = EXT_CONTINUE;
304                 goto map;
305         }
306
307         if (bp->create == 0) {
308                 i = 0;
309                 if (cex->ec_block < bp->start)
310                         i = bp->start - cex->ec_block;
311                 if (i >= cex->ec_len)
312                         CERROR("nothing to do?! i = %d, e_num = %u\n",
313                                         i, cex->ec_len);
314                 for (; i < cex->ec_len && bp->num; i++) {
315                         *(bp->created) = 0;
316                         bp->created++;
317                         *(bp->blocks) = 0;
318                         bp->blocks++;
319                         bp->num--;
320                         bp->start++;
321                 }
322
323                 return EXT_CONTINUE;
324         }
325
326         tgen = EXT_GENERATION(base);
327         count = ext3_ext_calc_credits_for_insert(base, path);
328
329         handle = ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
330         if (IS_ERR(handle)) {
331                 return PTR_ERR(handle);
332         }
333
334         if (tgen != EXT_GENERATION(base)) {
335                 /* the tree has changed. so path can be invalid at moment */
336                 ext3_journal_stop(handle);
337                 return EXT_REPEAT;
338         }
339
340         /* In 2.6.32 kernel, ext4_ext_walk_space()'s callback func is not
341          * protected by i_data_sem as whole. so we patch it to store
342          * generation to path and now verify the tree hasn't changed */
343         down_write((&EXT4_I(inode)->i_data_sem));
344
345         /* validate extent, make sure the extent tree does not changed */
346         if (EXT_GENERATION(base) != path[0].p_generation) {
347                 /* cex is invalid, try again */
348                 up_write(&EXT4_I(inode)->i_data_sem);
349                 ext3_journal_stop(handle);
350                 return EXT_REPEAT;
351         }
352
353         count = cex->ec_len;
354         pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
355         if (!pblock)
356                 goto out;
357         EXT_ASSERT(count <= cex->ec_len);
358
359         /* insert new extent */
360         nex.ee_block = cpu_to_le32(cex->ec_block);
361         ext3_ext_store_pblock(&nex, pblock);
362         nex.ee_len = cpu_to_le16(count);
363         err = fsfilt_ext3_ext_insert_extent(handle, base, path, &nex, 0);
364         if (err) {
365                 /* free data blocks we just allocated */
366                 /* not a good idea to call discard here directly,
367                  * but otherwise we'd need to call it every free() */
368 #ifdef EXT3_MB_HINT_GROUP_ALLOC
369                 ext3_mb_discard_inode_preallocations(inode);
370 #endif
371                 ext3_free_blocks(handle, inode, ext_pblock(&nex),
372                                  cpu_to_le16(nex.ee_len), 0);
373                 goto out;
374         }
375
376         /*
377          * Putting len of the actual extent we just inserted,
378          * we are asking ext3_ext_walk_space() to continue
379          * scaning after that block
380          */
381         cex->ec_len = le16_to_cpu(nex.ee_len);
382         cex->ec_start = ext_pblock(&nex);
383         BUG_ON(le16_to_cpu(nex.ee_len) == 0);
384         BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
385
386 out:
387         up_write((&EXT4_I(inode)->i_data_sem));
388         ext3_journal_stop(handle);
389 map:
390         if (err >= 0) {
391                 /* map blocks */
392                 if (bp->num == 0) {
393                         CERROR("hmm. why do we find this extent?\n");
394                         CERROR("initial space: %lu:%u\n",
395                                 bp->start, bp->init_num);
396                         CERROR("current extent: %u/%u/%llu %d\n",
397                                 cex->ec_block, cex->ec_len,
398                                 (unsigned long long)cex->ec_start,
399                                 cex->ec_type);
400                 }
401                 i = 0;
402                 if (cex->ec_block < bp->start)
403                         i = bp->start - cex->ec_block;
404                 if (i >= cex->ec_len)
405                         CERROR("nothing to do?! i = %d, e_num = %u\n",
406                                         i, cex->ec_len);
407                 for (; i < cex->ec_len && bp->num; i++) {
408                         *(bp->blocks) = cex->ec_start + i;
409                         if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
410                                 *(bp->created) = 0;
411                         } else {
412                                 *(bp->created) = 1;
413                                 /* unmap any possible underlying metadata from
414                                  * the block device mapping.  bug 6998. */
415                                 ll_unmap_underlying_metadata(inode->i_sb,
416                                                              *(bp->blocks));
417                         }
418                         bp->created++;
419                         bp->blocks++;
420                         bp->num--;
421                         bp->start++;
422                 }
423         }
424         return err;
425 }
426
427 int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
428                        unsigned long num, unsigned long *blocks,
429                        int *created, int create)
430 {
431         struct ext3_ext_base *base = inode;
432         struct bpointers bp;
433         int err;
434
435         CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
436                block, block + num - 1, (unsigned) inode->i_ino);
437
438         bp.blocks = blocks;
439         bp.created = created;
440         bp.start = block;
441         bp.init_num = bp.num = num;
442         bp.create = create;
443
444         err = fsfilt_ext3_ext_walk_space(base, block, num,
445                                          ext3_ext_new_extent_cb, &bp);
446         ext3_ext_invalidate_cache(base);
447
448         return err;
449 }
450
/*
 * Map the blocks backing @pages pages of an extent-mapped @inode.
 *
 * @page is an array of page pointers sorted by index; runs of pages
 * with consecutive indices are coalesced and mapped with a single
 * fsfilt_map_nblocks() call each.  blocks[]/created[] receive
 * blocks_per_page entries per page.  Returns 0 or a negative errno.
 */
int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int *created, int create)
{
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;  /* first page of the current run */
        int clen = 0;            /* length of the current run, in pages */

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
                created += blocks_per_page * clen;
        }

        /* flush the trailing run, if any */
        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
cleanup:
        return rc;
}
500
501 extern int ext3_map_inode_page(struct inode *inode, struct page *page,
502                                unsigned long *blocks, int *created, int create);
503 int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
504                                    int pages, unsigned long *blocks,
505                                    int *created, int create)
506 {
507         int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
508         unsigned long *b;
509         int rc = 0, i, *cr;
510
511         for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
512                 rc = ext3_map_inode_page(inode, *page, b, cr, create);
513                 if (rc) {
514                         CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
515                                inode->i_ino, *b, *cr, create, rc);
516                         break;
517                 }
518
519                 b += blocks_per_page;
520                 cr += blocks_per_page;
521         }
522         return rc;
523 }
524
525 int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
526                                 int pages, unsigned long *blocks,
527                                 int *created, int create,
528                                 struct mutex *optional_mutex)
529 {
530         int rc;
531
532         if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
533                 rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
534                                                      blocks, created, create);
535                 return rc;
536         }
537         if (optional_mutex != NULL)
538                 mutex_lock(optional_mutex);
539         rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
540                                             created, create);
541         if (optional_mutex != NULL)
542                 mutex_unlock(optional_mutex);
543
544         return rc;
545 }
546
/*
 * Read up to @size bytes of @inode at *offs through the buffer cache.
 *
 * The request is clamped at i_size under i_lock: a start offset past
 * EOF returns -EBADR, a zero-length remainder returns 0.  On success
 * *offs is advanced past the bytes copied.
 */
int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
        unsigned long block;
        struct buffer_head *bh;
        int err, blocksize, csize, boffs, osize = size;

        /* prevent reading after eof */
        spin_lock(&inode->i_lock);
        if (i_size_read(inode) < *offs + size) {
                size = i_size_read(inode) - *offs;
                spin_unlock(&inode->i_lock);
                if (size < 0) {
                        CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
                               i_size_read(inode), *offs);
                        return -EBADR;
                } else if (size == 0) {
                        return 0;
                }
        } else {
                spin_unlock(&inode->i_lock);
        }

        blocksize = 1 << inode->i_blkbits;

        /* copy block-by-block through ext3_bread() */
        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ext3_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("can't read block: %d\n", err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        /* NOTE(review): returns the ORIGINALLY requested size even when
         * the read was clamped at EOF; callers here only test rc > 0,
         * confirm before changing this. */
        return osize;
}
590 EXPORT_SYMBOL(fsfilt_ext3_read);
591
592 static int fsfilt_ext3_read_record(struct file * file, void *buf,
593                                    int size, loff_t *offs)
594 {
595         int rc;
596         rc = fsfilt_ext3_read(file->f_dentry->d_inode, buf, size, offs);
597         if (rc > 0)
598                 rc = 0;
599         return rc;
600 }
601
/*
 * Write @bufsize bytes from @buf to @inode at *offs inside an already
 * started journal @handle.
 *
 * Each touched block is read or created with ext3_bread(), journaled
 * via ext3_journal_get_write_access()/ext3_journal_dirty_metadata(),
 * and i_size / i_disksize are extended under i_lock if the write grew
 * the file.  On success *offs is advanced past the written range and
 * 0 is returned; on error the ext3/jbd2 error code is returned and
 * *offs is left unchanged (the write may be partially applied).
 */
int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
                                loff_t *offs, handle_t *handle)
{
        struct buffer_head *bh = NULL;
        loff_t old_size = i_size_read(inode), offset = *offs;
        loff_t new_size = i_size_read(inode);
        unsigned long block;
        int err = 0, blocksize = 1 << inode->i_blkbits, size, boffs;

        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                /* create=1: allocate the block if it does not exist yet */
                bh = ext3_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("can't read/create block: %d\n", err);
                        break;
                }

                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        break;
                }
                LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err) {
                        CERROR("journal_dirty_metadata() returned error %d\n",
                               err);
                        break;
                }
                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                bufsize -= size;
                buf += size;
        }
        if (bh)
                brelse(bh);

        /* correct in-core and on-disk sizes */
        if (new_size > i_size_read(inode)) {
                spin_lock(&inode->i_lock);
                /* re-check under the lock: someone may have grown it */
                if (new_size > i_size_read(inode))
                        i_size_write(inode, new_size);
                if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = i_size_read(inode);
                if (i_size_read(inode) > old_size) {
                        spin_unlock(&inode->i_lock);
                        mark_inode_dirty(inode);
                } else {
                        spin_unlock(&inode->i_lock);
                }
        }

        if (err == 0)
                *offs = offset;
        return err;
}
666 EXPORT_SYMBOL(fsfilt_ext3_write_handle);
667
668 static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
669                                     loff_t *offs, int force_sync)
670 {
671         struct inode *inode = file->f_dentry->d_inode;
672         handle_t *handle;
673         int err, block_count = 0, blocksize;
674
675         /* Determine how many transaction credits are needed */
676         blocksize = 1 << inode->i_blkbits;
677         block_count = (*offs & (blocksize - 1)) + bufsize;
678         block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
679
680         handle = ext3_journal_start(inode,
681                         block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
682         if (IS_ERR(handle)) {
683                 CERROR("can't start transaction for %d blocks (%d bytes)\n",
684                        block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
685                        bufsize);
686                 return PTR_ERR(handle);
687         }
688
689         err = fsfilt_ext3_write_handle(inode, buf, bufsize, offs, handle);
690
691         if (!err && force_sync)
692                 handle->h_sync = 1; /* recovery likes this */
693
694         ext3_journal_stop(handle);
695
696         return err;
697 }
698
/*
 * Per-superblock setup for the ext3 backend: require a journal (hard
 * -EINVAL), optionally enable parallel directory operations when the
 * kernel supports S_PDIROPS, and warn if dir_index is not enabled.
 */
static int fsfilt_ext3_setup(struct super_block *sb)
{
        if (!EXT3_HAS_COMPAT_FEATURE(sb,
                                EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
                CERROR("ext3 mounted without journal\n");
                return -EINVAL;
        }

#ifdef S_PDIROPS
        CWARN("Enabling PDIROPS\n");
        set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
        sb->s_flags |= S_PDIROPS;
#endif
        if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
                CWARN("filesystem doesn't have dir_index feature enabled\n");
        return 0;
}
/* fsfilt operation vector registered for the "ext3" backend type */
static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_getlabel            = fsfilt_ext3_get_label,
        .fs_start               = fsfilt_ext3_start,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
};
727
728 static int __init fsfilt_ext3_init(void)
729 {
730         int rc;
731
732         fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb",
733                                          sizeof(struct fsfilt_cb_data), 0, 0);
734         if (!fcb_cache) {
735                 CERROR("error allocating fsfilt journal callback cache\n");
736                 GOTO(out, rc = -ENOMEM);
737         }
738
739         rc = fsfilt_register_ops(&fsfilt_ext3_ops);
740
741         if (rc) {
742                 int err = cfs_mem_cache_destroy(fcb_cache);
743                 LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
744         }
745 out:
746         return rc;
747 }
748
749 static void __exit fsfilt_ext3_exit(void)
750 {
751         int rc;
752
753         fsfilt_unregister_ops(&fsfilt_ext3_ops);
754         rc = cfs_mem_cache_destroy(fcb_cache);
755         LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
756 }
757
758 module_init(fsfilt_ext3_init);
759 module_exit(fsfilt_ext3_exit);
760
761 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
762 MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
763 MODULE_LICENSE("GPL");