/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/lvfs/fsfilt_ext3.c
 *
 * Author: Andreas Dilger <adilger@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_FILTER

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <ldiskfs/ldiskfs_config.h>
#include <ext4/ext4.h>
#include <ext4/ext4_jbd2.h>
#include <linux/version.h>
#include <linux/bitops.h>
#include <linux/quota.h>

#include <libcfs/libcfs.h>
#include <lustre_fsfilt.h>
#include <obd.h>
#include <linux/lustre_compat25.h>
#include <linux/lprocfs_status.h>

#include <ext4/ext4_extents.h>

#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ext3_ext_pblock(ex) ext_pblock((ex))
#endif

/* for kernels 2.6.18 and later */
#define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS(sb)

#define fsfilt_ext3_ext_insert_extent(handle, inode, path, newext, flag) \
               ext3_ext_insert_extent(handle, inode, path, newext, flag)

#define ext3_mb_discard_inode_preallocations(inode) \
                 ext3_discard_preallocations(inode)

#define fsfilt_log_start_commit(journal, tid) jbd2_log_start_commit(journal, tid)
#define fsfilt_log_wait_commit(journal, tid) jbd2_log_wait_commit(journal, tid)

static struct kmem_cache *fcb_cache;

struct fsfilt_cb_data {
        struct ext4_journal_cb_entry cb_jcb; /* private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};
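
/*
 * cb_jcb is kept first (as the comment above insists) presumably so the
 * enclosing fsfilt_cb_data can be recovered from the ext4_journal_cb_entry
 * pointer handed back by the jbd2 commit-callback machinery, e.g. with
 * container_of(); the callback registration itself is not shown in this file.
 */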

static char *fsfilt_ext3_get_label(struct super_block *sb)
{
        return EXT3_SB(sb)->s_es->s_volume_name;
}

/* kernel has ext4_blocks_for_truncate since linux-3.1.1 */
#ifdef HAVE_BLOCKS_FOR_TRUNCATE
# include <ext4/truncate.h>
#else
static inline unsigned long ext4_blocks_for_truncate(struct inode *inode)
{
        ext4_lblk_t needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
        if (needed < 2)
                needed = 2;
        if (needed > EXT4_MAX_TRANS_DATA)
                needed = EXT4_MAX_TRANS_DATA;
        return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
#endif
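
/*
 * Rough illustration of the fallback above (assuming the usual
 * EXT4_MAX_TRANS_DATA value of 64): for a 1MB file on a 4KB-block
 * filesystem i_blocks is about 2048 512-byte sectors, so "needed"
 * starts at 2048 >> 3 = 256 and is clamped to 64, for a total of
 * EXT4_DATA_TRANS_BLOCKS(sb) + 64 credits.
 */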

/*
 * We don't currently need any additional blocks for rmdir and
 * unlink transactions because we are storing the OST oa_id inside
 * the inode (which we will be changing anyways as part of this
 * transaction).
 */
static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
                               int logs)
{
        /* For updates to the last received file */
        int nblocks = FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
        journal_t *journal;
        void *handle;

        if (current->journal_info) {
                CDEBUG(D_INODE, "increasing refcount on %p\n",
                       current->journal_info);
                goto journal_start;
        }

        switch(op) {
        case FSFILT_OP_UNLINK:
                /* delete one file + create/update logs for each stripe */
                nblocks += EXT3_DELETE_TRANS_BLOCKS(inode->i_sb);
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
                break;
        case FSFILT_OP_CANCEL_UNLINK:
                LASSERT(logs == 1);

                /* blocks for log header bitmap update OR
                 * blocks for catalog header bitmap update + unlink of logs +
                 * blocks for deleting the inode (including blocks for truncating). */
                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
                          EXT3_DELETE_TRANS_BLOCKS(inode->i_sb) +
                          ext4_blocks_for_truncate(inode) + 3;
                break;
        default: CERROR("unknown transaction start op %d\n", op);
                LBUG();
        }

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(inode->i_sb)->s_journal;
        if (nblocks > journal->j_max_transaction_buffers) {
                CWARN("too many credits %d for op %ux%u using %d instead\n",
                       nblocks, op, logs, journal->j_max_transaction_buffers);
                nblocks = journal->j_max_transaction_buffers;
        }

 journal_start:
        LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
        handle = ext3_journal_start(inode, nblocks);

        if (!IS_ERR(handle))
                LASSERT(current->journal_info == handle);
        else
                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
                       op, nblocks, PTR_ERR(handle));
        return handle;
}

static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
{
        int rc;
        handle_t *handle = h;

        LASSERT(current->journal_info == handle);
        if (force_sync)
                handle->h_sync = 1; /* recovery likes this */

        rc = ext3_journal_stop(handle);

        return rc;
}

#ifndef EXT3_EXTENTS_FL
#define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
#endif

#ifndef EXT_ASSERT
#define EXT_ASSERT(cond)  BUG_ON(!(cond))
#endif

#define EXT_GENERATION(inode)           (EXT4_I(inode)->i_ext_generation)
#define ext3_ext_base                   inode
#define ext3_ext_base2inode(inode)      (inode)
#define EXT_DEPTH(inode)                ext_depth(inode)
#define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
                        ext3_ext_walk_space(inode, block, num, cb, cbdata);

struct bpointers {
        unsigned long *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

static long ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
                               unsigned long block, int *aflags)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext3_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ext4_ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));

                /* the index looks empty;
                 * try to find a goal starting from the index block itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
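
/*
 * The goal heuristic above follows the classic ext3 allocator: prefer the
 * block right after the extent (or the index block) we are extending, and
 * otherwise fall back to the inode's block group, offset by a per-process
 * "colour" (current->pid % 16 sixteenths of a group) so that concurrent
 * writers are spread across the group instead of all contending for the
 * same blocks.
 */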

#define ll_unmap_underlying_metadata(sb, blocknr) \
        unmap_underlying_metadata((sb)->s_bdev, blocknr)

#ifndef EXT3_MB_HINT_GROUP_ALLOC
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        unsigned long pblock, goal;
        int aflags = 0;
        struct inode *inode = ext3_ext_base2inode(base);

        goal = ext3_ext_find_goal(inode, path, block, &aflags);
        aflags |= 2; /* blocks have already been reserved */
        pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
        return pblock;

}
#else
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbouring allocated blocks */
        ar.lleft = block;
        *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = EXT3_MB_HINT_DATA;
        pblock = ext3_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}
#endif

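/*
 * Callback for the extent-tree walk started in fsfilt_map_nblocks() below.
 * Extents that are already allocated (or cached) are simply copied into
 * bp->blocks in the "map" step.  For holes it either reports zeros when
 * bp->create == 0, or allocates blocks and inserts a new extent under its
 * own journal handle.  Returns EXT_CONTINUE to keep walking, EXT_REPEAT if
 * the extent tree changed underneath us and the walk must be retried, or a
 * negative error code.
 */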
static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
                                  struct ext3_ext_path *path,
                                  struct ext3_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                   struct ext3_extent *ex,
#endif
                                  void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_extent nex;
        unsigned long pblock;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef EXT3_EXT_CACHE_EXTENT
        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0))
#endif
                                                   {
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }

        tgen = EXT_GENERATION(base);
        count = ext3_ext_calc_credits_for_insert(base, path);

        handle = ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
        if (IS_ERR(handle)) {
                return PTR_ERR(handle);
        }

        if (tgen != EXT_GENERATION(base)) {
                /* the tree has changed, so the path may be invalid at the moment */
                ext3_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ext4_ext_walk_space()'s callback is not
         * protected by i_data_sem as a whole, so we patch it to store the
         * generation in the path and verify here that the tree hasn't changed */
        down_write((&EXT4_I(inode)->i_data_sem));

        /* validate the extent; make sure the extent tree has not changed */
        if (EXT_GENERATION(base) != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&EXT4_I(inode)->i_data_sem);
                ext3_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        EXT_ASSERT(count <= cex->ec_len);

        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ext3_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = fsfilt_ext3_ext_insert_extent(handle, base, path, &nex, 0);
        if (err) {
                /* free data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it on every free() */
#ifdef EXT3_MB_HINT_GROUP_ALLOC
                ext3_mb_discard_inode_preallocations(inode);
#endif
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ext3_free_blocks(handle, inode, NULL, ext4_ext_pblock(&nex),
                                 cpu_to_le16(nex.ee_len), 0);
#else
                ext3_free_blocks(handle, inode, ext4_ext_pblock(&nex),
                                 cpu_to_le16(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * By storing the length of the extent we actually inserted,
         * we ask ext3_ext_walk_space() to continue
         * scanning after that block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ext4_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&EXT4_I(inode)->i_data_sem));
        ext3_journal_stop(handle);
map:
        if (err >= 0) {
                /* map blocks */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
#ifdef EXT3_EXT_CACHE_EXTENT
                        CERROR("current extent: %u/%u/%llu %d\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start,
                                cex->ec_type);
#else
                        CERROR("current extent: %u/%u/%llu\n",
                                cex->ec_block, cex->ec_len,
                                (unsigned long long)cex->ec_start);
#endif
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
#ifdef EXT3_EXT_CACHE_EXTENT
                        if (cex->ec_type != EXT3_EXT_CACHE_EXTENT)
#else
                        if ((cex->ec_len == 0) || (cex->ec_start == 0))
#endif
                                                                        {
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
                                ll_unmap_underlying_metadata(inode->i_sb,
                                                             *(bp->blocks));
                        }
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
        }
        return err;
}

int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
                       unsigned long num, unsigned long *blocks,
                       int create)
{
        struct ext3_ext_base *base = inode;
        struct bpointers bp;
        int err;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               block, block + num - 1, (unsigned) inode->i_ino);

        bp.blocks = blocks;
        bp.start = block;
        bp.init_num = bp.num = num;
        bp.create = create;

        err = fsfilt_ext3_ext_walk_space(base, block, num,
                                         ext3_ext_new_extent_cb, &bp);
        ext3_ext_invalidate_cache(base);

        return err;
}

int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int create)
{
        int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* the pages are already sorted, so we just have to find
         * contiguous runs and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }

        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        create);
cleanup:
        return rc;
}
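
/*
 * Example of the grouping above: pages with indices 3, 4, 5 and 9 (with
 * blocks_per_page == 1, i.e. 4KB pages on a 4KB-block filesystem) produce
 * two fsfilt_map_nblocks() calls, one covering blocks 3-5 and one covering
 * block 9.
 */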

int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
                                   int pages, unsigned long *blocks,
                                   int create)
{
        int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        unsigned long *b;
        int rc = 0, i;

        for (i = 0, b = blocks; i < pages; i++, page++) {
                rc = ext3_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %lu create %d: rc %d\n",
                               inode->i_ino, *b, create, rc);
                        break;
                }

                b += blocks_per_page;
        }
        return rc;
}

int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
                                int pages, unsigned long *blocks,
                                int create, struct mutex *optional_mutex)
{
        int rc;

        if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
                rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        if (optional_mutex != NULL)
                mutex_lock(optional_mutex);
        rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks, create);
        if (optional_mutex != NULL)
                mutex_unlock(optional_mutex);

        return rc;
}

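/*
 * Read @size bytes from @inode into @buf starting at *offs, never reading
 * past i_size; *offs is advanced by the number of bytes copied.  Returns
 * the originally requested size on success (even if the copy was clamped
 * at EOF), 0 if *offs is exactly at EOF, or a negative error code
 * (-EBADR if *offs is beyond EOF).
 */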
int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
        unsigned long block;
        struct buffer_head *bh;
        int err, blocksize, csize, boffs, osize = size;

        /* prevent reading after eof */
        spin_lock(&inode->i_lock);
        if (i_size_read(inode) < *offs + size) {
                size = i_size_read(inode) - *offs;
                spin_unlock(&inode->i_lock);
                if (size < 0) {
                        CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
                               i_size_read(inode), *offs);
                        return -EBADR;
                } else if (size == 0) {
                        return 0;
                }
        } else {
                spin_unlock(&inode->i_lock);
        }

        blocksize = 1 << inode->i_blkbits;

        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ext3_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("can't read block: %d\n", err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return osize;
}
EXPORT_SYMBOL(fsfilt_ext3_read);

static int fsfilt_ext3_read_record(struct file * file, void *buf,
                                   int size, loff_t *offs)
{
        int rc;
        rc = fsfilt_ext3_read(file->f_dentry->d_inode, buf, size, offs);
        if (rc > 0)
                rc = 0;
        return rc;
}

int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
                                loff_t *offs, handle_t *handle)
{
        struct buffer_head *bh = NULL;
        loff_t old_size = i_size_read(inode), offset = *offs;
        loff_t new_size = i_size_read(inode);
        unsigned long block;
        int err = 0, blocksize = 1 << inode->i_blkbits, size, boffs;

        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                bh = ext3_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("can't read/create block: %d\n", err);
                        break;
                }

                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        break;
                }
                LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err) {
                        CERROR("journal_dirty_metadata() returned error %d\n",
                               err);
                        break;
                }
                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                bufsize -= size;
                buf += size;
        }
        if (bh)
                brelse(bh);

        /* correct in-core and on-disk sizes */
        if (new_size > i_size_read(inode)) {
                spin_lock(&inode->i_lock);
                if (new_size > i_size_read(inode))
                        i_size_write(inode, new_size);
                if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = i_size_read(inode);
                if (i_size_read(inode) > old_size) {
                        spin_unlock(&inode->i_lock);
                        mark_inode_dirty(inode);
                } else {
                        spin_unlock(&inode->i_lock);
                }
        }

        if (err == 0)
                *offs = offset;
        return err;
}
EXPORT_SYMBOL(fsfilt_ext3_write_handle);

static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
                                    loff_t *offs, int force_sync)
{
        struct inode *inode = file->f_dentry->d_inode;
        handle_t *handle;
        int err, block_count = 0, blocksize;

        /* Determine how many transaction credits are needed */
        blocksize = 1 << inode->i_blkbits;
        block_count = (*offs & (blocksize - 1)) + bufsize;
        block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
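        /* Illustrative numbers: with a 4KB block size, *offs = 100 and
         * bufsize = 8192 give 100 + 8192 = 8292 bytes, which rounds up to
         * block_count = 3, so 3 * EXT3_DATA_TRANS_BLOCKS(sb) + 2 credits
         * are requested below. */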

        handle = ext3_journal_start(inode,
                        block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
        if (IS_ERR(handle)) {
                CERROR("can't start transaction for %d blocks (%d bytes)\n",
                       block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
                       bufsize);
                return PTR_ERR(handle);
        }

        err = fsfilt_ext3_write_handle(inode, buf, bufsize, offs, handle);

        if (!err && force_sync)
                handle->h_sync = 1; /* recovery likes this */

        ext3_journal_stop(handle);

        return err;
}

static int fsfilt_ext3_setup(struct super_block *sb)
{
        if (!EXT3_HAS_COMPAT_FEATURE(sb,
                                EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
                CERROR("ext3 mounted without journal\n");
                return -EINVAL;
        }

#ifdef S_PDIROPS
        CWARN("Enabling PDIROPS\n");
        set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
        sb->s_flags |= S_PDIROPS;
#endif
        if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
                CWARN("filesystem doesn't have dir_index feature enabled\n");
        return 0;
}

static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_getlabel            = fsfilt_ext3_get_label,
        .fs_start               = fsfilt_ext3_start,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
};
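
/*
 * The ops table above is registered with the generic fsfilt layer at module
 * init; other parts of Lustre presumably look it up by the "ext3" name
 * through the fsfilt helpers declared in lustre_fsfilt.h.
 */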

static int __init fsfilt_ext3_init(void)
{
        int rc;

        fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
                                      sizeof(struct fsfilt_cb_data),
                                      0, 0, NULL);
        if (!fcb_cache) {
                CERROR("error allocating fsfilt journal callback cache\n");
                GOTO(out, rc = -ENOMEM);
        }

        rc = fsfilt_register_ops(&fsfilt_ext3_ops);

        if (rc)
                kmem_cache_destroy(fcb_cache);
out:
        return rc;
}

static void __exit fsfilt_ext3_exit(void)
{
        fsfilt_unregister_ops(&fsfilt_ext3_ops);
        kmem_cache_destroy(fcb_cache);
}

module_init(fsfilt_ext3_init);
module_exit(fsfilt_ext3_exit);

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
MODULE_LICENSE("GPL");