Whamcloud - gitweb
LU-3043 build: init local variable that breaks sles11sp2 build
[fs/lustre-release.git] / lustre / lvfs / fsfilt_ext3.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/lvfs/fsfilt_ext3.c
37  *
38  * Author: Andreas Dilger <adilger@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_FILTER
42
43 #include <linux/init.h>
44 #include <linux/module.h>
45 #include <linux/fs.h>
46 #include <linux/slab.h>
47 #include <linux/pagemap.h>
48 #include <ext4/ext4.h>
49 #include <ext4/ext4_jbd2.h>
50 #include <linux/version.h>
51 #include <linux/bitops.h>
52 #include <linux/quota.h>
53
54 #include <libcfs/libcfs.h>
55 #include <lustre_fsfilt.h>
56 #include <obd.h>
57 #include <linux/lustre_compat25.h>
58 #include <linux/lprocfs_status.h>
59
60 #include <ext4/ext4_extents.h>
61
62 /* for kernels 2.6.18 and later */
63 #define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS(sb)
64
65 #define fsfilt_ext3_ext_insert_extent(handle, inode, path, newext, flag) \
66                ext3_ext_insert_extent(handle, inode, path, newext, flag)
67
68 #define ext3_mb_discard_inode_preallocations(inode) \
69                  ext3_discard_preallocations(inode)
70
71 #define fsfilt_log_start_commit(journal, tid) jbd2_log_start_commit(journal, tid)
72 #define fsfilt_log_wait_commit(journal, tid) jbd2_log_wait_commit(journal, tid)
73
74 static cfs_mem_cache_t *fcb_cache;
75
/* Per-transaction journal commit callback record, allocated from
 * fcb_cache.  cb_jcb is handed to the jbd2 commit-callback machinery,
 * so it must remain the first member: the containing structure is
 * recovered from the callback entry pointer. */
struct fsfilt_cb_data {
        struct ext4_journal_cb_entry cb_jcb; /* private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};
83
/* Return a pointer to the volume label field inside the cached on-disk
 * superblock (no copy is made). */
static char *fsfilt_ext3_get_label(struct super_block *sb)
{
        return EXT3_SB(sb)->s_es->s_volume_name;
}
88
89 /* kernel has ext4_blocks_for_truncate since linux-3.1.1 */
90 #ifdef HAVE_BLOCKS_FOR_TRUNCATE
91 # include <ext4/truncate.h>
92 #else
93 static inline unsigned long ext4_blocks_for_truncate(struct inode *inode)
94 {
95         ext4_lblk_t needed;
96
97         needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
98         if (needed < 2)
99                 needed = 2;
100         if (needed > EXT4_MAX_TRANS_DATA)
101                 needed = EXT4_MAX_TRANS_DATA;
102         return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
103 }
104 #endif
105
106 /*
107  * We don't currently need any additional blocks for rmdir and
108  * unlink transactions because we are storing the OST oa_id inside
109  * the inode (which we will be changing anyways as part of this
110  * transaction).
111  */
/*
 * Start (or join) a journal transaction for operation @op on @inode.
 * Credits are estimated from the operation type and the number of
 * per-stripe log updates @logs, then capped at the journal's maximum
 * transaction size.  Returns a handle_t pointer on success or an
 * ERR_PTR() value on failure.
 */
static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
                               int logs)
{
        /* For updates to the last received file */
        int nblocks = FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
        journal_t *journal;
        void *handle;

        /* already inside a transaction: ext3_journal_start() will just
         * bump the refcount on the current handle, so skip the credit
         * estimation entirely */
        if (current->journal_info) {
                CDEBUG(D_INODE, "increasing refcount on %p\n",
                       current->journal_info);
                goto journal_start;
        }

        switch(op) {
        case FSFILT_OP_UNLINK:
                /* delete one file + create/update logs for each stripe */
                nblocks += EXT3_DELETE_TRANS_BLOCKS(inode->i_sb);
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
                break;
        case FSFILT_OP_CANCEL_UNLINK:
                LASSERT(logs == 1);

                /* blocks for log header bitmap update OR
                 * blocks for catalog header bitmap update + unlink of logs +
                 * blocks for delete the inode (include blocks truncating). */
                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
                          EXT3_DELETE_TRANS_BLOCKS(inode->i_sb) +
                          ext4_blocks_for_truncate(inode) + 3;
                break;
        default: CERROR("unknown transaction start op %d\n", op);
                LBUG();
        }

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(inode->i_sb)->s_journal;
        /* a single transaction can never hold more credits than the
         * journal allows; cap the request rather than failing */
        if (nblocks > journal->j_max_transaction_buffers) {
                CWARN("too many credits %d for op %ux%u using %d instead\n",
                       nblocks, op, logs, journal->j_max_transaction_buffers);
                nblocks = journal->j_max_transaction_buffers;
        }

 journal_start:
        LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
        handle = ext3_journal_start(inode, nblocks);

        if (!IS_ERR(handle))
                LASSERT(current->journal_info == handle);
        else
                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
                       op, nblocks, PTR_ERR(handle));
        return handle;
}
166
167 static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
168 {
169         int rc;
170         handle_t *handle = h;
171
172         LASSERT(current->journal_info == handle);
173         if (force_sync)
174                 handle->h_sync = 1; /* recovery likes this */
175
176         rc = ext3_journal_stop(handle);
177
178         return rc;
179 }
180
181 #ifndef EXT3_EXTENTS_FL
182 #define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
183 #endif
184
185 #ifndef EXT_ASSERT
186 #define EXT_ASSERT(cond)  BUG_ON(!(cond))
187 #endif
188
189 #define EXT_GENERATION(inode)           (EXT4_I(inode)->i_ext_generation)
190 #define ext3_ext_base                   inode
191 #define ext3_ext_base2inode(inode)      (inode)
192 #define EXT_DEPTH(inode)                ext_depth(inode)
193 #define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
194                         ext3_ext_walk_space(inode, block, num, cb, cbdata);
195
/* Cursor state shared with ext3_ext_new_extent_cb() during an extent
 * walk; blocks/start/num advance as block numbers are filled in. */
struct bpointers {
        unsigned long *blocks;  /* output cursor for physical block numbers */
        unsigned long start;    /* next logical block to map */
        int num;                /* blocks still to map */
        int init_num;           /* originally requested count (debug output) */
        int create;             /* non-zero: allocate blocks for holes */
};
203
204 static long ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
205                                unsigned long block, int *aflags)
206 {
207         struct ext3_inode_info *ei = EXT3_I(inode);
208         unsigned long bg_start;
209         unsigned long colour;
210         int depth;
211
212         if (path) {
213                 struct ext3_extent *ex;
214                 depth = path->p_depth;
215
216                 /* try to predict block placement */
217                 if ((ex = path[depth].p_ext))
218                         return ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));
219
220                 /* it looks index is empty
221                  * try to find starting from index itself */
222                 if (path[depth].p_bh)
223                         return path[depth].p_bh->b_blocknr;
224         }
225
226         /* OK. use inode's group */
227         bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
228                 le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
229         colour = (current->pid % 16) *
230                 (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
231         return bg_start + colour + block;
232 }
233
234 #define ll_unmap_underlying_metadata(sb, blocknr) \
235         unmap_underlying_metadata((sb)->s_bdev, blocknr)
236
237 #ifndef EXT3_MB_HINT_GROUP_ALLOC
238 static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
239                                 struct ext3_ext_path *path, unsigned long block,
240                                 unsigned long *count, int *err)
241 {
242         unsigned long pblock, goal;
243         int aflags = 0;
244         struct inode *inode = ext3_ext_base2inode(base);
245
246         goal = ext3_ext_find_goal(inode, path, block, &aflags);
247         aflags |= 2; /* block have been already reserved */
248         pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
249         return pblock;
250
251 }
252 #else
253 static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
254                                 struct ext3_ext_path *path, unsigned long block,
255                                 unsigned long *count, int *err)
256 {
257         struct inode *inode = ext3_ext_base2inode(base);
258         struct ext3_allocation_request ar;
259         unsigned long pblock;
260         int aflags;
261
262         /* find neighbour allocated blocks */
263         ar.lleft = block;
264         *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
265         if (*err)
266                 return 0;
267         ar.lright = block;
268         *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
269         if (*err)
270                 return 0;
271
272         /* allocate new block */
273         ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
274         ar.inode = inode;
275         ar.logical = block;
276         ar.len = *count;
277         ar.flags = EXT3_MB_HINT_DATA;
278         pblock = ext3_mb_new_blocks(handle, &ar, err);
279         *count = ar.len;
280         return pblock;
281 }
282 #endif
283
284 static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
285                                   struct ext3_ext_path *path,
286                                   struct ext3_ext_cache *cex,
287 #ifdef HAVE_EXT_PREPARE_CB_EXTENT
288                                    struct ext3_extent *ex,
289 #endif
290                                   void *cbdata)
291 {
292         struct bpointers *bp = cbdata;
293         struct inode *inode = ext3_ext_base2inode(base);
294         struct ext3_extent nex;
295         unsigned long pblock;
296         unsigned long tgen;
297         int err, i;
298         unsigned long count;
299         handle_t *handle;
300
301 #ifdef EXT3_EXT_CACHE_EXTENT
302         if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
303 #else
304         if ((cex->ec_len != 0) && (cex->ec_start != 0))
305 #endif
306                                                    {
307                 err = EXT_CONTINUE;
308                 goto map;
309         }
310
311         if (bp->create == 0) {
312                 i = 0;
313                 if (cex->ec_block < bp->start)
314                         i = bp->start - cex->ec_block;
315                 if (i >= cex->ec_len)
316                         CERROR("nothing to do?! i = %d, e_num = %u\n",
317                                         i, cex->ec_len);
318                 for (; i < cex->ec_len && bp->num; i++) {
319                         *(bp->blocks) = 0;
320                         bp->blocks++;
321                         bp->num--;
322                         bp->start++;
323                 }
324
325                 return EXT_CONTINUE;
326         }
327
328         tgen = EXT_GENERATION(base);
329         count = ext3_ext_calc_credits_for_insert(base, path);
330
331         handle = ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
332         if (IS_ERR(handle)) {
333                 return PTR_ERR(handle);
334         }
335
336         if (tgen != EXT_GENERATION(base)) {
337                 /* the tree has changed. so path can be invalid at moment */
338                 ext3_journal_stop(handle);
339                 return EXT_REPEAT;
340         }
341
342         /* In 2.6.32 kernel, ext4_ext_walk_space()'s callback func is not
343          * protected by i_data_sem as whole. so we patch it to store
344          * generation to path and now verify the tree hasn't changed */
345         down_write((&EXT4_I(inode)->i_data_sem));
346
347         /* validate extent, make sure the extent tree does not changed */
348         if (EXT_GENERATION(base) != path[0].p_generation) {
349                 /* cex is invalid, try again */
350                 up_write(&EXT4_I(inode)->i_data_sem);
351                 ext3_journal_stop(handle);
352                 return EXT_REPEAT;
353         }
354
355         count = cex->ec_len;
356         pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
357         if (!pblock)
358                 goto out;
359         EXT_ASSERT(count <= cex->ec_len);
360
361         /* insert new extent */
362         nex.ee_block = cpu_to_le32(cex->ec_block);
363         ext3_ext_store_pblock(&nex, pblock);
364         nex.ee_len = cpu_to_le16(count);
365         err = fsfilt_ext3_ext_insert_extent(handle, base, path, &nex, 0);
366         if (err) {
367                 /* free data blocks we just allocated */
368                 /* not a good idea to call discard here directly,
369                  * but otherwise we'd need to call it every free() */
370 #ifdef EXT3_MB_HINT_GROUP_ALLOC
371                 ext3_mb_discard_inode_preallocations(inode);
372 #endif
373                 ext3_free_blocks(handle, inode, ext_pblock(&nex),
374                                  cpu_to_le16(nex.ee_len), 0);
375                 goto out;
376         }
377
378         /*
379          * Putting len of the actual extent we just inserted,
380          * we are asking ext3_ext_walk_space() to continue
381          * scaning after that block
382          */
383         cex->ec_len = le16_to_cpu(nex.ee_len);
384         cex->ec_start = ext_pblock(&nex);
385         BUG_ON(le16_to_cpu(nex.ee_len) == 0);
386         BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
387
388 out:
389         up_write((&EXT4_I(inode)->i_data_sem));
390         ext3_journal_stop(handle);
391 map:
392         if (err >= 0) {
393                 /* map blocks */
394                 if (bp->num == 0) {
395                         CERROR("hmm. why do we find this extent?\n");
396                         CERROR("initial space: %lu:%u\n",
397                                 bp->start, bp->init_num);
398 #ifdef EXT3_EXT_CACHE_EXTENT
399                         CERROR("current extent: %u/%u/%llu %d\n",
400                                 cex->ec_block, cex->ec_len,
401                                 (unsigned long long)cex->ec_start,
402                                 cex->ec_type);
403 #else
404                         CERROR("current extent: %u/%u/%llu\n",
405                                 cex->ec_block, cex->ec_len,
406                                 (unsigned long long)cex->ec_start);
407 #endif
408                 }
409                 i = 0;
410                 if (cex->ec_block < bp->start)
411                         i = bp->start - cex->ec_block;
412                 if (i >= cex->ec_len)
413                         CERROR("nothing to do?! i = %d, e_num = %u\n",
414                                         i, cex->ec_len);
415                 for (; i < cex->ec_len && bp->num; i++) {
416                         *(bp->blocks) = cex->ec_start + i;
417 #ifdef EXT3_EXT_CACHE_EXTENT
418                         if (cex->ec_type != EXT3_EXT_CACHE_EXTENT)
419 #else
420                         if ((cex->ec_len == 0) || (cex->ec_start == 0))
421 #endif
422                                                                         {
423                                 /* unmap any possible underlying metadata from
424                                  * the block device mapping.  bug 6998. */
425                                 ll_unmap_underlying_metadata(inode->i_sb,
426                                                              *(bp->blocks));
427                         }
428                         bp->blocks++;
429                         bp->num--;
430                         bp->start++;
431                 }
432         }
433         return err;
434 }
435
/*
 * Map @num logical blocks of @inode starting at @block into the
 * @blocks array (one physical block number per entry; 0 for holes when
 * @create is 0, newly allocated blocks otherwise).  The work is done
 * by walking the extent tree with ext3_ext_new_extent_cb().  Returns 0
 * on success or a negative errno from the walk.
 */
int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
                       unsigned long num, unsigned long *blocks,
                       int create)
{
        struct ext3_ext_base *base = inode;
        struct bpointers bp;
        int err;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               block, block + num - 1, (unsigned) inode->i_ino);

        bp.blocks = blocks;
        bp.start = block;
        bp.init_num = bp.num = num;
        bp.create = create;

        err = fsfilt_ext3_ext_walk_space(base, block, num,
                                         ext3_ext_new_extent_cb, &bp);
        /* drop the walker's cached extent — it may describe extents we
         * just modified */
        ext3_ext_invalidate_cache(base);

        return err;
}
458
/*
 * Map @pages index-sorted pages of the extent-based @inode to physical
 * block numbers in @blocks.  Runs of pages with consecutive indices
 * are batched into a single fsfilt_map_nblocks() call each.  Returns 0
 * on success or the first error from fsfilt_map_nblocks().
 */
int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int create)
{
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;  /* first page of the current contiguous run */
        int clen = 0;            /* length of the current run, in pages */

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent; note the current page is not
                 * consumed here — it starts the next run */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }

        /* flush the final run, if any */
        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        create);
cleanup:
        return rc;
}
507
508 int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
509                                    int pages, unsigned long *blocks,
510                                    int create)
511 {
512         int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
513         unsigned long *b;
514         int rc = 0, i;
515
516         for (i = 0, b = blocks; i < pages; i++, page++) {
517                 rc = ext3_map_inode_page(inode, *page, b, create);
518                 if (rc) {
519                         CERROR("ino %lu, blk %lu create %d: rc %d\n",
520                                inode->i_ino, *b, create, rc);
521                         break;
522                 }
523
524                 b += blocks_per_page;
525         }
526         return rc;
527 }
528
529 int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
530                                 int pages, unsigned long *blocks,
531                                 int create, struct mutex *optional_mutex)
532 {
533         int rc;
534
535         if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
536                 rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
537                                                      blocks, create);
538                 return rc;
539         }
540         if (optional_mutex != NULL)
541                 mutex_lock(optional_mutex);
542         rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks, create);
543         if (optional_mutex != NULL)
544                 mutex_unlock(optional_mutex);
545
546         return rc;
547 }
548
/*
 * Read up to @size bytes from @inode at offset *@offs into @buf using
 * ext3_bread() directly (bypassing the page cache), advancing *@offs
 * past the bytes read.
 *
 * Returns @size as originally requested on success (NOTE(review): even
 * when the read was clamped at EOF and fewer bytes were copied —
 * callers such as fsfilt_ext3_read_record only test for > 0; confirm
 * before relying on the count), 0 when *@offs is exactly at EOF,
 * -EBADR when *@offs is beyond EOF, or a negative errno from
 * ext3_bread().
 */
int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
        unsigned long block;
        struct buffer_head *bh;
        int err, blocksize, csize, boffs, osize = size;

        /* prevent reading after eof */
        spin_lock(&inode->i_lock);
        if (i_size_read(inode) < *offs + size) {
                size = i_size_read(inode) - *offs;
                spin_unlock(&inode->i_lock);
                if (size < 0) {
                        CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
                               i_size_read(inode), *offs);
                        return -EBADR;
                } else if (size == 0) {
                        return 0;
                }
        } else {
                spin_unlock(&inode->i_lock);
        }

        blocksize = 1 << inode->i_blkbits;

        /* copy block by block, splitting at block boundaries */
        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ext3_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("can't read block: %d\n", err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return osize;
}
EXPORT_SYMBOL(fsfilt_ext3_read);
593
594 static int fsfilt_ext3_read_record(struct file * file, void *buf,
595                                    int size, loff_t *offs)
596 {
597         int rc;
598         rc = fsfilt_ext3_read(file->f_dentry->d_inode, buf, size, offs);
599         if (rc > 0)
600                 rc = 0;
601         return rc;
602 }
603
/*
 * Write @bufsize bytes from @buf to @inode at offset *@offs within the
 * already-started journal @handle, allocating blocks as needed and
 * journaling every touched buffer as metadata.  Grows the in-core and
 * on-disk sizes if the write extends the file, and advances *@offs on
 * success.  Returns 0 or a negative errno from block lookup/journal
 * access.
 */
int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
                                loff_t *offs, handle_t *handle)
{
        struct buffer_head *bh = NULL;
        loff_t old_size = i_size_read(inode), offset = *offs;
        loff_t new_size = i_size_read(inode);
        unsigned long block;
        int err = 0, blocksize = 1 << inode->i_blkbits, size, boffs;

        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                /* create flag set: allocate the block if it is a hole */
                bh = ext3_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("can't read/create block: %d\n", err);
                        break;
                }

                /* must reserve journal write access before modifying */
                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        break;
                }
                LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err) {
                        CERROR("journal_dirty_metadata() returned error %d\n",
                               err);
                        break;
                }
                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                bufsize -= size;
                buf += size;
        }
        if (bh)
                brelse(bh);

        /* correct in-core and on-disk sizes */
        if (new_size > i_size_read(inode)) {
                spin_lock(&inode->i_lock);
                /* re-check under i_lock: a concurrent writer may have
                 * grown the file meanwhile */
                if (new_size > i_size_read(inode))
                        i_size_write(inode, new_size);
                if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = i_size_read(inode);
                if (i_size_read(inode) > old_size) {
                        spin_unlock(&inode->i_lock);
                        mark_inode_dirty(inode);
                } else {
                        spin_unlock(&inode->i_lock);
                }
        }

        if (err == 0)
                *offs = offset;
        return err;
}
EXPORT_SYMBOL(fsfilt_ext3_write_handle);
669
670 static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
671                                     loff_t *offs, int force_sync)
672 {
673         struct inode *inode = file->f_dentry->d_inode;
674         handle_t *handle;
675         int err, block_count = 0, blocksize;
676
677         /* Determine how many transaction credits are needed */
678         blocksize = 1 << inode->i_blkbits;
679         block_count = (*offs & (blocksize - 1)) + bufsize;
680         block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
681
682         handle = ext3_journal_start(inode,
683                         block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
684         if (IS_ERR(handle)) {
685                 CERROR("can't start transaction for %d blocks (%d bytes)\n",
686                        block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
687                        bufsize);
688                 return PTR_ERR(handle);
689         }
690
691         err = fsfilt_ext3_write_handle(inode, buf, bufsize, offs, handle);
692
693         if (!err && force_sync)
694                 handle->h_sync = 1; /* recovery likes this */
695
696         ext3_journal_stop(handle);
697
698         return err;
699 }
700
701 static int fsfilt_ext3_setup(struct super_block *sb)
702 {
703         if (!EXT3_HAS_COMPAT_FEATURE(sb,
704                                 EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
705                 CERROR("ext3 mounted without journal\n");
706                 return -EINVAL;
707         }
708
709 #ifdef S_PDIROPS
710         CWARN("Enabling PDIROPS\n");
711         set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
712         sb->s_flags |= S_PDIROPS;
713 #endif
714         if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
715                 CWARN("filesystem doesn't have dir_index feature enabled\n");
716         return 0;
717 }
/* Operation vector registered with the fsfilt layer for the "ext3"
 * backend type. */
static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_getlabel            = fsfilt_ext3_get_label,
        .fs_start               = fsfilt_ext3_start,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
};
729
/*
 * Module init: create the journal-callback slab and register the ext3
 * fsfilt operation vector.  On registration failure the slab is torn
 * down again.  Returns 0 or a negative errno.
 */
static int __init fsfilt_ext3_init(void)
{
        int rc;

        fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb",
                                         sizeof(struct fsfilt_cb_data), 0, 0);
        if (!fcb_cache) {
                CERROR("error allocating fsfilt journal callback cache\n");
                GOTO(out, rc = -ENOMEM);
        }

        rc = fsfilt_register_ops(&fsfilt_ext3_ops);

        if (rc) {
                /* destroy must succeed — nothing was ever allocated
                 * from the brand-new cache */
                int err = cfs_mem_cache_destroy(fcb_cache);
                LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
        }
out:
        return rc;
}
750
/* Module exit: unregister the operation vector and release the
 * callback slab (asserts no callbacks are still outstanding). */
static void __exit fsfilt_ext3_exit(void)
{
        int rc;

        fsfilt_unregister_ops(&fsfilt_ext3_ops);
        rc = cfs_mem_cache_destroy(fcb_cache);
        LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
}
759
/* Kernel module entry/exit registration and metadata. */
module_init(fsfilt_ext3_init);
module_exit(fsfilt_ext3_exit);

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
MODULE_LICENSE("GPL");