Whamcloud - gitweb
LU-2748 osd: allocate buffers on demand
[fs/lustre-release.git] / lustre / lvfs / fsfilt_ext3.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/lvfs/fsfilt_ext3.c
37  *
38  * Author: Andreas Dilger <adilger@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_FILTER
42
43 #include <linux/init.h>
44 #include <linux/module.h>
45 #include <linux/fs.h>
46 #include <linux/slab.h>
47 #include <linux/pagemap.h>
48 #include <ext4/ext4.h>
49 #include <ext4/ext4_jbd2.h>
50 #include <linux/version.h>
51 #include <linux/bitops.h>
52 #include <linux/quota.h>
53
54 #include <libcfs/libcfs.h>
55 #include <lustre_fsfilt.h>
56 #include <obd.h>
57 #include <linux/lustre_compat25.h>
58 #include <linux/lprocfs_status.h>
59
60 #include <ext4/ext4_extents.h>
61
62 /* for kernels 2.6.18 and later */
63 #define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS(sb)
64
65 #define fsfilt_ext3_ext_insert_extent(handle, inode, path, newext, flag) \
66                ext3_ext_insert_extent(handle, inode, path, newext, flag)
67
68 #define ext3_mb_discard_inode_preallocations(inode) \
69                  ext3_discard_preallocations(inode)
70
71 #define fsfilt_log_start_commit(journal, tid) jbd2_log_start_commit(journal, tid)
72 #define fsfilt_log_wait_commit(journal, tid) jbd2_log_wait_commit(journal, tid)
73
/* slab cache for struct fsfilt_cb_data, one per journal commit callback */
static cfs_mem_cache_t *fcb_cache;

/* Per-transaction commit-callback state handed to the jbd2 layer. */
struct fsfilt_cb_data {
        struct ext4_journal_cb_entry cb_jcb; /* private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};
83
/* Return the volume label straight from the in-memory copy of the
 * on-disk ext3 superblock. */
static char *fsfilt_ext3_get_label(struct super_block *sb)
{
        return EXT3_SB(sb)->s_es->s_volume_name;
}
88
/* kernel has ext4_blocks_for_truncate since linux-3.1.1 */
#ifdef HAVE_BLOCKS_FOR_TRUNCATE
# include <ext4/truncate.h>
#else
/* Estimate the journal credits needed to truncate @inode: the inode's
 * block count converted to filesystem blocks, clamped to
 * [2, EXT4_MAX_TRANS_DATA], plus the base data-transaction credits. */
static inline unsigned long ext4_blocks_for_truncate(struct inode *inode)
{
        ext4_lblk_t needed;

        /* i_blocks is counted in 512-byte sectors; shift converts it to
         * units of the filesystem block size */
        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
        if (needed < 2)
                needed = 2;
        if (needed > EXT4_MAX_TRANS_DATA)
                needed = EXT4_MAX_TRANS_DATA;
        return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
#endif
105
106 /*
107  * We don't currently need any additional blocks for rmdir and
108  * unlink transactions because we are storing the OST oa_id inside
109  * the inode (which we will be changing anyways as part of this
110  * transaction).
111  */
112 static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
113                                int logs)
114 {
115         /* For updates to the last received file */
116         int nblocks = FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
117         journal_t *journal;
118         void *handle;
119
120         if (current->journal_info) {
121                 CDEBUG(D_INODE, "increasing refcount on %p\n",
122                        current->journal_info);
123                 goto journal_start;
124         }
125
126         switch(op) {
127         case FSFILT_OP_UNLINK:
128                 /* delete one file + create/update logs for each stripe */
129                 nblocks += EXT3_DELETE_TRANS_BLOCKS(inode->i_sb);
130                 nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
131                             FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
132                 break;
133         case FSFILT_OP_CANCEL_UNLINK:
134                 LASSERT(logs == 1);
135
136                 /* blocks for log header bitmap update OR
137                  * blocks for catalog header bitmap update + unlink of logs +
138                  * blocks for delete the inode (include blocks truncating). */
139                 nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
140                           EXT3_DELETE_TRANS_BLOCKS(inode->i_sb) +
141                           ext4_blocks_for_truncate(inode) + 3;
142                 break;
143         default: CERROR("unknown transaction start op %d\n", op);
144                 LBUG();
145         }
146
147         LASSERT(current->journal_info == desc_private);
148         journal = EXT3_SB(inode->i_sb)->s_journal;
149         if (nblocks > journal->j_max_transaction_buffers) {
150                 CWARN("too many credits %d for op %ux%u using %d instead\n",
151                        nblocks, op, logs, journal->j_max_transaction_buffers);
152                 nblocks = journal->j_max_transaction_buffers;
153         }
154
155  journal_start:
156         LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
157         handle = ext3_journal_start(inode, nblocks);
158
159         if (!IS_ERR(handle))
160                 LASSERT(current->journal_info == handle);
161         else
162                 CERROR("error starting handle for op %u (%u credits): rc %ld\n",
163                        op, nblocks, PTR_ERR(handle));
164         return handle;
165 }
166
167 static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
168 {
169         int rc;
170         handle_t *handle = h;
171
172         LASSERT(current->journal_info == handle);
173         if (force_sync)
174                 handle->h_sync = 1; /* recovery likes this */
175
176         rc = ext3_journal_stop(handle);
177
178         return rc;
179 }
180
181 #ifndef EXT3_EXTENTS_FL
182 #define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
183 #endif
184
185 #ifndef EXT_ASSERT
186 #define EXT_ASSERT(cond)  BUG_ON(!(cond))
187 #endif
188
189 #define EXT_GENERATION(inode)           (EXT4_I(inode)->i_ext_generation)
190 #define ext3_ext_base                   inode
191 #define ext3_ext_base2inode(inode)      (inode)
192 #define EXT_DEPTH(inode)                ext_depth(inode)
193 #define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
194                         ext3_ext_walk_space(inode, block, num, cb, cbdata);
195
/* Cursor shared with ext3_ext_new_extent_cb() while walking the extent
 * tree: records where to store mapped block numbers and how many remain. */
struct bpointers {
        unsigned long *blocks;  /* output array of physical block numbers */
        unsigned long start;    /* next logical block to map */
        int num;                /* blocks still left to map */
        int init_num;           /* size of the original request (debug) */
        int create;             /* non-zero: allocate blocks for holes */
};
203
/* Choose a goal physical block for allocating logical @block of @inode:
 * prefer the block right after the last extent on @path, then the index
 * block itself, otherwise fall back to the inode's block group plus a
 * per-process "colour" offset that spreads concurrent allocators.
 * NOTE(review): *aflags is never written here — callers appear to rely
 * on their own initialization; confirm before relying on it. */
static long ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
                               unsigned long block, int *aflags)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext3_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));

                /* it looks index is empty
                 * try to find starting from index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
233
234 #define ll_unmap_underlying_metadata(sb, blocknr) \
235         unmap_underlying_metadata((sb)->s_bdev, blocknr)
236
#ifndef EXT3_MB_HINT_GROUP_ALLOC
/* Allocate up to *count blocks near the goal derived from @path using
 * the older mballoc API (goal + flags).  Returns the first physical
 * block, or 0 with *err set on failure. */
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        unsigned long pblock, goal;
        int aflags = 0;
        struct inode *inode = ext3_ext_base2inode(base);

        goal = ext3_ext_find_goal(inode, path, block, &aflags);
        aflags |= 2; /* block have been already reserved */
        pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
        return pblock;

}
#else
/* Same as above, but for the newer mballoc API: build an allocation
 * request including the left/right allocated neighbours so mballoc can
 * place the new blocks adjacent to existing extents.  *count is updated
 * to the length actually allocated. */
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = EXT3_MB_HINT_DATA;
        pblock = ext3_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}
#endif
283
284 static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
285                                   struct ext3_ext_path *path,
286                                   struct ext3_ext_cache *cex,
287 #ifdef HAVE_EXT_PREPARE_CB_EXTENT
288                                    struct ext3_extent *ex,
289 #endif
290                                   void *cbdata)
291 {
292         struct bpointers *bp = cbdata;
293         struct inode *inode = ext3_ext_base2inode(base);
294         struct ext3_extent nex;
295         unsigned long pblock;
296         unsigned long tgen;
297         int err, i;
298         unsigned long count;
299         handle_t *handle;
300
301 #ifdef EXT3_EXT_CACHE_EXTENT
302         if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
303 #else
304         if ((cex->ec_len != 0) && (cex->ec_start != 0))
305 #endif
306                                                    {
307                 err = EXT_CONTINUE;
308                 goto map;
309         }
310
311         if (bp->create == 0) {
312                 i = 0;
313                 if (cex->ec_block < bp->start)
314                         i = bp->start - cex->ec_block;
315                 if (i >= cex->ec_len)
316                         CERROR("nothing to do?! i = %d, e_num = %u\n",
317                                         i, cex->ec_len);
318                 for (; i < cex->ec_len && bp->num; i++) {
319                         *(bp->blocks) = 0;
320                         bp->blocks++;
321                         bp->num--;
322                         bp->start++;
323                 }
324
325                 return EXT_CONTINUE;
326         }
327
328         tgen = EXT_GENERATION(base);
329         count = ext3_ext_calc_credits_for_insert(base, path);
330
331         handle = ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
332         if (IS_ERR(handle)) {
333                 return PTR_ERR(handle);
334         }
335
336         if (tgen != EXT_GENERATION(base)) {
337                 /* the tree has changed. so path can be invalid at moment */
338                 ext3_journal_stop(handle);
339                 return EXT_REPEAT;
340         }
341
342         /* In 2.6.32 kernel, ext4_ext_walk_space()'s callback func is not
343          * protected by i_data_sem as whole. so we patch it to store
344          * generation to path and now verify the tree hasn't changed */
345         down_write((&EXT4_I(inode)->i_data_sem));
346
347         /* validate extent, make sure the extent tree does not changed */
348         if (EXT_GENERATION(base) != path[0].p_generation) {
349                 /* cex is invalid, try again */
350                 up_write(&EXT4_I(inode)->i_data_sem);
351                 ext3_journal_stop(handle);
352                 return EXT_REPEAT;
353         }
354
355         count = cex->ec_len;
356         pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
357         if (!pblock)
358                 goto out;
359         EXT_ASSERT(count <= cex->ec_len);
360
361         /* insert new extent */
362         nex.ee_block = cpu_to_le32(cex->ec_block);
363         ext3_ext_store_pblock(&nex, pblock);
364         nex.ee_len = cpu_to_le16(count);
365         err = fsfilt_ext3_ext_insert_extent(handle, base, path, &nex, 0);
366         if (err) {
367                 /* free data blocks we just allocated */
368                 /* not a good idea to call discard here directly,
369                  * but otherwise we'd need to call it every free() */
370 #ifdef EXT3_MB_HINT_GROUP_ALLOC
371                 ext3_mb_discard_inode_preallocations(inode);
372 #endif
373                 ext3_free_blocks(handle, inode, ext_pblock(&nex),
374                                  cpu_to_le16(nex.ee_len), 0);
375                 goto out;
376         }
377
378         /*
379          * Putting len of the actual extent we just inserted,
380          * we are asking ext3_ext_walk_space() to continue
381          * scaning after that block
382          */
383         cex->ec_len = le16_to_cpu(nex.ee_len);
384         cex->ec_start = ext_pblock(&nex);
385         BUG_ON(le16_to_cpu(nex.ee_len) == 0);
386         BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
387
388 out:
389         up_write((&EXT4_I(inode)->i_data_sem));
390         ext3_journal_stop(handle);
391 map:
392         if (err >= 0) {
393                 /* map blocks */
394                 if (bp->num == 0) {
395                         CERROR("hmm. why do we find this extent?\n");
396                         CERROR("initial space: %lu:%u\n",
397                                 bp->start, bp->init_num);
398 #ifdef EXT3_EXT_CACHE_EXTENT
399                         CERROR("current extent: %u/%u/%llu %d\n",
400                                 cex->ec_block, cex->ec_len,
401                                 (unsigned long long)cex->ec_start,
402                                 cex->ec_type);
403 #else
404                         CERROR("current extent: %u/%u/%llu\n",
405                                 cex->ec_block, cex->ec_len,
406                                 (unsigned long long)cex->ec_start);
407 #endif
408                 }
409                 i = 0;
410                 if (cex->ec_block < bp->start)
411                         i = bp->start - cex->ec_block;
412                 if (i >= cex->ec_len)
413                         CERROR("nothing to do?! i = %d, e_num = %u\n",
414                                         i, cex->ec_len);
415                 for (; i < cex->ec_len && bp->num; i++) {
416                         *(bp->blocks) = cex->ec_start + i;
417 #ifdef EXT3_EXT_CACHE_EXTENT
418                         if (cex->ec_type != EXT3_EXT_CACHE_EXTENT)
419 #else
420                         if ((cex->ec_len == 0) || (cex->ec_start == 0))
421 #endif
422                                                                         {
423                                 /* unmap any possible underlying metadata from
424                                  * the block device mapping.  bug 6998. */
425                                 ll_unmap_underlying_metadata(inode->i_sb,
426                                                              *(bp->blocks));
427                         }
428                         bp->blocks++;
429                         bp->num--;
430                         bp->start++;
431                 }
432         }
433         return err;
434 }
435
436 int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
437                        unsigned long num, unsigned long *blocks,
438                        int create)
439 {
440         struct ext3_ext_base *base = inode;
441         struct bpointers bp;
442         int err;
443
444         CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
445                block, block + num - 1, (unsigned) inode->i_ino);
446
447         bp.blocks = blocks;
448         bp.start = block;
449         bp.init_num = bp.num = num;
450         bp.create = create;
451
452         err = fsfilt_ext3_ext_walk_space(base, block, num,
453                                          ext3_ext_new_extent_cb, &bp);
454         ext3_ext_invalidate_cache(base);
455
456         return err;
457 }
458
/* Map the blocks backing @pages pages (sorted by index, possibly
 * discontiguous) of an extent-mapped inode into @blocks.  Runs of
 * consecutive pages are coalesced into single fsfilt_map_nblocks()
 * calls.  Returns 0 or a negative errno. */
int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int create)
{
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL; /* first page of the current contiguous run */
        int clen = 0;           /* length of the current run, in pages */

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }

        /* flush the final run, if any */
        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        create);
cleanup:
        return rc;
}
507
508 extern int ext3_map_inode_page(struct inode *inode, struct page *page,
509                                unsigned long *blocks, int create);
510 int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
511                                    int pages, unsigned long *blocks,
512                                    int create)
513 {
514         int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
515         unsigned long *b;
516         int rc = 0, i;
517
518         for (i = 0, b = blocks; i < pages; i++, page++) {
519                 rc = ext3_map_inode_page(inode, *page, b, create);
520                 if (rc) {
521                         CERROR("ino %lu, blk %lu create %d: rc %d\n",
522                                inode->i_ino, *b, create, rc);
523                         break;
524                 }
525
526                 b += blocks_per_page;
527         }
528         return rc;
529 }
530
531 int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
532                                 int pages, unsigned long *blocks,
533                                 int create, struct mutex *optional_mutex)
534 {
535         int rc;
536
537         if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
538                 rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
539                                                      blocks, create);
540                 return rc;
541         }
542         if (optional_mutex != NULL)
543                 mutex_lock(optional_mutex);
544         rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks, create);
545         if (optional_mutex != NULL)
546                 mutex_unlock(optional_mutex);
547
548         return rc;
549 }
550
/* Read up to @size bytes of @inode data at *offs into @buf through
 * ext3_bread() (bypassing the page cache), clamping the read at EOF.
 * Advances *offs past the bytes read.  Returns the originally requested
 * size on success (even when clamped), 0 at/after EOF, -EBADR when
 * *offs is beyond EOF, or a negative errno on read failure. */
int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
        unsigned long block;
        struct buffer_head *bh;
        int err, blocksize, csize, boffs, osize = size;

        /* prevent reading after eof */
        spin_lock(&inode->i_lock);
        if (i_size_read(inode) < *offs + size) {
                size = i_size_read(inode) - *offs;
                spin_unlock(&inode->i_lock);
                if (size < 0) {
                        CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
                               i_size_read(inode), *offs);
                        return -EBADR;
                } else if (size == 0) {
                        return 0;
                }
        } else {
                spin_unlock(&inode->i_lock);
        }

        blocksize = 1 << inode->i_blkbits;

        /* copy block by block, honouring the offset within each block */
        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = ext3_bread(NULL, inode, block, 0, &err);
                if (!bh) {
                        CERROR("can't read block: %d\n", err);
                        return err;
                }

                memcpy(buf, bh->b_data + boffs, csize);
                brelse(bh);

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return osize;
}
594 EXPORT_SYMBOL(fsfilt_ext3_read);
595
596 static int fsfilt_ext3_read_record(struct file * file, void *buf,
597                                    int size, loff_t *offs)
598 {
599         int rc;
600         rc = fsfilt_ext3_read(file->f_dentry->d_inode, buf, size, offs);
601         if (rc > 0)
602                 rc = 0;
603         return rc;
604 }
605
/* Write @bufsize bytes at *offs into @inode under the already-started
 * journal @handle, block by block via ext3_bread(create=1), and grow
 * the in-core and on-disk i_size if the write extended the file.  On
 * success sets *offs past the written range and returns 0; otherwise a
 * negative errno. */
int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
                                loff_t *offs, handle_t *handle)
{
        struct buffer_head *bh = NULL;
        loff_t old_size = i_size_read(inode), offset = *offs;
        loff_t new_size = i_size_read(inode);
        unsigned long block;
        int err = 0, blocksize = 1 << inode->i_blkbits, size, boffs;

        while (bufsize > 0) {
                if (bh != NULL)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                /* create=1: allocate the block if this is a hole */
                bh = ext3_bread(handle, inode, block, 1, &err);
                if (!bh) {
                        CERROR("can't read/create block: %d\n", err);
                        break;
                }

                err = ext3_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        break;
                }
                LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
                memcpy(bh->b_data + boffs, buf, size);
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err) {
                        CERROR("journal_dirty_metadata() returned error %d\n",
                               err);
                        break;
                }
                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                bufsize -= size;
                buf += size;
        }
        if (bh)
                brelse(bh);

        /* correct in-core and on-disk sizes */
        if (new_size > i_size_read(inode)) {
                spin_lock(&inode->i_lock);
                /* re-check under the lock: a racing writer may have grown it */
                if (new_size > i_size_read(inode))
                        i_size_write(inode, new_size);
                if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = i_size_read(inode);
                if (i_size_read(inode) > old_size) {
                        spin_unlock(&inode->i_lock);
                        mark_inode_dirty(inode);
                } else {
                        spin_unlock(&inode->i_lock);
                }
        }

        if (err == 0)
                *offs = offset;
        return err;
}
670 EXPORT_SYMBOL(fsfilt_ext3_write_handle);
671
672 static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
673                                     loff_t *offs, int force_sync)
674 {
675         struct inode *inode = file->f_dentry->d_inode;
676         handle_t *handle;
677         int err, block_count = 0, blocksize;
678
679         /* Determine how many transaction credits are needed */
680         blocksize = 1 << inode->i_blkbits;
681         block_count = (*offs & (blocksize - 1)) + bufsize;
682         block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
683
684         handle = ext3_journal_start(inode,
685                         block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
686         if (IS_ERR(handle)) {
687                 CERROR("can't start transaction for %d blocks (%d bytes)\n",
688                        block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
689                        bufsize);
690                 return PTR_ERR(handle);
691         }
692
693         err = fsfilt_ext3_write_handle(inode, buf, bufsize, offs, handle);
694
695         if (!err && force_sync)
696                 handle->h_sync = 1; /* recovery likes this */
697
698         ext3_journal_stop(handle);
699
700         return err;
701 }
702
703 static int fsfilt_ext3_setup(struct super_block *sb)
704 {
705         if (!EXT3_HAS_COMPAT_FEATURE(sb,
706                                 EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
707                 CERROR("ext3 mounted without journal\n");
708                 return -EINVAL;
709         }
710
711 #ifdef S_PDIROPS
712         CWARN("Enabling PDIROPS\n");
713         set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
714         sb->s_flags |= S_PDIROPS;
715 #endif
716         if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
717                 CWARN("filesystem doesn't have dir_index feature enabled\n");
718         return 0;
719 }
/* fsfilt operations vector registered for the "ext3" backend type */
static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_getlabel            = fsfilt_ext3_get_label,
        .fs_start               = fsfilt_ext3_start,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
};
731
732 static int __init fsfilt_ext3_init(void)
733 {
734         int rc;
735
736         fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb",
737                                          sizeof(struct fsfilt_cb_data), 0, 0);
738         if (!fcb_cache) {
739                 CERROR("error allocating fsfilt journal callback cache\n");
740                 GOTO(out, rc = -ENOMEM);
741         }
742
743         rc = fsfilt_register_ops(&fsfilt_ext3_ops);
744
745         if (rc) {
746                 int err = cfs_mem_cache_destroy(fcb_cache);
747                 LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
748         }
749 out:
750         return rc;
751 }
752
/* Module unload: unregister the ops vector and destroy the slab cache. */
static void __exit fsfilt_ext3_exit(void)
{
        int rc;

        fsfilt_unregister_ops(&fsfilt_ext3_ops);
        rc = cfs_mem_cache_destroy(fcb_cache);
        LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
}
761
762 module_init(fsfilt_ext3_init);
763 module_exit(fsfilt_ext3_exit);
764
765 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
766 MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
767 MODULE_LICENSE("GPL");