39aace847ad6a6b9cff965664d184b41f1e37290
[fs/lustre-release.git] / lustre / lvfs / fsfilt_ext3.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  lustre/lvfs/fsfilt_ext3.c
5  *  Lustre filesystem abstraction routines
6  *
7  *  Copyright (C) 2002, 2003 Cluster File Systems, Inc.
8  *   Author: Andreas Dilger <adilger@clusterfs.com>
9  *
10  *   This file is part of Lustre, http://www.lustre.org.
11  *
12  *   Lustre is free software; you can redistribute it and/or
13  *   modify it under the terms of version 2 of the GNU General Public
14  *   License as published by the Free Software Foundation.
15  *
16  *   Lustre is distributed in the hope that it will be useful,
17  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  *   GNU General Public License for more details.
20  *
21  *   You should have received a copy of the GNU General Public License
22  *   along with Lustre; if not, write to the Free Software
23  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24  */
25
26 #define DEBUG_SUBSYSTEM S_FILTER
27
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/fs.h>
31 #include <linux/jbd.h>
32 #include <linux/slab.h>
33 #include <linux/pagemap.h>
34 #include <linux/quotaops.h>
35 #include <linux/ext3_fs.h>
36 #include <linux/ext3_jbd.h>
37 #include <linux/version.h>
38 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
39 #include <linux/ext3_xattr.h>
40 #else
41 #include <ext3/xattr.h>
42 #endif
43
44 #include <linux/kp30.h>
45 #include <linux/lustre_fsfilt.h>
46 #include <linux/obd.h>
47 #include <linux/obd_class.h>
48 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
49 #include <linux/iobuf.h>
50 #endif
51
52 #ifdef EXT3_MULTIBLOCK_ALLOCATOR
53 #include <linux/ext3_extents.h>
54 #endif
55
56 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
57 # define lock_24kernel() lock_kernel()
58 # define unlock_24kernel() unlock_kernel()
59 #else
60 # define lock_24kernel() do {} while (0)
61 # define unlock_24kernel() do {} while (0)
62 #endif
63
64 static kmem_cache_t *fcb_cache;
65 static atomic_t fcb_cache_count = ATOMIC_INIT(0);
66
67 struct fsfilt_cb_data {
68         struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
69         fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
70         struct obd_device *cb_obd;      /* MDS/OBD completion device */
71         __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
72         void *cb_data;                  /* MDS/OST completion function data */
73 };
74
75 #ifndef EXT3_XATTR_INDEX_TRUSTED        /* temporary until we hit l28 kernel */
76 #define EXT3_XATTR_INDEX_TRUSTED        4
77 #endif
78 #define XATTR_LUSTRE_MDS_LOV_EA         "lov"
79
80 #define EXT3_XATTR_INDEX_LUSTRE         5                         /* old */
81 #define XATTR_LUSTRE_MDS_OBJID          "system.lustre_mds_objid" /* old */
82
83 /*
84  * We don't currently need any additional blocks for rmdir and
85  * unlink transactions because we are storing the OST oa_id inside
86  * the inode (which we will be changing anyway as part of this
87  * transaction).
88  */
89 static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
90                                int logs)
91 {
92         /* For updates to the last_rcvd file */
93         int nblocks = EXT3_SINGLEDATA_TRANS_BLOCKS;
94         journal_t *journal;
95         void *handle;
96
97         if (current->journal_info) {
98                 CDEBUG(D_INODE, "increasing refcount on %p\n",
99                        current->journal_info);
100                 goto journal_start;
101         }
102
103         switch(op) {
104         case FSFILT_OP_RMDIR:
105         case FSFILT_OP_UNLINK:
106                 /* delete one file + create/update logs for each stripe */
107                 nblocks += EXT3_DELETE_TRANS_BLOCKS;
108                 nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
109                             EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
110                 break;
111         case FSFILT_OP_RENAME:
112                 /* modify additional directory */
113                 nblocks += EXT3_SINGLEDATA_TRANS_BLOCKS;
114                 nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
115                             EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
116                 /* no break */
117         case FSFILT_OP_SYMLINK:
118                 /* additional block + block bitmap + GDT for long symlink */
119                 nblocks += 3;
120                 /* no break */
121         case FSFILT_OP_CREATE:
122                 /* create/update logs for each stripe */
123                 nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
124                             EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
125                 /* no break */
126         case FSFILT_OP_MKDIR:
127         case FSFILT_OP_MKNOD:
128                 /* modify one inode + block bitmap + GDT */
129                 nblocks += 3;
130                 /* no break */
131         case FSFILT_OP_LINK:
132                 /* modify parent directory */
133                 nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
134                         EXT3_DATA_TRANS_BLOCKS;
135                 break;
136         case FSFILT_OP_SETATTR:
137                 /* Setattr on inode */
138                 nblocks += 1;
139                 break;
140         case FSFILT_OP_CANCEL_UNLINK:
141                 /* blocks for log header bitmap update OR
142                  * blocks for catalog header bitmap update + unlink of logs */
143                 nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
144                         EXT3_DELETE_TRANS_BLOCKS * logs;
145                 break;
146         default: CERROR("unknown transaction start op %d\n", op);
147                  LBUG();
148         }
149
150         LASSERT(current->journal_info == desc_private);
151         journal = EXT3_SB(inode->i_sb)->s_journal;
152         if (nblocks > journal->j_max_transaction_buffers) {
153                 CERROR("too many credits %d for op %ux%u using %d instead\n",
154                        nblocks, op, logs, journal->j_max_transaction_buffers);
155                 nblocks = journal->j_max_transaction_buffers;
156         }
157
158  journal_start:
159         LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
160         lock_24kernel();
161         handle = journal_start(EXT3_JOURNAL(inode), nblocks);
162         unlock_24kernel();
163
164         if (!IS_ERR(handle))
165                 LASSERT(current->journal_info == handle);
166         else
167                 CERROR("error starting handle for op %u (%u credits): rc %ld\n",
168                        op, nblocks, PTR_ERR(handle));
169         return handle;
170 }
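/*
 * Worked example (illustrative, not from the original source): the cases
 * above fall through, so FSFILT_OP_RENAME accumulates the credits of every
 * case after it as well.  With logs = 1:
 *
 *      nblocks = 2 * EXT3_SINGLEDATA_TRANS_BLOCKS      (last_rcvd + 2nd dir)
 *              + 2 * (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
 *                     EXT3_SINGLEDATA_TRANS_BLOCKS)    (RENAME + CREATE logs)
 *              + 3 + 3                         (symlink, inode/bitmap/GDT)
 *              + EXT3_INDEX_EXTRA_TRANS_BLOCKS
 *              + EXT3_DATA_TRANS_BLOCKS        (parent directory via LINK)
 */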
171
172 /*
173  * Calculate the number of buffer credits needed to write multiple pages in
174  * a single ext3 transaction.  No, this shouldn't be here, but as yet ext3
175  * doesn't have a nice API for calculating this sort of thing in advance.
176  *
177  * See comment above ext3_writepage_trans_blocks for details.  We assume
178  * no data journaling is being done, but it does allow for all of the pages
179  * being non-contiguous.  If we are guaranteed contiguous pages we could
180  * reduce the number of (d)indirect blocks a lot.
181  *
182  * With N blocks per page and P pages, for each inode we have at most:
183  * N*P indirect blocks
184  * min(N*P, blocksize/4 + 1) dindirect blocks
185  * niocount tindirect blocks
186  *
187  * For the entire filesystem, we have at most:
188  * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
189  * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
190  * objcount inode blocks
191  * 1 superblock
192  * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
193  *
194  * 1 EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
195  */
196 static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
197                                       int niocount, struct niobuf_local *nb)
198 {
199         struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
200         __u64 next_indir;
201         const int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
202         int nbitmaps = 0, ngdblocks;
203         int needed = objcount + 1; /* inodes + superblock */
204         int i, j;
205
206         for (i = 0, j = 0; i < objcount; i++, fso++) {
207                 /* two or more dindirect blocks in case we cross boundary */
208                 int ndind = (long)((nb[j + fso->fso_bufcnt - 1].offset -
209                                     nb[j].offset) >>
210                                    sb->s_blocksize_bits) /
211                         (EXT3_ADDR_PER_BLOCK(sb) * EXT3_ADDR_PER_BLOCK(sb));
212                 nbitmaps += min(fso->fso_bufcnt, ndind > 0 ? ndind : 2);
213
214                 /* leaf, indirect, tindirect blocks for first block */
215                 nbitmaps += blockpp + 2;
216
217                 j += fso->fso_bufcnt;
218         }
219
220         next_indir = nb[0].offset +
221                 (EXT3_ADDR_PER_BLOCK(sb) << sb->s_blocksize_bits);
222         for (i = 1; i < niocount; i++) {
223                 if (nb[i].offset >= next_indir) {
224                         nbitmaps++;     /* additional indirect */
225                         next_indir = nb[i].offset +
226                                 (EXT3_ADDR_PER_BLOCK(sb)<<sb->s_blocksize_bits);
227                 } else if (nb[i].offset != nb[i - 1].offset + sb->s_blocksize) {
228                         nbitmaps++;     /* additional indirect */
229                 }
230                 nbitmaps += blockpp;    /* each leaf in different group? */
231         }
232
233         ngdblocks = nbitmaps;
234         if (nbitmaps > EXT3_SB(sb)->s_groups_count)
235                 nbitmaps = EXT3_SB(sb)->s_groups_count;
236         if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
237                 ngdblocks = EXT3_SB(sb)->s_gdb_count;
238
239         needed += nbitmaps + ngdblocks;
240
241         /* last_rcvd update */
242         needed += EXT3_DATA_TRANS_BLOCKS;
243
244 #if defined(CONFIG_QUOTA) && !defined(__x86_64__) /* XXX */
245         /* We assume that there will be 1 bit set in s_dquot.flags for each
246          * quota file that is active.  This is at least true for now.
247          */
248         needed += hweight32(sb_any_quota_enabled(sb)) *
249                 EXT3_SINGLEDATA_TRANS_BLOCKS;
250 #endif
251
252         return needed;
253 }
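/*
 * Worked example (illustrative): one object of four contiguous 4kB pages
 * on a 4kB-block filesystem (blockpp = 1).  needed starts at 1 + 1 = 2;
 * the object loop adds min(4, 2) + blockpp + 2 = 5 to nbitmaps; the niobuf
 * loop adds blockpp = 1 for each of the remaining three pages, giving
 * nbitmaps = ngdblocks = 8 before the per-group caps.  The result is
 * 2 + 8 + 8 + EXT3_DATA_TRANS_BLOCKS credits (plus the quota blocks, if
 * enabled): maximally pessimistic, as the comment above notes.
 */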
254
255 /* We have to start a huge journal transaction here to hold all of the
256  * metadata for the pages being written here.  This is necessitated by
257  * the fact that we do lots of prepare_write operations before we do
258  * any of the matching commit_write operations, so even if we split
259  * up to use "smaller" transactions none of them could complete until
260  * all of them were opened.  By having a single journal transaction,
261  * we eliminate duplicate reservations for common blocks like the
262  * superblock and group descriptors or bitmaps.
263  *
264  * We will start the transaction here, but each prepare_write will
265  * add a refcount to the transaction, and each commit_write will
266  * remove a refcount.  The transaction will be closed when all of
267  * the pages have been written.
268  */
269 static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
270                                    int niocount, struct niobuf_local *nb,
271                                    void *desc_private, int logs)
272 {
273         journal_t *journal;
274         handle_t *handle;
275         int needed;
276         ENTRY;
277
278         LASSERT(current->journal_info == desc_private);
279         journal = EXT3_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
280         needed = fsfilt_ext3_credits_needed(objcount, fso, niocount, nb);
281
282         /* The number of blocks we could _possibly_ dirty can be very large.
283          * We reduce our request if it is absurd (and we couldn't get that
284          * many credits for a single handle anyway).
285          *
286          * At some point we have to limit the size of I/Os sent at one time,
287          * increase the size of the journal, or we have to calculate the
288          * actual journal requirements more carefully by checking all of
289          * the blocks instead of being maximally pessimistic.  It remains to
290          * be seen if this is a real problem or not.
291          */
292         if (needed > journal->j_max_transaction_buffers) {
293                 CERROR("want too many journal credits (%d) using %d instead\n",
294                        needed, journal->j_max_transaction_buffers);
295                 needed = journal->j_max_transaction_buffers;
296         }
297
298         LASSERTF(needed > 0, "can't start %d credit transaction\n", needed);
299         lock_24kernel();
300         handle = journal_start(journal, needed);
301         unlock_24kernel();
302         if (IS_ERR(handle)) {
303                 CERROR("can't get handle for %d credits: rc = %ld\n", needed,
304                        PTR_ERR(handle));
305         } else {
306                 LASSERT(handle->h_buffer_credits >= needed);
307                 LASSERT(current->journal_info == handle);
308         }
309
310         RETURN(handle);
311 }
312
313 static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
314 {
315         int rc;
316         handle_t *handle = h;
317
318         LASSERT(current->journal_info == handle);
319         if (force_sync)
320                 handle->h_sync = 1; /* recovery likes this */
321
322         lock_24kernel();
323         rc = journal_stop(handle);
324         unlock_24kernel();
325
326         return rc;
327 }
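/*
 * Hypothetical caller sketch (not part of this file): the OST write path
 * opens one large handle, lets each prepare_write/commit_write pair nest
 * on it via current->journal_info, and closes it once all pages are done.
 * example_write_one_page() is a placeholder for the per-page work.
 */
#if 0
static int example_brw_write(int objcount, struct fsfilt_objinfo *fso,
                             int niocount, struct niobuf_local *nb)
{
        struct inode *inode = fso->fso_dentry->d_inode;
        void *handle;
        int i, rc = 0, rc2;

        handle = fsfilt_ext3_brw_start(objcount, fso, niocount, nb, NULL, 0);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        for (i = 0; i < niocount && rc == 0; i++)
                rc = example_write_one_page(inode, &nb[i]); /* placeholder */

        rc2 = fsfilt_ext3_commit(inode, handle, 0);
        return rc ? rc : rc2;
}
#endif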
328
329 static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
330                                     void **wait_handle)
331 {
332         unsigned long tid;
333         transaction_t *transaction;
334 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
335         unsigned long rtid;
336 #endif
337         handle_t *handle = h;
338         journal_t *journal;
339         int rc;
340
341         LASSERT(current->journal_info == handle);
342
343         lock_kernel();
344         transaction = handle->h_transaction;
345         journal = transaction->t_journal;
346         tid = transaction->t_tid;
347         /* we don't want to be blocked */
348         handle->h_sync = 0;
349         rc = journal_stop(handle);
350         if (rc) {
351                 CERROR("error while stopping transaction: %d\n", rc);
352                 unlock_kernel();
353                 return rc;
354         }
355 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
356         rtid = log_start_commit(journal, transaction);
357         if (rtid != tid)
358                 CERROR("strange race: %lu != %lu\n",
359                        (unsigned long) tid, (unsigned long) rtid);
360 #else
361         log_start_commit(journal, transaction->t_tid);
362 #endif
363         unlock_kernel();
364
365         *wait_handle = (void *) tid;
366         CDEBUG(D_INODE, "commit async: %lu\n", (unsigned long) tid);
367         return 0;
368 }
369
370 static int fsfilt_ext3_commit_wait(struct inode *inode, void *h)
371 {
372         tid_t tid = (tid_t)(long)h;
373
374         CDEBUG(D_INODE, "commit wait: %lu\n", (unsigned long) tid);
375         if (is_journal_aborted(EXT3_JOURNAL(inode)))
376                 return -EIO;
377
378         log_wait_commit(EXT3_JOURNAL(inode), tid);
379
380         return 0;
381 }
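/*
 * Hypothetical usage sketch: kick off the commit without blocking, do other
 * work (e.g. send an early reply), then wait until the transaction is
 * durable before acknowledging it as committed.
 */
#if 0
static int example_commit_and_wait(struct inode *inode, void *handle)
{
        void *wait_handle;
        int rc;

        rc = fsfilt_ext3_commit_async(inode, handle, &wait_handle);
        if (rc)
                return rc;
        /* ... work that may overlap with the commit goes here ... */
        return fsfilt_ext3_commit_wait(inode, wait_handle);
}
#endif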
382
383 static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
384                                struct iattr *iattr, int do_trunc)
385 {
386         struct inode *inode = dentry->d_inode;
387         int rc;
388
389         lock_kernel();
390
391         /* A _really_ horrible hack to avoid removing the data stored
392          * in the block pointers; this is really the "small" stripe MD data.
393          * We can avoid further hackery by virtue of the MDS file size being
394          * zero all the time (which doesn't invoke block truncate at unlink
395          * time), so we assert we never change the MDS file size from zero. */
396         if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
397                 /* ATTR_SIZE would invoke truncate: clear it */
398                 iattr->ia_valid &= ~ATTR_SIZE;
399                 EXT3_I(inode)->i_disksize = inode->i_size = iattr->ia_size;
400
401                 /* make sure _something_ gets set - so the new inode
402                  * goes to disk (probably won't work over XFS) */
403                 if (!(iattr->ia_valid & (ATTR_MODE | ATTR_MTIME | ATTR_CTIME))){
404                         iattr->ia_valid |= ATTR_MODE;
405                         iattr->ia_mode = inode->i_mode;
406                 }
407         }
408
409         /* Don't allow setattr to change file type */
410         iattr->ia_mode = (inode->i_mode & S_IFMT)|(iattr->ia_mode & ~S_IFMT);
411
412         /* We set these flags on the client, but have already checked perms
413          * so don't confuse inode_change_ok. */
414         iattr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);
415
416         if (inode->i_op->setattr) {
417                 rc = inode->i_op->setattr(dentry, iattr);
418         } else {
419                 rc = inode_change_ok(inode, iattr);
420                 if (!rc)
421                         rc = inode_setattr(inode, iattr);
422         }
423
424         unlock_kernel();
425
426         return rc;
427 }
428
429 static int fsfilt_ext3_iocontrol(struct inode * inode, struct file *file,
430                                  unsigned int cmd, unsigned long arg)
431 {
432         int rc = 0;
433         ENTRY;
434
435         if (inode->i_fop->ioctl)
436                 rc = inode->i_fop->ioctl(inode, file, cmd, arg);
437         else
438                 RETURN(-ENOTTY);
439
440         RETURN(rc);
441 }
442
443 #undef INLINE_EA
444 #undef OLD_EA
445 static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
446                               void *lmm, int lmm_size)
447 {
448         int rc, old_ea = 0;
449
450         LASSERT(down_trylock(&inode->i_sem) != 0);
451
452 #ifdef INLINE_EA  /* can go away before 1.0 - just for testing bug 2097 now */
453         /* Nasty hack city - store stripe MD data in the block pointers if
454          * it will fit, because putting it in an EA currently kills the MDS
455          * performance.  We'll fix this with "fast EAs" in the future.
456          */
457         if (inode->i_blocks == 0 && lmm_size <= sizeof(EXT3_I(inode)->i_data) -
458                                             sizeof(EXT3_I(inode)->i_data[0])) {
459                 unsigned old_size = EXT3_I(inode)->i_data[0];
460                 if (old_size != 0) {
461                         LASSERT(old_size < sizeof(EXT3_I(inode)->i_data));
462                         CERROR("setting EA on %lu/%u again... interesting\n",
463                                inode->i_ino, inode->i_generation);
464                 }
465
466                 EXT3_I(inode)->i_data[0] = cpu_to_le32(lmm_size);
467                 memcpy(&EXT3_I(inode)->i_data[1], lmm, lmm_size);
468                 mark_inode_dirty(inode);
469                 return 0;
470         }
471 #endif
472 #ifdef OLD_EA
473         /* keep this when we get rid of OLD_EA (too noisy during conversion) */
474         if (EXT3_I(inode)->i_file_acl /* || large inode EA flag */) {
475                 CWARN("setting EA on %lu/%u again... interesting\n",
476                        inode->i_ino, inode->i_generation);
477                 old_ea = 1;
478         }
479
480         lock_kernel();
481         /* this can go away before 1.0.  For bug 2097 testing only. */
482         rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_LUSTRE,
483                                    XATTR_LUSTRE_MDS_OBJID, lmm, lmm_size, 0);
484 #else
485         lock_kernel();
486         rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
487                                    XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size, 0);
488
489         /* This tries to delete the old-format LOV EA, but only as long as we
490          * have successfully saved the new-format LOV EA (we can always try
491          * the conversion again the next time the file is accessed).  It is
492          * possible (although unlikely) that the new-format LOV EA couldn't be
493          * saved because it ran out of space, but we would need a file striped
494          * over at least 123 OSTs before the two EAs filled a 4kB block.
495          *
496          * This can be removed when all filesystems have converted to the
497          * new EA format, but otherwise adds little if any overhead.  If we
498          * wanted backward compatibility for existing files, we could keep
499          * the old EA around for a while but we'd have to clean it up later. */
500         if (rc >= 0 && old_ea) {
501                 int err = ext3_xattr_set_handle(handle, inode,
502                                                 EXT3_XATTR_INDEX_LUSTRE,
503                                                 XATTR_LUSTRE_MDS_OBJID,
504                                                 NULL, 0, 0);
505                 if (err)
506                         CERROR("error deleting old LOV EA on %lu/%u: rc %d\n",
507                                inode->i_ino, inode->i_generation, err);
508         }
509 #endif
510         unlock_kernel();
511
512         if (rc)
513                 CERROR("error adding MD data to inode %lu: rc = %d\n",
514                        inode->i_ino, rc);
515         return rc;
516 }
517
518 /* Must be called with i_sem held */
519 static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size)
520 {
521         int rc;
522
523         LASSERT(down_trylock(&inode->i_sem) != 0);
524         lock_kernel();
525         /* Keep support for reading "inline EAs" until we convert
526          * users over to new format entirely.  See bug 841/2097. */
527         if (inode->i_blocks == 0 && EXT3_I(inode)->i_data[0]) {
528                 unsigned size = le32_to_cpu(EXT3_I(inode)->i_data[0]);
529                 void *handle;
530
531                 LASSERT(size < sizeof(EXT3_I(inode)->i_data));
532                 if (lmm) {
533                         if (size > lmm_size) {
534                                 CERROR("inline EA on %lu/%u bad size %u > %u\n",
535                                        inode->i_ino, inode->i_generation,
536                                        size, lmm_size);
                                unlock_kernel();
537                                 return -ERANGE;
538                         }
539                         memcpy(lmm, &EXT3_I(inode)->i_data[1], size);
540                 }
541
542 #ifndef INLINE_EA
543                 /* migrate LOV EA data to external block - keep same format */
544                 CWARN("DEBUG: migrate inline EA for inode %lu/%u to block\n",
545                       inode->i_ino, inode->i_generation);
546
547                 handle = journal_start(EXT3_JOURNAL(inode),
548                                        EXT3_XATTR_TRANS_BLOCKS);
549                 if (!IS_ERR(handle)) {
550                         int err;
551                         rc = fsfilt_ext3_set_md(inode, handle,
552                                                 &EXT3_I(inode)->i_data[1],size);
553                         if (rc == 0) {
554                                 memset(EXT3_I(inode)->i_data, 0,
555                                        sizeof(EXT3_I(inode)->i_data));
556                                 mark_inode_dirty(inode);
557                         }
558                         err = journal_stop(handle);
559                         if (err && rc == 0)
560                                 rc = err;
561                 } else {
562                         rc = PTR_ERR(handle);
563                 }
564 #endif
565                 unlock_kernel();
566                 return size;
567         }
568
569         rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
570                             XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size);
571         /* try old EA type if new one failed - MDS will convert it for us */
572         if (rc == -ENODATA) {
573                 CDEBUG(D_INFO,"failed new LOV EA %d/%s from inode %lu: rc %d\n",
574                        EXT3_XATTR_INDEX_TRUSTED, XATTR_LUSTRE_MDS_LOV_EA,
575                        inode->i_ino, rc);
576
577                 rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_LUSTRE,
578                                     XATTR_LUSTRE_MDS_OBJID, lmm, lmm_size);
579         }
580         unlock_kernel();
581
582         /* This gives us the MD size */
583         if (lmm == NULL)
584                 return (rc == -ENODATA) ? 0 : rc;
585
586         if (rc < 0) {
587                 CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
588                        EXT3_XATTR_INDEX_LUSTRE, XATTR_LUSTRE_MDS_OBJID,
589                        inode->i_ino, rc);
590                 memset(lmm, 0, lmm_size);
591                 return (rc == -ENODATA) ? 0 : rc;
592         }
593
594         return rc;
595 }
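/*
 * Sketch of the implied two-call pattern (hypothetical caller, assuming the
 * usual OBD_ALLOC wrapper): probe with lmm == NULL to learn the EA size,
 * then allocate a buffer and fetch the data, all under i_sem as required.
 */
#if 0
static int example_get_lov_ea(struct inode *inode, void **lmmp, int *sizep)
{
        int size, rc;

        down(&inode->i_sem);
        size = fsfilt_ext3_get_md(inode, NULL, 0);
        if (size <= 0) {
                rc = size;
                goto out;
        }
        OBD_ALLOC(*lmmp, size);
        if (*lmmp == NULL) {
                rc = -ENOMEM;
                goto out;
        }
        rc = fsfilt_ext3_get_md(inode, *lmmp, size);
        if (rc >= 0)
                *sizep = rc;
out:
        up(&inode->i_sem);
        return rc;
}
#endif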
596
597 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
598 static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct bio *bio)
599 {
600         submit_bio(rw, bio);
601         return 0;
602 }
603 #else
604 static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct kiobuf *bio)
605 {
606         int rc, blocks_per_page;
607
608         rc = brw_kiovec(rw, 1, &bio, inode->i_dev,
609                         bio->blocks, 1 << inode->i_blkbits);
610
611         blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
612
613         if (rc != (1 << inode->i_blkbits) * bio->nr_pages * blocks_per_page) {
614                 CERROR("short write?  expected %d, wrote %d\n",
615                        (1 << inode->i_blkbits) * bio->nr_pages *
616                        blocks_per_page, rc);
617         }
618
619         return rc;
620 }
621 #endif
622
623 static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
624                                     loff_t *off)
625 {
626         struct inode *inode = file->f_dentry->d_inode;
627         int rc = 0;
628
629         if (S_ISREG(inode->i_mode))
630                 rc = file->f_op->read(file, buf, count, off);
631         else {
632                 const int blkbits = inode->i_sb->s_blocksize_bits;
633                 const int blksize = inode->i_sb->s_blocksize;
634
635                 CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
636                        count, inode->i_ino, *off);
637                 while (count > 0) {
638                         struct buffer_head *bh;
639
640                         bh = NULL;
641                         if (*off < inode->i_size) {
642                                 int err = 0;
643
644                                 bh = ext3_bread(NULL, inode, *off >> blkbits,
645                                                 0, &err);
646
647                                 CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);
648
649                                 if (bh) {
650                                         memcpy(buf, bh->b_data, blksize);
651                                         brelse(bh);
652                                 } else if (err) {
653                                         /* XXX in theory we should just fake
654                                          * this buffer and continue like ext3,
655                                          * especially if this is a partial read
656                                          */
657                                         CERROR("error reading dir %lu+%llu: %d\n",
658                                                inode->i_ino, *off, err);
659                                         RETURN(err);
660                                 }
661                         }
662                         if (!bh) {
663                                 struct ext3_dir_entry_2 *fake = (void *)buf;
664
665                                 CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
666                                 memset(fake, 0, sizeof(*fake));
667                         fake->rec_len = cpu_to_le16(blksize);
668                         }
669                         count -= blksize;
670                         buf += blksize;
671                         *off += blksize;
672                         rc += blksize;
673                 }
674         }
675
676         return rc;
677 }
678
679 static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
680 {
681         struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;
682
683         fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);
684
685         OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
686         atomic_dec(&fcb_cache_count);
687 }
688
689 static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
690                                       void *handle, fsfilt_cb_t cb_func,
691                                       void *cb_data)
692 {
693         struct fsfilt_cb_data *fcb;
694
695         OBD_SLAB_ALLOC(fcb, fcb_cache, GFP_NOFS, sizeof *fcb);
696         if (fcb == NULL)
697                 RETURN(-ENOMEM);
698
699         atomic_inc(&fcb_cache_count);
700         fcb->cb_func = cb_func;
701         fcb->cb_obd = obd;
702         fcb->cb_last_rcvd = last_rcvd;
703         fcb->cb_data = cb_data;
704
705         CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
706         lock_24kernel();
707         journal_callback_set(handle, fsfilt_ext3_cb_func,
708                              (struct journal_callback *)fcb);
709         unlock_24kernel();
710
711         return 0;
712 }
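/*
 * Hypothetical example: a commit callback that records the highest transno
 * known to be on stable storage.  The obd_last_committed update shown here
 * is illustrative; real servers do this as part of last_rcvd handling.  It
 * would be registered, while the handle is still open, with
 * fsfilt_ext3_add_journal_cb(obd, transno, handle, example_commit_cb, NULL).
 */
#if 0
static void example_commit_cb(struct obd_device *obd, __u64 last_rcvd,
                              void *cb_data, int error)
{
        if (error == 0 && last_rcvd > obd->obd_last_committed)
                obd->obd_last_committed = last_rcvd;
}
#endif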
713
714 /*
715  * We need to hack the return value for the free inode counts because
716  * the current EA code requires one filesystem block per inode with EAs,
717  * so it is possible to run out of blocks before we run out of inodes.
718  *
719  * This can be removed when the ext3 EA code is fixed.
720  */
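/*
 * Worked example (illustrative): with f_files = 2000, f_ffree = 1000 and
 * f_bfree = 100, the code below reports f_files = (2000 - 1000) + 100 =
 * 1100 and f_ffree = 100, i.e. at most one more EA-bearing inode per
 * remaining free block.
 */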
721 static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
722 {
723         struct kstatfs sfs;
724         int rc;
725
726         memset(&sfs, 0, sizeof(sfs));
727
728         rc = sb->s_op->statfs(sb, &sfs);
729
730         if (!rc && sfs.f_bfree < sfs.f_ffree) {
731                 sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
732                 sfs.f_ffree = sfs.f_bfree;
733         }
734
735         statfs_pack(osfs, &sfs);
736         return rc;
737 }
738
739 static int fsfilt_ext3_sync(struct super_block *sb)
740 {
741         return ext3_force_commit(sb);
742 }
743
744 #ifdef EXT3_MULTIBLOCK_ALLOCATOR
745 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
746 #define ext3_up_truncate_sem(inode)    up_write(&EXT3_I(inode)->truncate_sem)
747 #define ext3_down_truncate_sem(inode)  down_write(&EXT3_I(inode)->truncate_sem)
748 #else
749 #define ext3_up_truncate_sem(inode)    up(&EXT3_I(inode)->truncate_sem)
750 #define ext3_down_truncate_sem(inode)  down(&EXT3_I(inode)->truncate_sem)
751 #endif
752
753 #include <linux/lustre_version.h>
754 #if EXT3_EXT_MAGIC == 0xf301
755 #define ee_start e_start
756 #define ee_block e_block
757 #define ee_len   e_num
758 #endif
759 #ifndef EXT3_BB_MAX_BLOCKS
760 #define ext3_mb_new_blocks(handle, inode, goal, count, aflags, err) \
761         ext3_new_blocks(handle, inode, count, goal, err)
762 #endif
763
764 struct bpointers {
765         unsigned long *blocks;
766         int *created;
767         unsigned long start;
768         int num;
769         int init_num;
770         int create;
771 };
772
773 static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
774                                 unsigned long block, int *aflags)
775 {
776         struct ext3_inode_info *ei = EXT3_I(inode);
777         unsigned long bg_start;
778         unsigned long colour;
779         int depth;
780
781         if (path) {
782                 struct ext3_extent *ex;
783                 depth = path->p_depth;
784
785                 /* try to predict block placement */
786                 if ((ex = path[depth].p_ext)) {
787 #if 0
788                         /* This prefers to eat into a contiguous extent
789                          * rather than find an extent that the whole
790                          * request will fit into.  This can fragment data
791                          * block allocation and prevents our lovely 1M I/Os
792                          * from reaching the disk intact. */
793                         if (ex->ee_block + ex->ee_len == block)
794                                 *aflags |= 1;
795 #endif
796                         return ex->ee_start + (block - ex->ee_block);
797                 }
798
799                 /* it looks like the index is empty;
800                  * try to find a starting point from the index itself */
801                 if (path[depth].p_bh)
802                         return path[depth].p_bh->b_blocknr;
803         }
804
805         /* OK, use the inode's group */
806         bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
807                 le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
808         colour = (current->pid % 16) *
809                         (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
810         return bg_start + colour + block;
811 }
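/*
 * Illustrative numbers for the colouring above: with 32768 blocks per
 * group, a process with pid 4103 (4103 % 16 == 7) gets colour
 * 7 * (32768 / 16) = 14336, spreading concurrent allocators out within
 * the inode's block group.
 */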
812
813 static int ext3_ext_new_extent_cb(struct ext3_extents_tree *tree,
814                                   struct ext3_ext_path *path,
815                                   struct ext3_extent *newex, int exist)
816 {
817         struct inode *inode = tree->inode;
818         struct bpointers *bp = tree->private;
819         int count, err, goal;
820         unsigned long pblock;
821         unsigned long tgen;
822         loff_t new_i_size;
823         handle_t *handle;
824         int i, aflags = 0;
825
826         i = EXT_DEPTH(tree);
827         EXT_ASSERT(i == path->p_depth);
828         EXT_ASSERT(path[i].p_hdr);
829
830         if (exist) {
831                 err = EXT_CONTINUE;
832                 goto map;
833         }
834
835         if (bp->create == 0) {
836                 i = 0;
837                 if (newex->ee_block < bp->start)
838                         i = bp->start - newex->ee_block;
839                 if (i >= newex->ee_len)
840                         CERROR("nothing to do?! i = %d, e_num = %u\n",
841                                         i, newex->ee_len);
842                 for (; i < newex->ee_len && bp->num; i++) {
843                         *(bp->created) = 0;
845                         bp->created++;
846                         *(bp->blocks) = 0;
847                         bp->blocks++;
848                         bp->num--;
849                         bp->start++;
850                 }
851
852                 return EXT_CONTINUE;
853         }
854
855         tgen = EXT_GENERATION(tree);
856         count = ext3_ext_calc_credits_for_insert(tree, path);
857         ext3_up_truncate_sem(inode);
858
859         lock_24kernel();
860         handle = journal_start(EXT3_JOURNAL(inode), count + EXT3_ALLOC_NEEDED + 1);
861         unlock_24kernel();
862         if (IS_ERR(handle)) {
863                 ext3_down_truncate_sem(inode);
864                 return PTR_ERR(handle);
865         }
866
867         if (tgen != EXT_GENERATION(tree)) {
868                 /* the tree has changed, so the path may be invalid now */
869                 lock_24kernel();
870                 journal_stop(handle);
871                 unlock_24kernel();
872                 ext3_down_truncate_sem(inode);
873                 return EXT_REPEAT;
874         }
875
876         ext3_down_truncate_sem(inode);
877         count = newex->ee_len;
878         goal = ext3_ext_find_goal(inode, path, newex->ee_block, &aflags);
879         aflags |= 2; /* blocks have already been reserved */
880         pblock = ext3_mb_new_blocks(handle, inode, goal, &count, aflags, &err);
881         if (!pblock)
882                 goto out;
883         EXT_ASSERT(count <= newex->ee_len);
884
885         /* insert new extent */
886         newex->ee_start = pblock;
887         newex->ee_len = count;
888         err = ext3_ext_insert_extent(handle, tree, path, newex);
889         if (err)
890                 goto out;
891
892         /* correct on-disk inode size */
893         if (newex->ee_len > 0) {
894                 new_i_size = (loff_t) newex->ee_block + newex->ee_len;
895                 new_i_size = new_i_size << inode->i_blkbits;
896                 if (new_i_size > EXT3_I(inode)->i_disksize) {
897                         EXT3_I(inode)->i_disksize = new_i_size;
898                         err = ext3_mark_inode_dirty(handle, inode);
899                 }
900         }
901
902 out:
903         lock_24kernel();
904         journal_stop(handle);
905         unlock_24kernel();
906 map:
907         if (err >= 0) {
908                 /* map blocks */
909                 if (bp->num == 0) {
910                         CERROR("hmm, why did we find this extent?\n");
911                         CERROR("initial space: %lu:%u\n",
912                                 bp->start, bp->init_num);
913                         CERROR("current extent: %u/%u/%u %d\n",
914                                 newex->ee_block, newex->ee_len,
915                                 newex->ee_start, exist);
916                 }
917                 i = 0;
918                 if (newex->ee_block < bp->start)
919                         i = bp->start - newex->ee_block;
920                 if (i >= newex->ee_len)
921                         CERROR("nothing to do?! i = %d, e_num = %u\n",
922                                         i, newex->ee_len);
923                 for (; i < newex->ee_len && bp->num; i++) {
924                         *(bp->created) = (exist == 0 ? 1 : 0);
925                         bp->created++;
926                         *(bp->blocks) = newex->ee_start + i;
927                         bp->blocks++;
928                         bp->num--;
929                         bp->start++;
930                 }
931         }
932         return err;
933 }
934
935 int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
936                        unsigned long num, unsigned long *blocks,
937                        int *created, int create)
938 {
939         struct ext3_extents_tree tree;
940         struct bpointers bp;
941         int err;
942
943         CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
944                 block, block + num, (unsigned) inode->i_ino);
945
946         ext3_init_tree_desc(&tree, inode);
947         tree.private = &bp;
948         bp.blocks = blocks;
949         bp.created = created;
950         bp.start = block;
951         bp.init_num = bp.num = num;
952         bp.create = create;
953
954         ext3_down_truncate_sem(inode);
955         err = ext3_ext_walk_space(&tree, block, num, ext3_ext_new_extent_cb);
956         ext3_ext_invalidate_cache(&tree);
957         ext3_up_truncate_sem(inode);
958
959         return err;
960 }
961
962 int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
963                                     int pages, unsigned long *blocks,
964                                     int *created, int create)
965 {
966         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
967         int rc = 0, i = 0;
968         struct page *fp = NULL;
969         int clen = 0;
970
971         CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
972                 inode->i_ino, pages, (*page)->index);
973
974         /* the pages are already sorted, so we just have to find
975          * contiguous runs and process them properly */
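        /* e.g. page indices { 3, 4, 5, 9 } form two runs, triggering two
         * fsfilt_map_nblocks() calls below: one for [3..5], one for [9] */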
976         while (i < pages) {
977                 if (fp == NULL) {
978                         /* start new extent */
979                         fp = *page++;
980                         clen = 1;
981                         i++;
982                         continue;
983                 } else if (fp->index + clen == (*page)->index) {
984                         /* continue the extent */
985                         page++;
986                         clen++;
987                         i++;
988                         continue;
989                 }
990
991                 /* process found extent */
992                 rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
993                                         clen * blocks_per_page, blocks,
994                                         created, create);
995                 if (rc)
996                         GOTO(cleanup, rc);
997
998                 /* look for next extent */
999                 fp = NULL;
1000                 blocks += blocks_per_page * clen;
1001                 created += blocks_per_page * clen;
1002         }
1003
1004         if (fp)
1005                 rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
1006                                         clen * blocks_per_page, blocks,
1007                                         created, create);
1008 cleanup:
1009         return rc;
1010 }
1011 #endif
1012
1013 extern int ext3_map_inode_page(struct inode *inode, struct page *page,
1014                                unsigned long *blocks, int *created, int create);
1015 int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
1016                                    int pages, unsigned long *blocks,
1017                                    int *created, int create)
1018 {
1019         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
1020         unsigned long *b;
1021         int rc = 0, i, *cr;
1022
1023         for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
1024                 rc = ext3_map_inode_page(inode, *page, b, cr, create);
1025                 if (rc) {
1026                         CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
1027                                inode->i_ino, *b, *cr, create, rc);
1028                         break;
1029                 }
1030
1031                 b += blocks_per_page;
1032                 cr += blocks_per_page;
1033         }
1034         return rc;
1035 }
1036
1037 int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
1038                                 int pages, unsigned long *blocks,
1039                                 int *created, int create,
1040                                 struct semaphore *optional_sem)
1041 {
1042         int rc;
1043 #ifdef EXT3_MULTIBLOCK_ALLOCATOR
1044         if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
1045                 rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
1046                                                      blocks, created, create);
1047                 return rc;
1048         }
1049 #endif
1050         if (optional_sem != NULL)
1051                 down(optional_sem);
1052         rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
1053                                             created, create);
1054         if (optional_sem != NULL)
1055                 up(optional_sem);
1056
1057         return rc;
1058 }
1059
1060 extern int ext3_prep_san_write(struct inode *inode, long *blocks,
1061                                int nblocks, loff_t newsize);
1062 static int fsfilt_ext3_prep_san_write(struct inode *inode, long *blocks,
1063                                       int nblocks, loff_t newsize)
1064 {
1065         return ext3_prep_san_write(inode, blocks, nblocks, newsize);
1066 }
1067
1068 static int fsfilt_ext3_read_record(struct file * file, void *buf,
1069                                    int size, loff_t *offs)
1070 {
1071         struct inode *inode = file->f_dentry->d_inode;
1072         unsigned long block;
1073         struct buffer_head *bh;
1074         int err, blocksize, csize, boffs;
1075
1076         /* prevent reading after eof */
1077         lock_kernel();
1078         if (inode->i_size < *offs + size) {
1079                 size = inode->i_size - *offs;
1080                 unlock_kernel();
1081                 if (size < 0) {
1082                         CERROR("size %llu is too short for read %d@%llu\n",
1083                                inode->i_size, size, *offs);
1084                         return -EIO;
1085                 } else if (size == 0) {
1086                         return 0;
1087                 }
1088         } else {
1089                 unlock_kernel();
1090         }
1091
1092         blocksize = 1 << inode->i_blkbits;
1093
1094         while (size > 0) {
1095                 block = *offs >> inode->i_blkbits;
1096                 boffs = *offs & (blocksize - 1);
1097                 csize = min(blocksize - boffs, size);
1098                 bh = ext3_bread(NULL, inode, block, 0, &err);
1099                 if (!bh) {
1100                         CERROR("can't read block: %d\n", err);
1101                         return err;
1102                 }
1103
1104                 memcpy(buf, bh->b_data + boffs, csize);
1105                 brelse(bh);
1106
1107                 *offs += csize;
1108                 buf += csize;
1109                 size -= csize;
1110         }
1111         return 0;
1112 }
1113
1114 static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
1115                                     loff_t *offs, int force_sync)
1116 {
1117         struct buffer_head *bh = NULL;
1118         unsigned long block;
1119         struct inode *inode = file->f_dentry->d_inode;
1120         loff_t old_size = inode->i_size, offset = *offs;
1121         loff_t new_size = inode->i_size;
1122         journal_t *journal;
1123         handle_t *handle;
1124         int err, block_count = 0, blocksize, size, boffs;
1125
1126         /* Determine how many transaction credits are needed */
1127         blocksize = 1 << inode->i_blkbits;
1128         block_count = (*offs & (blocksize - 1)) + bufsize;
1129         block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
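        /* e.g. with 4096-byte blocks, *offs = 4094 and bufsize = 10 straddle
         * a block boundary: (4094 & 4095) + 10 = 4104, so block_count =
         * (4104 + 4095) >> 12 = 2 */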
1130
1131         journal = EXT3_SB(inode->i_sb)->s_journal;
1132         lock_24kernel();
1133         handle = journal_start(journal,
1134                                block_count * EXT3_DATA_TRANS_BLOCKS + 2);
1135         unlock_24kernel();
1136         if (IS_ERR(handle)) {
1137                 CERROR("can't start transaction: rc = %ld\n", PTR_ERR(handle));
1138                 return PTR_ERR(handle);
1139         }
1140
1141         while (bufsize > 0) {
1142                 if (bh != NULL)
1143                         brelse(bh);
1144
1145                 block = offset >> inode->i_blkbits;
1146                 boffs = offset & (blocksize - 1);
1147                 size = min(blocksize - boffs, bufsize);
1148                 bh = ext3_bread(handle, inode, block, 1, &err);
1149                 if (!bh) {
1150                         CERROR("can't read/create block: %d\n", err);
1151                         goto out;
1152                 }
1153
1154                 err = ext3_journal_get_write_access(handle, bh);
1155                 if (err) {
1156                         CERROR("journal_get_write_access() returned error %d\n",
1157                                err);
1158                         goto out;
1159                 }
1160                 LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
1161                 memcpy(bh->b_data + boffs, buf, size);
1162                 err = ext3_journal_dirty_metadata(handle, bh);
1163                 if (err) {
1164                         CERROR("journal_dirty_metadata() returned error %d\n",
1165                                err);
1166                         goto out;
1167                 }
1168                 if (offset + size > new_size)
1169                         new_size = offset + size;
1170                 offset += size;
1171                 bufsize -= size;
1172                 buf += size;
1173         }
1174
1175         if (force_sync)
1176                 handle->h_sync = 1; /* recovery likes this */
1177 out:
1178         if (bh)
1179                 brelse(bh);
1180
1181         /* correct in-core and on-disk sizes */
1182         if (new_size > inode->i_size) {
1183                 lock_kernel();
1184                 if (new_size > inode->i_size)
1185                         inode->i_size = new_size;
1186                 if (inode->i_size > EXT3_I(inode)->i_disksize)
1187                         EXT3_I(inode)->i_disksize = inode->i_size;
1188                 if (inode->i_size > old_size)
1189                         mark_inode_dirty(inode);
1190                 unlock_kernel();
1191         }
1192
1193         lock_24kernel();
1194         journal_stop(handle);
1195         unlock_24kernel();
1196
1197         if (err == 0)
1198                 *offs = offset;
1199         return err;
1200 }
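/*
 * Hypothetical round-trip sketch: append a record with a synchronous
 * commit, then read it back.  write_record advances *offs past the data
 * on success.
 */
#if 0
static int example_record_roundtrip(struct file *file)
{
        __u64 rec = 42, check = 0;
        loff_t off = 0, roff;
        int rc;

        rc = fsfilt_ext3_write_record(file, &rec, sizeof(rec), &off, 1);
        if (rc)
                return rc;
        roff = off - sizeof(rec);
        rc = fsfilt_ext3_read_record(file, &check, sizeof(check), &roff);
        if (rc == 0 && check != rec)
                rc = -EIO;      /* illustrative sanity check */
        return rc;
}
#endif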
1201
1202 static int fsfilt_ext3_setup(struct super_block *sb)
1203 {
1204 #if 0
1205         EXT3_SB(sb)->dx_lock = fsfilt_ext3_dx_lock;
1206         EXT3_SB(sb)->dx_unlock = fsfilt_ext3_dx_unlock;
1207 #endif
1208 #ifdef S_PDIROPS
1209         CWARN("Enabling PDIROPS\n");
1210         set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
1211         sb->s_flags |= S_PDIROPS;
1212 #endif
1213         return 0;
1214 }
1215
1216 /* If fso is NULL, op is an FSFILT operation; otherwise op is the number of
1217    fso objects.  logs is the number of log files to update. */
1218 static int fsfilt_ext3_get_op_len(int op, struct fsfilt_objinfo *fso, int logs)
1219 {
1220         if ( !fso ) {
1221                 switch(op) {
1222                 case FSFILT_OP_CREATE:
1223                                  /* directory leaf, index & indirect & EA */
1224                         return 4 + 3 * logs;
1225                 case FSFILT_OP_UNLINK:
1226                         return 3 * logs;
1227                 }
1228         } else {
1229                 int i;
1230                 int needed = 0;
1231                 struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
1232                 int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
1233                 int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
1234                 for (i = 0; i < op; i++, fso++) {
1235                         int nblocks = fso->fso_bufcnt * blockpp;
1236                         int ndindirect = min(nblocks, addrpp + 1);
1237                         int nindir = nblocks + ndindirect + 1;
1238
1239                         needed += nindir;
1240                 }
1241                 return needed + 3 * logs;
1242         }
1243
1244         return 0;
1245 }
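/*
 * Worked example (illustrative): fsfilt_ext3_get_op_len(FSFILT_OP_CREATE,
 * NULL, 1) returns 4 + 3 * 1 = 7 blocks.  With fso set and op = 1 object of
 * one page on a 4kB-page/4kB-block filesystem (blockpp = 1, addrpp = 1024),
 * nblocks = 1, ndindirect = min(1, 1025) = 1, nindir = 3, so it returns
 * 3 + 3 * logs.
 */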
1246
1247 static struct fsfilt_operations fsfilt_ext3_ops = {
1248         .fs_type                = "ext3",
1249         .fs_owner               = THIS_MODULE,
1250         .fs_start               = fsfilt_ext3_start,
1251         .fs_brw_start           = fsfilt_ext3_brw_start,
1252         .fs_commit              = fsfilt_ext3_commit,
1253         .fs_commit_async        = fsfilt_ext3_commit_async,
1254         .fs_commit_wait         = fsfilt_ext3_commit_wait,
1255         .fs_setattr             = fsfilt_ext3_setattr,
1256         .fs_iocontrol           = fsfilt_ext3_iocontrol,
1257         .fs_set_md              = fsfilt_ext3_set_md,
1258         .fs_get_md              = fsfilt_ext3_get_md,
1259         .fs_readpage            = fsfilt_ext3_readpage,
1260         .fs_add_journal_cb      = fsfilt_ext3_add_journal_cb,
1261         .fs_statfs              = fsfilt_ext3_statfs,
1262         .fs_sync                = fsfilt_ext3_sync,
1263         .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
1264         .fs_prep_san_write      = fsfilt_ext3_prep_san_write,
1265         .fs_write_record        = fsfilt_ext3_write_record,
1266         .fs_read_record         = fsfilt_ext3_read_record,
1267         .fs_setup               = fsfilt_ext3_setup,
1268         .fs_send_bio            = fsfilt_ext3_send_bio,
1269         .fs_get_op_len          = fsfilt_ext3_get_op_len,
1270 };
1271
1272 static int __init fsfilt_ext3_init(void)
1273 {
1274         int rc;
1275
1276         fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
1277                                       sizeof(struct fsfilt_cb_data), 0,
1278                                       0, NULL, NULL);
1279         if (!fcb_cache) {
1280                 CERROR("error allocating fsfilt journal callback cache\n");
1281                 GOTO(out, rc = -ENOMEM);
1282         }
1283
1284         rc = fsfilt_register_ops(&fsfilt_ext3_ops);
1285
1286         if (rc)
1287                 kmem_cache_destroy(fcb_cache);
1288 out:
1289         return rc;
1290 }
1291
1292 static void __exit fsfilt_ext3_exit(void)
1293 {
1294         fsfilt_unregister_ops(&fsfilt_ext3_ops);
1295         LASSERTF(kmem_cache_destroy(fcb_cache) == 0,
1296                  "can't free fsfilt callback cache: count %d\n",
1297                  atomic_read(&fcb_cache_count));
1298 }
1299
1300 module_init(fsfilt_ext3_init);
1301 module_exit(fsfilt_ext3_exit);
1302
1303 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
1304 MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
1305 MODULE_LICENSE("GPL");