Whamcloud - gitweb
Land b_smallfix onto HEAD (20040512_1806)
[fs/lustre-release.git] / lustre / lvfs / fsfilt_ext3.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  lustre/lib/fsfilt_ext3.c
5  *  Lustre filesystem abstraction routines
6  *
7  *  Copyright (C) 2002, 2003 Cluster File Systems, Inc.
8  *   Author: Andreas Dilger <adilger@clusterfs.com>
9  *
10  *   This file is part of Lustre, http://www.lustre.org.
11  *
12  *   Lustre is free software; you can redistribute it and/or
13  *   modify it under the terms of version 2 of the GNU General Public
14  *   License as published by the Free Software Foundation.
15  *
16  *   Lustre is distributed in the hope that it will be useful,
17  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  *   GNU General Public License for more details.
20  *
21  *   You should have received a copy of the GNU General Public License
22  *   along with Lustre; if not, write to the Free Software
23  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24  */
25
26 #define DEBUG_SUBSYSTEM S_FILTER
27
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/fs.h>
31 #include <linux/jbd.h>
32 #include <linux/slab.h>
33 #include <linux/pagemap.h>
34 #include <linux/quotaops.h>
35 #include <linux/ext3_fs.h>
36 #include <linux/ext3_jbd.h>
37 #include <linux/version.h>
38 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
39 #include <linux/ext3_xattr.h>
40 #else
41 #include <ext3/xattr.h>
42 #endif
43
44 #include <linux/kp30.h>
45 #include <linux/lustre_fsfilt.h>
46 #include <linux/obd.h>
47 #include <linux/obd_class.h>
48 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
49 #include <linux/module.h>
50 #include <linux/iobuf.h>
51 #endif
52
53 #ifdef EXT3_MULTIBLOCK_ALLOCATOR
54 #include <linux/ext3_extents.h>
55 #endif
56
/* Slab cache for struct fsfilt_cb_data commit-callback records. */
static kmem_cache_t *fcb_cache;
/* Count of outstanding callback records (used to detect leaks on unload). */
static atomic_t fcb_cache_count = ATOMIC_INIT(0);
59
/* Per-transaction commit callback state, allocated from fcb_cache in
 * fsfilt_ext3_add_journal_cb() and freed in fsfilt_ext3_cb_func(). */
struct fsfilt_cb_data {
        struct journal_callback cb_jcb; /* jbd private data - MUST BE FIRST */
        fsfilt_cb_t cb_func;            /* MDS/OBD completion function */
        struct obd_device *cb_obd;      /* MDS/OBD completion device */
        __u64 cb_last_rcvd;             /* MDS/OST last committed operation */
        void *cb_data;                  /* MDS/OST completion function data */
};
67
#ifndef EXT3_XATTR_INDEX_TRUSTED        /* temporary until we hit l28 kernel */
#define EXT3_XATTR_INDEX_TRUSTED        4
#endif
/* Name of the trusted xattr holding the striping (LOV) EA on MDS inodes. */
#define XATTR_LUSTRE_MDS_LOV_EA         "lov"
72
/*
 * We don't currently need any additional blocks for rmdir and
 * unlink transactions because we are storing the OST oa_id inside
 * the inode (which we will be changing anyways as part of this
 * transaction).
 */
/* Open a journal handle with enough credits for metadata operation 'op',
 * plus per-stripe llog updates for 'logs' stripes.  If this thread already
 * has an open handle, journal_start() just increases its refcount.
 *
 * Returns the handle_t pointer, or an ERR_PTR() value on failure. */
static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
                               int logs)
{
        /* For updates to the last received file */
        int nblocks = EXT3_SINGLEDATA_TRANS_BLOCKS;
        journal_t *journal;
        void *handle;

        /* Nested start: skip the credit calculation, journal_start()
         * only bumps the refcount on the existing handle. */
        if (current->journal_info) {
                CDEBUG(D_INODE, "increasing refcount on %p\n",
                       current->journal_info);
                goto journal_start;
        }

        /* XXX BUG 3188 -- must return to one set of opcodes */
        /* FIXME - cache hook */
        if (op & 0x20) {
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS+EXT3_DATA_TRANS_BLOCKS;
                op = op & ~0x20;
        }

        /* FIXME - kml */
        if (op & 0x10) {
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS+EXT3_DATA_TRANS_BLOCKS;
                op = op & ~0x10;
        }

        /* NOTE: the cases below deliberately fall through ("no break"),
         * so more specific ops accumulate the credits of the general ones. */
        switch(op) {
        case FSFILT_OP_RMDIR:
        case FSFILT_OP_UNLINK:
                /* delete one file + create/update logs for each stripe */
                nblocks += EXT3_DELETE_TRANS_BLOCKS;
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                break;
        case FSFILT_OP_RENAME:
                /* modify additional directory */
                nblocks += EXT3_SINGLEDATA_TRANS_BLOCKS;
                /* no break */
        case FSFILT_OP_SYMLINK:
                /* additional block + block bitmap + GDT for long symlink */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_CREATE:
                /* create/update logs for each stripe */
                nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                            EXT3_SINGLEDATA_TRANS_BLOCKS) * logs;
                /* no break */
        case FSFILT_OP_MKDIR:
        case FSFILT_OP_MKNOD:
                /* modify one inode + block bitmap + GDT */
                nblocks += 3;
                /* no break */
        case FSFILT_OP_LINK:
                /* modify parent directory */
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS +
                        EXT3_DATA_TRANS_BLOCKS;
                break;
        case FSFILT_OP_SETATTR:
                /* Setattr on inode */
                nblocks += 1;
                break;
        case FSFILT_OP_CANCEL_UNLINK:
                /* blocks for log header bitmap update OR
                 * blocks for catalog header bitmap update + unlink of logs */
                nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
                        EXT3_DELETE_TRANS_BLOCKS * logs;
                break;
        case FSFILT_OP_NOOP:
                nblocks += EXT3_INDEX_EXTRA_TRANS_BLOCKS+EXT3_DATA_TRANS_BLOCKS;
                break;
        default: CERROR("unknown transaction start op %d\n", op);
                LBUG();
        }

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(inode->i_sb)->s_journal;
        /* Clamp the request: journal_start() would fail outright if we
         * asked for more credits than one transaction can ever hold. */
        if (nblocks > journal->j_max_transaction_buffers) {
                CERROR("too many credits %d for op %ux%u using %d instead\n",
                       nblocks, op, logs, journal->j_max_transaction_buffers);
                nblocks = journal->j_max_transaction_buffers;
        }

 journal_start:
        lock_kernel();
        handle = journal_start(EXT3_JOURNAL(inode), nblocks);
        unlock_kernel();

        if (!IS_ERR(handle))
                LASSERT(current->journal_info == handle);
        else
                CERROR("error starting handle for op %u (%u credits): rc %ld\n",
                       op, nblocks, PTR_ERR(handle));
        return handle;
}
174
/*
 * Calculate the number of buffer credits needed to write multiple pages in
 * a single ext3 transaction.  No, this shouldn't be here, but as yet ext3
 * doesn't have a nice API for calculating this sort of thing in advance.
 *
 * See comment above ext3_writepage_trans_blocks for details.  We assume
 * no data journaling is being done, but it does allow for all of the pages
 * being non-contiguous.  If we are guaranteed contiguous pages we could
 * reduce the number of (d)indirect blocks a lot.
 *
 * With N blocks per page and P pages, for each inode we have at most:
 * N*P indirect
 * min(N*P, blocksize/4 + 1) dindirect blocks
 * niocount tindirect
 *
 * For the entire filesystem, we have at most:
 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
 * objcount inode blocks
 * 1 superblock
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 *
 * 1 EXT3_DATA_TRANS_BLOCKS for the last_rcvd update.
 */
static int fsfilt_ext3_credits_needed(int objcount, struct fsfilt_objinfo *fso,
                                      int niocount, struct niobuf_local *nb)
{
        struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
        __u64 next_indir;
        const int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
        int nbitmaps = 0, ngdblocks;
        int needed = objcount + 1; /* inodes + superblock */
        int i, j;

        /* Per-object pass: dindirect/indirect blocks spanned by the whole
         * [first, last] offset range of each object's buffers. */
        for (i = 0, j = 0; i < objcount; i++, fso++) {
                /* two or more dindirect blocks in case we cross boundary */
                int ndind = (long)((nb[j + fso->fso_bufcnt - 1].offset -
                                    nb[j].offset) >>
                                   sb->s_blocksize_bits) /
                        (EXT3_ADDR_PER_BLOCK(sb) * EXT3_ADDR_PER_BLOCK(sb));
                nbitmaps += min(fso->fso_bufcnt, ndind > 0 ? ndind : 2);

                /* leaf, indirect, tindirect blocks for first block */
                nbitmaps += blockpp + 2;

                j += fso->fso_bufcnt;
        }

        /* Per-buffer pass: every time the I/O crosses into a new indirect
         * block's range, or is discontiguous with its predecessor, another
         * indirect block may be dirtied. */
        next_indir = nb[0].offset +
                (EXT3_ADDR_PER_BLOCK(sb) << sb->s_blocksize_bits);
        for (i = 1; i < niocount; i++) {
                if (nb[i].offset >= next_indir) {
                        nbitmaps++;     /* additional indirect */
                        next_indir = nb[i].offset +
                                (EXT3_ADDR_PER_BLOCK(sb)<<sb->s_blocksize_bits);
                } else if (nb[i].offset != nb[i - 1].offset + sb->s_blocksize) {
                        nbitmaps++;     /* additional indirect */
                }
                nbitmaps += blockpp;    /* each leaf in different group? */
        }

        /* There can't be more dirtied bitmap/GDT blocks than exist on disk. */
        ngdblocks = nbitmaps;
        if (nbitmaps > EXT3_SB(sb)->s_groups_count)
                nbitmaps = EXT3_SB(sb)->s_groups_count;
        if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
                ngdblocks = EXT3_SB(sb)->s_gdb_count;

        needed += nbitmaps + ngdblocks;

        /* last_rcvd update */
        needed += EXT3_DATA_TRANS_BLOCKS;

#if defined(CONFIG_QUOTA) && !defined(__x86_64__) /* XXX */
        /* We assume that there will be 1 bit set in s_dquot.flags for each
         * quota file that is active.  This is at least true for now.
         */
        needed += hweight32(sb_any_quota_enabled(sb)) *
                EXT3_SINGLEDATA_TRANS_BLOCKS;
#endif

        return needed;
}
257
/* We have to start a huge journal transaction here to hold all of the
 * metadata for the pages being written here.  This is necessitated by
 * the fact that we do lots of prepare_write operations before we do
 * any of the matching commit_write operations, so even if we split
 * up to use "smaller" transactions none of them could complete until
 * all of them were opened.  By having a single journal transaction,
 * we eliminate duplicate reservations for common blocks like the
 * superblock and group descriptors or bitmaps.
 *
 * We will start the transaction here, but each prepare_write will
 * add a refcount to the transaction, and each commit_write will
 * remove a refcount.  The transaction will be closed when all of
 * the pages have been written.
 */
/* Returns the open handle_t, or an ERR_PTR() value if journal_start()
 * failed.  Credits are estimated by fsfilt_ext3_credits_needed() and
 * clamped to the journal's per-transaction maximum. */
static void *fsfilt_ext3_brw_start(int objcount, struct fsfilt_objinfo *fso,
                                   int niocount, struct niobuf_local *nb,
                                   void *desc_private, int logs)
{
        journal_t *journal;
        handle_t *handle;
        int needed;
        ENTRY;

        LASSERT(current->journal_info == desc_private);
        journal = EXT3_SB(fso->fso_dentry->d_inode->i_sb)->s_journal;
        needed = fsfilt_ext3_credits_needed(objcount, fso, niocount, nb);

        /* The number of blocks we could _possibly_ dirty can very large.
         * We reduce our request if it is absurd (and we couldn't get that
         * many credits for a single handle anyways).
         *
         * At some point we have to limit the size of I/Os sent at one time,
         * increase the size of the journal, or we have to calculate the
         * actual journal requirements more carefully by checking all of
         * the blocks instead of being maximally pessimistic.  It remains to
         * be seen if this is a real problem or not.
         */
        if (needed > journal->j_max_transaction_buffers) {
                CERROR("want too many journal credits (%d) using %d instead\n",
                       needed, journal->j_max_transaction_buffers);
                needed = journal->j_max_transaction_buffers;
        }

        lock_kernel();
        handle = journal_start(journal, needed);
        unlock_kernel();
        if (IS_ERR(handle)) {
                CERROR("can't get handle for %d credits: rc = %ld\n", needed,
                       PTR_ERR(handle));
        } else {
                LASSERT(handle->h_buffer_credits >= needed);
                LASSERT(current->journal_info == handle);
        }

        RETURN(handle);
}
314
315 static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
316 {
317         int rc;
318         handle_t *handle = h;
319
320         LASSERT(current->journal_info == handle);
321         if (force_sync)
322                 handle->h_sync = 1; /* recovery likes this */
323
324         lock_kernel();
325         rc = journal_stop(handle);
326         unlock_kernel();
327
328         return rc;
329 }
330
331 static int fsfilt_ext3_commit_async(struct inode *inode, void *h,
332                                     void **wait_handle)
333 {
334         unsigned long tid;
335         transaction_t *transaction;
336 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
337         unsigned long rtid;
338 #endif
339         handle_t *handle = h;
340         journal_t *journal;
341         int rc;
342
343         LASSERT(current->journal_info == handle);
344
345         lock_kernel();
346         transaction = handle->h_transaction;
347         journal = transaction->t_journal;
348         tid = transaction->t_tid;
349         /* we don't want to be blocked */
350         handle->h_sync = 0;
351         rc = journal_stop(handle);
352         if (rc) {
353                 CERROR("error while stopping transaction: %d\n", rc);
354                 unlock_kernel();
355                 return rc;
356         }
357 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
358         rtid = log_start_commit(journal, transaction);
359         if (rtid != tid)
360                 CERROR("strange race: %lu != %lu\n",
361                        (unsigned long) tid, (unsigned long) rtid);
362 #else
363         log_start_commit(journal, transaction->t_tid);
364 #endif
365         unlock_kernel();
366
367         *wait_handle = (void *) tid;
368         CDEBUG(D_INODE, "commit async: %lu\n", (unsigned long) tid);
369         return 0;
370 }
371
372 static int fsfilt_ext3_commit_wait(struct inode *inode, void *h)
373 {
374         tid_t tid = (tid_t)(long)h;
375
376         CDEBUG(D_INODE, "commit wait: %lu\n", (unsigned long) tid);
377         if (is_journal_aborted(EXT3_JOURNAL(inode)))
378                 return -EIO;
379
380         log_wait_commit(EXT3_JOURNAL(inode), tid);
381
382         return 0;
383 }
384
/* Apply attribute changes to an inode under the BKL.  When do_trunc is
 * zero, a size change is recorded directly in i_size/i_disksize without
 * invoking truncate, to preserve the stripe MD stored in the block
 * pointers (see comment below).  Returns 0 or a negative errno. */
static int fsfilt_ext3_setattr(struct dentry *dentry, void *handle,
                               struct iattr *iattr, int do_trunc)
{
        struct inode *inode = dentry->d_inode;
        int rc;

        lock_kernel();

        /* A _really_ horrible hack to avoid removing the data stored
         * in the block pointers; this is really the "small" stripe MD data.
         * We can avoid further hackery by virtue of the MDS file size being
         * zero all the time (which doesn't invoke block truncate at unlink
         * time), so we assert we never change the MDS file size from zero. */
        if (iattr->ia_valid & ATTR_SIZE && !do_trunc) {
                /* ATTR_SIZE would invoke truncate: clear it */
                iattr->ia_valid &= ~ATTR_SIZE;
                EXT3_I(inode)->i_disksize = inode->i_size = iattr->ia_size;

                /* make sure _something_ gets set - so new inode
                 * goes to disk (probably won't work over XFS */
                if (!(iattr->ia_valid & (ATTR_MODE | ATTR_MTIME | ATTR_CTIME))){
                        iattr->ia_valid |= ATTR_MODE;
                        iattr->ia_mode = inode->i_mode;
                }
        }

        /* Don't allow setattr to change file type */
        iattr->ia_mode = (inode->i_mode & S_IFMT)|(iattr->ia_mode & ~S_IFMT);

        /* We set these flags on the client, but have already checked perms
         * so don't confuse inode_change_ok. */
        iattr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);

        /* Prefer the filesystem's own setattr; fall back to the generic
         * check + apply pair for filesystems without one. */
        if (inode->i_op->setattr) {
                rc = inode->i_op->setattr(dentry, iattr);
        } else {
                rc = inode_change_ok(inode, iattr);
                if (!rc)
                        rc = inode_setattr(inode, iattr);
        }

        unlock_kernel();

        return rc;
}
430
431 static int fsfilt_ext3_iocontrol(struct inode * inode, struct file *file,
432                                  unsigned int cmd, unsigned long arg)
433 {
434         int rc = 0;
435         ENTRY;
436
437         if (inode->i_fop->ioctl)
438                 rc = inode->i_fop->ioctl(inode, file, cmd, arg);
439         else
440                 RETURN(-ENOTTY);
441
442         RETURN(rc);
443 }
444
445 static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
446                               void *lmm, int lmm_size)
447 {
448         int rc;
449
450         /* keep this when we get rid of OLD_EA (too noisy during conversion) */
451         if (EXT3_I(inode)->i_file_acl /* || large inode EA flag */)
452                 CWARN("setting EA on %lu/%u again... interesting\n",
453                        inode->i_ino, inode->i_generation);
454
455         lock_kernel();
456         rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
457                                    XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size, 0);
458
459         unlock_kernel();
460
461         if (rc)
462                 CERROR("error adding MD data to inode %lu: rc = %d\n",
463                        inode->i_ino, rc);
464         return rc;
465 }
466
467 /* Must be called with i_sem held */
468 static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size)
469 {
470         int rc;
471
472         LASSERT(down_trylock(&inode->i_sem) != 0);
473         lock_kernel();
474
475         rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
476                             XATTR_LUSTRE_MDS_LOV_EA, lmm, lmm_size);
477         unlock_kernel();
478
479         /* This gives us the MD size */
480         if (lmm == NULL)
481                 return (rc == -ENODATA) ? 0 : rc;
482
483         if (rc < 0) {
484                 CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
485                        EXT3_XATTR_INDEX_TRUSTED, XATTR_LUSTRE_MDS_LOV_EA,
486                        inode->i_ino, rc);
487                 memset(lmm, 0, lmm_size);
488                 return (rc == -ENODATA) ? 0 : rc;
489         }
490
491         return rc;
492 }
493
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
/* 2.6: submit the pre-built bio for async write; completion is handled
 * by the bio's end_io callback, so always return 0 here. */
static int fsfilt_ext3_send_bio(struct inode *inode, struct bio *bio)
{
        submit_bio(WRITE, bio);
        return 0;
}
#else
/* 2.4: write the kiobuf synchronously via brw_kiovec() and sanity-check
 * that the full amount was written.  Returns bytes written (or errno
 * from brw_kiovec). */
static int fsfilt_ext3_send_bio(struct inode *inode, struct kiobuf *bio)
{
        int rc, blocks_per_page;

        rc = brw_kiovec(WRITE, 1, &bio, inode->i_dev,
                        bio->blocks, 1 << inode->i_blkbits);

        blocks_per_page = PAGE_SIZE >> inode->i_blkbits;

        if (rc != (1 << inode->i_blkbits) * bio->nr_pages *
            blocks_per_page) {
                CERROR("short write?  expected %d, wrote %d\n",
                       (1 << inode->i_blkbits) * bio->nr_pages *
                       blocks_per_page, rc);
        }

        return rc;
}
#endif
520
521 /* FIXME-UMKA: This should be used in 2.6.x io code later. */
522 static struct page *fsfilt_ext3_getpage(struct inode *inode, long int index)
523 {
524         int rc;
525         struct page *page;
526
527         page = grab_cache_page(inode->i_mapping, index);
528         if (page == NULL)
529                 return ERR_PTR(-ENOMEM);
530
531         if (PageUptodate(page)) {
532                 unlock_page(page);
533                 return page;
534         }
535
536         rc = inode->i_mapping->a_ops->readpage(NULL, page);
537         if (rc < 0) {
538                 page_cache_release(page);
539                 return ERR_PTR(rc);
540         }
541
542         return page;
543 }
544
545 static ssize_t fsfilt_ext3_readpage(struct file *file, char *buf, size_t count,
546                                     loff_t *off)
547 {
548         struct inode *inode = file->f_dentry->d_inode;
549         int rc = 0;
550
551         if (S_ISREG(inode->i_mode))
552                 rc = file->f_op->read(file, buf, count, off);
553         else {
554                 const int blkbits = inode->i_sb->s_blocksize_bits;
555                 const int blksize = inode->i_sb->s_blocksize;
556
557                 CDEBUG(D_EXT2, "reading "LPSZ" at dir %lu+%llu\n",
558                        count, inode->i_ino, *off);
559                 while (count > 0) {
560                         struct buffer_head *bh;
561
562                         bh = NULL;
563                         if (*off < inode->i_size) {
564                                 int err = 0;
565
566                                 bh = ext3_bread(NULL, inode, *off >> blkbits,
567                                                 0, &err);
568
569                                 CDEBUG(D_EXT2, "read %u@%llu\n", blksize, *off);
570
571                                 if (bh) {
572                                         memcpy(buf, bh->b_data, blksize);
573                                         brelse(bh);
574                                 } else if (err) {
575                                         /* XXX in theory we should just fake
576                                          * this buffer and continue like ext3,
577                                          * especially if this is a partial read
578                                          */
579                                         CERROR("error read dir %lu+%llu: %d\n",
580                                                inode->i_ino, *off, err);
581                                         RETURN(err);
582                                 }
583                         }
584                         if (!bh) {
585                                 struct ext3_dir_entry_2 *fake = (void *)buf;
586
587                                 CDEBUG(D_EXT2, "fake %u@%llu\n", blksize, *off);
588                                 memset(fake, 0, sizeof(*fake));
589                                 fake->rec_len = cpu_to_le32(blksize);
590                         }
591                         count -= blksize;
592                         buf += blksize;
593                         *off += blksize;
594                         rc += blksize;
595                 }
596         }
597
598         return rc;
599 }
600
601 static void fsfilt_ext3_cb_func(struct journal_callback *jcb, int error)
602 {
603         struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;
604
605         fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);
606
607         OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
608         atomic_dec(&fcb_cache_count);
609 }
610
/* Register cb_func(obd, last_rcvd, cb_data, error) to run when the
 * transaction behind 'handle' commits to disk.  The callback record is
 * freed by fsfilt_ext3_cb_func().  Returns 0 or -ENOMEM. */
static int fsfilt_ext3_add_journal_cb(struct obd_device *obd,
                                      struct super_block *sb,
                                      __u64 last_rcvd,
                                      void *handle, fsfilt_cb_t cb_func,
                                      void *cb_data)
{
        struct fsfilt_cb_data *fcb;

        OBD_SLAB_ALLOC(fcb, fcb_cache, GFP_NOFS, sizeof *fcb);
        if (fcb == NULL)
                RETURN(-ENOMEM);

        atomic_inc(&fcb_cache_count);
        fcb->cb_func = cb_func;
        fcb->cb_obd = obd;
        fcb->cb_last_rcvd = last_rcvd;
        fcb->cb_data = cb_data;

        CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
        lock_kernel();
        /* hand the record to jbd; cb_jcb must be the first member */
        journal_callback_set(handle, fsfilt_ext3_cb_func,
                             (struct journal_callback *)fcb);
        unlock_kernel();

        return 0;
}
637
638 /*
639  * We need to hack the return value for the free inode counts because
640  * the current EA code requires one filesystem block per inode with EAs,
641  * so it is possible to run out of blocks before we run out of inodes.
642  *
643  * This can be removed when the ext3 EA code is fixed.
644  */
645 static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
646 {
647         struct kstatfs sfs;
648         int rc;
649
650         memset(&sfs, 0, sizeof(sfs));
651
652         rc = sb->s_op->statfs(sb, &sfs);
653
654         if (!rc && sfs.f_bfree < sfs.f_ffree) {
655                 sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
656                 sfs.f_ffree = sfs.f_bfree;
657         }
658
659         statfs_pack(osfs, &sfs);
660         return rc;
661 }
662
/* Flush all pending metadata on 'sb' by forcing a journal commit. */
static int fsfilt_ext3_sync(struct super_block *sb)
{
        return ext3_force_commit(sb);
}
667
668 #ifdef EXT3_MULTIBLOCK_ALLOCATOR
/* Cursor passed (via tree->private) through ext3_ext_walk_space() to
 * collect the physical block numbers, and created flags, for a logical
 * block range of an inode. */
struct bpointers {
        unsigned long *blocks;  /* out: next physical block slot to fill */
        int *created;           /* out: 1 if block was newly allocated */
        unsigned long start;    /* first logical block requested */
        int num;                /* blocks still wanted */
        int init_num;           /* original request size (for debugging) */
};
676
/* Callback for ext3_ext_walk_space(): for an existing extent just map its
 * blocks into the caller's bpointers arrays; for a hole, allocate blocks,
 * insert the new extent, and update the on-disk inode size, then map.
 *
 * Called with EXT3_I(inode)->truncate_sem held for write; it is dropped
 * around ext3_journal_start() (presumably for lock ordering vs. the
 * journal — NOTE(review): confirm) and re-taken afterwards, with the tree
 * generation checked to detect concurrent modification. */
static int ext3_ext_new_extent_cb(struct ext3_extents_tree *tree,
                                  struct ext3_ext_path *path,
                                  struct ext3_extent *newex, int exist)
{
        struct inode *inode = tree->inode;
        struct bpointers *bp = tree->private;
        int count, err, goal;
        unsigned long pblock;
        unsigned long tgen;
        loff_t new_i_size;
        handle_t *handle;
        int i;

        i = EXT_DEPTH(tree);
        EXT_ASSERT(i == path->p_depth);
        EXT_ASSERT(path[i].p_hdr);

        /* existing extent: nothing to allocate, just map it */
        if (exist) {
                err = EXT_CONTINUE;
                goto map;
        }

        tgen = EXT_GENERATION(tree);
        count = ext3_ext_calc_credits_for_insert(tree, path);
        up_write(&EXT3_I(inode)->truncate_sem);

        handle = ext3_journal_start(inode, count + EXT3_ALLOC_NEEDED + 1);
        if (IS_ERR(handle)) {
                down_write(&EXT3_I(inode)->truncate_sem);
                return PTR_ERR(handle);
        }

        if (tgen != EXT_GENERATION(tree)) {
                /* the tree has changed. so path can be invalid at moment */
                ext3_journal_stop(handle, inode);
                down_write(&EXT3_I(inode)->truncate_sem);
                return EXT_REPEAT;
        }

        down_write(&EXT3_I(inode)->truncate_sem);
        goal = ext3_ext_find_goal(inode, path, newex->e_block);
        count = newex->e_num;
        /* may allocate fewer blocks than asked; count is updated in place */
        pblock = ext3_new_blocks(handle, inode, &count, goal, &err);
        if (!pblock)
                goto out;
        EXT_ASSERT(count <= newex->e_num);

        /* insert new extent */
        newex->e_start = pblock;
        newex->e_num = count;
        err = ext3_ext_insert_extent(handle, tree, path, newex);
        if (err)
                goto out;

        /* correct on-disk inode size */
        if (newex->e_num > 0) {
                new_i_size = (loff_t) newex->e_block + newex->e_num;
                new_i_size = new_i_size << inode->i_blkbits;
                if (new_i_size > EXT3_I(inode)->i_disksize) {
                        EXT3_I(inode)->i_disksize = new_i_size;
                        err = ext3_mark_inode_dirty(handle, inode);
                }
        }

out:
        ext3_journal_stop(handle, inode);
map:
        if (err >= 0) {
                /* map blocks: copy the extent's physical blocks into the
                 * caller's arrays, skipping any leading blocks before the
                 * requested start */
                if (bp->num == 0) {
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
                        CERROR("current extent: %u/%u/%u %d\n",
                                newex->e_block, newex->e_num,
                                newex->e_start, exist);
                }
                i = 0;
                if (newex->e_block < bp->start)
                        i = bp->start - newex->e_block;
                if (i >= newex->e_num)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, newex->e_num);
                for (; i < newex->e_num && bp->num; i++) {
                        *(bp->created) = (exist == 0 ? 1 : 0);
                        bp->created++;
                        *(bp->blocks) = newex->e_start + i;
                        bp->blocks++;
                        bp->num--;
                }
        }
        return err;
}
770
/* Map (allocating as needed, via ext3_ext_new_extent_cb) 'num' logical
 * blocks starting at 'block' of an extents-based inode into 'blocks',
 * with per-block 'created' flags.  Returns 0 or a negative errno.
 * NOTE: the 'create' argument is currently unused by this path. */
int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
                       unsigned long num, unsigned long *blocks,
                       int *created, int create)
{
        struct ext3_extents_tree tree;
        struct bpointers bp;
        int err;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
                block, block + num, (unsigned) inode->i_ino);

        ext3_init_tree_desc(&tree, inode);
        tree.private = &bp;
        bp.blocks = blocks;
        bp.created = created;
        bp.start = block;
        bp.init_num = bp.num = num;

        /* truncate_sem serializes against truncate; the walk callback
         * drops and re-takes it around journal starts */
        down_write(&EXT3_I(inode)->truncate_sem);
        err = ext3_ext_walk_space(&tree, block, num, ext3_ext_new_extent_cb);
        ext3_ext_invalidate_cache(&tree);
        up_write(&EXT3_I(inode)->truncate_sem);

        return err;
}
796
/* Map the blocks backing 'pages' page-cache pages of an extents-based
 * inode.  Pages are coalesced into runs of consecutive indices and each
 * run is mapped with one fsfilt_map_nblocks() call; 'blocks'/'created'
 * are filled with blocks_per_page entries per page.  Returns 0 or a
 * negative errno from the first failing run. */
int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
                                    int pages, unsigned long *blocks,
                                    int *created, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;         /* first page of the current run */
        int clen = 0;                   /* length of the current run */

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
                inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contig. space and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent; note i/page are NOT advanced here:
                 * the current page starts the next run on the next pass */
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
                created += blocks_per_page * clen;
        }

        /* flush the final run */
        if (fp)
                rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
                                        clen * blocks_per_page, blocks,
                                        created, create);
cleanup:
        return rc;
}
846 #endif
847
848 extern int ext3_map_inode_page(struct inode *inode, struct page *page,
849                                unsigned long *blocks, int *created, int create);
850 int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
851                                    int pages, unsigned long *blocks,
852                                    int *created, int create)
853 {
854         int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
855         unsigned long *b;
856         int rc = 0, i, *cr;
857
858         for (i = 0, cr = created, b = blocks; i < pages; i++, page++) {
859                 rc = ext3_map_inode_page(inode, *page, b, cr, create);
860                 if (rc) {
861                         CERROR("ino %lu, blk %lu cr %u create %d: rc %d\n",
862                                inode->i_ino, *b, *cr, create, rc);
863                         break;
864                 }
865
866                 b += blocks_per_page;
867                 cr += blocks_per_page;
868         }
869         return rc;
870 }
871
872 int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
873                                 int pages, unsigned long *blocks,
874                                 int *created, int create,
875                                 struct semaphore *optional_sem)
876 {
877         int rc;
878 #ifdef EXT3_MULTIBLOCK_ALLOCATOR
879         if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
880                 rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
881                                                      blocks, created, create);
882                 return rc;
883         }
884 #endif
885         if (optional_sem != NULL)
886                 down(optional_sem);
887         rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
888                                             created, create);
889         if (optional_sem != NULL)
890                 up(optional_sem);
891
892         return rc;
893 }
894
extern int ext3_prep_san_write(struct inode *inode, long *blocks,
                               int nblocks, loff_t newsize);
/* fsfilt hook for SAN write preparation: thin pass-through to the ext3
 * patch-provided ext3_prep_san_write() (declared above), which is expected
 * to allocate/map @nblocks blocks and extend the file to @newsize —
 * NOTE(review): exact semantics live in the ext3 patch, confirm there. */
static int fsfilt_ext3_prep_san_write(struct inode *inode, long *blocks,
                                      int nblocks, loff_t newsize)
{
        return ext3_prep_san_write(inode, blocks, nblocks, newsize);
}
902
903 static int fsfilt_ext3_read_record(struct file * file, void *buf,
904                                    int size, loff_t *offs)
905 {
906         struct inode *inode = file->f_dentry->d_inode;
907         unsigned long block;
908         struct buffer_head *bh;
909         int err, blocksize, csize, boffs;
910
911         /* prevent reading after eof */
912         lock_kernel();
913         if (inode->i_size < *offs + size) {
914                 size = inode->i_size - *offs;
915                 unlock_kernel();
916                 if (size < 0) {
917                         CERROR("size %llu is too short for read %u@%llu\n",
918                                inode->i_size, size, *offs);
919                         return -EIO;
920                 } else if (size == 0) {
921                         return 0;
922                 }
923         } else {
924                 unlock_kernel();
925         }
926
927         blocksize = 1 << inode->i_blkbits;
928
929         while (size > 0) {
930                 block = *offs >> inode->i_blkbits;
931                 boffs = *offs & (blocksize - 1);
932                 csize = min(blocksize - boffs, size);
933                 bh = ext3_bread(NULL, inode, block, 0, &err);
934                 if (!bh) {
935                         CERROR("can't read block: %d\n", err);
936                         return err;
937                 }
938
939                 memcpy(buf, bh->b_data + boffs, csize);
940                 brelse(bh);
941
942                 *offs += csize;
943                 buf += csize;
944                 size -= csize;
945         }
946         return 0;
947 }
948
949 static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
950                                     loff_t *offs, int force_sync)
951 {
952         struct buffer_head *bh = NULL;
953         unsigned long block;
954         struct inode *inode = file->f_dentry->d_inode;
955         loff_t old_size = inode->i_size, offset = *offs;
956         loff_t new_size = inode->i_size;
957         journal_t *journal;
958         handle_t *handle;
959         int err, block_count = 0, blocksize, size, boffs;
960
961         /* Determine how many transaction credits are needed */
962         blocksize = 1 << inode->i_blkbits;
963         block_count = (*offs & (blocksize - 1)) + bufsize;
964         block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
965
966         journal = EXT3_SB(inode->i_sb)->s_journal;
967         lock_kernel();
968         handle = journal_start(journal,
969                                block_count * EXT3_DATA_TRANS_BLOCKS + 2);
970         unlock_kernel();
971         if (IS_ERR(handle)) {
972                 CERROR("can't start transaction\n");
973                 return PTR_ERR(handle);
974         }
975
976         while (bufsize > 0) {
977                 if (bh != NULL)
978                         brelse(bh);
979
980                 block = offset >> inode->i_blkbits;
981                 boffs = offset & (blocksize - 1);
982                 size = min(blocksize - boffs, bufsize);
983                 bh = ext3_bread(handle, inode, block, 1, &err);
984                 if (!bh) {
985                         CERROR("can't read/create block: %d\n", err);
986                         goto out;
987                 }
988
989                 err = ext3_journal_get_write_access(handle, bh);
990                 if (err) {
991                         CERROR("journal_get_write_access() returned error %d\n",
992                                err);
993                         goto out;
994                 }
995                 LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
996                 memcpy(bh->b_data + boffs, buf, size);
997                 err = ext3_journal_dirty_metadata(handle, bh);
998                 if (err) {
999                         CERROR("journal_dirty_metadata() returned error %d\n",
1000                                err);
1001                         goto out;
1002                 }
1003                 if (offset + size > new_size)
1004                         new_size = offset + size;
1005                 offset += size;
1006                 bufsize -= size;
1007                 buf += size;
1008         }
1009
1010         if (force_sync)
1011                 handle->h_sync = 1; /* recovery likes this */
1012 out:
1013         if (bh)
1014                 brelse(bh);
1015
1016         /* correct in-core and on-disk sizes */
1017         if (new_size > inode->i_size) {
1018                 lock_kernel();
1019                 if (new_size > inode->i_size)
1020                         inode->i_size = new_size;
1021                 if (inode->i_size > EXT3_I(inode)->i_disksize)
1022                         EXT3_I(inode)->i_disksize = inode->i_size;
1023                 if (inode->i_size > old_size)
1024                         mark_inode_dirty(inode);
1025                 unlock_kernel();
1026         }
1027
1028         lock_kernel();
1029         journal_stop(handle);
1030         unlock_kernel();
1031
1032         if (err == 0)
1033                 *offs = offset;
1034         return err;
1035 }
1036
/* Per-superblock fsfilt setup hook.  Currently only enables parallel
 * directory operations (PDIROPS) when the kernel carries that ext3 patch;
 * the dx_lock hooks are disabled (#if 0).  Always returns 0. */
static int fsfilt_ext3_setup(struct super_block *sb)
{
#if 0
        EXT3_SB(sb)->dx_lock = fsfilt_ext3_dx_lock;
        EXT3_SB(sb)->dx_unlock = fsfilt_ext3_dx_unlock;
#endif
#ifdef S_PDIROPS
        CWARN("Enabling PDIROPS\n");
        set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
        sb->s_flags |= S_PDIROPS;
#endif
        return 0;
}
1050
1051 static int fsfilt_ext3_set_xattr(struct inode * inode, void *handle, char *name,
1052                                  void *buffer, int buffer_size)
1053 {
1054         int rc = 0;
1055
1056         lock_kernel();
1057
1058         rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
1059                                    name, buffer, buffer_size, 0);
1060         unlock_kernel();
1061         if (rc)
1062                 CERROR("set xattr %s from inode %lu: rc %d\n",
1063                        name,  inode->i_ino, rc);
1064         return rc;
1065 }
1066
1067 static int fsfilt_ext3_get_xattr(struct inode *inode, char *name,
1068                                  void *buffer, int buffer_size)
1069 {
1070         int rc = 0;
1071         lock_kernel();
1072
1073         rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
1074                             name, buffer, buffer_size);
1075         unlock_kernel();
1076
1077         if (buffer == NULL)
1078                 return (rc == -ENODATA) ? 0 : rc;
1079         if (rc < 0) {
1080                 CDEBUG(D_INFO, "error getting EA %s from inode %lu: rc %d\n",
1081                        name,  inode->i_ino, rc);
1082                 memset(buffer, 0, buffer_size);
1083                 return (rc == -ENODATA) ? 0 : rc;
1084         }
1085
1086         return rc;
1087 }
1088
1089 /* If fso is NULL, op is FSFILT operation, otherwise op is number of fso
1090    objects. Logs is number of logfiles to update */
1091 static int fsfilt_ext3_get_op_len(int op, struct fsfilt_objinfo *fso, int logs)
1092 {
1093         if ( !fso ) {
1094                 switch(op) {
1095                 case FSFILT_OP_CREATE:
1096                                  /* directory leaf, index & indirect & EA*/
1097                         return 4 + 3 * logs;
1098                 case FSFILT_OP_UNLINK:
1099                         return 3 * logs;
1100                 }
1101         } else {
1102                 int i;
1103                 int needed = 0;
1104                 struct super_block *sb = fso->fso_dentry->d_inode->i_sb;
1105                 int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
1106                 int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
1107                 for (i = 0; i < op; i++, fso++) {
1108                         int nblocks = fso->fso_bufcnt * blockpp;
1109                         int ndindirect = min(nblocks, addrpp + 1);
1110                         int nindir = nblocks + ndindirect + 1;
1111
1112                         needed += nindir;
1113                 }
1114                 return needed + 3 * logs;
1115         }
1116
1117         return 0;
1118 }
1119
/* fsfilt operations vector registered for fs_type "ext3": maps the generic
 * Lustre fsfilt interface onto the ext3-specific implementations in this
 * file (some defined above this chunk, e.g. fsfilt_ext3_start). */
static struct fsfilt_operations fsfilt_ext3_ops = {
        .fs_type                = "ext3",
        .fs_owner               = THIS_MODULE,
        .fs_start               = fsfilt_ext3_start,
        .fs_brw_start           = fsfilt_ext3_brw_start,
        .fs_commit              = fsfilt_ext3_commit,
        .fs_commit_async        = fsfilt_ext3_commit_async,
        .fs_commit_wait         = fsfilt_ext3_commit_wait,
        .fs_setattr             = fsfilt_ext3_setattr,
        .fs_iocontrol           = fsfilt_ext3_iocontrol,
        .fs_set_md              = fsfilt_ext3_set_md,
        .fs_get_md              = fsfilt_ext3_get_md,
        .fs_readpage            = fsfilt_ext3_readpage,
        .fs_add_journal_cb      = fsfilt_ext3_add_journal_cb,
        .fs_statfs              = fsfilt_ext3_statfs,
        .fs_sync                = fsfilt_ext3_sync,
        .fs_map_inode_pages     = fsfilt_ext3_map_inode_pages,
        .fs_prep_san_write      = fsfilt_ext3_prep_san_write,
        .fs_write_record        = fsfilt_ext3_write_record,
        .fs_read_record         = fsfilt_ext3_read_record,
        .fs_setup               = fsfilt_ext3_setup,
        .fs_getpage             = fsfilt_ext3_getpage,
        .fs_send_bio            = fsfilt_ext3_send_bio,
        .fs_set_xattr           = fsfilt_ext3_set_xattr,
        .fs_get_xattr           = fsfilt_ext3_get_xattr,
        .fs_get_op_len          = fsfilt_ext3_get_op_len,
};
1147
1148 static int __init fsfilt_ext3_init(void)
1149 {
1150         int rc;
1151
1152         fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
1153                                       sizeof(struct fsfilt_cb_data), 0,
1154                                       0, NULL, NULL);
1155         if (!fcb_cache) {
1156                 CERROR("error allocating fsfilt journal callback cache\n");
1157                 GOTO(out, rc = -ENOMEM);
1158         }
1159
1160         rc = fsfilt_register_ops(&fsfilt_ext3_ops);
1161
1162         if (rc)
1163                 kmem_cache_destroy(fcb_cache);
1164 out:
1165         return rc;
1166 }
1167
1168 static void __exit fsfilt_ext3_exit(void)
1169 {
1170         int rc;
1171
1172         fsfilt_unregister_ops(&fsfilt_ext3_ops);
1173         rc = kmem_cache_destroy(fcb_cache);
1174
1175         if (rc || atomic_read(&fcb_cache_count)) {
1176                 CERROR("can't free fsfilt callback cache: count %d, rc = %d\n",
1177                        atomic_read(&fcb_cache_count), rc);
1178         }
1179 }
1180
1181 module_init(fsfilt_ext3_init);
1182 module_exit(fsfilt_ext3_exit);
1183
1184 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
1185 MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
1186 MODULE_LICENSE("GPL");