1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/fs/obdfilter/filter.c
6 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
8 * This code is issued under the GNU General Public License.
9 * See the file COPYING in this distribution
11 * by Peter Braam <braam@clusterfs.com>
12 * and Andreas Dilger <adilger@clusterfs.com>
16 #define DEBUG_SUBSYSTEM S_FILTER
18 #include <linux/module.h>
19 #include <linux/pagemap.h>
21 #include <linux/dcache.h>
22 #include <linux/obd_class.h>
23 #include <linux/lustre_dlm.h>
24 #include <linux/obd_filter.h>
25 #include <linux/ext3_jbd.h>
26 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
27 #include <linux/extN_jbd.h>
29 #include <linux/quotaops.h>
30 #include <linux/init.h>
31 #include <linux/stringify.h>
35 #define FILTER_ROOTINO 2
36 #define FILTER_ROOTINO_STR __stringify(FILTER_ROOTINO)
/*
 * Map (mode & S_IFMT) >> S_SHIFT to the single-letter subdirectory of
 * "O/" where objects of that type live: R=regular, D=dir, C=char dev,
 * B=block dev, F=fifo, S=socket, L=symlink.  Unlisted slots are NULL.
 *
 * NOTE(review): this listing has gaps in its embedded line numbers
 * (e.g. the closing "};" around original line 48 is absent) -- lines
 * appear to have been dropped; recover the full file from VCS before
 * changing any code here.
 */
39 static char *obd_type_by_mode[S_IFMT >> S_SHIFT] = {
41 [S_IFREG >> S_SHIFT] "R",
42 [S_IFDIR >> S_SHIFT] "D",
43 [S_IFCHR >> S_SHIFT] "C",
44 [S_IFBLK >> S_SHIFT] "B",
45 [S_IFIFO >> S_SHIFT] "F",
46 [S_IFSOCK >> S_SHIFT] "S",
47 [S_IFLNK >> S_SHIFT] "L"
/*
 * Return the one-letter type-directory name for a file mode, indexing
 * obd_type_by_mode by the S_IFMT bits.  May return NULL for mode bits
 * with no table entry -- callers pass the result straight to sprintf,
 * so a NULL here would be unsafe (worth auditing).
 */
50 static inline const char *obd_mode_to_type(int mode)
52 return obd_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
55 /* write the pathname into the string */
/*
 * Format the object's relative path "O/<type>/<id>" into buf and return
 * the character count (sprintf's return value).  Caller must provide a
 * buffer large enough for the longest possible 64-bit id.
 */
56 static int filter_id(char *buf, obd_id id, obd_mode mode)
58 return sprintf(buf, "O/%s/"LPU64, obd_mode_to_type(mode), id);
/*
 * Debug wrapper around dput(): log the dentry and its post-drop
 * refcount, and assert the count is still positive before releasing.
 * NOTE(review): the actual dput() call (original line ~66) is missing
 * from this listing.
 */
61 static inline void f_dput(struct dentry *dentry)
63 CDEBUG(D_INODE, "putting %s: %p, count = %d\n",
64 dentry->d_name.name, dentry, atomic_read(&dentry->d_count) - 1);
65 LASSERT(atomic_read(&dentry->d_count) > 0);
69 /* setup the object store with correct subdirectories */
/*
 * One-time initialization of the on-disk object store layout, run with
 * the filter's fs context pushed:
 *   - create/open the top-level "O" (objects), "P", and "D" (data/meta)
 *     directories in the backing filesystem root;
 *   - create the well-known root object (FILTER_ROOTINO) under D;
 *   - pre-create one "O/<letter>" directory per object type and cache
 *     its dentry in fo_dentry_O_mode[] to avoid repeated lookups;
 *   - open/create "D/status", steal the backing fs's file/inode/address
 *     space ops into the filter, and read (or seed) the last-used
 *     object id from it into fo_lastino.
 * Returns 0 on success, negative errno on failure; error paths unwind
 * the cached dentries (out_O_mode labels below).
 *
 * NOTE(review): the "P" dentry created at line 91 is never stored or
 * released in the visible code -- possible leak, confirm against the
 * complete file.  Declarations for rc/root/mode/file/inode/lastino and
 * several braces are among the lines dropped from this listing.
 */
70 static int filter_prep(struct obd_device *obddev)
72 struct obd_run_ctxt saved;
73 struct filter_obd *filter = &obddev->u.filter;
74 struct dentry *dentry;
82 push_ctxt(&saved, &filter->fo_ctxt, NULL);
83 dentry = simple_mkdir(current->fs->pwd, "O", 0700);
84 CDEBUG(D_INODE, "got/created O: %p\n", dentry);
87 CERROR("cannot open/create O: rc = %d\n", rc);
90 filter->fo_dentry_O = dentry;
91 dentry = simple_mkdir(current->fs->pwd, "P", 0700);
92 CDEBUG(D_INODE, "got/created P: %p\n", dentry);
95 CERROR("cannot open/create P: rc = %d\n", rc);
99 dentry = simple_mkdir(current->fs->pwd, "D", 0700);
100 CDEBUG(D_INODE, "got/created D: %p\n", dentry);
101 if (IS_ERR(dentry)) {
102 rc = PTR_ERR(dentry);
103 CERROR("cannot open/create D: rc = %d\n", rc);
107 root = simple_mknod(dentry, FILTER_ROOTINO_STR, S_IFREG | 0755);
111 CERROR("OBD filter: cannot open/create root %d: rc = %d\n",
118 * Create directories and/or get dentries for each object type.
119 * This saves us from having to do multiple lookups for each one.
121 for (mode = 0; mode < (S_IFMT >> S_SHIFT); mode++) {
122 char *type = obd_type_by_mode[mode];
/* slots with no type letter get no directory */
125 filter->fo_dentry_O_mode[mode] = NULL;
128 dentry = simple_mkdir(filter->fo_dentry_O, type, 0700);
129 CDEBUG(D_INODE, "got/created O/%s: %p\n", type, dentry);
130 if (IS_ERR(dentry)) {
131 rc = PTR_ERR(dentry);
132 CERROR("cannot create O/%s: rc = %d\n", type, rc);
133 GOTO(out_O_mode, rc);
135 filter->fo_dentry_O_mode[mode] = dentry;
138 file = filp_open("D/status", O_RDWR | O_CREAT, 0700);
139 if ( !file || IS_ERR(file) ) {
141 CERROR("OBD filter: cannot open/create status %s: rc = %d\n",
143 GOTO(out_O_mode, rc);
146 /* steal operations */
147 inode = file->f_dentry->d_inode;
148 filter->fo_fop = file->f_op;
149 filter->fo_iop = inode->i_op;
150 filter->fo_aops = inode->i_mapping->a_ops;
/* empty status file: seed it with the initial lastino value */
152 if (inode->i_size == 0) {
153 __u64 disk_lastino = cpu_to_le64(lastino);
154 ssize_t retval = file->f_op->write(file, (char *)&disk_lastino,
155 sizeof(disk_lastino),
157 if (retval != sizeof(disk_lastino)) {
158 CDEBUG(D_INODE, "OBD filter: error writing lastino\n");
160 GOTO(out_O_mode, rc = -EIO);
/* existing status file: read back the persisted lastino */
164 ssize_t retval = file->f_op->read(file, (char *)&disk_lastino,
165 sizeof(disk_lastino),
167 if (retval != sizeof(disk_lastino)) {
168 CDEBUG(D_INODE, "OBD filter: error reading lastino\n");
170 GOTO(out_O_mode, rc = -EIO);
172 lastino = le64_to_cpu(disk_lastino);
174 filter->fo_lastino = lastino;
/* error unwind: drop per-type dentries, then the O directory */
185 struct dentry *dentry = filter->fo_dentry_O_mode[mode];
188 filter->fo_dentry_O_mode[mode] = NULL;
192 f_dput(filter->fo_dentry_O);
193 filter->fo_dentry_O = NULL;
197 /* cleanup the filter: write last used object id to status file */
/*
 * Mirror of filter_prep() for shutdown: persist fo_lastino (little-
 * endian) back to "D/status", close it, then release all cached
 * per-type dentries and the "O" directory dentry.  Write/close errors
 * are logged but not propagated (void return).
 */
198 static void filter_post(struct obd_device *obddev)
200 struct obd_run_ctxt saved;
201 struct filter_obd *filter = &obddev->u.filter;
207 push_ctxt(&saved, &filter->fo_ctxt, NULL);
208 file = filp_open("D/status", O_RDWR | O_CREAT, 0700);
210 CERROR("OBD filter: cannot create status file\n");
215 disk_lastino = cpu_to_le64(filter->fo_lastino);
216 rc = file->f_op->write(file, (char *)&disk_lastino,
217 sizeof(disk_lastino), &file->f_pos);
218 if (rc != sizeof(disk_lastino))
219 CERROR("OBD filter: error writing lastino: rc = %ld\n", rc);
221 rc = filp_close(file, NULL);
223 CERROR("OBD filter: cannot close status file: rc = %ld\n", rc);
/* drop the dentries cached by filter_prep() */
225 for (mode = 0; mode < (S_IFMT >> S_SHIFT); mode++) {
226 struct dentry *dentry = filter->fo_dentry_O_mode[mode];
229 filter->fo_dentry_O_mode[mode] = NULL;
232 f_dput(filter->fo_dentry_O);
/*
 * Allocate the next object id: atomically (under fo_lock) increment
 * and return fo_lastino.  The new value is only persisted at
 * filter_post() time -- a crash loses allocations, hence the FIXME.
 */
238 static __u64 filter_next_id(struct obd_device *obddev)
242 spin_lock(&obddev->u.filter.fo_lock);
243 id = ++obddev->u.filter.fo_lastino;
244 spin_unlock(&obddev->u.filter.fo_lock);
246 /* FIXME: write the lastino to disk here */
250 /* how to get files, dentries, inodes from object id's */
251 /* parent i_sem is already held if needed for exclusivity */
/*
 * Look up the dentry for object <id> of the given type under dparent
 * (the per-type "O/<letter>" directory).  Validates that the device is
 * set up, the id is non-zero, and the type carries S_IFMT bits; returns
 * the (referenced) child dentry or an ERR_PTR.  A negative dentry (no
 * inode) is a valid return -- callers check d_inode themselves.
 */
252 static struct dentry *filter_fid2dentry(struct obd_device *obddev,
253 struct dentry *dparent,
254 __u64 id, __u32 type)
256 struct super_block *sb = obddev->u.filter.fo_sb;
257 struct dentry *dchild;
262 if (!sb || !sb->s_dev) {
263 CERROR("fatal: device not initialized.\n");
264 RETURN(ERR_PTR(-ENXIO));
/* object id 0 is reserved / never valid */
268 CERROR("fatal: invalid object #0\n");
270 RETURN(ERR_PTR(-ESTALE));
273 if (!(type & S_IFMT)) {
274 CERROR("OBD %s, object "LPU64" has bad type: %o\n",
275 __FUNCTION__, id, type);
276 RETURN(ERR_PTR(-EINVAL));
279 len = sprintf(name, LPU64, id);
280 CDEBUG(D_INODE, "opening object O/%s/%s\n", obd_mode_to_type(type),
282 dchild = lookup_one_len(name, dparent, len);
283 if (IS_ERR(dchild)) {
284 CERROR("child lookup error %ld\n", PTR_ERR(dchild));
288 CDEBUG(D_INODE, "got child obj O/%s/%s: %p, count = %d\n",
289 obd_mode_to_type(type), name, dchild,
290 atomic_read(&dchild->d_count));
292 LASSERT(atomic_read(&dchild->d_count) > 0);
/*
 * Open an object read-only by id/type: validate device, id and type
 * (same checks as filter_fid2dentry), build the "O/<type>/<id>" path
 * with filter_id(), then filp_open() it inside the filter's fs context.
 * Returns the struct file * or an ERR_PTR.
 */
297 static struct file *filter_obj_open(struct obd_device *obddev,
298 __u64 id, __u32 type)
300 struct super_block *sb = obddev->u.filter.fo_sb;
301 struct obd_run_ctxt saved;
306 if (!sb || !sb->s_dev) {
307 CERROR("fatal: device not initialized.\n");
308 RETURN(ERR_PTR(-ENXIO));
312 CERROR("fatal: invalid obdo "LPU64"\n", id);
313 RETURN(ERR_PTR(-ESTALE));
316 if (!(type & S_IFMT)) {
317 CERROR("OBD %s, object "LPU64" has bad type: %o\n",
318 __FUNCTION__, id, type);
319 RETURN(ERR_PTR(-EINVAL));
322 filter_id(name, id, type);
323 push_ctxt(&saved, &obddev->u.filter.fo_ctxt, NULL);
324 file = filp_open(name, O_RDONLY | O_LARGEFILE, 0 /* type? */);
327 CDEBUG(D_INODE, "opening obdo %s: rc = %p\n", name, file);
/*
 * Return the cached dentry of the per-type object directory
 * ("O/<letter>") for the given mode, as populated by filter_prep().
 * May be NULL for mode bits with no type directory.
 */
334 static struct dentry *filter_parent(struct obd_device *obddev, obd_mode mode)
336 struct filter_obd *filter = &obddev->u.filter;
338 return filter->fo_dentry_O_mode[(mode & S_IFMT) >> S_SHIFT];
/*
 * obd connect method: delegate entirely to class_connect(); the recovd
 * and recovery-callback parameters are accepted but unused in the
 * visible code.
 */
343 static int filter_connect(struct lustre_handle *conn, struct obd_device *obd,
344 obd_uuid_t cluuid, struct recovd_obd *recovd,
345 ptlrpc_recovery_cb_t recover)
350 rc = class_connect(conn, obd, cluuid);
/*
 * obd disconnect method: cancel all DLM locks held on behalf of this
 * export, then tear down the connection via class_disconnect().
 * Preallocated-inode cleanup is still a TODO (XXX below).
 */
356 static int filter_disconnect(struct lustre_handle *conn)
358 struct obd_export *export = class_conn2export(conn);
362 ldlm_cancel_locks_for_export(export);
364 rc = class_disconnect(conn);
368 /* XXX cleanup preallocated inodes */
372 /* mount the file system (secretly) */
/*
 * obd setup method: kern-mount the backing filesystem named by
 * ioc_inlbuf1 (device) / ioc_inlbuf2 (fstype), record the mount, sb
 * and fstype in the filter, build the run context used by
 * push_ctxt(), lay out the object store via filter_prep(), and create
 * the server-side LDLM namespace and ptlrpc client.  Error paths
 * unwind the fstype allocation and the mount.
 *
 * NOTE(review): line 390 has a stray double semicolon (";;") --
 * harmless but should be cleaned up in the canonical source.
 */
373 static int filter_setup(struct obd_device *obddev, obd_count len, void *buf)
375 struct obd_ioctl_data* data = buf;
376 struct filter_obd *filter;
377 struct vfsmount *mnt;
381 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2)
385 mnt = do_kern_mount(data->ioc_inlbuf2, 0, data->ioc_inlbuf1, NULL);
390 filter = &obddev->u.filter;;
391 filter->fo_vfsmnt = mnt;
392 filter->fo_fstype = strdup(data->ioc_inlbuf2);
393 filter->fo_sb = mnt->mnt_root->d_inode->i_sb;
394 /* XXX is this even possible if do_kern_mount succeeded? */
396 GOTO(err_kfree, err = -ENODEV);
398 OBD_SET_CTXT_MAGIC(&filter->fo_ctxt);
399 filter->fo_ctxt.pwdmnt = mnt;
400 filter->fo_ctxt.pwd = mnt->mnt_root;
401 filter->fo_ctxt.fs = get_ds();
403 err = filter_prep(obddev);
405 GOTO(err_kfree, err);
406 spin_lock_init(&filter->fo_lock);
408 obddev->obd_namespace =
409 ldlm_namespace_new("filter-tgt", LDLM_NAMESPACE_SERVER);
410 if (obddev->obd_namespace == NULL)
413 ptlrpc_init_client(LDLM_REQUEST_PORTAL, LDLM_REPLY_PORTAL,
414 "filter_ldlm_client", &obddev->obd_ldlm_client);
/* error unwind: free fstype string, then drop the mount */
419 kfree(filter->fo_fstype);
421 mntput(filter->fo_vfsmnt);
/*
 * obd cleanup method: force-disconnect any remaining clients, free the
 * LDLM namespace, shrink the dcache under the backing sb's root, then
 * drop the mount and free the fstype string recorded by filter_setup().
 * Bails out early if fo_sb was never set (setup failed or never ran).
 */
431 static int filter_cleanup(struct obd_device * obddev)
433 struct super_block *sb;
436 if (!list_empty(&obddev->obd_exports)) {
437 CERROR("still has clients!\n");
438 class_disconnect_all(obddev);
439 if (!list_empty(&obddev->obd_exports)) {
440 CERROR("still has exports after forced cleanup?\n");
445 ldlm_namespace_free(obddev->obd_namespace);
447 sb = obddev->u.filter.fo_sb;
448 if (!obddev->u.filter.fo_sb)
453 shrink_dcache_parent(sb->s_root);
455 mntput(obddev->u.filter.fo_vfsmnt);
456 obddev->u.filter.fo_sb = 0;
457 kfree(obddev->u.filter.fo_fstype);
/*
 * Fill the obdo from the backing inode via obdo_from_inode(), while
 * preserving the obdo's own S_IFMT bits (the object id must not be
 * overwritten by the inode number, and the on-disk object is always a
 * regular file regardless of the logical type).  For char/block
 * devices, also propagate the device number into o_rdev.
 */
466 static inline void filter_from_inode(struct obdo *oa, struct inode *inode,
469 int type = oa->o_mode & S_IFMT;
472 CDEBUG(D_INFO, "src inode %ld (%p), dst obdo %ld valid 0x%08x\n",
473 inode->i_ino, inode, (long)oa->o_id, valid);
474 /* Don't copy the inode number in place of the object ID */
475 obdo_from_inode(oa, inode, valid);
476 oa->o_mode &= ~S_IFMT;
479 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
480 obd_rdev rdev = kdev_t_to_nr(inode->i_rdev);
482 oa->o_valid |= OBD_MD_FLRDEV;
/*
 * obd getattr method: resolve the object dentry from oa->o_id / o_mode
 * and copy the inode attributes requested by oa->o_valid into the obdo.
 * Fails with -ENOENT for a negative dentry (object never created).
 * NOTE(review): class_conn2obd() is called twice (lines 491 and 501);
 * one call is redundant in the visible code.
 */
488 static int filter_getattr(struct lustre_handle *conn, struct obdo *oa,
489 struct lov_stripe_md *md)
491 struct obd_device *obddev = class_conn2obd(conn);
492 struct dentry *dentry;
496 if (!class_conn2export(conn)) {
497 CDEBUG(D_IOCTL, "fatal: invalid client "LPX64"\n", conn->addr);
501 obddev = class_conn2obd(conn);
502 dentry = filter_fid2dentry(obddev, filter_parent(obddev, oa->o_mode),
503 oa->o_id, oa->o_mode);
505 RETURN(PTR_ERR(dentry));
507 if (!dentry->d_inode) {
508 CERROR("getattr on non-existent object: "LPU64"\n", oa->o_id);
509 GOTO(out_getattr, rc = -ENOENT);
512 filter_from_inode(oa, dentry->d_inode, oa->o_valid);
/*
 * obd setattr method: build an iattr from the obdo (forcing S_IFREG,
 * since all objects are stored as regular files), look up the object,
 * and apply the change via the filesystem's setattr op or the generic
 * inode_setattr().  For truncates (ATTR_SIZE) the updated size/block/
 * time attributes are copied back into the obdo for the reply.
 */
519 static int filter_setattr(struct lustre_handle *conn, struct obdo *oa,
520 struct lov_stripe_md *md)
522 struct obd_run_ctxt saved;
523 struct obd_device *obd = class_conn2obd(conn);
524 struct dentry *dentry;
530 iattr_from_obdo(&iattr, oa, oa->o_valid);
531 iattr.ia_mode = (iattr.ia_mode & ~S_IFMT) | S_IFREG;
532 dentry = filter_fid2dentry(obd, filter_parent(obd, iattr.ia_mode),
533 oa->o_id, iattr.ia_mode);
535 RETURN(PTR_ERR(dentry));
537 inode = dentry->d_inode;
539 CERROR("setattr on non-existent object: "LPU64"\n", oa->o_id);
540 GOTO(out_setattr, rc = -ENOENT);
544 if (iattr.ia_valid & ATTR_SIZE)
546 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
547 if (inode->i_op->setattr)
548 rc = inode->i_op->setattr(dentry, &iattr);
550 rc = inode_setattr(inode, &iattr);
/* report the post-truncate state back to the client */
552 if (iattr.ia_valid & ATTR_SIZE) {
554 oa->o_valid = OBD_MD_FLBLOCKS | OBD_MD_FLCTIME | OBD_MD_FLMTIME;
555 obdo_from_inode(oa, inode, oa->o_valid);
/*
 * obd open method: validate the export, resolve the object dentry
 * (keeping the reference as the "open" handle -- see the paired
 * f_dput() calls in filter_close), and return the object's current
 * size/blocks/times in the obdo.
 */
564 static int filter_open(struct lustre_handle *conn, struct obdo *oa,
565 struct lov_stripe_md *ea)
567 struct obd_export *export;
568 struct obd_device *obd;
569 struct dentry *dentry;
572 export = class_conn2export(conn);
574 CDEBUG(D_IOCTL, "fatal: invalid client "LPX64"\n", conn->addr);
578 obd = export->exp_obd;
579 dentry = filter_fid2dentry(obd, filter_parent(obd, oa->o_mode),
580 oa->o_id, oa->o_mode);
582 RETURN(PTR_ERR(dentry));
584 if (!dentry->d_inode) {
585 CERROR("opening non-existent objid "LPX64"\n", oa->o_id);
589 filter_from_inode(oa, dentry->d_inode, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
590 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
/*
 * obd close method: re-resolve the object dentry and drop two
 * references -- one taken by this lookup and one held since
 * filter_open().  The LASSERT checks the open reference is present.
 */
595 static int filter_close(struct lustre_handle *conn, struct obdo *oa,
596 struct lov_stripe_md *ea)
598 struct obd_device *obd;
599 struct dentry *dentry;
602 obd = class_conn2obd(conn);
604 CDEBUG(D_IOCTL, "fatal: invalid client "LPX64"\n", conn->addr);
608 dentry = filter_fid2dentry(obd, filter_parent(obd, oa->o_mode),
609 oa->o_id, oa->o_mode);
611 RETURN(PTR_ERR(dentry));
612 LASSERT(atomic_read(&dentry->d_count) > 1);
614 f_dput(dentry); /* for the close */
615 f_dput(dentry); /* for this call */
/*
 * obd create method: allocate a fresh object id via filter_next_id(),
 * mknod the backing regular file under O/R (all objects are stored as
 * S_IFREG), and return the new object's id and initial attributes in
 * the obdo.
 *
 * NOTE(review): line 636 tests "oa->o_mode && S_IFMT" -- logical AND.
 * Comparing with the identical checks in filter_fid2dentry (line 273)
 * and filter_obj_open (line 316), this is almost certainly meant to be
 * the bitwise "oa->o_mode & S_IFMT"; as written it only rejects
 * o_mode == 0.  Fix in the canonical source.
 */
619 static int filter_create(struct lustre_handle* conn, struct obdo *oa,
620 struct lov_stripe_md **ea)
623 struct obd_run_ctxt saved;
626 struct obd_device *obd = class_conn2obd(conn);
627 struct filter_obd *filter = &obd->u.filter;
632 CERROR("invalid client "LPX64"\n", conn->addr);
636 if (!(oa->o_mode && S_IFMT)) {
637 CERROR("OBD %s, object "LPU64" has bad type: %o\n",
638 __FUNCTION__, oa->o_id, oa->o_mode);
642 oa->o_id = filter_next_id(obd);
644 //filter_id(name, oa->o_id, oa->o_mode);
645 sprintf(name, LPU64, oa->o_id);
646 mode = (oa->o_mode & ~S_IFMT) | S_IFREG;
647 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
648 new = simple_mknod(filter->fo_dentry_O_mode[S_IFREG >> S_SHIFT], name,
652 CERROR("Error mknod obj %s, err %ld\n", name, PTR_ERR(new));
656 /* Set flags for fields we have set in the inode struct */
657 oa->o_valid = OBD_MD_FLID | OBD_MD_FLBLKSZ | OBD_MD_FLBLOCKS |
658 OBD_MD_FLMTIME | OBD_MD_FLATIME | OBD_MD_FLCTIME;
659 filter_from_inode(oa, new->d_inode, oa->o_valid);
/*
 * obd destroy method: unlink the backing file for oa->o_id.  Takes the
 * parent directory's i_sem for the lookup+unlink, refuses negative
 * dentries and inodes with nlink != 1, forces i_mode to S_IFREG before
 * vfs_unlink() (objects are stored as regular files), and warns if the
 * inode is still in use (open-unlinked handling is an acknowledged
 * FIXME).
 */
665 static int filter_destroy(struct lustre_handle *conn, struct obdo *oa,
666 struct lov_stripe_md *ea)
668 struct obd_device *obd;
669 struct filter_obd *filter;
670 struct obd_run_ctxt saved;
672 struct dentry *dir_dentry, *object_dentry;
676 obd = class_conn2obd(conn);
678 CERROR("invalid client "LPX64"\n", conn->addr);
682 CDEBUG(D_INODE, "destroying object "LPD64"\n", oa->o_id);
684 dir_dentry = filter_parent(obd, oa->o_mode);
685 down(&dir_dentry->d_inode->i_sem);
687 object_dentry = filter_fid2dentry(obd, dir_dentry, oa->o_id,
689 if (IS_ERR(object_dentry))
690 GOTO(out, rc = -ENOENT);
692 inode = object_dentry->d_inode;
694 CERROR("trying to destroy negative inode "LPX64"!\n", oa->o_id);
695 GOTO(out, rc = -ENOENT);
698 if (inode->i_nlink != 1) {
699 CERROR("destroying inode with nlink = %d\n", inode->i_nlink);
703 inode->i_mode = S_IFREG;
705 if (atomic_read(&inode->i_count) > 1) {
706 #warning FIXME: need to handle open-unlinked case and move to hold dir
707 CERROR("inode has count %d\n", atomic_read(&inode->i_count));
710 filter = &obd->u.filter;
711 push_ctxt(&saved, &filter->fo_ctxt, NULL);
713 rc = vfs_unlink(dir_dentry->d_inode, object_dentry);
715 f_dput(object_dentry);
719 up(&dir_dentry->d_inode->i_sem);
723 /* NB count and offset are used for punch, but not truncate */
/*
 * obd punch/truncate method: only full truncate-to-EOF is implemented;
 * a real punch (end != OBD_OBJECT_EOF) just logs an error and falls
 * through.  The actual work is delegated to filter_setattr() with the
 * size carried in the obdo.
 */
724 static int filter_truncate(struct lustre_handle *conn, struct obdo *oa,
725 struct lov_stripe_md *md,
726 obd_off start, obd_off end)
731 if (end != OBD_OBJECT_EOF)
732 CERROR("PUNCH not supported, only truncate works\n");
734 CDEBUG(D_INODE, "calling truncate for object "LPX64", valid = %x, "
735 "o_size = "LPD64"\n", oa->o_id, oa->o_valid, start);
737 error = filter_setattr(conn, oa, NULL);
/*
 * Page-cache-based bulk read/write: open the object as a regular file
 * and loop over the brw_page array, kmapping each page and calling the
 * stolen f_op->write / f_op->read for its (off, count) extent.  Reads
 * past EOF are satisfied with zeros without touching the filesystem.
 * Any short transfer aborts the whole request with -EIO.  The callback
 * argument is currently ignored (XXX below).
 *
 * NOTE(review): the loop index is "pg" but every access uses "pnum",
 * which is initialized to 0; whether pnum is incremented is not visible
 * in this listing (lines are missing) -- verify against the full file.
 */
741 static int filter_pgcache_brw(int cmd, struct lustre_handle *conn,
742 struct lov_stripe_md *lsm, obd_count oa_bufs,
743 struct brw_page *pga, brw_callback_t callback,
744 struct io_cb_data *data)
746 struct obd_run_ctxt saved;
747 struct super_block *sb;
748 int pnum; /* index to pages (bufs) */
749 unsigned long retval;
752 struct obd_device *obd = class_conn2obd(conn);
757 CDEBUG(D_IOCTL, "invalid client "LPX64"\n", conn->addr);
761 sb = obd->u.filter.fo_sb;
762 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
763 pnum = 0; /* pnum indexes buf 0..num_pages */
765 file = filter_obj_open(obd, lsm->lsm_object_id, S_IFREG);
767 GOTO(out, retval = PTR_ERR(file));
769 /* count doubles as retval */
770 for (pg = 0; pg < oa_bufs; pg++) {
771 CDEBUG(D_INODE, "OP %d obdo pgno: (%d) (%ld,"LPU64
772 ") off count ("LPU64",%d)\n",
773 cmd, pnum, file->f_dentry->d_inode->i_ino,
774 pga[pnum].off >> PAGE_CACHE_SHIFT, pga[pnum].off,
775 (int)pga[pnum].count);
776 if (cmd & OBD_BRW_WRITE) {
780 buffer = kmap(pga[pnum].pg);
781 retval = file->f_op->write(file, buffer,
784 kunmap(pga[pnum].pg);
785 CDEBUG(D_INODE, "retval %ld\n", retval);
/* read path: zero-fill beyond EOF instead of reading */
787 loff_t off = pga[pnum].off;
788 char *buffer = kmap(pga[pnum].pg);
790 if (off >= file->f_dentry->d_inode->i_size) {
791 memset(buffer, 0, pga[pnum].count);
792 retval = pga[pnum].count;
794 retval = file->f_op->read(file, buffer,
795 pga[pnum].count, &off);
797 kunmap(pga[pnum].pg);
799 if (retval != pga[pnum].count) {
801 GOTO(out, retval = -EIO);
803 CDEBUG(D_INODE, "retval %ld\n", retval);
807 /* sizes and blocks are set by generic_file_write */
808 /* ctimes/mtimes will follow with a setattr call */
811 /* XXX: do something with callback if it is set? */
816 error = (retval >= 0) ? 0 : retval;
821 * Calculate the number of buffer credits needed to write multiple pages in
822 * a single ext3/extN transaction. No, this shouldn't be here, but as yet
823 * ext3 doesn't have a nice API for calculating this sort of thing in advance.
825 * See comment above ext3_writepage_trans_blocks for details. We assume
826 * no data journaling is being done, but it does allow for all of the pages
827 * being non-contiguous. If we are guaranteed contiguous pages we could
828 * reduce the number of (d)indirect blocks a lot.
830 * With N blocks per page and P pages, for each inode we have at most:
832 * min(N*P, blocksize/4 + 1) dindirect blocks
835 * For the entire filesystem, we have at most:
836 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
837 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
840 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
/*
 * Worst-case journal credit estimate for writing the given I/O objects
 * in one ext3/extN transaction (see the block comment above for the
 * derivation).  Per object: data blocks plus (d)indirect blocks; the
 * bitmap and group-descriptor estimates are clamped to the actual
 * number of block groups / gdb blocks, and quota-file overhead is
 * added per enabled quota type.
 */
842 static int ext3_credits_needed(struct super_block *sb, int objcount,
843 struct obd_ioobj *obj)
845 struct obd_ioobj *o = obj;
846 int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
847 int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
850 int needed = objcount + 1;
853 for (i = 0; i < objcount; i++, o++) {
854 int nblocks = o->ioo_bufcnt * blockpp;
855 int ndindirect = min(nblocks, addrpp + 1);
856 int nindir = nblocks + ndindirect + 1;
858 nbitmaps += nindir + nblocks;
859 ngdblocks += nindir + nblocks;
864 /* Assumes ext3 and extN have same sb_info layout at the start. */
865 if (nbitmaps > EXT3_SB(sb)->s_groups_count)
866 nbitmaps = EXT3_SB(sb)->s_groups_count;
867 if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
868 ngdblocks = EXT3_SB(sb)->s_gdb_count;
870 needed += nbitmaps + ngdblocks;
873 /* We assume that there will be 1 bit set in s_dquot.flags for each
874 * quota file that is active. This is at least true for now.
876 needed += hweight32(sb_any_quota_enabled(sb)) *
877 EXT3_SINGLEDATA_TRANS_BLOCKS;
883 /* We have to start a huge journal transaction here to hold all of the
884 * metadata for the pages being written here. This is necessitated by
885 * the fact that we do lots of prepare_write operations before we do
886 * any of the matching commit_write operations, so even if we split
887 * up to use "smaller" transactions none of them could complete until
888 * all of them were opened. By having a single journal transaction,
889 * we eliminate duplicate reservations for common blocks like the
890 * superblock and group descriptors or bitmaps.
892 * We will start the transaction here, but each prepare_write will
893 * add a refcount to the transaction, and each commit_write will
894 * remove a refcount. The transaction will be closed when all of
895 * the pages have been written.
/*
 * Open the single umbrella journal handle for a bulk write (see the
 * block comment above): locate the ext3/extN journal, size the request
 * with ext3_credits_needed(), clamp it to the journal's per-handle
 * maximum (with a warning -- the estimate is deliberately pessimistic),
 * and journal_start().  Returns the handle_t * or an ERR_PTR from
 * journal_start().
 */
897 static void *ext3_filter_journal_start(struct filter_obd *filter,
898 int objcount, struct obd_ioobj *obj,
899 int niocount, struct niobuf_remote *nb)
901 journal_t *journal = NULL;
902 handle_t *handle = NULL;
905 /* It appears that some kernels have different values for
906 * EXT*_MAX_GROUP_LOADED (either 8 or 32), so we cannot
907 * assume anything after s_inode_bitmap_number is the same.
909 if (!strcmp(filter->fo_fstype, "ext3"))
910 journal = EXT3_SB(filter->fo_sb)->s_journal;
911 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
912 else if (!strcmp(filter->fo_fstype, "extN"))
913 journal = EXTN_SB(filter->fo_sb)->s_journal;
915 needed = ext3_credits_needed(filter->fo_sb, objcount, obj);
917 /* The number of blocks we could _possibly_ dirty can very large.
918 * We reduce our request if it is absurd (and we couldn't get that
919 * many credits for a single handle anyways).
921 * At some point we have to limit the size of I/Os sent at one time,
922 * increase the size of the journal, or we have to calculate the
923 * actual journal requirements more carefully by checking all of
924 * the blocks instead of being maximally pessimistic. It remains to
925 * be seen if this is a real problem or not.
927 if (needed > journal->j_max_transaction_buffers) {
928 CERROR("want too many journal credits (%d) using %d instead\n",
929 needed, journal->j_max_transaction_buffers);
930 needed = journal->j_max_transaction_buffers;
934 handle = journal_start(journal, needed);
937 CERROR("can't get handle for %d credits: rc = %ld\n", needed,
/*
 * Filesystem-dispatching wrapper for the journal-start above: stash
 * (and clear) any pre-existing current->journal_info so the new handle
 * can be installed, then call the ext3/extN implementation when the
 * backing fs matches.  journal_save is restored in
 * filter_journal_stop().
 */
943 static void *filter_journal_start(void **journal_save,
944 struct filter_obd *filter,
945 int objcount, struct obd_ioobj *obj,
946 int niocount, struct niobuf_remote *nb)
950 /* This may not be necessary - we probably never have a
951 * transaction started when we enter here, so we can
952 * remove the saving of the journal state entirely.
953 * For now leave it in just to see if it ever happens.
955 *journal_save = current->journal_info;
957 CERROR("Already have handle %p???\n", *journal_save);
959 current->journal_info = NULL;
962 if (!strcmp(filter->fo_fstype, "ext3") ||
963 !strcmp(filter->fo_fstype, "extN"))
964 handle = ext3_filter_journal_start(filter, objcount, obj,
/*
 * Drop the "parent" reference on the umbrella journal handle taken in
 * ext3_filter_journal_start(); the per-page references taken by
 * prepare_write are released in commit_write.  Returns journal_stop's
 * result.
 */
969 static int ext3_filter_journal_stop(void *handle)
973 /* We got a refcount on the handle for each call to prepare_write,
974 * so we can drop the "parent" handle here to avoid the need for
975 * osc to call back into filterobd to close the handle. The
976 * remaining references will be dropped in commit_write.
979 rc = journal_stop((handle_t *)handle);
/*
 * Counterpart of filter_journal_start(): stop the fs-specific handle
 * (ext3/extN only), log any error, and restore the journal_info saved
 * at start time.
 */
985 static int filter_journal_stop(void *journal_save, struct filter_obd *filter,
990 if (!strcmp(filter->fo_fstype, "ext3") ||
991 !strcmp(filter->fo_fstype, "extN"))
992 rc = ext3_filter_journal_stop(handle);
995 CERROR("error on journal stop: rc = %d\n", rc);
997 current->journal_info = journal_save;
/*
 * Release a page-cache page reference.  NOTE(review): comments in
 * filter_write_locked_page say this path also kunmaps pages mapped by
 * filter_get_page_write(); the kunmap line is not visible in this
 * listing -- confirm against the full file.
 */
1002 static inline void lustre_put_page(struct page *page)
1005 page_cache_release(page);
1009 #ifndef PageUptodate
1010 #define PageUptodate(page) Page_Uptodate(page)
/*
 * Read the page covering rnb->offset into the page cache via
 * mapping->a_ops->readpage and return it.  A page that is not
 * uptodate or carries an error flag is released and the function
 * fails with -EIO (via the err_page path).
 */
1012 static struct page *
1013 lustre_get_page_read(struct inode *inode,
1014 struct niobuf_remote *rnb)
1016 unsigned long index = rnb->offset >> PAGE_SHIFT;
1017 struct address_space *mapping = inode->i_mapping;
1021 page = read_cache_page(mapping, index,
1022 (filler_t*)mapping->a_ops->readpage, NULL);
1023 if (!IS_ERR(page)) {
1026 if (!PageUptodate(page)) {
1027 CERROR("page index %lu not uptodate\n", index);
1028 GOTO(err_page, rc = -EIO);
1030 if (PageError(page)) {
1031 CERROR("page index %lu has error\n", index);
1032 GOTO(err_page, rc = -EIO);
1038 lustre_put_page(page);
/*
 * Grab (and lock) the page at 'index' for writing and run the
 * filesystem's prepare_write over the whole page.  With (0, PAGE_SIZE)
 * this is a no-op for most filesystems since the entire page will be
 * overwritten.  On prepare_write failure or a page error flag the page
 * is unlocked and released (err_unlock path) and an ERR_PTR returned.
 */
1042 static struct page *
1043 lustre_get_page_write(struct inode *inode, unsigned long index)
1045 struct address_space *mapping = inode->i_mapping;
1049 page = grab_cache_page(mapping, index); /* locked page */
1051 if (!IS_ERR(page)) {
1053 /* Note: Called with "O" and "PAGE_SIZE" this is essentially
1054 * a no-op for most filesystems, because we write the whole
1055 * page. For partial-page I/O this will read in the page.
1057 rc = mapping->a_ops->prepare_write(NULL, page, 0, PAGE_SIZE);
1059 CERROR("page index %lu, rc = %d\n", index, rc);
1062 GOTO(err_unlock, rc);
1064 /* XXX not sure if we need this if we are overwriting page */
1065 if (PageError(page)) {
1066 CERROR("error on page index %lu, rc = %d\n", index, rc);
1068 GOTO(err_unlock, rc = -EIO);
1075 lustre_put_page(page);
/*
 * Complete a write on [from, to) of a prepared page: call the
 * filesystem's commit_write, wait for the I/O on synchronous inodes
 * (2.4 only; the 2.5 equivalent is an acknowledged TODO), mark the
 * page referenced and release it.  SetPageUptodate is intentionally
 * left to the client's commit_write.
 */
1079 static int lustre_commit_write(struct page *page, unsigned from, unsigned to)
1081 struct inode *inode = page->mapping->host;
1084 err = page->mapping->a_ops->commit_write(NULL, page, from, to);
1085 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1086 if (!err && IS_SYNC(inode))
1087 err = waitfor_one_page(page);
1089 #warning ADD 2.5 waiting code here?
1091 //SetPageUptodate(page); // the client commit_write will do this
1093 SetPageReferenced(page);
1095 lustre_put_page(page);
/*
 * Get a locked page for a bulk write.  Normally grabs the cache page
 * (nowait first, then blocking if *pglocked says we already hold one)
 * and runs prepare_write over the niobuf's extent.  If the cache page
 * is locked by someone else, falls back to a free temporary page
 * (flagged N_LOCAL_TEMP_PAGE) that is copied into the real page later
 * by filter_write_locked_page() -- the XXX comment documents a real
 * write-ordering race in that fallback.  The 0xBA memset is a
 * debugging pattern to catch unwritten regions.
 */
1099 struct page *filter_get_page_write(struct inode *inode,
1100 struct niobuf_remote *rnb,
1101 struct niobuf_local *lnb, int *pglocked)
1103 unsigned long index = rnb->offset >> PAGE_SHIFT;
1104 struct address_space *mapping = inode->i_mapping;
1109 //ASSERT_PAGE_INDEX(index, GOTO(err, rc = -EINVAL));
1111 page = grab_cache_page_nowait(mapping, index); /* locked page */
1113 page = grab_cache_page(mapping, index); /* locked page */
1116 /* This page is currently locked, so get a temporary page instead. */
1117 /* XXX I believe this is a very dangerous thing to do - consider if
1118 * we had multiple writers for the same file (definitely the case
1119 * if we are using this codepath). If writer A locks the page,
1120 * writer B writes to a copy (as here), writer A drops the page
1121 * lock, and writer C grabs the lock before B does, then B will
1122 * later overwrite the data from C, even if C had LDLM locked
1123 * and initiated the write after B did.
1127 CDEBUG(D_PAGE, "ino %ld page %ld locked\n", inode->i_ino,index);
1128 addr = __get_free_pages(GFP_KERNEL, 0); /* locked page */
1130 CERROR("no memory for a temp page\n");
1132 GOTO(err, rc = -ENOMEM);
1135 memset((void *)addr, 0xBA, PAGE_SIZE);
1136 page = virt_to_page(addr);
1138 page->index = index;
1139 lnb->flags |= N_LOCAL_TEMP_PAGE;
1140 } else if (!IS_ERR(page)) {
/* got the real cache page: prepare just the niobuf's extent */
1144 rc = mapping->a_ops->prepare_write(NULL, page,
1145 rnb->offset % PAGE_SIZE,
1148 CERROR("page index %lu, rc = %d\n", index, rc);
1151 GOTO(err_unlock, rc);
1153 /* XXX not sure if we need this if we are overwriting page */
1154 if (PageError(page)) {
1155 CERROR("error on page index %lu, rc = %d\n", index, rc);
1157 GOTO(err_unlock, rc = -EIO);
1164 lustre_put_page(page);
1170 * We need to balance prepare_write() calls with commit_write() calls.
1171 * If the page has been prepared, but we have no data for it, we don't
1172 * want to overwrite valid data on disk, but we still need to zero out
1173 * data for space which was newly allocated. Like part of what happens
1174 * in __block_prepare_write() for newly allocated blocks.
1176 * XXX currently __block_prepare_write() creates buffers for all the
1177 * pages, and the filesystems mark these buffers as BH_New if they
1178 * were newly allocated from disk. We use the BH_New flag similarly.
/*
 * commit_write with error recovery (see the block comment above): on
 * the 2.4 error path, zero out the page's buffers so newly-allocated
 * blocks are not committed with stale data, then fall through to the
 * normal lustre_commit_write().  The CERROR is temporary debugging to
 * see whether the error path is ever taken.
 */
1180 static int filter_commit_write(struct page *page, unsigned from, unsigned to,
1183 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1185 unsigned block_start, block_end;
1186 struct buffer_head *bh, *head = page->buffers;
1187 unsigned blocksize = head->b_size;
1188 void *addr = page_address(page);
1190 /* debugging: just seeing if this ever happens */
1191 CERROR("called filter_commit_write for obj %ld:%ld on err %d\n",
1192 page->index, page->mapping->host->i_ino, err);
1194 /* Currently one buffer per page, but in the future... */
1195 for (bh = head, block_start = 0; bh != head || !block_start;
1196 block_start = block_end, bh = bh->b_this_page) {
1197 block_end = block_start + blocksize;
1199 memset(addr + block_start, 0, blocksize);
1203 return lustre_commit_write(page, from, to);
/*
 * Prepare a bulk read/write: for writes, open the umbrella journal
 * handle (returned via *desc_private for filter_commitrw); then for
 * each object resolve its dentry and for each niobuf get a prepared
 * page -- filter_get_page_write() for writes, lustre_get_page_read()
 * for reads -- filling the niobuf_local array (dentry, page address,
 * offset, length) that the bulk layer transfers into/out of.  The
 * error path walks back over the already-prepared niobufs, committing/
 * releasing pages and dropping dentry references.
 */
1206 static int filter_preprw(int cmd, struct lustre_handle *conn,
1207 int objcount, struct obd_ioobj *obj,
1208 int niocount, struct niobuf_remote *nb,
1209 struct niobuf_local *res, void **desc_private)
1211 struct obd_run_ctxt saved;
1212 struct obd_device *obd;
1213 struct obd_ioobj *o = obj;
1214 struct niobuf_remote *rnb = nb;
1215 struct niobuf_local *lnb = res;
1216 void *journal_save = NULL;
1222 obd = class_conn2obd(conn);
1224 CDEBUG(D_IOCTL, "invalid client "LPX64"\n", conn->addr);
1227 memset(res, 0, sizeof(*res) * niocount);
1229 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
1231 if (cmd & OBD_BRW_WRITE) {
1232 *desc_private = filter_journal_start(&journal_save,
1234 objcount, obj, niocount,
1236 if (IS_ERR(*desc_private))
1237 GOTO(out_ctxt, rc = PTR_ERR(*desc_private));
1240 for (i = 0; i < objcount; i++, o++) {
1241 struct dentry *dentry;
1242 struct inode *inode;
/* all objects are stored as regular files under O/R */
1245 dentry = filter_fid2dentry(obd, filter_parent(obd, S_IFREG),
1246 o->ioo_id, S_IFREG);
1248 GOTO(out_clean, rc = PTR_ERR(dentry));
1249 inode = dentry->d_inode;
1251 CERROR("trying to BRW to non-existent file "LPU64"\n",
1254 GOTO(out_clean, rc = -ENOENT);
1257 for (j = 0; j < o->ioo_bufcnt; j++, rnb++, lnb++) {
/* first niobuf of an object keeps the lookup ref; later ones dget */
1261 lnb->dentry = dentry;
1263 lnb->dentry = dget(dentry);
1265 if (cmd & OBD_BRW_WRITE)
1266 page = filter_get_page_write(inode, rnb, lnb,
1269 page = lustre_get_page_read(inode, rnb);
1273 GOTO(out_clean, rc = PTR_ERR(page));
1276 lnb->addr = page_address(page);
1277 lnb->offset = rnb->offset;
1279 lnb->len = rnb->len;
/* writes: drop the parent journal handle reference now */
1284 if (cmd & OBD_BRW_WRITE) {
1285 int err = filter_journal_stop(journal_save, &obd->u.filter,
/* error unwind: release every niobuf prepared so far */
1294 while (lnb-- > res) {
1295 CERROR("error cleanup on brw\n");
1296 f_dput(lnb->dentry);
1297 if (cmd & OBD_BRW_WRITE)
1298 filter_commit_write(lnb->page, 0, PAGE_SIZE, rc);
1300 lustre_put_page(lnb->page);
/*
 * Second pass for niobufs that fell back to a temporary page
 * (N_LOCAL_TEMP_PAGE): re-acquire the real cache page for writing --
 * which should now be unlocked -- copy the temporary page's contents
 * into it, and commit.  The temporary page is released in either case.
 */
1305 static int filter_write_locked_page(struct niobuf_local *lnb)
1310 lpage = lustre_get_page_write(lnb->dentry->d_inode, lnb->page->index);
1311 if (IS_ERR(lpage)) {
1312 /* It is highly unlikely that we would ever get an error here.
1313 * The page we want to get was previously locked, so it had to
1314 * have already allocated the space, and we were just writing
1315 * over the same data, so there would be no hole in the file.
1317 * XXX: possibility of a race with truncate could exist, need
1318 * to check that. There are no guarantees w.r.t.
1319 * write order even on a local filesystem, although the
1320 * normal response would be to return the number of bytes
1321 * successfully written and leave the rest to the app.
1323 rc = PTR_ERR(lpage);
1324 CERROR("error getting locked page index %ld: rc = %d\n",
1325 lnb->page->index, rc);
1329 /* lpage is kmapped in lustre_get_page_write() above and kunmapped in
1330 * lustre_commit_write() below, lnb->page was kmapped previously in
1331 * filter_get_page_write() and kunmapped in lustre_put_page() below.
1333 memcpy(page_address(lpage), page_address(lnb->page), PAGE_SIZE);
1334 rc = lustre_commit_write(lpage, 0, PAGE_SIZE);
1336 CERROR("error committing locked page %ld: rc = %d\n",
1337 lnb->page->index, rc);
1339 lustre_put_page(lnb->page);
/*
 * Complete a bulk read/write prepared by filter_preprw(): reinstall
 * the journal handle from 'private' as current->journal_info, then in
 * a first pass commit (writes) or release (reads) every regular page,
 * deferring N_LOCAL_TEMP_PAGE niobufs; after restoring journal_info a
 * second pass flushes the deferred temporary pages through
 * filter_write_locked_page().  Dentry references are dropped as each
 * niobuf is finished (the f_dput lines fall in this listing's gaps).
 */
1344 static int filter_commitrw(int cmd, struct lustre_handle *conn,
1345 int objcount, struct obd_ioobj *obj,
1346 int niocount, struct niobuf_local *res,
1349 struct obd_run_ctxt saved;
1350 struct obd_ioobj *o;
1351 struct niobuf_local *r;
1352 struct obd_device *obd = class_conn2obd(conn);
1354 int found_locked = 0;
1359 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
1360 journal_save = current->journal_info;
1361 LASSERT(!journal_save);
1363 current->journal_info = private;
1364 for (i = 0, o = obj, r = res; i < objcount; i++, o++) {
1366 for (j = 0 ; j < o->ioo_bufcnt ; j++, r++) {
1367 struct page *page = r->page;
/* temp pages need the real page re-locked; defer to pass two */
1372 if (r->flags & N_LOCAL_TEMP_PAGE) {
1377 if (cmd & OBD_BRW_WRITE) {
1378 int err = filter_commit_write(page, 0,
1384 lustre_put_page(page);
1389 current->journal_info = journal_save;
/* pass two: flush deferred temporary pages */
1394 for (i = 0, o = obj, r = res; i < objcount; i++, o++) {
1396 for (j = 0 ; j < o->ioo_bufcnt ; j++, r++) {
1398 if (!(r->flags & N_LOCAL_TEMP_PAGE))
1401 err = filter_write_locked_page(r);
/*
 * filter_statfs(): report free-space statistics for the backing
 * filesystem of this filter device.  Queries the VFS via vfs_statfs()
 * on the filter's superblock and repacks the result into the wire
 * format (struct obd_statfs) with statfs_pack().
 *
 * NOTE(review): the error check between the two calls and the return
 * statement are elided from this listing.
 */
1413 static int filter_statfs(struct lustre_handle *conn, struct obd_statfs *osfs)
1415         struct obd_device *obd = class_conn2obd(conn);
1420         rc = vfs_statfs(obd->u.filter.fo_sb, &sfs);
1422         statfs_pack(osfs, &sfs);
/*
 * filter_get_info(): generic key/value query interface (o_get_info).
 * Recognized keys: "blocksize", "blocksize_bits" (from the backing
 * superblock) and "root_ino" (the fixed FILTER_ROOTINO).  For each key
 * the VALUE itself is smuggled into *val by casting it to a pointer —
 * the caller must cast it back, not dereference it.
 *
 * Unrecognized keys fall through to the "invalid key" debug message
 * (the error return after it is elided from this listing, as are the
 * RETURN statements after each match).
 */
1427 static int filter_get_info(struct lustre_handle *conn, obd_count keylen,
1428                            void *key, obd_count *vallen, void **val)
1430         struct obd_device *obd;
1433         obd = class_conn2obd(conn);
        /* class_conn2obd() returned no device: bogus/stale connection. */
1435                 CDEBUG(D_IOCTL, "invalid client "LPX64"\n", conn->addr);
        /* Length check first so memcmp never reads past either string. */
1439         if ( keylen == strlen("blocksize") &&
1440              memcmp(key, "blocksize", keylen) == 0 ) {
1441                 *vallen = sizeof(long);
1442                 *val = (void *)(long)obd->u.filter.fo_sb->s_blocksize;
1446         if ( keylen == strlen("blocksize_bits") &&
1447              memcmp(key, "blocksize_bits", keylen) == 0 ){
1448                 *vallen = sizeof(long);
1449                 *val = (void *)(long)obd->u.filter.fo_sb->s_blocksize_bits;
1453         if ( keylen == strlen("root_ino") &&
1454              memcmp(key, "root_ino", keylen) == 0 ){
1455                 *vallen = sizeof(obd_id);
                /* NOTE(review): obd_id is a 64-bit type but void * may be
                 * 32-bit; the cast truncates on 32-bit targets even though
                 * *vallen claims sizeof(obd_id).  Safe only because
                 * FILTER_ROOTINO == 2 — verify callers agree. */
1456                 *val = (void *)(obd_id)FILTER_ROOTINO;
1460         CDEBUG(D_IOCTL, "invalid key\n");
/*
 * filter_copy_data(): copy `count' bytes at `offset' from the object
 * `src' (reachable through src_conn) to the object `dst' (through
 * dst_conn), one page at a time, using synchronous obd_brw() reads and
 * writes through a single bounce page.  On success the destination's
 * size/blocks attributes are copied from the source and marked valid.
 *
 * NOTE(review): several lines (error checks, pg field setup, loop
 * closing braces, page free and return) are elided from this listing.
 */
1464 int filter_copy_data(struct lustre_handle *dst_conn, struct obdo *dst,
1465                      struct lustre_handle *src_conn, struct obdo *src,
1466                      obd_size count, obd_off offset)
1469         struct lov_stripe_md srcmd, dstmd;
1470         unsigned long index = 0;
        /* Build minimal single-stripe metadata for both objects; only the
         * object id is needed by the brw path used here. */
1473         memset(&srcmd, 0, sizeof(srcmd));
1474         memset(&dstmd, 0, sizeof(dstmd));
1475         srcmd.lsm_object_id = src->o_id;
1476         dstmd.lsm_object_id = dst->o_id;
1479         CDEBUG(D_INFO, "src: ino "LPU64" blocks "LPU64", size "LPU64
1480                ", dst: ino "LPU64"\n",
1481                src->o_id, src->o_blocks, src->o_size, dst->o_id);
        /* Single bounce page reused for every read/write pair. */
1482         page = alloc_page(GFP_USER);
1486 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1487         while (TryLockPage(page))
1488                 ___wait_on_page(page);
1490         wait_on_page_locked(page);
1493         /* XXX with brw vector I/O, we could batch up reads and writes here,
1494          *     all we need to do is allocate multiple pages to handle the I/Os
1495          *     and arrays to handle the request parameters.
        /* Loop over every page covering [0, src->o_size), rounded up. */
1497         while (index < ((src->o_size + PAGE_SIZE - 1) >> PAGE_SHIFT)) {
1499                 struct io_cb_data *cbd = ll_init_cb();
1508                 pg.count = PAGE_SIZE;
                /* NOTE(review): pg.off is derived from page->index BEFORE
                 * page->index is assigned from `index' just below — on the
                 * first pass it uses whatever index alloc_page() left, and
                 * later passes use the previous iteration's value.  Also,
                 * `index << PAGE_SHIFT' is unsigned long arithmetic and can
                 * overflow for >4GB objects on 32-bit; should widen to
                 * obd_off first.  Both need verification upstream. */
1509                 pg.off = (page->index) << PAGE_SHIFT;
1512                 page->index = index;
1513                 err = obd_brw(OBD_BRW_READ, src_conn, &srcmd, 1, &pg,
1514                               ll_sync_io_cb, cbd);
1527                 pg.flag = OBD_BRW_CREATE;
1528                 CDEBUG(D_INFO, "Read page %ld ...\n", page->index);
1530                 err = obd_brw(OBD_BRW_WRITE, dst_conn, &dstmd, 1, &pg,
1531                               ll_sync_io_cb, cbd);
1533                 /* XXX should handle dst->o_size, dst->o_blocks here */
1539                 CDEBUG(D_INFO, "Wrote page %ld ...\n", page->index);
        /* Propagate the resulting attributes to the destination obdo. */
1543         dst->o_size = src->o_size;
1544         dst->o_blocks = src->o_blocks;
1545         dst->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
/*
 * Method table wiring this driver into the OBD class framework; every
 * o_* slot maps a generic class operation to its filter_ implementation.
 *
 * NOTE(review): the o_commitrw entry has no trailing comma before the
 * o_preallocate group — the elided line between them is presumably a
 * preprocessor guard (#if 0 or similar) disabling the last four
 * entries; confirm against the full file before editing this table.
 */
1553 static struct obd_ops filter_obd_ops = {
1554         o_get_info:    filter_get_info,
1555         o_setup:       filter_setup,
1556         o_cleanup:     filter_cleanup,
1557         o_connect:     filter_connect,
1558         o_disconnect:  filter_disconnect,
1559         o_statfs:      filter_statfs,
1560         o_getattr:     filter_getattr,
1561         o_create:      filter_create,
1562         o_setattr:     filter_setattr,
1563         o_destroy:     filter_destroy,
1564         o_open:        filter_open,
1565         o_close:       filter_close,
1566         o_brw:         filter_pgcache_brw,
1567         o_punch:       filter_truncate,
1568         o_preprw:      filter_preprw,
1569         o_commitrw:    filter_commitrw
1571         o_preallocate: filter_preallocate_inodes,
1572         o_migrate:     filter_migrate,
1573         o_copy:        filter_copy_data,
1574         o_iterate:     filter_iterate
1579 static int __init obdfilter_init(void)
1581 printk(KERN_INFO "Filtering OBD driver v0.001, info@clusterfs.com\n");
1582 return class_register_type(&filter_obd_ops, OBD_FILTER_DEVICENAME);
/*
 * Module exit point: unregister the filter OBD type from the class
 * driver, undoing the class_register_type() done in obdfilter_init().
 * (Braces are elided from this listing.)
 */
1585 static void __exit obdfilter_exit(void)
1587         class_unregister_type(OBD_FILTER_DEVICENAME);
/* Standard kernel module metadata and entry/exit registration. */
1590 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
1591 MODULE_DESCRIPTION("Lustre Filtering OBD driver v1.0");
1592 MODULE_LICENSE("GPL");
1594 module_init(obdfilter_init);
1595 module_exit(obdfilter_exit);