1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/fs/filter/filter.c
6 * Copyright (C) 2001 Cluster File Systems, Inc.
8 * This code is issued under the GNU General Public License.
9 * See the file COPYING in this distribution
11 * by Peter Braam <braam@clusterfs.com>
15 #define DEBUG_SUBSYSTEM S_FILTER
17 #include <linux/module.h>
18 #include <linux/lustre_dlm.h>
19 #include <linux/obd_filter.h>
20 #include <linux/ext3_jbd.h>
21 #include <linux/quotaops.h>
22 #include <linux/init.h>
24 extern struct obd_device obd_dev[MAX_OBD_DEVICES];
27 #define FILTER_ROOTINO 2
30 static char *obd_type_by_mode[S_IFMT >> S_SHIFT] = {
32 [S_IFREG >> S_SHIFT] "R",
33 [S_IFDIR >> S_SHIFT] "D",
34 [S_IFCHR >> S_SHIFT] "C",
35 [S_IFBLK >> S_SHIFT] "B",
36 [S_IFIFO >> S_SHIFT] "F",
37 [S_IFSOCK >> S_SHIFT] "S",
38 [S_IFLNK >> S_SHIFT] "L"
41 static inline const char *obd_mode_to_type(int mode)
43 return obd_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
46 /* write the pathname into the string */
47 static int filter_id(char *buf, obd_id id, obd_mode mode)
49 return sprintf(buf, "O/%s/%Ld", obd_mode_to_type(mode),
50 (unsigned long long)id);
/* setup the object store with correct subdirectories */
/*
 * Create or look up the on-disk layout used by the filter: the "O"
 * directory (with one subdirectory per object type), "P" and "D",
 * then open D/status and read -- or, for a fresh store, initialize --
 * the last-used object id (fo_lastino).
 *
 * NOTE(review): this listing is truncated -- opening/closing braces,
 * several declarations (rc, mode, file, inode, rootid, lastino) and
 * some error-check lines are not visible here.
 */
static int filter_prep(struct obd_device *obddev)
        struct obd_run_ctxt saved;
        struct filter_obd *filter = &obddev->u.filter;
        struct dentry *dentry;
        /* run with the filter's own mount/cwd as filesystem context */
        push_ctxt(&saved, &filter->fo_ctxt);
        dentry = simple_mkdir(current->fs->pwd, "O", 0700);
        CDEBUG(D_INODE, "got/created O: %p\n", dentry);
        CERROR("cannot open/create O: rc = %d\n", rc);
        filter->fo_dentry_O = dentry;
        dentry = simple_mkdir(current->fs->pwd, "P", 0700);
        CDEBUG(D_INODE, "got/created P: %p\n", dentry);
        CERROR("cannot open/create P: rc = %d\n", rc);
        /* "P" is not cached anywhere; its reference is dropped right away */
        CDEBUG(D_INODE, "putting P: %p, count = %d\n", dentry,
               atomic_read(&dentry->d_count) - 1);
        dentry = simple_mkdir(current->fs->pwd, "D", 0700);
        CDEBUG(D_INODE, "got/created D: %p\n", dentry);
        CERROR("cannot open/create D: rc = %d\n", rc);
        CDEBUG(D_INODE, "putting D: %p, count = %d\n", dentry,
               atomic_read(&dentry->d_count) - 1);
        /*
         * Create directories and/or get dentries for each object type.
         * This saves us from having to do multiple lookups for each one.
         */
        for (mode = 0; mode < (S_IFMT >> S_SHIFT); mode++) {
                char *type = obd_type_by_mode[mode];
                /* presumably slots with no type name are skipped -- the
                 * continue/brace lines are missing from this listing */
                filter->fo_dentry_O_mode[mode] = NULL;
                dentry = simple_mkdir(filter->fo_dentry_O, type, 0700);
                CDEBUG(D_INODE, "got/created O/%s: %p\n", type, dentry);
                if (IS_ERR(dentry)) {
                        rc = PTR_ERR(dentry);
                        CERROR("cannot create O/%s: rc = %d\n", type, rc);
                        GOTO(out_O_mode, rc);
                filter->fo_dentry_O_mode[mode] = dentry;
        /* ensure the root object exists on disk */
        filter_id(rootid, FILTER_ROOTINO, S_IFDIR);
        file = filp_open(rootid, O_RDWR | O_CREAT, 0755);
        CERROR("OBD filter: cannot open/create root %s: rc = %d\n",
        GOTO(out_O_mode, rc);
        file = filp_open("D/status", O_RDWR | O_CREAT, 0700);
        if ( !file || IS_ERR(file) ) {
        CERROR("OBD filter: cannot open/create status %s: rc = %d\n",
        GOTO(out_O_mode, rc);
        /* steal operations */
        /* cache the backing fs's file/inode/address-space operations so the
         * filter can call them directly later */
        inode = file->f_dentry->d_inode;
        filter->fo_fop = file->f_op;
        filter->fo_iop = inode->i_op;
        filter->fo_aops = inode->i_mapping->a_ops;
        /* empty status file: seed it with the initial last-object id */
        if (inode->i_size == 0) {
                __u64 disk_lastino = cpu_to_le64(lastino);
                ssize_t retval = file->f_op->write(file, (char *)&disk_lastino,
                                                   sizeof(disk_lastino),
                if (retval != sizeof(disk_lastino)) {
                        CDEBUG(D_INODE, "OBD filter: error writing lastino\n");
                        GOTO(out_O_mode, rc = -EIO);
                /* existing status file: read back the stored id (on-disk
                 * format is little-endian) */
                ssize_t retval = file->f_op->read(file, (char *)&disk_lastino,
                                                  sizeof(disk_lastino),
                if (retval != sizeof(disk_lastino)) {
                        CDEBUG(D_INODE, "OBD filter: error reading lastino\n");
                        GOTO(out_O_mode, rc = -EIO);
                lastino = le64_to_cpu(disk_lastino);
        filter->fo_lastino = lastino;
        /* error path: drop the references taken on the per-type dirs... */
        struct dentry *dentry = filter->fo_dentry_O_mode[mode];
        CDEBUG(D_INODE, "putting O/%s: %p, count = %d\n",
               obd_type_by_mode[mode], dentry,
               atomic_read(&dentry->d_count) - 1);
        filter->fo_dentry_O_mode[mode] = NULL;
        /* ...and on "O" itself */
        CDEBUG(D_INODE, "putting O: %p, count = %d\n", filter->fo_dentry_O,
               atomic_read(&filter->fo_dentry_O->d_count) - 1);
        dput(filter->fo_dentry_O);
        filter->fo_dentry_O = NULL;
/* cleanup the filter: write last used object id to status file */
/*
 * Persist fo_lastino to D/status (little-endian) and release the
 * dentry references cached by filter_prep().
 *
 * NOTE(review): truncated listing -- braces and declarations (file,
 * rc, mode, disk_lastino) are not visible here.
 */
static void filter_post(struct obd_device *obddev)
        struct obd_run_ctxt saved;
        struct filter_obd *filter = &obddev->u.filter;
        push_ctxt(&saved, &filter->fo_ctxt);
        file = filp_open("D/status", O_RDWR | O_CREAT, 0700);
        CERROR("OBD filter: cannot create status file\n");
        /* write the last allocated object id in on-disk byte order */
        disk_lastino = cpu_to_le64(filter->fo_lastino);
        rc = file->f_op->write(file, (char *)&disk_lastino,
                               sizeof(disk_lastino), &file->f_pos);
        if (rc != sizeof(disk_lastino))
                CERROR("OBD filter: error writing lastino: rc = %ld\n", rc);
        rc = filp_close(file, NULL);
        CERROR("OBD filter: cannot close status file: rc = %ld\n", rc);
        /* drop the per-type object directory dentries */
        for (mode = 0; mode < (S_IFMT >> S_SHIFT); mode++) {
                struct dentry *dentry = filter->fo_dentry_O_mode[mode];
                CDEBUG(D_INODE, "putting O/%s: %p, count = %d\n",
                       obd_type_by_mode[mode], dentry,
                       atomic_read(&dentry->d_count) - 1);
                filter->fo_dentry_O_mode[mode] = NULL;
        /* and finally "O" itself */
        CDEBUG(D_INODE, "putting O: %p, count = %d\n", filter->fo_dentry_O,
               atomic_read(&filter->fo_dentry_O->d_count) - 1);
        dput(filter->fo_dentry_O);
237 static __u64 filter_next_id(struct obd_device *obddev)
241 spin_lock(&obddev->u.filter.fo_lock);
242 obddev->u.filter.fo_lastino++;
243 id = obddev->u.filter.fo_lastino;
244 spin_unlock(&obddev->u.filter.fo_lock);
246 /* FIXME: write the lastino to disk here */
/* how to get files, dentries, inodes from object id's */
/* parent i_sem is already held if needed for exclusivity */
/*
 * Look up the dentry for object @id of type @type under @dparent
 * (one of the cached O/<type> directories).  Returns a dentry with an
 * elevated refcount, or an ERR_PTR on failure.
 *
 * NOTE(review): truncated listing -- braces, the declarations of
 * name/len and the object-id-zero check's condition line are missing.
 */
static struct dentry *filter_fid2dentry(struct obd_device *obddev,
                                        struct dentry *dparent,
                                        __u64 id, __u32 type)
        struct super_block *sb = obddev->u.filter.fo_sb;
        struct dentry *dchild;
        if (!sb || !sb->s_dev) {
                CERROR("fatal: device not initialized.\n");
                RETURN(ERR_PTR(-ENXIO));
        /* object id 0 is never valid */
        CERROR("fatal: invalid object #0\n");
        RETURN(ERR_PTR(-ESTALE));
        /* the S_IFMT bits select the parent directory, so they must be set */
        if (!(type & S_IFMT)) {
                CERROR("OBD %s, object %Lu has bad type: %o\n", __FUNCTION__,
                       (unsigned long long)id, type);
                RETURN(ERR_PTR(-EINVAL));
        /* object file names are just the decimal object id */
        len = sprintf(name, "%Ld", id);
        CDEBUG(D_INODE, "opening object O/%s/%s\n", obd_mode_to_type(type),
        dchild = lookup_one_len(name, dparent, len);
        CDEBUG(D_INODE, "got child obj O/%s/%s: %p, count = %d\n",
               obd_mode_to_type(type), name, dchild,
               atomic_read(&dchild->d_count));
        if (IS_ERR(dchild)) {
                CERROR("child lookup error %ld\n", PTR_ERR(dchild));
/*
 * Open the backing file for object @id of type @type (read-only) in
 * the filter's own filesystem context.  Returns a struct file * or
 * an ERR_PTR.
 *
 * NOTE(review): truncated listing -- braces, declarations (name,
 * file) and the id-validity condition line are missing from view.
 */
static struct file *filter_obj_open(struct obd_device *obddev,
                                    __u64 id, __u32 type)
        struct super_block *sb = obddev->u.filter.fo_sb;
        struct obd_run_ctxt saved;
        if (!sb || !sb->s_dev) {
                CERROR("fatal: device not initialized.\n");
                RETURN(ERR_PTR(-ENXIO));
        CERROR("fatal: invalid obdo %Lu\n", (unsigned long long)id);
        RETURN(ERR_PTR(-ESTALE));
        /* type bits are required to build the O/<type>/<id> path */
        if (!(type & S_IFMT)) {
                CERROR("OBD %s, no type (%Ld), mode %o!\n", __FUNCTION__,
                       (unsigned long long)id, type);
                RETURN(ERR_PTR(-EINVAL));
        filter_id(name, id, type);
        push_ctxt(&saved, &obddev->u.filter.fo_ctxt);
        file = filp_open(name, O_RDONLY | O_LARGEFILE, 0 /* type? */);
        CDEBUG(D_INODE, "opening obdo %s: rc = %p\n", name, file);
332 static struct dentry *filter_parent(struct obd_device *obddev, obd_mode mode)
334 struct filter_obd *filter = &obddev->u.filter;
336 return filter->fo_dentry_O_mode[(mode & S_IFMT) >> S_SHIFT];
/*
 * Translate an object id into a referenced inode: look up the dentry,
 * iget() the inode, then drop the dentry reference.
 *
 * NOTE(review): truncated listing -- braces, the inode declaration,
 * the rest of the fid2dentry argument list and the dput() call are
 * missing from view.
 */
static struct inode *filter_inode_from_obj(struct obd_device *obddev,
                                           __u64 id, __u32 type)
        struct dentry *dentry;
        dentry = filter_fid2dentry(obddev, filter_parent(obddev, type),
        if (IS_ERR(dentry)) {
                CERROR("%s: lookup failed: rc = %ld\n", __FUNCTION__,
        /* take our own inode reference, then release the dentry */
        inode = iget(dentry->d_inode->i_sb, dentry->d_inode->i_ino);
        CDEBUG(D_INODE, "put child %p, count = %d\n", dentry,
               atomic_read(&dentry->d_count) - 1);
        CDEBUG(D_INODE, "got inode %p (%ld), count = %d\n", inode, inode->i_ino,
               atomic_read(&inode->i_count));
/* Accept a new client connection; delegates to the generic class
 * layer.  NOTE(review): truncated -- the remaining parameter(s)
 * (cluuid, used below) and the body's braces/return are not visible. */
static int filter_connect(struct lustre_handle *conn, struct obd_device *obd,
        rc = class_connect(conn, obd, cluuid);
/* Tear down a client connection via the generic class layer.
 * NOTE(review): truncated -- braces, rc declaration and return are
 * not visible in this listing. */
static int filter_disconnect(struct lustre_handle *conn)
        rc = class_disconnect(conn);
        /* XXX cleanup preallocated inodes */
391 /* mount the file system (secretly) */
392 static int filter_setup(struct obd_device *obddev, obd_count len, void *buf)
394 struct obd_ioctl_data* data = buf;
395 struct filter_obd *filter;
396 struct vfsmount *mnt;
400 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2)
404 mnt = do_kern_mount(data->ioc_inlbuf2, 0, data->ioc_inlbuf1, NULL);
409 filter = &obddev->u.filter;;
410 filter->fo_sb = mnt->mnt_root->d_inode->i_sb;
411 /* XXX is this even possible if do_kern_mount succeeded? */
413 GOTO(err_put, err = -ENODEV);
415 filter->fo_vfsmnt = mnt;
416 filter->fo_fstype = strdup(data->ioc_inlbuf2);
418 OBD_SET_CTXT_MAGIC(&filter->fo_ctxt);
419 filter->fo_ctxt.pwdmnt = mnt;
420 filter->fo_ctxt.pwd = mnt->mnt_root;
421 filter->fo_ctxt.fs = get_ds();
423 err = filter_prep(obddev);
425 GOTO(err_kfree, err);
426 spin_lock_init(&filter->fo_lock);
428 obddev->obd_namespace =
429 ldlm_namespace_new("filter-tgt", LDLM_NAMESPACE_SERVER);
430 if (obddev->obd_namespace == NULL)
436 kfree(filter->fo_fstype);
439 mntput(filter->fo_vfsmnt);
/*
 * Device teardown: force-disconnect any remaining clients, free the
 * lock namespace, then release the dcache and the backing mount.
 *
 * NOTE(review): truncated listing -- braces, filter_post() call and
 * the return path are not visible here.
 */
static int filter_cleanup(struct obd_device * obddev)
        struct super_block *sb;
        if (!list_empty(&obddev->obd_exports)) {
                CERROR("still has clients!\n");
                class_disconnect_all(obddev);
                if (!list_empty(&obddev->obd_exports)) {
                        CERROR("still has exports after forced cleanup?\n");
        ldlm_namespace_free(obddev->obd_namespace);
        sb = obddev->u.filter.fo_sb;
        if (!obddev->u.filter.fo_sb)
        /* drop cached dentries below root before unmounting */
        shrink_dcache_parent(sb->s_root);
        mntput(obddev->u.filter.fo_vfsmnt);
        obddev->u.filter.fo_sb = 0;
        kfree(obddev->u.filter.fo_fstype);
/*
 * Fill @oa from @inode for the @valid fields, preserving oa->o_id and
 * the object type: the S_IFMT bits of o_mode are saved across the
 * copy so the inode's on-disk type (always a regular file) does not
 * leak into the obdo.  Device numbers are passed through for
 * char/block objects.
 *
 * NOTE(review): truncated listing -- braces and the line restoring
 * the saved type into o_mode are not visible here.
 */
static inline void filter_from_inode(struct obdo *oa, struct inode *inode,
        /* remember the object's logical type before the copy */
        int type = oa->o_mode & S_IFMT;
        CDEBUG(D_INFO, "src inode %ld (%p), dst obdo %ld valid 0x%08x\n",
               inode->i_ino, inode, (long)oa->o_id, valid);
        /* Don't copy the inode number in place of the object ID */
        obdo_from_inode(oa, inode, valid);
        oa->o_mode &= ~S_IFMT;
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                obd_rdev rdev = kdev_t_to_nr(inode->i_rdev);
                oa->o_valid |= OBD_MD_FLRDEV;
/*
 * Fill @oa with the current attributes of the object it names.
 *
 * NOTE(review): truncated listing -- braces and the IS_ERR(dentry)
 * check line are missing; also note class_conn2obd() is called both
 * at the declaration and again after validation (redundant-looking,
 * kept as-is).
 */
static int filter_getattr(struct lustre_handle *conn, struct obdo *oa,
                          struct lov_stripe_md *md)
        struct obd_device *obddev = class_conn2obd(conn);
        struct dentry *dentry;
        if (!class_conn2export(conn)) {
                CDEBUG(D_IOCTL, "fatal: invalid client %Lx\n", conn->addr);
        obddev = class_conn2obd(conn);
        dentry = filter_fid2dentry(obddev, filter_parent(obddev, oa->o_mode),
                                   oa->o_id, oa->o_mode);
        RETURN(PTR_ERR(dentry));
        /* everything except the object id is copied from the inode */
        filter_from_inode(oa, dentry->d_inode, oa->o_valid & ~OBD_MD_FLID);
530 static int filter_setattr(struct lustre_handle *conn, struct obdo *oa,
531 struct lov_stripe_md *md)
533 struct obd_run_ctxt saved;
534 struct obd_device *obd = class_conn2obd(conn);
535 struct dentry *dentry;
541 iattr_from_obdo(&iattr, oa, oa->o_valid);
542 iattr.ia_mode = (iattr.ia_mode & ~S_IFMT) | S_IFREG;
543 dentry = filter_fid2dentry(obd, filter_parent(obd, iattr.ia_mode),
544 oa->o_id, iattr.ia_mode);
546 RETURN(PTR_ERR(dentry));
548 inode = dentry->d_inode;
550 if (iattr.ia_mode & ATTR_SIZE)
552 push_ctxt(&saved, &obd->u.filter.fo_ctxt);
553 if (inode->i_op->setattr)
554 rc = inode->i_op->setattr(dentry, &iattr);
556 rc = inode_setattr(inode, &iattr);
558 if (iattr.ia_mode & ATTR_SIZE) {
560 oa->o_valid = OBD_MD_FLBLOCKS | OBD_MD_FLCTIME | OBD_MD_FLMTIME;
561 obdo_from_inode(oa, inode, oa->o_valid);
565 CDEBUG(D_INODE, "put dentry %p, count = %d\n", inode,
566 atomic_read(&dentry->d_count) - 1);
/*
 * "Open" an object: take a dentry reference that filter_close() will
 * drop, and return the current object size in oa->o_size.
 *
 * NOTE(review): truncated listing -- braces, the IS_ERR check line
 * and the return path are not visible here.
 */
static int filter_open(struct lustre_handle *conn, struct obdo *oa,
                       struct lov_stripe_md *ea)
        struct obd_device *obd;
        struct dentry *dentry;
        if (!class_conn2export(conn)) {
                CDEBUG(D_IOCTL, "fatal: invalid client %Lx\n", conn->addr);
        obd = class_conn2obd(conn);
        dentry = filter_fid2dentry(obd, filter_parent(obd, oa->o_mode),
                                   oa->o_id, oa->o_mode);
        RETURN(PTR_ERR(dentry));
        oa->o_size = dentry->d_inode->i_size;
/*
 * "Close" an object: the lookup here takes one reference, then two
 * dput()s follow -- one balancing this call's lookup and one
 * balancing the reference held since filter_open().
 *
 * NOTE(review): truncated listing -- braces and the connection-
 * validity condition line are not visible here.
 */
static int filter_close(struct lustre_handle *conn, struct obdo *oa,
                        struct lov_stripe_md *ea)
        struct obd_device *obd;
        struct dentry *dentry;
        obd = class_conn2obd(conn);
        CDEBUG(D_IOCTL, "fatal: invalid client %Lx\n", conn->addr);
        dentry = filter_fid2dentry(obd, filter_parent(obd, oa->o_mode),
                                   oa->o_id, oa->o_mode);
        RETURN(PTR_ERR(dentry));
        CDEBUG(D_INODE, "put dentry %p, count = %d\n", dentry,
               atomic_read(&dentry->d_count) - 1);
        dput(dentry); /* for the close */
        CDEBUG(D_INODE, "put dentry %p, count = %d\n", dentry,
               atomic_read(&dentry->d_count) - 1);
        dput(dentry); /* for this call */
621 static int filter_create(struct lustre_handle* conn, struct obdo *oa,
622 struct lov_stripe_md **ea)
625 struct obd_run_ctxt saved;
628 struct obd_device *obd = class_conn2obd(conn);
633 CERROR("invalid client %Lx\n", conn->addr);
637 if (!(oa->o_mode && S_IFMT)) {
638 CERROR("filter obd: no type!\n");
642 oa->o_id = filter_next_id(obd);
644 filter_id(name, oa->o_id, oa->o_mode);
645 mode = (oa->o_mode & ~S_IFMT) | S_IFREG;
646 push_ctxt(&saved, &obd->u.filter.fo_ctxt);
647 file = filp_open(name, O_RDONLY | O_CREAT, mode);
650 CERROR("Error mknod obj %s, err %ld\n", name, PTR_ERR(file));
655 /* Set flags for fields we have set in the inode struct */
656 oa->o_valid |= OBD_MD_FLID | OBD_MD_FLBLKSZ | OBD_MD_FLBLOCKS |
657 OBD_MD_FLMTIME | OBD_MD_FLATIME | OBD_MD_FLCTIME |
658 OBD_MD_FLUID | OBD_MD_FLGID;
660 /* XXX Hmm, shouldn't we copy the fields into the obdo here? */
/*
 * Destroy an object: look it up under its type directory (with the
 * parent i_sem held for exclusivity), sanity-check the inode, then
 * vfs_unlink() it in the filter's filesystem context.
 *
 * NOTE(review): truncated listing -- braces, declarations (rc, inode)
 * and the dput()/out label lines are not all visible here.
 */
static int filter_destroy(struct lustre_handle *conn, struct obdo *oa,
                          struct lov_stripe_md *ea)
        struct obd_device *obd;
        struct filter_obd *filter;
        struct obd_run_ctxt saved;
        struct dentry *dir_dentry, *object_dentry;
        obd = class_conn2obd(conn);
        CERROR("invalid client %Lx\n", conn->addr);
        CDEBUG(D_INODE, "destroying object %Ld\n", oa->o_id);
        dir_dentry = filter_parent(obd, oa->o_mode);
        /* serialize against other lookups/creates in this directory */
        down(&dir_dentry->d_inode->i_sem);
        object_dentry = filter_fid2dentry(obd, dir_dentry, oa->o_id,
        if (IS_ERR(object_dentry))
                GOTO(out, rc = -ENOENT);
        inode = object_dentry->d_inode;
        CERROR("trying to destroy negative inode %Ld!\n", oa->o_id);
        GOTO(out, rc = -ENOENT);
        if (inode->i_nlink != 1) {
                CERROR("destroying inode with nlink = %d\n", inode->i_nlink);
        /* on-disk objects are regular files; force the mode so unlink
         * does not trip over a bogus type */
        inode->i_mode = S_IFREG;
        filter = &obd->u.filter;
        push_ctxt(&saved, &filter->fo_ctxt);
        rc = vfs_unlink(dir_dentry->d_inode, object_dentry);
        CDEBUG(D_INODE, "put child %p, count = %d\n", object_dentry,
               atomic_read(&object_dentry->d_count) - 1);
        up(&dir_dentry->d_inode->i_sem);
719 /* NB count and offset are used for punch, but not truncate */
720 static int filter_truncate(struct lustre_handle *conn, struct obdo *oa,
721 struct lov_stripe_md *md,
722 obd_off start, obd_off end)
727 if (end != 0xffffffffffffffff)
728 CERROR("PUNCH not supported, only truncate works\n");
730 CDEBUG(D_INODE, "calling truncate for object #%Ld, valid = %x, "
731 "o_size = %Ld\n", oa->o_id, oa->o_valid, start);
733 error = filter_setattr(conn, oa, NULL);
/*
 * Synchronous bulk read/write: open the object's backing file once,
 * then for each brw_page either write the kmapped page contents via
 * the file's write op, or read into it (zero-filling pages beyond
 * EOF).  Returns 0 on success, negative error otherwise.
 *
 * NOTE(review): truncated listing -- braces, several declarations
 * (file, pg, buffer, error), the loop's pnum increment and the
 * cleanup/return path are not visible here.
 */
static int filter_pgcache_brw(int cmd, struct lustre_handle *conn,
                              struct lov_stripe_md *md, obd_count oa_bufs,
                              struct brw_page *pga, brw_callback_t callback,
                              struct io_cb_data *data)
        struct obd_run_ctxt saved;
        struct super_block *sb;
        int pnum; /* index to pages (bufs) */
        unsigned long retval;
        struct obd_device *obd = class_conn2obd(conn);
        CDEBUG(D_IOCTL, "invalid client %Lx\n", conn->addr);
        sb = obd->u.filter.fo_sb;
        push_ctxt(&saved, &obd->u.filter.fo_ctxt);
        pnum = 0; /* pnum indexes buf 0..num_pages */
        file = filter_obj_open(obd, md->lmd_object_id, S_IFREG);
        GOTO(out, retval = PTR_ERR(file));
        /* count doubles as retval */
        for (pg = 0; pg < oa_bufs; pg++) {
                CDEBUG(D_INODE, "OP %d obdo pgno: (%d) (%ld,%ld) "
                       "off count (%Ld,%Ld)\n",
                       cmd, pnum, file->f_dentry->d_inode->i_ino,
                       (unsigned long)pga[pnum].off >> PAGE_CACHE_SHIFT,
                       (unsigned long long)pga[pnum].off,
                       (unsigned long long)pga[pnum].count);
                if (cmd & OBD_BRW_WRITE) {
                        buffer = kmap(pga[pnum].pg);
                        retval = file->f_op->write(file, buffer,
                        kunmap(pga[pnum].pg);
                        CDEBUG(D_INODE, "retval %ld\n", retval);
                        /* read path */
                        loff_t off = pga[pnum].off;
                        char *buffer = kmap(pga[pnum].pg);
                        /* reads entirely beyond EOF return zeroes */
                        if (off >= file->f_dentry->d_inode->i_size) {
                                memset(buffer, 0, pga[pnum].count);
                                retval = pga[pnum].count;
                                retval = file->f_op->read(file, buffer,
                                                          pga[pnum].count, &off);
                        kunmap(pga[pnum].pg);
                        /* a short read/write is treated as an I/O error */
                        if (retval != pga[pnum].count) {
                                GOTO(out, retval = -EIO);
                        CDEBUG(D_INODE, "retval %ld\n", retval);
        /* sizes and blocks are set by generic_file_write */
        /* ctimes/mtimes will follow with a setattr call */
        /* XXX: do something with callback if it is set? */
        error = (retval >= 0) ? 0 : retval;
/*
 * Resolve an I/O object descriptor to a referenced inode, rejecting
 * missing, unlinked, or bad inodes.  Returns NULL on any failure.
 *
 * NOTE(review): truncated listing -- braces, the ioo_id validity
 * condition, the iput() on the error path and the return are not
 * visible here.
 */
struct inode *ioobj_to_inode(struct lustre_handle *conn, struct obd_ioobj *o)
        struct obd_device *obd = class_conn2obd(conn);
        struct super_block *sb = obd->u.filter.fo_sb;
        struct inode *inode = NULL;
        if (!sb || !sb->s_dev) {
                CDEBUG(D_SUPER, "fatal: device not initialized.\n");
        CDEBUG(D_INODE, "fatal: invalid obdo %lu\n", (long)o->ioo_id);
        inode = filter_inode_from_obj(obd, o->ioo_id, S_IFREG);
        if (!inode || inode->i_nlink == 0 || is_bad_inode(inode)) {
                CERROR("from obdo - fatal: invalid inode %ld (%s).\n",
                       (long)o->ioo_id, inode ? inode->i_nlink ? "bad inode" :
                       "no links" : "NULL");
848 * Calculate the number of buffer credits needed to write multiple pages in
849 * a single ext3/extN transaction. No, this shouldn't be here, but as yet
850 * ext3 doesn't have a nice API for calculating this sort of thing in advance.
852 * See comment above ext3_writepage_trans_blocks for details. We assume
853 * no data journaling is being done, but it does allow for all of the pages
854 * being non-contiguous. If we are guaranteed contiguous pages we could
855 * reduce the number of (d)indirect blocks a lot.
857 * With N blocks per page and P pages, for each inode we have at most:
859 * min(N*P, blocksize/4 + 1) dindirect blocks
862 * For the entire filesystem, we have at most:
863 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
864 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
867 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
/*
 * Worst-case journal credit estimate for writing the given objects in
 * one ext3/extN transaction -- see the block comment above for the
 * accounting model.  Per object: data blocks plus (d)indirect blocks;
 * globally: bitmap and group-descriptor blocks capped at the
 * filesystem's group counts, plus quota-file overhead.
 *
 * NOTE(review): truncated listing -- braces, the loop index
 * declaration, the nbitmaps/ngdblocks declarations, the per-object
 * "needed += nblocks + nindir" style line and the return are not
 * visible here.
 */
static int ext3_credits_needed(struct super_block *sb, int objcount,
                               struct obd_ioobj *obj)
        struct obd_ioobj *o = obj;
        int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
        int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
        /* one credit per object's inode, plus the superblock */
        int needed = objcount + 1;
        for (i = 0; i < objcount; i++, o++) {
                int nblocks = o->ioo_bufcnt * blockpp;
                int ndindirect = min(nblocks, addrpp + 1);
                int nindir = nblocks + ndindirect + 1;
                nbitmaps += nindir + nblocks;
                ngdblocks += nindir + nblocks;
        /* cap at the filesystem-wide totals */
        if (nbitmaps > EXT3_SB(sb)->s_groups_count)
                nbitmaps = EXT3_SB(sb)->s_groups_count;
        if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
                ngdblocks = EXT3_SB(sb)->s_gdb_count;
        needed += nbitmaps + ngdblocks;
        /* We assume that there will be 1 bit set in s_dquot.flags for each
         * quota file that is active. This is at least true for now.
         */
        needed += hweight32(sb_any_quota_enabled(sb)) *
                EXT3_SINGLEDATA_TRANS_BLOCKS;
909 /* We have to start a huge journal transaction here to hold all of the
910 * metadata for the pages being written here. This is necessitated by
911 * the fact that we do lots of prepare_write operations before we do
912 * any of the matching commit_write operations, so even if we split
913 * up to use "smaller" transactions none of them could complete until
914 * all of them were opened. By having a single journal transaction,
915 * we eliminate duplicate reservations for common blocks like the
916 * superblock and group descriptors or bitmaps.
918 * We will start the transaction here, but each prepare_write will
919 * add a refcount to the transaction, and each commit_write will
920 * remove a refcount. The transaction will be closed when all of
921 * the pages have been written.
/*
 * Open one large ext3 journal handle covering all metadata for the
 * whole multi-page write (see the block comment above).  The credit
 * request is clamped to the journal's per-handle maximum.  Returns
 * the handle, or an ERR_PTR from journal_start().
 *
 * NOTE(review): truncated listing -- braces, the "needed"
 * declaration, and the final error-check/return lines are not
 * visible here.
 */
static void *ext3_filter_journal_start(struct filter_obd *filter,
                                       int objcount, struct obd_ioobj *obj,
                                       int niocount, struct niobuf_remote *nb)
        journal_t *journal = NULL;
        handle_t *handle = NULL;
        /* Assumes ext3 and extN have same sb_info layout, but avoids issues
         * with having extN built properly before filterobd for now.
         */
        journal = EXT3_SB(filter->fo_sb)->s_journal;
        needed = ext3_credits_needed(filter->fo_sb, objcount, obj);
        /* The number of blocks we could _possibly_ dirty can very large.
         * We reduce our request if it is absurd (and we couldn't get that
         * many credits for a single handle anyways).
         *
         * At some point we have to limit the size of I/Os sent at one time,
         * increase the size of the journal, or we have to calculate the
         * actual journal requirements more carefully by checking all of
         * the blocks instead of being maximally pessimistic.  It remains to
         * be seen if this is a real problem or not.
         */
        if (needed > journal->j_max_transaction_buffers) {
                CERROR("want too many journal credits (%d) using %d instead\n",
                       needed, journal->j_max_transaction_buffers);
                needed = journal->j_max_transaction_buffers;
        handle = journal_start(journal, needed);
        CERROR("can't get handle for %d credits: rc = %ld\n", needed,
/*
 * Filesystem-dispatching wrapper around the journal-start step: saves
 * (and clears) any current journal_info, then starts an ext3/extN
 * handle when the backing fs supports journaling.
 *
 * NOTE(review): truncated listing -- braces, the handle declaration,
 * the CERROR's enclosing if, and the return line are not visible.
 */
static void *filter_journal_start(void **journal_save,
                                  struct filter_obd *filter,
                                  int objcount, struct obd_ioobj *obj,
                                  int niocount, struct niobuf_remote *nb)
        /* This may not be necessary - we probably never have a
         * transaction started when we enter here, so we can
         * remove the saving of the journal state entirely.
         * For now leave it in just to see if it ever happens.
         */
        *journal_save = current->journal_info;
        CERROR("Already have handle %p???\n", *journal_save);
        current->journal_info = NULL;
        /* only ext3/extN actually need a journal handle */
        if (!strcmp(filter->fo_fstype, "ext3") ||
            !strcmp(filter->fo_fstype, "extN"))
                handle = ext3_filter_journal_start(filter, objcount, obj,
/*
 * Drop the "parent" reference on the big write transaction opened by
 * ext3_filter_journal_start(); see the comment below for why the
 * remaining references are released in commit_write.
 *
 * NOTE(review): truncated listing -- braces, the rc declaration and
 * the return line are not visible here.
 */
static int ext3_filter_journal_stop(void *handle)
        /* We got a refcount on the handle for each call to prepare_write,
         * so we can drop the "parent" handle here to avoid the need for
         * osc to call back into filterobd to close the handle.  The
         * remaining references will be dropped in commit_write.
         */
        rc = journal_stop((handle_t *)handle);
/*
 * Counterpart of filter_journal_start(): stop the ext3/extN handle
 * (when one was started) and restore the saved journal_info.
 *
 * NOTE(review): truncated listing -- the handle parameter line,
 * braces, the rc declaration and the return are not visible here.
 */
static int filter_journal_stop(void *journal_save, struct filter_obd *filter,
        if (!strcmp(filter->fo_fstype, "ext3") ||
            !strcmp(filter->fo_fstype, "extN"))
                rc = ext3_filter_journal_stop(handle);
        CERROR("error on journal stop: rc = %d\n", rc);
        current->journal_info = journal_save;
/*
 * Get a locked, write-prepared page cache page for @index of @inode.
 * If the cache page is already locked by someone else, fall back to a
 * temporary free page (flagged N_LOCAL_TEMP_PAGE) that the commit
 * path copies back later -- see the XXX below about why that fallback
 * is racy.
 *
 * NOTE(review): truncated listing -- braces, declarations (page,
 * addr, rc), the grab-failure branch structure, the kmap call and
 * the error-label/return lines are not all visible here.
 */
struct page *filter_get_page_write(struct inode *inode, unsigned long index,
                                   struct niobuf_local *lnb)
        struct address_space *mapping = inode->i_mapping;
        //ASSERT_PAGE_INDEX(index, GOTO(err, rc = -EINVAL));
        page = grab_cache_page_nowait(mapping, index); /* locked page */
        /* This page is currently locked, so get a temporary page instead. */
        /* XXX I believe this is a very dangerous thing to do - consider if
         * we had multiple writers for the same file (definitely the case
         * if we are using this codepath).  If writer A locks the page,
         * writer B writes to a copy (as here), writer A drops the page
         * lock, and writer C grabs the lock before B does, then B will
         * later overwrite the data from C, even if C had LDLM locked
         * and initiated the write after B did.
         */
        /* debugging: just seeing if this ever happens
        CDEBUG(D_PAGE, "ino %ld page %ld locked\n", inode->i_ino,index);
        CERROR("writing ino %ld page %ld locked\n", inode->i_ino,index);
        addr = __get_free_pages(GFP_KERNEL, 0); /* locked page */
        CERROR("no memory for a temp page\n");
        GOTO(err, rc = -ENOMEM);
        page = virt_to_page(addr);
        page->index = index;
        /* mark it so commitrw copies this page back under the real lock */
        lnb->flags |= N_LOCAL_TEMP_PAGE;
        } else if (!IS_ERR(page)) {
                /* Note: Called with "O" and "PAGE_SIZE" this is essentially
                 * a no-op for most filesystems, because we write the whole
                 * page.  For partial-page I/O this will read in the page.
                 */
                rc = mapping->a_ops->prepare_write(NULL, page, 0, PAGE_SIZE);
                CERROR("page index %lu, rc = %d\n", index, rc);
                GOTO(err_unlock, rc);
                /* XXX not sure if we need this if we are overwriting page */
                if (PageError(page)) {
                        CERROR("error on page index %lu, rc = %d\n", index, rc);
                        GOTO(err_unlock, rc = -EIO);
        lustre_put_page(page);
1083 * We need to balance prepare_write() calls with commit_write() calls.
1084 * If the page has been prepared, but we have no data for it, we don't
1085 * want to overwrite valid data on disk, but we still need to zero out
1086 * data for space which was newly allocated. Like part of what happens
1087 * in __block_prepare_write() for newly allocated blocks.
1089 * XXX currently __block_prepare_write() creates buffers for all the
1090 * pages, and the filesystems mark these buffers as BH_New if they
1091 * were newly allocated from disk. We use the BH_New flag similarly.
/*
 * Commit a prepared page, zero-filling newly allocated (BH_New)
 * buffers when the write is being aborted with @err -- see the block
 * comment above.  Delegates the actual commit to
 * lustre_commit_write().
 *
 * NOTE(review): truncated listing -- the err parameter line, braces,
 * and the BH_New test inside the loop are not visible here.  Also
 * note the CERROR below prints page->index before i_ino while the
 * format says "obj <a>:<b>" -- argument order looks suspect; verify
 * against the intended "obj ino:index" reading.
 */
static int filter_commit_write(struct page *page, unsigned from, unsigned to,
        unsigned block_start, block_end;
        struct buffer_head *bh, *head = page->buffers;
        unsigned blocksize = head->b_size;
        void *addr = page_address(page);
        /* debugging: just seeing if this ever happens */
        CERROR("called filter_commit_write for obj %ld:%ld on err %d\n",
               page->index, page->mapping->host->i_ino, err);
        /* Currently one buffer per page, but in the future... */
        for (bh = head, block_start = 0; bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
                memset(addr + block_start, 0, blocksize);
        return lustre_commit_write(page, from, to);
/*
 * Prepare a bulk I/O: for writes, open one big journal transaction
 * (stashed in *desc_private); then for every object/remote-niobuf
 * pair, pin the object's dentry and get a prepared (write) or read
 * page, filling in the local niobuf array @res.
 *
 * NOTE(review): truncated listing -- braces, loop-index declarations,
 * several IS_ERR checks, the page/r field assignments after r->offset
 * and the error-unwind loop structure are not all visible here.
 */
static int filter_preprw(int cmd, struct lustre_handle *conn,
                         int objcount, struct obd_ioobj *obj,
                         int niocount, struct niobuf_remote *nb,
                         struct niobuf_local *res, void **desc_private)
        struct obd_run_ctxt saved;
        struct obd_device *obd;
        struct obd_ioobj *o = obj;
        struct niobuf_remote *b = nb;
        struct niobuf_local *r = res;
        void *journal_save = NULL;
        memset(res, 0, sizeof(*res) * niocount);
        obd = class_conn2obd(conn);
        push_ctxt(&saved, &obd->u.filter.fo_ctxt);
        if (cmd & OBD_BRW_WRITE) {
                /* one transaction covers all pages of this request */
                *desc_private = filter_journal_start(&journal_save,
                                                     objcount, obj, niocount,
                if (IS_ERR(*desc_private))
                        GOTO(out_ctxt, rc = PTR_ERR(*desc_private));
        for (i = 0; i < objcount; i++, o++) {
                struct dentry *dentry;
                struct inode *inode;
                dentry = filter_fid2dentry(obd, filter_parent(obd, S_IFREG),
                                           o->ioo_id, S_IFREG);
                GOTO(out_clean, rc = PTR_ERR(dentry));
                inode = dentry->d_inode;
                CERROR("trying to BRW to non-existent file %Ld\n",
                       (unsigned long long)o->ioo_id);
                GOTO(out_clean, rc = -ENOENT);
                for (j = 0; j < o->ioo_bufcnt; j++, b++, r++) {
                        unsigned long index = b->offset >> PAGE_SHIFT;
                        /* each local niobuf holds its own dentry reference */
                        r->dentry = dget(dentry);
                        if (cmd & OBD_BRW_WRITE)
                                page = filter_get_page_write(inode, index, r);
                                page = lustre_get_page_read(inode, index);
                        GOTO(out_clean, rc = PTR_ERR(page));
                        r->addr = page_address(page);
                        r->offset = b->offset;
        if (cmd & OBD_BRW_WRITE) {
                /* drop the parent transaction reference (error path) */
                int err = filter_journal_stop(journal_save, &obd->u.filter,
        /* unwind already-prepared pages */
        if (cmd & OBD_BRW_WRITE)
                filter_commit_write(r->page, 0, PAGE_SIZE, rc);
        lustre_put_page(r->page);
/*
 * Flush a temporary page (N_LOCAL_TEMP_PAGE) back into the real page
 * cache: get the now-available locked cache page, copy the temp
 * page's contents over, commit, and release the temp page.
 *
 * NOTE(review): truncated listing -- braces, declarations (lpage,
 * rc) and the early-return after the error report are not visible
 * here.
 */
static int filter_write_locked_page(struct niobuf_local *lnb)
        lpage = lustre_get_page_write(lnb->dentry->d_inode, lnb->page->index);
        if (IS_ERR(lpage)) {
                /* It is highly unlikely that we would ever get an error here.
                 * The page we want to get was previously locked, so it had to
                 * have already allocated the space, and we were just writing
                 * over the same data, so there would be no hole in the file.
                 *
                 * XXX: possibility of a race with truncate could exist, need
                 *      to check that.  There are no guarantees w.r.t.
                 *      write order even on a local filesystem, although the
                 *      normal response would be to return the number of bytes
                 *      successfully written and leave the rest to the app.
                 */
                rc = PTR_ERR(lpage);
                CERROR("error getting locked page index %ld: rc = %d\n",
                       lnb->page->index, rc);
        /* lpage is kmapped in lustre_get_page_write() above and kunmapped in
         * lustre_commit_write() below, lnb->page was kmapped previously in
         * filter_get_page_write() and kunmapped in lustre_put_page() below.
         */
        memcpy(page_address(lpage), page_address(lnb->page), PAGE_SIZE);
        rc = lustre_commit_write(lpage, 0, PAGE_SIZE);
        CERROR("error committing locked page %ld: rc = %d\n",
               lnb->page->index, rc);
        lustre_put_page(lnb->page);
/*
 * Complete a bulk I/O prepared by filter_preprw(): restore the saved
 * journal handle, commit (write) or release (read) each page --
 * deferring N_LOCAL_TEMP_PAGE pages to a second pass that copies them
 * back under the real page lock via filter_write_locked_page().
 *
 * NOTE(review): truncated listing -- braces, loop-index/rc/err
 * declarations, the "private" parameter line, dput() of r->dentry and
 * the return path are not all visible here.
 */
static int filter_commitrw(int cmd, struct lustre_handle *conn,
                           int objcount, struct obd_ioobj *obj,
                           int niocount, struct niobuf_local *res,
        struct obd_run_ctxt saved;
        struct obd_ioobj *o;
        struct niobuf_local *r;
        struct obd_device *obd = class_conn2obd(conn);
        int found_locked = 0;
        push_ctxt(&saved, &obd->u.filter.fo_ctxt);
        journal_save = current->journal_info;
        LASSERT(!journal_save);
        /* re-attach the transaction opened in filter_preprw() */
        current->journal_info = private;
        for (i = 0, o = obj, r = res; i < objcount; i++, o++) {
                for (j = 0 ; j < o->ioo_bufcnt ; j++, r++) {
                        struct page *page = r->page;
                        /* temp pages are handled in the second pass below */
                        if (r->flags & N_LOCAL_TEMP_PAGE) {
                        if (cmd & OBD_BRW_WRITE) {
                                int err = filter_commit_write(page, 0,
                        lustre_put_page(page);
        current->journal_info = journal_save;
        /* second pass: flush any deferred temporary pages */
        for (i = 0, o = obj, r = res; i < objcount; i++, o++) {
                for (j = 0 ; j < o->ioo_bufcnt ; j++, r++) {
                        if (!(r->flags & N_LOCAL_TEMP_PAGE))
                        err = filter_write_locked_page(r);
1319 static int filter_statfs(struct lustre_handle *conn, struct statfs *statfs)
1321 struct obd_device *obd = class_conn2obd(conn);
1324 RETURN(vfs_statfs(obd->u.filter.fo_sb, statfs));
/*
 * Key/value query interface: supports "blocksize", "blocksize_bits"
 * and "root_ino".  Values are returned by casting the scalar into the
 * *val pointer (historical convention); *vallen gives the logical
 * size.
 *
 * NOTE(review): truncated listing -- braces and the RETURN lines
 * after each match are not visible here.
 */
static int filter_get_info(struct lustre_handle *conn, obd_count keylen,
                           void *key, obd_count *vallen, void **val)
        struct obd_device *obd;
        struct obd_export * export;
        if (!(export = class_conn2export(conn))) {
                CDEBUG(D_IOCTL, "invalid client %Lx\n", conn->addr);
        obd = class_conn2obd(conn);
        if ( keylen == strlen("blocksize") &&
             memcmp(key, "blocksize", keylen) == 0 ) {
                *vallen = sizeof(long);
                *val = (void *)(long)obd->u.filter.fo_sb->s_blocksize;
        if ( keylen == strlen("blocksize_bits") &&
             memcmp(key, "blocksize_bits", keylen) == 0 ){
                *vallen = sizeof(long);
                *val = (void *)(long)obd->u.filter.fo_sb->s_blocksize_bits;
        if ( keylen == strlen("root_ino") &&
             memcmp(key, "root_ino", keylen) == 0 ){
                *vallen = sizeof(obd_id);
                *val = (void *)(obd_id)FILTER_ROOTINO;
        CDEBUG(D_IOCTL, "invalid key\n");
/*
 * Copy object data from @src (on @src_conn) to @dst (on @dst_conn)
 * one page at a time with synchronous obd_brw() read/write pairs,
 * then propagate size/blocks into @dst.
 *
 * NOTE(review): truncated listing -- braces, declarations (err, page,
 * pg), several error checks between the brw calls, the index
 * increment and the page-free/return path are not visible here.
 */
int filter_copy_data(struct lustre_handle *dst_conn, struct obdo *dst,
                     struct lustre_handle *src_conn, struct obdo *src,
                     obd_size count, obd_off offset)
        struct lov_stripe_md srcmd, dstmd;
        unsigned long index = 0;
        memset(&srcmd, 0, sizeof(srcmd));
        memset(&dstmd, 0, sizeof(dstmd));
        srcmd.lmd_object_id = src->o_id;
        dstmd.lmd_object_id = dst->o_id;
        CDEBUG(D_INFO, "src: ino %Ld blocks %Ld, size %Ld, dst: ino %Ld\n",
               (unsigned long long)src->o_id, (unsigned long long)src->o_blocks,
               (unsigned long long)src->o_size, (unsigned long long)dst->o_id);
        page = alloc_page(GFP_USER);
        /* keep the transfer page locked for its whole lifetime */
        while (TryLockPage(page))
                ___wait_on_page(page);
        /* XXX with brw vector I/O, we could batch up reads and writes here,
         *     all we need to do is allocate multiple pages to handle the I/Os
         *     and arrays to handle the request parameters.
         */
        while (index < ((src->o_size + PAGE_SIZE - 1) >> PAGE_SHIFT)) {
                struct io_cb_data *cbd = ll_init_cb();
                pg.count = PAGE_SIZE;
                pg.off = (page->index) << PAGE_SHIFT;
                page->index = index;
                err = obd_brw(OBD_BRW_READ, src_conn, &srcmd, 1, &pg,
                              ll_sync_io_cb, cbd);
                pg.flag = OBD_BRW_CREATE;
                CDEBUG(D_INFO, "Read page %ld ...\n", page->index);
                err = obd_brw(OBD_BRW_WRITE, dst_conn, &dstmd, 1, &pg,
                              ll_sync_io_cb, cbd);
                /* XXX should handle dst->o_size, dst->o_blocks here */
                CDEBUG(D_INFO, "Wrote page %ld ...\n", page->index);
        /* report the copied extent back to the caller */
        dst->o_size = src->o_size;
        dst->o_blocks = src->o_blocks;
        dst->o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
/* Method table exported to the obdclass layer (GNU named-field
 * initializer syntax).
 * NOTE(review): the entries after o_commitrw (o_preallocate,
 * o_migrate, o_copy, o_iterate) follow the apparent end of the
 * initializer; in the original they were presumably inside a
 * disabled (#if 0 / commented) region whose markers are missing
 * from this listing. */
static struct obd_ops filter_obd_ops = {
        o_get_info:    filter_get_info,
        o_setup:       filter_setup,
        o_cleanup:     filter_cleanup,
        o_connect:     filter_connect,
        o_disconnect:  filter_disconnect,
        o_statfs:      filter_statfs,
        o_getattr:     filter_getattr,
        o_create:      filter_create,
        o_setattr:     filter_setattr,
        o_destroy:     filter_destroy,
        o_open:        filter_open,
        o_close:       filter_close,
        o_brw:         filter_pgcache_brw,
        o_punch:       filter_truncate,
        o_preprw:      filter_preprw,
        o_commitrw:    filter_commitrw
        o_preallocate: filter_preallocate_inodes,
        o_migrate:     filter_migrate,
        o_copy:        filter_copy_data,
        o_iterate:     filter_iterate
1477 static int __init obdfilter_init(void)
1479 printk(KERN_INFO "Filtering OBD driver v0.001, info@clusterfs.com\n");
1480 return class_register_type(&filter_obd_ops, OBD_FILTER_DEVICENAME);
1483 static void __exit obdfilter_exit(void)
1485 class_unregister_type(OBD_FILTER_DEVICENAME);
1488 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
1489 MODULE_DESCRIPTION("Lustre Filtering OBD driver v1.0");
1490 MODULE_LICENSE("GPL");
1492 module_init(obdfilter_init);
1493 module_exit(obdfilter_exit);