1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/fs/obdfilter/filter.c
6 * Copyright (c) 2001, 2002 Cluster File Systems, Inc.
7 * Author: Peter Braam <braam@clusterfs.com>
8 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * This file is part of Lustre, http://www.lustre.org.
12 * Lustre is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Lustre is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Lustre; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 #define DEBUG_SUBSYSTEM S_FILTER
29 #include <linux/module.h>
30 #include <linux/pagemap.h>
32 #include <linux/dcache.h>
33 #include <linux/obd_class.h>
34 #include <linux/lustre_dlm.h>
35 #include <linux/obd_filter.h>
36 #include <linux/ext3_jbd.h>
37 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
38 #include <linux/extN_jbd.h>
40 #include <linux/quotaops.h>
41 #include <linux/init.h>
42 #include <linux/random.h>
43 #include <linux/stringify.h>
44 #include <linux/lprocfs_status.h>
/* /proc status variable tables, defined in the lprocfs status module. */
46 extern struct lprocfs_vars status_class_var[];
47 extern struct lprocfs_vars status_var_nm_1[];
/* Slab caches for per-open (filter_file_data) and per-dentry
 * (filter_dentry_data) bookkeeping structures. */
49 static kmem_cache_t *filter_open_cache;
50 static kmem_cache_t *filter_dentry_cache;
/* Fixed inode number used for the filter "root" object under D/. */
52 #define FILTER_ROOTINO 2
53 #define FILTER_ROOTINO_STR __stringify(FILTER_ROOTINO)
/* Map (mode & S_IFMT) >> S_SHIFT to the single-letter subdirectory name
 * under O/ used to store objects of that file type (R=regular, D=dir, ...).
 * Slots for unused mode bits are left NULL.
 * NOTE(review): this listing is elided; the closing "};" of the
 * initializer falls outside the visible lines. */
56 static char *obd_type_by_mode[S_IFMT >> S_SHIFT] = {
58 [S_IFREG >> S_SHIFT] "R",
59 [S_IFDIR >> S_SHIFT] "D",
60 [S_IFCHR >> S_SHIFT] "C",
61 [S_IFBLK >> S_SHIFT] "B",
62 [S_IFIFO >> S_SHIFT] "F",
63 [S_IFSOCK >> S_SHIFT] "S",
64 [S_IFLNK >> S_SHIFT] "L"
/* Translate an inode mode to its one-letter O/ subdirectory name. */
67 static inline const char *obd_mode_to_type(int mode)
69 return obd_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
72 /* write the pathname into the string */
/* Formats "O/<type>/<id>" into buf; returns the sprintf length.
 * Caller must supply a buffer large enough for the formatted path. */
73 static int filter_id(char *buf, obd_id id, obd_mode mode)
75 return sprintf(buf, "O/%s/"LPU64, obd_mode_to_type(mode), id);
/* Debug/assert wrapper around dput() for filter-owned dentries. */
78 static inline void f_dput(struct dentry *dentry)
80 /* Can't go inside filter_ddelete because it can block */
81 CDEBUG(D_INODE, "putting %s: %p, count = %d\n",
82 dentry->d_name.name, dentry, atomic_read(&dentry->d_count) - 1);
83 LASSERT(atomic_read(&dentry->d_count) > 0);
88 /* Not racy w.r.t. others, because we are the only user of this dentry */
/* d_release callback: free the filter_dentry_data hung off d_fsdata.
 * NOTE(review): presumably guarded by a NULL check on an elided line —
 * confirm against the full source. */
89 static void filter_drelease(struct dentry *dentry)
92 kmem_cache_free(filter_dentry_cache, dentry->d_fsdata);
/* dentry_operations installed on filter object dentries so that
 * filter_drelease() reclaims d_fsdata when the dentry dies. */
95 struct dentry_operations filter_dops = {
96 .d_release = filter_drelease,
99 /* setup the object store with correct subdirectories */
/* Called at setup time, inside the filter's run context: creates (or
 * re-opens) the on-disk layout — top-level O/, P/ and D/ directories,
 * one O/<type>/ directory per file type, and the D/status file holding
 * the last-used object id.  On first mount (empty status file) the
 * initial lastobjid is written out; otherwise it is read back in.
 * NOTE(review): this listing is elided — error-path labels, several
 * declarations (rc, root, mode, file, inode, lastobjid) and RETURN/EXIT
 * statements fall on lines not shown here. */
100 static int filter_prep(struct obd_device *obd)
102 struct obd_run_ctxt saved;
103 struct filter_obd *filter = &obd->u.filter;
104 struct dentry *dentry;
/* All path operations below are relative to the filter root, via the
 * pushed context's pwd. */
112 push_ctxt(&saved, &filter->fo_ctxt, NULL);
113 dentry = simple_mkdir(current->fs->pwd, "O", 0700);
114 CDEBUG(D_INODE, "got/created O: %p\n", dentry);
115 if (IS_ERR(dentry)) {
116 rc = PTR_ERR(dentry);
117 CERROR("cannot open/create O: rc = %d\n", rc);
120 filter->fo_dentry_O = dentry;
121 dentry = simple_mkdir(current->fs->pwd, "P", 0700);
122 CDEBUG(D_INODE, "got/created P: %p\n", dentry);
123 if (IS_ERR(dentry)) {
124 rc = PTR_ERR(dentry);
125 CERROR("cannot open/create P: rc = %d\n", rc);
129 dentry = simple_mkdir(current->fs->pwd, "D", 0700);
130 CDEBUG(D_INODE, "got/created D: %p\n", dentry);
131 if (IS_ERR(dentry)) {
132 rc = PTR_ERR(dentry);
133 CERROR("cannot open/create D: rc = %d\n", rc);
/* Root object lives at a fixed inode name under D/. */
137 root = simple_mknod(dentry, FILTER_ROOTINO_STR, S_IFREG | 0755);
141 CERROR("OBD filter: cannot open/create root %d: rc = %d\n",
148 * Create directories and/or get dentries for each object type.
149 * This saves us from having to do multiple lookups for each one.
151 for (mode = 0; mode < (S_IFMT >> S_SHIFT); mode++) {
152 char *type = obd_type_by_mode[mode];
155 filter->fo_dentry_O_mode[mode] = NULL;
158 dentry = simple_mkdir(filter->fo_dentry_O, type, 0700);
159 CDEBUG(D_INODE, "got/created O/%s: %p\n", type, dentry);
160 if (IS_ERR(dentry)) {
161 rc = PTR_ERR(dentry);
162 CERROR("cannot create O/%s: rc = %d\n", type, rc);
163 GOTO(out_O_mode, rc);
165 filter->fo_dentry_O_mode[mode] = dentry;
168 file = filp_open("D/status", O_RDWR | O_CREAT, 0700);
169 if ( !file || IS_ERR(file) ) {
171 CERROR("OBD filter: cannot open/create status %s: rc = %d\n",
173 GOTO(out_O_mode, rc);
176 /* steal operations */
/* Cache the backing filesystem's file/inode/address-space operation
 * tables so the filter can call into them directly later. */
177 inode = file->f_dentry->d_inode;
178 filter->fo_fop = file->f_op;
179 filter->fo_iop = inode->i_op;
180 filter->fo_aops = inode->i_mapping->a_ops;
/* Empty status file => fresh store: persist the initial lastobjid.
 * Non-empty => read back the previously persisted value.
 * On-disk value is little-endian in both directions. */
182 if (inode->i_size == 0) {
183 __u64 disk_lastobjid = cpu_to_le64(lastobjid);
184 ssize_t retval = file->f_op->write(file,(char *)&disk_lastobjid,
185 sizeof(disk_lastobjid),
187 if (retval != sizeof(disk_lastobjid)) {
188 CDEBUG(D_INODE,"OBD filter: error writing lastobjid\n");
190 GOTO(out_O_mode, rc = -EIO);
193 __u64 disk_lastobjid;
194 ssize_t retval = file->f_op->read(file, (char *)&disk_lastobjid,
195 sizeof(disk_lastobjid),
197 if (retval != sizeof(disk_lastobjid)) {
198 CDEBUG(D_INODE,"OBD filter: error reading lastobjid\n");
200 GOTO(out_O_mode, rc = -EIO);
202 lastobjid = le64_to_cpu(disk_lastobjid);
204 filter->fo_lastobjid = lastobjid;
209 pop_ctxt(&saved, &filter->fo_ctxt, NULL);
/* Error unwind (labels elided from this listing): drop any per-type
 * dentries already taken, then the O/ dentry itself. */
215 struct dentry *dentry = filter->fo_dentry_O_mode[mode];
218 filter->fo_dentry_O_mode[mode] = NULL;
222 f_dput(filter->fo_dentry_O);
223 filter->fo_dentry_O = NULL;
227 /* cleanup the filter: write last used object id to status file */
/* Inverse of filter_prep(): persists fo_lastobjid to D/status and
 * releases the cached O/<type> and O/ dentries.  Errors are logged
 * but not propagated (void return).
 * NOTE(review): listing is elided — declarations of file/rc/mode and
 * some control-flow lines are not visible here. */
228 static void filter_post(struct obd_device *obd)
230 struct obd_run_ctxt saved;
231 struct filter_obd *filter = &obd->u.filter;
232 __u64 disk_lastobjid;
237 push_ctxt(&saved, &filter->fo_ctxt, NULL);
238 file = filp_open("D/status", O_RDWR | O_CREAT, 0700);
240 CERROR("OBD filter: cannot create status file\n");
/* Stored little-endian, matching the read side in filter_prep(). */
245 disk_lastobjid = cpu_to_le64(filter->fo_lastobjid);
246 rc = file->f_op->write(file, (char *)&disk_lastobjid,
247 sizeof(disk_lastobjid), &file->f_pos);
248 if (rc != sizeof(disk_lastobjid))
249 CERROR("OBD filter: error writing lastobjid: rc = %ld\n", rc);
251 rc = filp_close(file, NULL);
253 CERROR("OBD filter: cannot close status file: rc = %ld\n", rc);
/* Drop the per-type directory dentries cached by filter_prep(). */
255 for (mode = 0; mode < (S_IFMT >> S_SHIFT); mode++) {
256 struct dentry *dentry = filter->fo_dentry_O_mode[mode];
259 filter->fo_dentry_O_mode[mode] = NULL;
262 f_dput(filter->fo_dentry_O);
264 pop_ctxt(&saved, &filter->fo_ctxt, NULL);
/* Allocate the next object id, serialized by fo_objidlock.  The new
 * value is only updated in memory here (see FIXME below); it is
 * persisted at filter_post() time. */
268 static __u64 filter_next_id(struct obd_device *obd)
272 spin_lock(&obd->u.filter.fo_objidlock);
273 id = ++obd->u.filter.fo_lastobjid;
274 spin_unlock(&obd->u.filter.fo_objidlock);
276 /* FIXME: write the lastobjid to disk here */
280 /* how to get files, dentries, inodes from object id's */
281 /* parent i_sem is already held if needed for exclusivity */
/* Look up the dentry for object <id> of file type <type> under the
 * appropriate O/<type>/ parent directory.  Rejects uninitialized
 * devices, object id 0, and a mode with no S_IFMT bits.  When
 * "locked" is clear the parent i_sem is taken around the lookup
 * (an elided line presumably skips the down() when locked is set —
 * confirm against the full source).  Returns the dentry or ERR_PTR. */
282 static struct dentry *filter_fid2dentry(struct obd_device *obd,
283 struct dentry *dparent,
284 __u64 id, __u32 type, int locked)
286 struct super_block *sb = obd->u.filter.fo_sb;
287 struct dentry *dchild;
292 if (!sb || !sb->s_dev) {
293 CERROR("fatal: device not initialized.\n");
294 RETURN(ERR_PTR(-ENXIO));
298 CERROR("fatal: invalid object #0\n");
300 RETURN(ERR_PTR(-ESTALE));
303 if (!(type & S_IFMT)) {
304 CERROR("OBD %s, object "LPU64" has bad type: %o\n",
305 __FUNCTION__, id, type);
306 RETURN(ERR_PTR(-EINVAL));
/* Object name within the type directory is the decimal id. */
309 len = sprintf(name, LPU64, id);
310 CDEBUG(D_INODE, "opening object O/%s/%s\n", obd_mode_to_type(type),
313 down(&dparent->d_inode->i_sem);
314 dchild = lookup_one_len(name, dparent, len);
316 up(&dparent->d_inode->i_sem);
317 if (IS_ERR(dchild)) {
318 CERROR("child lookup error %ld\n", PTR_ERR(dchild));
322 CDEBUG(D_INODE, "got child obj O/%s/%s: %p, count = %d\n",
323 obd_mode_to_type(type), name, dchild,
324 atomic_read(&dchild->d_count));
326 LASSERT(atomic_read(&dchild->d_count) > 0);
/* Return the cached parent directory dentry (O/<type>) for a mode. */
331 static inline struct dentry *filter_parent(struct obd_device *obd,
334 struct filter_obd *filter = &obd->u.filter;
336 return filter->fo_dentry_O_mode[(mode & S_IFMT) >> S_SHIFT];
/* Open the backing file for object <id>/<type> on behalf of an export.
 * Allocates a filter_file_data (handle cookie + file pointer) for the
 * open, and attaches/reuses a filter_dentry_data on the dentry to
 * count concurrent opens.  The ffd is linked onto the export's open
 * list so a disconnect can force-close it.  Returns the struct file
 * or ERR_PTR.
 * NOTE(review): listing is elided — declarations (file, name), some
 * NULL checks and the out_* error labels are on lines not shown. */
339 static struct file *filter_obj_open(struct obd_export *export,
340 __u64 id, __u32 type)
342 struct filter_obd *filter = &export->exp_obd->u.filter;
343 struct super_block *sb = filter->fo_sb;
344 struct dentry *dentry;
345 struct filter_export_data *fed = &export->exp_filter_data;
346 struct filter_dentry_data *fdd;
347 struct filter_file_data *ffd;
348 struct obd_run_ctxt saved;
353 if (!sb || !sb->s_dev) {
354 CERROR("fatal: device not initialized.\n");
355 RETURN(ERR_PTR(-ENXIO));
359 CERROR("fatal: invalid obdo "LPU64"\n", id);
360 RETURN(ERR_PTR(-ESTALE));
363 if (!(type & S_IFMT)) {
364 CERROR("OBD %s, object "LPU64" has bad type: %o\n",
365 __FUNCTION__, id, type);
366 RETURN(ERR_PTR(-EINVAL));
369 ffd = kmem_cache_alloc(filter_open_cache, SLAB_KERNEL);
371 CERROR("obdfilter: out of memory\n");
372 RETURN(ERR_PTR(-ENOMEM));
375 /* We preallocate this to avoid blocking while holding fo_fddlock */
376 fdd = kmem_cache_alloc(filter_dentry_cache, SLAB_KERNEL);
378 CERROR("obdfilter: out of memory\n");
379 GOTO(out_ffd, file = ERR_PTR(-ENOMEM));
/* Build "O/<type>/<id>" and open it within the filter's context. */
382 filter_id(name, id, type);
383 push_ctxt(&saved, &filter->fo_ctxt, NULL);
384 file = filp_open(name, O_RDWR | O_LARGEFILE, 0 /* type? */);
385 pop_ctxt(&saved, &filter->fo_ctxt, NULL);
388 CERROR("error opening %s: rc %d\n", name, PTR_ERR(file));
/* Race-free attach of fdd to the dentry: if another open beat us,
 * free our preallocated fdd and bump the existing open count. */
392 dentry = file->f_dentry;
393 spin_lock(&filter->fo_fddlock);
394 if (dentry->d_fsdata) {
395 spin_unlock(&filter->fo_fddlock);
396 kmem_cache_free(filter_dentry_cache, fdd);
397 fdd = dentry->d_fsdata;
398 LASSERT(kmem_cache_validate(filter_dentry_cache, fdd));
399 /* should only happen during client recovery */
400 if (fdd->fdd_flags & FILTER_FLAG_DESTROY)
401 CDEBUG(D_INODE,"opening destroyed object "LPX64"\n",id);
402 atomic_inc(&fdd->fdd_open_count);
404 atomic_set(&fdd->fdd_open_count, 1);
406 /* If this is racy, then we can use {cmp}xchg and atomic_add */
407 dentry->d_fsdata = fdd;
408 spin_unlock(&filter->fo_fddlock);
/* Random cookie guards the wire handle against stale/forged values. */
411 get_random_bytes(&ffd->ffd_servercookie, sizeof(ffd->ffd_servercookie));
412 ffd->ffd_file = file;
413 file->private_data = ffd;
416 dentry->d_op = &filter_dops;
418 LASSERT(dentry->d_op == &filter_dops);
420 spin_lock(&fed->fed_lock);
421 list_add(&ffd->ffd_export_list, &fed->fed_open_head);
422 spin_unlock(&fed->fed_lock);
424 CDEBUG(D_INODE, "opened objid "LPX64": rc = %p\n", id, file);
/* Error unwind (labels elided): free the fdd, poison and free the ffd. */
430 kmem_cache_free(filter_dentry_cache, fdd);
432 ffd->ffd_servercookie = DEAD_HANDLE_MAGIC;
433 kmem_cache_free(filter_open_cache, ffd);
437 /* Caller must hold i_sem on dir_dentry->d_inode */
/* Unlink the object's inode from its parent directory, inside the
 * filter run context.  Logs (but proceeds) if the link/ref counts
 * look unexpected, and logs any unlink failure.  Returns the
 * vfs_unlink() result. */
438 static int filter_destroy_internal(struct obd_device *obd,
439 struct dentry *dir_dentry,
440 struct dentry *object_dentry)
442 struct obd_run_ctxt saved;
443 struct inode *inode = object_dentry->d_inode;
/* Objects are expected to have exactly one link and one reference
 * at destroy time; anything else suggests a leaked open. */
447 if (inode->i_nlink != 1 || atomic_read(&inode->i_count) != 1) {
448 CERROR("destroying objid %*s nlink = %d, count = %d\n",
449 object_dentry->d_name.len,
450 object_dentry->d_name.name,
451 inode->i_nlink, atomic_read(&inode->i_count));
454 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
455 rc = vfs_unlink(dir_dentry->d_inode, object_dentry);
456 /* XXX unlink from PENDING directory now too */
457 pop_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
460 CERROR("error unlinking objid %*s: rc %d\n",
461 object_dentry->d_name.len,
462 object_dentry->d_name.name, rc);
/* Close one open handle (ffd) on an object.  If this was the last
 * open and the object was marked for deferred destruction
 * (FILTER_FLAG_DESTROY, set by filter_destroy() while opens were
 * outstanding), unlink it now under the parent's i_sem.  Frees the
 * ffd.  NOTE(review): rc2 handling after the unlink is elided. */
467 static int filter_close_internal(struct obd_device *obd,
468 struct filter_file_data *ffd)
470 struct file *filp = ffd->ffd_file;
/* Hold a dentry ref across filp_close so fdd/dentry stay valid. */
471 struct dentry *object_dentry = dget(filp->f_dentry);
472 struct filter_dentry_data *fdd = object_dentry->d_fsdata;
476 LASSERT(filp->private_data == ffd);
479 rc = filp_close(filp, 0);
481 if (atomic_dec_and_test(&fdd->fdd_open_count) &&
482 fdd->fdd_flags & FILTER_FLAG_DESTROY) {
483 struct dentry *dir_dentry = filter_parent(obd, S_IFREG);
485 down(&dir_dentry->d_inode->i_sem);
486 rc2 = filter_destroy_internal(obd, dir_dentry, object_dentry);
489 up(&dir_dentry->d_inode->i_sem);
492 f_dput(object_dentry);
493 kmem_cache_free(filter_open_cache, ffd);
/* OBD connect method: establish a client connection and initialize
 * the per-export open-file list and its lock. */
499 static int filter_connect(struct lustre_handle *conn, struct obd_device *obd,
500 obd_uuid_t cluuid, struct recovd_obd *recovd,
501 ptlrpc_recovery_cb_t recover)
503 struct obd_export *exp;
508 rc = class_connect(conn, obd, cluuid);
511 exp = class_conn2export(conn);
514 INIT_LIST_HEAD(&exp->exp_filter_data.fed_open_head);
515 spin_lock_init(&exp->exp_filter_data.fed_lock);
/* OBD disconnect method: force-close every file the client still has
 * open (dropping fed_lock around the blocking close and retaking it —
 * the list is re-checked from the head each iteration), cancel the
 * client's DLM locks, then tear down the connection. */
524 static int filter_disconnect(struct lustre_handle *conn)
526 struct obd_export *exp = class_conn2export(conn);
527 struct filter_export_data *fed;
532 fed = &exp->exp_filter_data;
533 spin_lock(&fed->fed_lock);
534 while (!list_empty(&fed->fed_open_head)) {
535 struct filter_file_data *ffd;
537 ffd = list_entry(fed->fed_open_head.next, typeof(*ffd),
539 list_del(&ffd->ffd_export_list);
540 spin_unlock(&fed->fed_lock);
542 CERROR("force closing file %*s on disconnect\n",
543 ffd->ffd_file->f_dentry->d_name.len,
544 ffd->ffd_file->f_dentry->d_name.name);
/* filter_close_internal may block; must not hold fed_lock here. */
546 filter_close_internal(exp->exp_obd, ffd);
547 spin_lock(&fed->fed_lock);
549 spin_unlock(&fed->fed_lock);
551 ldlm_cancel_locks_for_export(exp);
552 rc = class_disconnect(conn);
556 /* XXX cleanup preallocated inodes */
560 /* mount the file system (secretly) */
/* OBD setup method: kernel-mounts the backing filesystem named by
 * ioc_inlbuf1 (device) / ioc_inlbuf2 (fstype), records the mount and
 * superblock in the filter state, builds the run context used by all
 * later path operations, prepares the object store (filter_prep), and
 * creates the DLM namespace and ldlm callback client.
 * NOTE(review): listing is elided — error checks after do_kern_mount
 * and ldlm_namespace_new, plus the RETURN/error labels, are not
 * visible here. */
561 static int filter_setup(struct obd_device *obd, obd_count len, void *buf)
563 struct obd_ioctl_data* data = buf;
564 struct filter_obd *filter;
565 struct vfsmount *mnt;
569 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2)
573 mnt = do_kern_mount(data->ioc_inlbuf2, 0, data->ioc_inlbuf1, NULL);
578 filter = &obd->u.filter;;
579 filter->fo_vfsmnt = mnt;
580 filter->fo_fstype = strdup(data->ioc_inlbuf2);
581 filter->fo_sb = mnt->mnt_root->d_inode->i_sb;
582 CERROR("%s: mnt is %p\n", data->ioc_inlbuf1, filter->fo_vfsmnt);
583 /* XXX is this even possible if do_kern_mount succeeded? */
585 GOTO(err_kfree, err = -ENODEV);
/* Run context: all filter path lookups are relative to the mount root. */
587 OBD_SET_CTXT_MAGIC(&filter->fo_ctxt);
588 filter->fo_ctxt.pwdmnt = mnt;
589 filter->fo_ctxt.pwd = mnt->mnt_root;
590 filter->fo_ctxt.fs = get_ds();
592 err = filter_prep(obd);
594 GOTO(err_kfree, err);
595 spin_lock_init(&filter->fo_fddlock);
596 spin_lock_init(&filter->fo_objidlock);
597 INIT_LIST_HEAD(&filter->fo_export_list);
600 ldlm_namespace_new("filter-tgt", LDLM_NAMESPACE_SERVER);
601 if (obd->obd_namespace == NULL)
604 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
605 "filter_ldlm_cb_client", &obd->obd_ldlm_client);
/* Error unwind (labels elided): free the fstype string, unmount. */
610 kfree(filter->fo_fstype);
612 mntput(filter->fo_vfsmnt);
/* OBD cleanup method: force-disconnect any remaining clients, free the
 * DLM namespace, flush dentries and unmount the backing filesystem.
 * NOTE(review): listing is elided — filter_post() is presumably called
 * on an elided line before the unmount (it writes lastobjid back);
 * confirm against the full source. */
622 static int filter_cleanup(struct obd_device *obd)
624 struct super_block *sb;
627 if (!list_empty(&obd->obd_exports)) {
628 CERROR("still has clients!\n");
629 class_disconnect_all(obd);
630 if (!list_empty(&obd->obd_exports)) {
631 CERROR("still has exports after forced cleanup?\n");
636 ldlm_namespace_free(obd->obd_namespace);
638 sb = obd->u.filter.fo_sb;
639 if (!obd->u.filter.fo_sb)
644 shrink_dcache_parent(sb->s_root);
646 mntput(obd->u.filter.fo_vfsmnt);
647 obd->u.filter.fo_sb = 0;
648 kfree(obd->u.filter.fo_fstype);
/* Fill an obdo from the backing inode's attributes, preserving the
 * obdo's own object id and mode-type bits (the inode number and
 * S_IFMT bits must not leak into the obdo).  Device nodes also get
 * their rdev copied out. */
657 static void filter_from_inode(struct obdo *oa, struct inode *inode, int valid)
659 int type = oa->o_mode & S_IFMT;
662 CDEBUG(D_INFO, "src inode %ld (%p), dst obdo %ld valid 0x%08x\n",
663 inode->i_ino, inode, (long)oa->o_id, valid);
664 /* Don't copy the inode number in place of the object ID */
665 obdo_from_inode(oa, inode, valid);
/* Strip the inode's type bits; the saved "type" is presumably
 * restored on an elided line — confirm against the full source. */
666 oa->o_mode &= ~S_IFMT;
669 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
670 obd_rdev rdev = kdev_t_to_nr(inode->i_rdev);
672 oa->o_valid |= OBD_MD_FLRDEV;
/* Validate a wire handle and convert it back to the filter_file_data
 * it points at: the address must be a live slab object in
 * filter_open_cache and its cookie must match the handle's cookie.
 * Returns NULL-ish (via elided paths) on any mismatch. */
678 static struct filter_file_data *filter_handle2ffd(struct lustre_handle *handle)
680 struct filter_file_data *ffd = NULL;
683 if (!handle || !handle->addr)
686 ffd = (struct filter_file_data *)(unsigned long)(handle->addr);
687 if (!kmem_cache_validate(filter_open_cache, (void *)ffd))
690 if (ffd->ffd_servercookie != handle->cookie)
693 LASSERT(ffd->ffd_file->private_data == ffd);
/* Resolve an obdo to its object dentry: prefer the client-supplied
 * open handle (OBD_MD_FLHANDLE) when present and valid, otherwise
 * fall back to a fid2dentry lookup by id/mode.  "what" names the
 * calling operation for error messages.  Returns the dentry (with a
 * reference) or ERR_PTR. */
697 static struct dentry *__filter_oa2dentry(struct lustre_handle *conn,
698 struct obdo *oa, int locked,char *what)
700 struct dentry *dentry = NULL;
702 if (oa->o_valid & OBD_MD_FLHANDLE) {
703 struct lustre_handle *ost_handle = obdo_handle(oa);
704 struct filter_file_data *ffd = filter_handle2ffd(ost_handle);
707 dentry = dget(ffd->ffd_file->f_dentry);
711 struct obd_device *obd = class_conn2obd(conn);
713 CERROR("invalid client "LPX64"\n", conn->addr);
714 RETURN(ERR_PTR(-EINVAL));
716 dentry = filter_fid2dentry(obd, filter_parent(obd, oa->o_mode),
717 oa->o_id, oa->o_mode, locked);
720 if (IS_ERR(dentry)) {
721 CERROR("%s error looking up object: "LPX64"\n", what, oa->o_id);
/* A negative dentry means the object does not exist on disk. */
725 if (!dentry->d_inode) {
726 CERROR("%s on non-existent object: "LPX64"\n", what, oa->o_id);
728 RETURN(ERR_PTR(-ENOENT));
/* Convenience wrapper supplying the caller's name as "what". */
734 #define filter_oa2dentry(conn, oa, locked) __filter_oa2dentry(conn, oa, locked,\
/* OBD getattr method: resolve the obdo to its dentry and copy the
 * inode attributes back into the obdo. */
737 static int filter_getattr(struct lustre_handle *conn, struct obdo *oa,
738 struct lov_stripe_md *md)
740 struct dentry *dentry = NULL;
744 dentry = filter_oa2dentry(conn, oa, 0);
746 RETURN(PTR_ERR(dentry));
748 filter_from_inode(oa, dentry->d_inode, oa->o_valid);
/* OBD setattr method: build an iattr from the obdo (forcing S_IFREG
 * type bits) and apply it via the filesystem's setattr or the generic
 * inode_setattr, inside the filter run context.  After a size change,
 * report back the resulting blocks/ctime/mtime.
 * NOTE(review): listing is elided — the ATTR_SIZE pre-step at line 775
 * (likely a truncate-related vmtruncate/locking action) is missing. */
754 static int filter_setattr(struct lustre_handle *conn, struct obdo *oa,
755 struct lov_stripe_md *md)
757 struct obd_run_ctxt saved;
758 struct obd_device *obd = class_conn2obd(conn);
759 struct dentry *dentry;
765 dentry = filter_oa2dentry(conn, oa, 0);
768 RETURN(PTR_ERR(dentry));
770 iattr_from_obdo(&iattr, oa, oa->o_valid);
/* Objects are stored as regular files regardless of the obdo mode. */
771 iattr.ia_mode = (iattr.ia_mode & ~S_IFMT) | S_IFREG;
772 inode = dentry->d_inode;
775 if (iattr.ia_valid & ATTR_SIZE)
777 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
778 if (inode->i_op->setattr)
779 rc = inode->i_op->setattr(dentry, &iattr);
781 rc = inode_setattr(inode, &iattr);
782 pop_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
783 if (iattr.ia_valid & ATTR_SIZE) {
785 oa->o_valid = OBD_MD_FLBLOCKS | OBD_MD_FLCTIME | OBD_MD_FLMTIME;
786 obdo_from_inode(oa, inode, oa->o_valid);
/* OBD open method: open the object's backing file, return its
 * attributes, and hand the client an opaque handle (ffd address +
 * random server cookie) in the obdo. */
794 static int filter_open(struct lustre_handle *conn, struct obdo *oa,
795 struct lov_stripe_md *ea)
797 struct obd_export *export;
798 struct lustre_handle *handle;
799 struct filter_file_data *ffd;
804 export = class_conn2export(conn);
806 CDEBUG(D_IOCTL, "fatal: invalid client "LPX64"\n", conn->addr);
810 filp = filter_obj_open(export, oa->o_id, oa->o_mode);
812 GOTO(out, rc = PTR_ERR(filp));
814 filter_from_inode(oa, filp->f_dentry->d_inode, oa->o_valid);
/* Wire handle: raw ffd pointer validated later by
 * filter_handle2ffd() via slab check + cookie match. */
816 ffd = filp->private_data;
817 handle = obdo_handle(oa);
818 handle->addr = (__u64)(unsigned long)ffd;
819 handle->cookie = ffd->ffd_servercookie;
820 oa->o_valid |= OBD_MD_FLHANDLE;
/* OBD close method: validate the client's handle, unlink the ffd from
 * the export's open list, and close it (which may trigger a deferred
 * destroy — see filter_close_internal). */
826 static int filter_close(struct lustre_handle *conn, struct obdo *oa,
827 struct lov_stripe_md *ea)
829 struct obd_export *exp;
830 struct filter_file_data *ffd;
831 struct filter_export_data *fed;
835 exp = class_conn2export(conn);
837 CDEBUG(D_IOCTL, "fatal: invalid client "LPX64"\n", conn->addr);
841 if (!(oa->o_valid & OBD_MD_FLHANDLE)) {
842 CERROR("no handle for close of objid "LPX64"\n", oa->o_id);
846 ffd = filter_handle2ffd(obdo_handle(oa));
848 struct lustre_handle *handle = obdo_handle(oa);
849 CERROR("bad handle ("LPX64") or cookie ("LPX64") for close\n",
850 handle->addr, handle->cookie);
854 fed = &exp->exp_filter_data;
855 spin_lock(&fed->fed_lock);
856 list_del(&ffd->ffd_export_list);
857 spin_unlock(&fed->fed_lock);
859 rc = filter_close_internal(exp->exp_obd, ffd);
/* OBD create method: allocate the next object id, mknod the backing
 * file under O/<type>/ inside the filter context, and return the new
 * object's id and attributes in the obdo. */
864 static int filter_create(struct lustre_handle *conn, struct obdo *oa,
865 struct lov_stripe_md **ea)
867 struct obd_device *obd = class_conn2obd(conn);
869 struct obd_run_ctxt saved;
875 CERROR("invalid client "LPX64"\n", conn->addr);
879 if (!(oa->o_mode & S_IFMT)) {
880 CERROR("OBD %s, object "LPU64" has bad type: %o\n",
881 __FUNCTION__, oa->o_id, oa->o_mode);
885 oa->o_id = filter_next_id(obd);
/* Name within the parent dir is the bare decimal id (the O/<type>/
 * prefix comes from the parent dentry, hence filter_id is unused). */
887 //filter_id(name, oa->o_id, oa->o_mode);
888 sprintf(name, LPU64, oa->o_id);
889 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
890 new = simple_mknod(filter_parent(obd, oa->o_mode), name, oa->o_mode);
891 pop_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
893 CERROR("Error mknod obj %s, err %ld\n", name, PTR_ERR(new));
897 /* Set flags for fields we have set in the inode struct */
898 oa->o_valid = OBD_MD_FLID | OBD_MD_FLBLKSZ | OBD_MD_FLBLOCKS |
899 OBD_MD_FLMTIME | OBD_MD_FLATIME | OBD_MD_FLCTIME;
900 filter_from_inode(oa, new->d_inode, oa->o_valid);
/* OBD destroy method: unlink the object, or — if it is still open by
 * someone — mark it FILTER_FLAG_DESTROY so the last close unlinks it.
 * The parent directory i_sem is held across lookup and unlink. */
906 static int filter_destroy(struct lustre_handle *conn, struct obdo *oa,
907 struct lov_stripe_md *ea)
909 struct obd_device *obd = class_conn2obd(conn);
910 struct dentry *dir_dentry, *object_dentry;
911 struct filter_dentry_data *fdd;
916 CERROR("invalid client "LPX64"\n", conn->addr);
920 CDEBUG(D_INODE, "destroying objid "LPX64"\n", oa->o_id);
922 dir_dentry = filter_parent(obd, oa->o_mode);
923 down(&dir_dentry->d_inode->i_sem);
925 object_dentry = filter_oa2dentry(conn, oa, 1);
926 if (IS_ERR(object_dentry))
927 GOTO(out, rc = -ENOENT);
929 fdd = object_dentry->d_fsdata;
930 if (fdd && atomic_read(&fdd->fdd_open_count)) {
931 if (!(fdd->fdd_flags & FILTER_FLAG_DESTROY)) {
932 fdd->fdd_flags |= FILTER_FLAG_DESTROY;
933 /* XXX put into PENDING directory in case of crash */
935 "defer destroy of %dx open objid "LPX64"\n",
936 atomic_read(&fdd->fdd_open_count), oa->o_id);
939 "repeat destroy of %dx open objid "LPX64"\n",
940 atomic_read(&fdd->fdd_open_count), oa->o_id);
941 GOTO(out_dput, rc = 0);
944 rc = filter_destroy_internal(obd, dir_dentry, object_dentry);
946 f_dput(object_dentry);
950 up(&dir_dentry->d_inode->i_sem);
954 /* NB count and offset are used for punch, but not truncate */
/* OBD truncate/punch method: only full truncate (end == EOF) is
 * supported; it is implemented by delegating to filter_setattr()
 * with the size carried in the obdo. */
955 static int filter_truncate(struct lustre_handle *conn, struct obdo *oa,
956 struct lov_stripe_md *lsm,
957 obd_off start, obd_off end)
962 if (end != OBD_OBJECT_EOF)
963 CERROR("PUNCH not supported, only truncate works\n");
965 CDEBUG(D_INODE, "calling truncate for object "LPX64", valid = %x, "
966 "o_size = "LPD64"\n", oa->o_id, oa->o_valid, start);
968 error = filter_setattr(conn, oa, NULL);
/* Bulk read/write via the page cache: opens the object's backing file
 * and, per brw_page, either write()s the page contents at its offset
 * or read()s into it (zero-filling reads past EOF).  A short transfer
 * aborts with -EIO.  Runs entirely inside the filter run context.
 * NOTE(review): listing is elided — the pg/pnum loop-advance lines,
 * file/pg declarations, and error labels are not visible; "pg" and
 * "pnum" appear to index the same array in what is shown. */
972 static int filter_pgcache_brw(int cmd, struct lustre_handle *conn,
973 struct lov_stripe_md *lsm, obd_count oa_bufs,
974 struct brw_page *pga, struct obd_brw_set *set)
976 struct obd_export *export = class_conn2export(conn);
977 struct obd_run_ctxt saved;
978 struct super_block *sb;
979 int pnum; /* index to pages (bufs) */
980 unsigned long retval;
987 CDEBUG(D_IOCTL, "invalid client "LPX64"\n", conn->addr);
991 sb = export->exp_obd->u.filter.fo_sb;
992 push_ctxt(&saved, &export->exp_obd->u.filter.fo_ctxt, NULL);
993 pnum = 0; /* pnum indexes buf 0..num_pages */
995 file = filter_obj_open(export, lsm->lsm_object_id, S_IFREG);
997 GOTO(out, retval = PTR_ERR(file));
999 /* count doubles as retval */
1000 for (pg = 0; pg < oa_bufs; pg++) {
1001 CDEBUG(D_INODE, "OP %d obdo pgno: (%d) (%ld,"LPU64
1002 ") off count ("LPU64",%d)\n",
1003 cmd, pnum, file->f_dentry->d_inode->i_ino,
1004 pga[pnum].off >> PAGE_CACHE_SHIFT, pga[pnum].off,
1005 (int)pga[pnum].count);
1006 if (cmd & OBD_BRW_WRITE) {
1009 off = pga[pnum].off;
1010 buffer = kmap(pga[pnum].pg);
1011 retval = file->f_op->write(file, buffer,
1014 kunmap(pga[pnum].pg);
1015 CDEBUG(D_INODE, "retval %ld\n", retval);
1017 loff_t off = pga[pnum].off;
1018 char *buffer = kmap(pga[pnum].pg);
/* Reads entirely beyond EOF are satisfied with zeros rather
 * than going to the filesystem. */
1020 if (off >= file->f_dentry->d_inode->i_size) {
1021 memset(buffer, 0, pga[pnum].count);
1022 retval = pga[pnum].count;
1024 retval = file->f_op->read(file, buffer,
1025 pga[pnum].count, &off);
1027 kunmap(pga[pnum].pg);
1029 if (retval != pga[pnum].count) {
1030 filp_close(file, 0);
1031 GOTO(out, retval = -EIO);
1033 CDEBUG(D_INODE, "retval %ld\n", retval);
1037 /* sizes and blocks are set by generic_file_write */
1038 /* ctimes/mtimes will follow with a setattr call */
1039 filp_close(file, 0);
1041 /* XXX: do something with callback if it is set? */
1045 pop_ctxt(&saved, &export->exp_obd->u.filter.fo_ctxt, NULL);
1046 error = (retval >= 0) ? 0 : retval;
1051 * Calculate the number of buffer credits needed to write multiple pages in
1052 * a single ext3/extN transaction. No, this shouldn't be here, but as yet
1053 * ext3 doesn't have a nice API for calculating this sort of thing in advance.
1055 * See comment above ext3_writepage_trans_blocks for details. We assume
1056 * no data journaling is being done, but it does allow for all of the pages
1057 * being non-contiguous. If we are guaranteed contiguous pages we could
1058 * reduce the number of (d)indirect blocks a lot.
1060 * With N blocks per page and P pages, for each inode we have at most:
1062 * min(N*P, blocksize/4 + 1) dindirect blocks
1065 * For the entire filesystem, we have at most:
1066 * min(sum(nindir + P), ngroups) bitmap blocks (from the above)
1067 * min(sum(nindir + P), gdblocks) group descriptor blocks (from the above)
1070 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
/* Worst-case journal-credit estimate for writing objcount objects'
 * pages in one transaction; see the derivation in the comment above.
 * NOTE(review): listing is elided — several accumulation lines in the
 * loop and some declarations (i, nbitmaps, ngdblocks) are missing. */
1072 static int ext3_credits_needed(struct super_block *sb, int objcount,
1073 struct obd_ioobj *obj)
1075 struct obd_ioobj *o = obj;
1076 int blockpp = 1 << (PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
1077 int addrpp = EXT3_ADDR_PER_BLOCK(sb) * blockpp;
1080 int needed = objcount + 1;
1083 for (i = 0; i < objcount; i++, o++) {
1084 int nblocks = o->ioo_bufcnt * blockpp;
1085 int ndindirect = min(nblocks, addrpp + 1);
1086 int nindir = nblocks + ndindirect + 1;
1088 nbitmaps += nindir + nblocks;
1089 ngdblocks += nindir + nblocks;
1094 /* Assumes ext3 and extN have same sb_info layout at the start. */
/* Clamp to the filesystem-wide maxima derived above. */
1095 if (nbitmaps > EXT3_SB(sb)->s_groups_count)
1096 nbitmaps = EXT3_SB(sb)->s_groups_count;
1097 if (ngdblocks > EXT3_SB(sb)->s_gdb_count)
1098 ngdblocks = EXT3_SB(sb)->s_gdb_count;
1100 needed += nbitmaps + ngdblocks;
1103 /* We assume that there will be 1 bit set in s_dquot.flags for each
1104 * quota file that is active. This is at least true for now.
1106 needed += hweight32(sb_any_quota_enabled(sb)) *
1107 EXT3_SINGLEDATA_TRANS_BLOCKS;
1113 /* We have to start a huge journal transaction here to hold all of the
1114 * metadata for the pages being written here. This is necessitated by
1115 * the fact that we do lots of prepare_write operations before we do
1116 * any of the matching commit_write operations, so even if we split
1117 * up to use "smaller" transactions none of them could complete until
1118 * all of them were opened. By having a single journal transaction,
1119 * we eliminate duplicate reservations for common blocks like the
1120 * superblock and group descriptors or bitmaps.
1122 * We will start the transaction here, but each prepare_write will
1123 * add a refcount to the transaction, and each commit_write will
1124 * remove a refcount. The transaction will be closed when all of
1125 * the pages have been written.
/* ext3/extN-specific: pick the journal from the superblock, compute the
 * worst-case credits (capped at the journal's per-handle maximum), and
 * start one handle covering the whole bulk write.
 * NOTE(review): listing is elided — the #endif of the extN branch,
 * the "needed" declaration, and the RETURN(handle) are not visible. */
1127 static void *ext3_filter_journal_start(struct filter_obd *filter,
1128 int objcount, struct obd_ioobj *obj,
1129 int niocount, struct niobuf_remote *nb)
1131 journal_t *journal = NULL;
1132 handle_t *handle = NULL;
1135 /* It appears that some kernels have different values for
1136 * EXT*_MAX_GROUP_LOADED (either 8 or 32), so we cannot
1137 * assume anything after s_inode_bitmap_number is the same.
1139 if (!strcmp(filter->fo_fstype, "ext3"))
1140 journal = EXT3_SB(filter->fo_sb)->s_journal;
1141 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1142 else if (!strcmp(filter->fo_fstype, "extN"))
1143 journal = EXTN_SB(filter->fo_sb)->s_journal;
1145 needed = ext3_credits_needed(filter->fo_sb, objcount, obj);
1147 /* The number of blocks we could _possibly_ dirty can very large.
1148 * We reduce our request if it is absurd (and we couldn't get that
1149 * many credits for a single handle anyways).
1151 * At some point we have to limit the size of I/Os sent at one time,
1152 * increase the size of the journal, or we have to calculate the
1153 * actual journal requirements more carefully by checking all of
1154 * the blocks instead of being maximally pessimistic. It remains to
1155 * be seen if this is a real problem or not.
1157 if (needed > journal->j_max_transaction_buffers) {
1158 CERROR("want too many journal credits (%d) using %d instead\n",
1159 needed, journal->j_max_transaction_buffers);
1160 needed = journal->j_max_transaction_buffers;
1164 handle = journal_start(journal, needed);
1167 CERROR("can't get handle for %d credits: rc = %ld\n", needed,
/* Filesystem-dispatching wrapper: stashes (and clears) any journal
 * handle already on the task, then starts the fs-specific transaction.
 * Only ext3/extN are handled; other fstypes yield a NULL handle. */
1173 static void *filter_journal_start(void **journal_save,
1174 struct filter_obd *filter,
1175 int objcount, struct obd_ioobj *obj,
1176 int niocount, struct niobuf_remote *nb)
1178 void *handle = NULL;
1180 /* This may not be necessary - we probably never have a
1181 * transaction started when we enter here, so we can
1182 * remove the saving of the journal state entirely.
1183 * For now leave it in just to see if it ever happens.
1185 *journal_save = current->journal_info;
1186 if (*journal_save) {
1187 CERROR("Already have handle %p???\n", *journal_save);
1189 current->journal_info = NULL;
1192 if (!strcmp(filter->fo_fstype, "ext3") ||
1193 !strcmp(filter->fo_fstype, "extN"))
1194 handle = ext3_filter_journal_start(filter, objcount, obj,
/* Drop the "parent" reference on the bulk-write transaction handle;
 * remaining prepare_write references are released by commit_write. */
1199 static int ext3_filter_journal_stop(void *handle)
1203 /* We got a refcount on the handle for each call to prepare_write,
1204 * so we can drop the "parent" handle here to avoid the need for
1205 * osc to call back into filterobd to close the handle. The
1206 * remaining references will be dropped in commit_write.
1209 rc = journal_stop((handle_t *)handle);
/* Dispatching wrapper paired with filter_journal_start(): stops the
 * fs-specific handle and restores the saved task journal state. */
1215 static int filter_journal_stop(void *journal_save, struct filter_obd *filter,
1220 if (!strcmp(filter->fo_fstype, "ext3") ||
1221 !strcmp(filter->fo_fstype, "extN"))
1222 rc = ext3_filter_journal_stop(handle);
1225 CERROR("error on journal stop: rc = %d\n", rc);
1227 current->journal_info = journal_save;
/* Release a page-cache reference taken by the get_page helpers below. */
1232 static inline void lustre_put_page(struct page *page)
1235 page_cache_release(page);
/* Read the page covering rnb->offset through the page cache via the
 * mapping's readpage, failing with -EIO if it comes back not-uptodate
 * or with an error flag set.  Returns the page or (via elided error
 * paths) an ERR_PTR. */
1239 static struct page *
1240 lustre_get_page_read(struct inode *inode, struct niobuf_remote *rnb)
1242 unsigned long index = rnb->offset >> PAGE_SHIFT;
1243 struct address_space *mapping = inode->i_mapping;
1247 page = read_cache_page(mapping, index,
1248 (filler_t*)mapping->a_ops->readpage, NULL);
1249 if (!IS_ERR(page)) {
1252 if (!PageUptodate(page)) {
1253 CERROR("page index %lu not uptodate\n", index);
1254 GOTO(err_page, rc = -EIO);
1256 if (PageError(page)) {
1257 CERROR("page index %lu has error\n", index);
1258 GOTO(err_page, rc = -EIO);
1264 lustre_put_page(page);
/* Grab (and lock) the page at "index" for writing and run the
 * filesystem's prepare_write over the whole page. */
1268 static struct page *
1269 lustre_get_page_write(struct inode *inode, unsigned long index)
1271 struct address_space *mapping = inode->i_mapping;
1275 page = grab_cache_page(mapping, index); /* locked page */
1277 if (!IS_ERR(page)) {
1279 /* Note: Called with "O" and "PAGE_SIZE" this is essentially
1280 * a no-op for most filesystems, because we write the whole
1281 * page. For partial-page I/O this will read in the page.
1283 rc = mapping->a_ops->prepare_write(NULL, page, 0, PAGE_SIZE);
1285 CERROR("page index %lu, rc = %d\n", index, rc);
1288 GOTO(err_unlock, rc);
1290 /* XXX not sure if we need this if we are overwriting page */
1291 if (PageError(page)) {
1292 CERROR("error on page index %lu, rc = %d\n", index, rc);
1294 GOTO(err_unlock, rc = -EIO);
1301 lustre_put_page(page);
/* 2.5+ compatibility shim for the 2.4-era waitfor_one_page(). */
1305 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1306 int waitfor_one_page(struct page *page)
1308 wait_on_page_locked(page);
/* Complete a write on a page obtained from the helpers above: run the
 * filesystem's commit_write, wait for it on sync inodes, and drop the
 * page reference. */
1313 static int lustre_commit_write(struct page *page, unsigned from, unsigned to)
1315 struct inode *inode = page->mapping->host;
1318 err = page->mapping->a_ops->commit_write(NULL, page, from, to);
1319 if (!err && IS_SYNC(inode))
1320 err = waitfor_one_page(page);
1321 //SetPageUptodate(page); // the client commit_write will do this
1323 SetPageReferenced(page);
1325 lustre_put_page(page);
/* Get a locked page for a remote write at rnb->offset.  First tries a
 * non-blocking grab; if the cache page is already locked by someone
 * else, falls back to a freshly allocated temporary page (marked
 * N_LOCAL_TEMP_PAGE in the local niobuf) instead of blocking — see the
 * XXX below for why that is racy.  On the normal path, runs the
 * filesystem's prepare_write over the niobuf's span of the page.
 * NOTE(review): listing is elided — the pglocked bookkeeping around
 * line 1343 and the final return/error labels are not visible. */
1329 struct page *filter_get_page_write(struct inode *inode,
1330 struct niobuf_remote *rnb,
1331 struct niobuf_local *lnb, int *pglocked)
1333 unsigned long index = rnb->offset >> PAGE_SHIFT;
1334 struct address_space *mapping = inode->i_mapping;
1339 //ASSERT_PAGE_INDEX(index, GOTO(err, rc = -EINVAL));
1341 page = grab_cache_page_nowait(mapping, index); /* locked page */
1343 page = grab_cache_page(mapping, index); /* locked page */
1346 /* This page is currently locked, so get a temporary page instead. */
1347 /* XXX I believe this is a very dangerous thing to do - consider if
1348 * we had multiple writers for the same file (definitely the case
1349 * if we are using this codepath). If writer A locks the page,
1350 * writer B writes to a copy (as here), writer A drops the page
1351 * lock, and writer C grabs the lock before B does, then B will
1352 * later overwrite the data from C, even if C had LDLM locked
1353 * and initiated the write after B did.
1357 CDEBUG(D_PAGE, "ino %ld page %ld locked\n", inode->i_ino,index);
1358 addr = __get_free_pages(GFP_KERNEL, 0); /* locked page */
1360 CERROR("no memory for a temp page\n");
1362 GOTO(err, rc = -ENOMEM);
/* Poison pattern makes use of never-written bytes visible. */
1365 memset((void *)addr, 0xBA, PAGE_SIZE);
1366 page = virt_to_page(addr);
1368 page->index = index;
1369 lnb->flags |= N_LOCAL_TEMP_PAGE;
1370 } else if (!IS_ERR(page)) {
1374 rc = mapping->a_ops->prepare_write(NULL, page,
1375 rnb->offset % PAGE_SIZE,
1378 CERROR("page index %lu, rc = %d\n", index, rc);
1381 GOTO(err_unlock, rc);
1383 /* XXX not sure if we need this if we are overwriting page */
1384 if (PageError(page)) {
1385 CERROR("error on page index %lu, rc = %d\n", index, rc);
1387 GOTO(err_unlock, rc = -EIO);
1394 lustre_put_page(page);
/*
1400 * We need to balance prepare_write() calls with commit_write() calls.
1401 * If the page has been prepared, but we have no data for it, we don't
1402 * want to overwrite valid data on disk, but we still need to zero out
1403 * data for space which was newly allocated. Like part of what happens
1404 * in __block_prepare_write() for newly allocated blocks.
1406 * XXX currently __block_prepare_write() creates buffers for all the
1407 * pages, and the filesystems mark these buffers as BH_New if they
1408 * were newly allocated from disk. We use the BH_New flag similarly.
 */
/* Commit a previously-prepared page. On the 2.4 error path (err != 0) the
 * newly-allocated (BH_New, test elided from this listing) buffers are
 * zeroed first so freshly allocated disk blocks never expose garbage;
 * then the normal lustre_commit_write() runs. */
1410 static int filter_commit_write(struct page *page, unsigned from, unsigned to,
1413 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1415 unsigned block_start, block_end;
1416 struct buffer_head *bh, *head = page->buffers;
1417 unsigned blocksize = head->b_size;
1418 void *addr = page_address(page);
1420 /* debugging: just seeing if this ever happens */
/* NOTE(review): format text reads "obj %ld:%ld" but the arguments are
 * (page->index, i_ino) — index before inode looks swapped relative to
 * the usual ino:index convention; confirm intended order. */
1421 CERROR("called filter_commit_write for obj %ld:%ld on err %d\n",
1422 page->index, page->mapping->host->i_ino, err);
1424 /* Currently one buffer per page, but in the future... */
/* Walk the circular buffer_head list exactly once (terminates when bh
 * wraps back to head with block_start already advanced). */
1425 for (bh = head, block_start = 0; bh != head || !block_start;
1426 block_start = block_end, bh = bh->b_this_page) {
1427 block_end = block_start + blocksize;
/* Zero this block's bytes (guard on BH_New elided from this listing). */
1429 memset(addr + block_start, 0, blocksize);
1433 return lustre_commit_write(page, from, to);
/* Prepare pages for a bulk read or write (OBD preprw method).
 * For each object in OBJ, resolves the object id to a dentry/inode, then
 * for each remote niobuf obtains a page — a write-prepared page for
 * OBD_BRW_WRITE, a read-filled page otherwise — and records page address,
 * offset and length in the corresponding niobuf_local of RES.
 * For writes a journal handle is started up front and returned to the
 * caller via *desc_private (stopped again before returning). On any error
 * the out_clean path unwinds every niobuf prepared so far.
 * NOTE(review): excerpted listing — RETURN statements, some declarations
 * and several closing braces are elided from this view. */
1436 static int filter_preprw(int cmd, struct lustre_handle *conn,
1437 int objcount, struct obd_ioobj *obj,
1438 int niocount, struct niobuf_remote *nb,
1439 struct niobuf_local *res, void **desc_private)
1441 struct obd_run_ctxt saved;
1442 struct obd_device *obd;
1443 struct obd_ioobj *o = obj;
1444 struct niobuf_remote *rnb = nb;
1445 struct niobuf_local *lnb = res;
1446 void *journal_save = NULL;
/* Validate the connection handle before touching anything. */
1452 obd = class_conn2obd(conn);
1454 CDEBUG(D_IOCTL, "invalid client "LPX64"\n", conn->addr);
1457 memset(res, 0, sizeof(*res) * niocount);
/* Switch to the filter's fs context (uid/gid/fs root) for VFS calls. */
1459 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
1461 if (cmd & OBD_BRW_WRITE) {
/* Open one journal transaction covering the whole multi-object write. */
1462 *desc_private = filter_journal_start(&journal_save,
1464 objcount, obj, niocount,
1466 if (IS_ERR(*desc_private))
1467 GOTO(out_ctxt, rc = PTR_ERR(*desc_private));
/* Reserve kmap slots for all pages of this request up front. */
1470 obd_kmap_get(niocount, 1);
1472 for (i = 0; i < objcount; i++, o++) {
1473 struct dentry *dentry;
1474 struct inode *inode;
/* Look up the object file by id under the filter's object directory. */
1477 dentry = filter_fid2dentry(obd, filter_parent(obd, S_IFREG),
1478 o->ioo_id, S_IFREG, 0);
1480 GOTO(out_clean, rc = PTR_ERR(dentry));
1481 inode = dentry->d_inode;
1483 CERROR("trying to BRW to non-existent file "LPU64"\n",
1486 GOTO(out_clean, rc = -ENOENT);
1489 for (j = 0; j < o->ioo_bufcnt; j++, rnb++, lnb++) {
/* First niobuf of the object consumes the lookup reference;
 * subsequent ones take their own (dget) so each can f_dput().
 * (The j==0 guard is elided from this listing.) */
1493 lnb->dentry = dentry;
1495 lnb->dentry = dget(dentry);
1497 if (cmd & OBD_BRW_WRITE)
1498 page = filter_get_page_write(inode, rnb, lnb,
1501 page = lustre_get_page_read(inode, rnb);
1505 GOTO(out_clean, rc = PTR_ERR(page));
/* Publish the kernel mapping for the bulk layer to use. */
1508 lnb->addr = page_address(page);
1509 lnb->offset = rnb->offset;
1511 lnb->len = rnb->len;
1516 if (cmd & OBD_BRW_WRITE) {
1517 int err = filter_journal_stop(journal_save, &obd->u.filter,
1523 pop_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
/* out_clean: unwind every niobuf prepared before the failure. */
1526 while (lnb-- > res) {
1527 CERROR("error cleanup on brw\n");
1528 f_dput(lnb->dentry);
1529 if (cmd & OBD_BRW_WRITE)
/* NOTE(review): filter_commit_write() ends in lustre_commit_write(),
 * which already puts the page; line 1532 puts it again. Presumably an
 * `else` between them is elided from this listing — confirm against
 * the full file before assuming a double-put bug. */
1530 filter_commit_write(lnb->page, 0, PAGE_SIZE, rc);
1532 lustre_put_page(lnb->page);
1534 obd_kmap_put(niocount);
/* Flush a temporary (N_LOCAL_TEMP_PAGE) niobuf to disk: lock and prepare
 * the real page-cache page at the same index, copy the temp page's data
 * into it, and commit the full page. Finally drops the temp page.
 * Returns 0 on success or a negative errno. */
1538 static int filter_write_locked_page(struct niobuf_local *lnb)
1543 lpage = lustre_get_page_write(lnb->dentry->d_inode, lnb->page->index);
1544 if (IS_ERR(lpage)) {
1545 /* It is highly unlikely that we would ever get an error here.
1546 * The page we want to get was previously locked, so it had to
1547 * have already allocated the space, and we were just writing
1548 * over the same data, so there would be no hole in the file.
1550 * XXX: possibility of a race with truncate could exist, need
1551 * to check that. There are no guarantees w.r.t.
1552 * write order even on a local filesystem, although the
1553 * normal response would be to return the number of bytes
1554 * successfully written and leave the rest to the app.
1556 rc = PTR_ERR(lpage);
1557 CERROR("error getting locked page index %ld: rc = %d\n",
1558 lnb->page->index, rc);
1562 /* lpage is kmapped in lustre_get_page_write() above and kunmapped in
1563 * lustre_commit_write() below, lnb->page was kmapped previously in
1564 * filter_get_page_write() and kunmapped in lustre_put_page() below.
/* Whole-page copy: the temp page always holds a full PAGE_SIZE of data. */
1566 memcpy(page_address(lpage), page_address(lnb->page), PAGE_SIZE);
/* lustre_commit_write() consumes the lpage reference. */
1567 rc = lustre_commit_write(lpage, 0, PAGE_SIZE);
1569 CERROR("error committing locked page %ld: rc = %d\n",
1570 lnb->page->index, rc);
/* Release the temporary page allocated in filter_get_page_write(). */
1572 lustre_put_page(lnb->page);
/* Complete a bulk read or write prepared by filter_preprw (OBD commitrw
 * method). First pass: commit/release every ordinary page inside the
 * journal context saved in PRIVATE, deferring N_LOCAL_TEMP_PAGE niobufs.
 * Second pass (outside the transaction, presumably — restore is elided):
 * flush the deferred temp pages via filter_write_locked_page().
 * NOTE(review): excerpted listing — dentry puts, rc handling and several
 * braces are elided from this view. */
1577 static int filter_commitrw(int cmd, struct lustre_handle *conn,
1578 int objcount, struct obd_ioobj *obj,
1579 int niocount, struct niobuf_local *res,
1582 struct obd_run_ctxt saved;
1583 struct obd_ioobj *o;
1584 struct niobuf_local *r;
1585 struct obd_device *obd = class_conn2obd(conn);
1587 int found_locked = 0;
1592 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
/* Re-attach the journal handle started by filter_preprw() to this task. */
1594 journal_save = current->journal_info;
1595 LASSERT(!journal_save);
1597 current->journal_info = private;
1599 for (i = 0, o = obj, r = res; i < objcount; i++, o++) {
1601 for (j = 0 ; j < o->ioo_bufcnt ; j++, r++) {
1602 struct page *page = r->page;
/* Temp pages need the real page's lock; defer to the second pass. */
1607 if (r->flags & N_LOCAL_TEMP_PAGE) {
1612 if (cmd & OBD_BRW_WRITE) {
1613 int err = filter_commit_write(page, 0,
/* Read case (branch guard elided): just drop the page reference. */
1619 lustre_put_page(page);
/* Detach the journal handle again before leaving the transaction scope. */
1626 current->journal_info = journal_save;
/* Second pass: write out the deferred temporary pages. */
1632 for (i = 0, o = obj, r = res; i < objcount; i++, o++) {
1634 for (j = 0 ; j < o->ioo_bufcnt ; j++, r++) {
1636 if (!(r->flags & N_LOCAL_TEMP_PAGE))
1639 err = filter_write_locked_page(r);
1648 pop_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
/* OBD statfs method: query the backing filesystem's statistics and pack
 * them into the wire obd_statfs structure. */
1652 static int filter_statfs(struct lustre_handle *conn, struct obd_statfs *osfs)
1654 struct obd_device *obd = class_conn2obd(conn);
1659 rc = vfs_statfs(obd->u.filter.fo_sb, &sfs);
/* NOTE(review): sfs is packed unconditionally here; the rc check, if any,
 * is elided from this listing. */
1661 statfs_pack(osfs, &sfs);
/* OBD get_info method: answer simple keyed queries about the filter.
 * Supported keys: "blocksize", "blocksize_bits" (both returned as a long
 * smuggled in the *val pointer, not via a buffer) and "root_ino"
 * (FILTER_ROOTINO as an obd_id). Unknown keys log and fail. */
1666 static int filter_get_info(struct lustre_handle *conn, obd_count keylen,
1667 void *key, obd_count *vallen, void **val)
1669 struct obd_device *obd;
1672 obd = class_conn2obd(conn);
1674 CDEBUG(D_IOCTL, "invalid client "LPX64"\n", conn->addr);
/* Exact-length memcmp avoids matching on key prefixes. */
1678 if ( keylen == strlen("blocksize") &&
1679 memcmp(key, "blocksize", keylen) == 0 ) {
1680 *vallen = sizeof(long);
/* Value is returned by-value inside the pointer, not by reference. */
1681 *val = (void *)(long)obd->u.filter.fo_sb->s_blocksize;
1685 if ( keylen == strlen("blocksize_bits") &&
1686 memcmp(key, "blocksize_bits", keylen) == 0 ){
1687 *vallen = sizeof(long);
1688 *val = (void *)(long)obd->u.filter.fo_sb->s_blocksize_bits;
1692 if ( keylen == strlen("root_ino") &&
1693 memcmp(key, "root_ino", keylen) == 0 ){
1694 *vallen = sizeof(obd_id);
1695 *val = (void *)(obd_id)FILTER_ROOTINO;
1699 CDEBUG(D_IOCTL, "invalid key\n");
/* Copy object data from SRC (on src_conn) to DST (on dst_conn), one page
 * at a time through a single bounce page, using synchronous obd_brw
 * read/write pairs. On completion mirrors src's size/blocks into dst and
 * marks them valid. COUNT/OFFSET are accepted but, in the visible code,
 * the loop always walks from page 0 to src->o_size.
 * NOTE(review): excerpted listing — error checks after obd_brw calls,
 * page freeing, and the return are elided from this view. */
1703 int filter_copy_data(struct lustre_handle *dst_conn, struct obdo *dst,
1704 struct lustre_handle *src_conn, struct obdo *src,
1705 obd_size count, obd_off offset)
1708 struct lov_stripe_md srcmd, dstmd;
1709 unsigned long index = 0;
/* Minimal single-stripe metadata: only the object ids are needed here. */
1712 memset(&srcmd, 0, sizeof(srcmd));
1713 memset(&dstmd, 0, sizeof(dstmd));
1714 srcmd.lsm_object_id = src->o_id;
1715 dstmd.lsm_object_id = dst->o_id;
1718 CDEBUG(D_INFO, "src: ino "LPU64" blocks "LPU64", size "LPU64
1719 ", dst: ino "LPU64"\n",
1720 src->o_id, src->o_blocks, src->o_size, dst->o_id);
/* One reusable bounce page for the whole copy. */
1721 page = alloc_page(GFP_USER);
1725 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* 2.4: spin-lock the page ourselves; 2.5+: just wait for unlock. */
1726 while (TryLockPage(page))
1727 ___wait_on_page(page);
1729 wait_on_page_locked(page);
1732 /* XXX with brw vector I/O, we could batch up reads and writes here,
1733 * all we need to do is allocate multiple pages to handle the I/Os
1734 * and arrays to handle the request parameters.
/* Round src size up to whole pages and copy each one. */
1736 while (index < ((src->o_size + PAGE_SIZE - 1) >> PAGE_SHIFT)) {
1738 struct obd_brw_set *set;
1740 set = obd_brw_set_new();
1748 pg.count = PAGE_SIZE;
1749 pg.off = (page->index) << PAGE_SHIFT;
1752 page->index = index;
/* Synchronous read of the source page. */
1753 set->brw_callback = ll_brw_sync_wait;
1754 err = obd_brw(OBD_BRW_READ, src_conn, &srcmd, 1, &pg, set);
1755 obd_brw_set_free(set);
/* Fresh set for the write half of this page. */
1761 set = obd_brw_set_new();
/* Destination blocks may not exist yet; ask the OST to create them. */
1767 pg.flag = OBD_BRW_CREATE;
1768 CDEBUG(D_INFO, "Read page %ld ...\n", page->index);
1770 set->brw_callback = ll_brw_sync_wait;
1771 err = obd_brw(OBD_BRW_WRITE, dst_conn, &dstmd, 1, &pg, set);
1772 obd_brw_set_free(set);
1774 /* XXX should handle dst->o_size, dst->o_blocks here */
1780 CDEBUG(D_INFO, "Wrote page %ld ...\n", page->index);
/* Propagate the source's size metadata to the destination obdo. */
1784 dst->o_size = src->o_size;
1785 dst->o_blocks = src->o_blocks;
1786 dst->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
/* OBD attach hook: register this device's /proc lprocfs entries. */
1792 int filter_attach(struct obd_device *dev, obd_count len, void *data)
1794 return lprocfs_reg_obd(dev, status_var_nm_1, dev);
/* OBD detach hook: tear down the /proc entries created in filter_attach. */
1797 int filter_detach(struct obd_device *dev)
1799 return lprocfs_dereg_obd(dev);
/* Method table exported to the OBD class driver; these entries are the
 * filter's public interface. (GNU "label:" initializer syntax, standard
 * for this kernel era.)
 * NOTE(review): entries after o_commitrw appear without a preceding comma;
 * presumably they sit inside an #if 0 / comment block elided from this
 * listing — confirm against the full file. */
1801 static struct obd_ops filter_obd_ops = {
1802 o_attach: filter_attach,
1803 o_detach: filter_detach,
1804 o_get_info: filter_get_info,
1805 o_setup: filter_setup,
1806 o_cleanup: filter_cleanup,
1807 o_connect: filter_connect,
1808 o_disconnect: filter_disconnect,
1809 o_statfs: filter_statfs,
1810 o_getattr: filter_getattr,
1811 o_create: filter_create,
1812 o_setattr: filter_setattr,
1813 o_destroy: filter_destroy,
1814 o_open: filter_open,
1815 o_close: filter_close,
1816 o_brw: filter_pgcache_brw,
1817 o_punch: filter_truncate,
1818 o_preprw: filter_preprw,
1819 o_commitrw: filter_commitrw
1821 o_preallocate: filter_preallocate_inodes,
1822 o_migrate: filter_migrate,
1823 o_copy: filter_copy_data,
1824 o_iterate: filter_iterate
/* Module init: create the two slab caches used for per-open and per-dentry
 * filter data, then register the filter type with the OBD class driver.
 * On cache-creation failure any cache already created is destroyed
 * (error returns elided from this listing). */
1829 static int __init obdfilter_init(void)
1831 printk(KERN_INFO "Filtering OBD driver v0.001, info@clusterfs.com\n");
1832 filter_open_cache = kmem_cache_create("ll_filter_fdata",
1833 sizeof(struct filter_file_data),
1835 if (!filter_open_cache)
1838 filter_dentry_cache = kmem_cache_create("ll_filter_dentry",
1839 sizeof(struct filter_dentry_data),
1841 if (!filter_dentry_cache) {
/* Unwind the first cache so a failed load leaves no slab behind. */
1842 kmem_cache_destroy(filter_open_cache);
1846 return class_register_type(&filter_obd_ops, status_class_var,
1847 OBD_FILTER_DEVICENAME);
/* Module exit: unregister the OBD type, then destroy both slab caches.
 * kmem_cache_destroy() returning nonzero (2.4 semantics) means objects
 * were still allocated — log it, since that indicates a leak. */
1850 static void __exit obdfilter_exit(void)
1852 class_unregister_type(OBD_FILTER_DEVICENAME);
1853 if (kmem_cache_destroy(filter_dentry_cache))
1854 CERROR("couldn't free obdfilter dentry cache\n");
1855 if (kmem_cache_destroy(filter_open_cache))
1856 CERROR("couldn't free obdfilter open cache\n");
/* Standard kernel module metadata and entry/exit point registration. */
1859 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
1860 MODULE_DESCRIPTION("Lustre Filtering OBD driver v1.0");
1861 MODULE_LICENSE("GPL");
1863 module_init(obdfilter_init);
1864 module_exit(obdfilter_exit);