*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/osd-zfs/osd_handler.c
* Top-level entry points into osd module
struct lu_env env;
int rc;
- LASSERT(site->ls_obj_hash);
-
rc = lu_env_init(&env, LCT_SHRINKER);
if (rc) {
CERROR("%s: can't initialize shrinker env: rc = %d\n",
if (!spa_writeable(dmu_objset_spa(os)) ||
osd->od_dev_set_rdonly || osd->od_prop_rdonly)
- osfs->os_state |= OS_STATE_READONLY;
+ osfs->os_state |= OS_STATFS_READONLY;
return 0;
}
/* ZFS does not support reporting nonrotional status yet, so return
* flag only if user has set nonrotational.
*/
- osfs->os_state |= osd->od_nonrotational ? OS_STATE_NONROT : 0;
+ osfs->os_state |= osd->od_nonrotational ? OS_STATFS_NONROT : 0;
RETURN(0);
}
param->ddp_brw_size = osd->od_max_blksz;
else
param->ddp_brw_size = ONE_MB_BRW_SIZE;
+
+#ifdef HAVE_DMU_OFFSET_NEXT
+ param->ddp_has_lseek_data_hole = osd->od_sync_on_lseek;
+#else
+ param->ddp_has_lseek_data_hole = false;
+#endif
}
/*
RETURN(0);
}
-static struct dt_device_operations osd_dt_ops = {
- .dt_root_get = osd_root_get,
- .dt_statfs = osd_statfs,
- .dt_trans_create = osd_trans_create,
- .dt_trans_start = osd_trans_start,
- .dt_trans_stop = osd_trans_stop,
- .dt_trans_cb_add = osd_trans_cb_add,
- .dt_conf_get = osd_conf_get,
- .dt_sync = osd_sync,
- .dt_commit_async = osd_commit_async,
- .dt_ro = osd_ro,
+/* reserve or free quota for some operation */
+static int osd_reserve_or_free_quota(const struct lu_env *env,
+ struct dt_device *dev,
+ struct lquota_id_info *qi)
+{
+ struct osd_device *osd = osd_dt_dev(dev);
+ struct qsd_instance *qsd = NULL;
+ int rc;
+
+ ENTRY;
+
+ if (qi->lqi_is_blk)
+ qsd = osd->od_quota_slave_dt;
+ else
+ qsd = osd->od_quota_slave_md;
+
+ rc = qsd_reserve_or_free_quota(env, qsd, qi);
+ RETURN(rc);
+}
+
+static const struct dt_device_operations osd_dt_ops = {
+ .dt_root_get = osd_root_get,
+ .dt_statfs = osd_statfs,
+ .dt_trans_create = osd_trans_create,
+ .dt_trans_start = osd_trans_start,
+ .dt_trans_stop = osd_trans_stop,
+ .dt_trans_cb_add = osd_trans_cb_add,
+ .dt_conf_get = osd_conf_get,
+ .dt_sync = osd_sync,
+ .dt_commit_async = osd_commit_async,
+ .dt_ro = osd_ro,
+ .dt_reserve_or_free_quota = osd_reserve_or_free_quota,
};
/*
static int osd_mount(const struct lu_env *env,
struct osd_device *o, struct lustre_cfg *cfg)
{
- char *mntdev = lustre_cfg_string(cfg, 1);
- char *str = lustre_cfg_string(cfg, 2);
- char *svname = lustre_cfg_string(cfg, 4);
+ char *mntdev = lustre_cfg_string(cfg, 1);
+ char *str = lustre_cfg_string(cfg, 2);
+ char *svname = lustre_cfg_string(cfg, 4);
+ time64_t interval = AS_DEFAULT;
dnode_t *rootdn;
- const char *opts;
- int rc;
+ const char *opts;
+ bool resetoi = false;
+ int rc;
+
ENTRY;
if (o->od_os != NULL)
if (rc >= sizeof(o->od_svname))
RETURN(-E2BIG);
+ opts = lustre_cfg_string(cfg, 3);
+
o->od_index_backup_stop = 0;
o->od_index = -1; /* -1 means index is invalid */
rc = server_name2index(o->od_svname, &o->od_index, NULL);
}
if (flags & LMD_FLG_NOSCRUB)
- o->od_auto_scrub_interval = AS_NEVER;
+ interval = AS_NEVER;
}
if (server_name_is_ost(o->od_svname))
if (rc)
GOTO(err, rc);
+ if (opts && strstr(opts, "resetoi"))
+ resetoi = true;
+
+ rc = lprocfs_init_brw_stats(&o->od_brw_stats);
+ if (rc)
+ GOTO(err, rc);
+
o->od_in_init = 1;
- rc = osd_scrub_setup(env, o);
+ rc = osd_scrub_setup(env, o, interval, resetoi);
o->od_in_init = 0;
if (rc)
GOTO(err, rc);
/* currently it's no need to prepare qsd_instance_md for OST */
if (!o->od_is_ost) {
o->od_quota_slave_md = qsd_init(env, o->od_svname,
- &o->od_dt_dev,
- o->od_proc_entry, true);
+ &o->od_dt_dev, o->od_proc_entry,
+ true, false);
if (IS_ERR(o->od_quota_slave_md)) {
rc = PTR_ERR(o->od_quota_slave_md);
o->od_quota_slave_md = NULL;
}
o->od_quota_slave_dt = qsd_init(env, o->od_svname, &o->od_dt_dev,
- o->od_proc_entry, false);
+ o->od_proc_entry, false, false);
if (IS_ERR(o->od_quota_slave_dt)) {
if (o->od_quota_slave_md != NULL) {
#endif
/* parse mount option "noacl", and enable ACL by default */
- opts = lustre_cfg_string(cfg, 3);
if (opts == NULL || strstr(opts, "noacl") == NULL)
o->od_posix_acl = 1;
osd_unlinked_drain(env, o);
-err:
- if (rc && o->od_os) {
- osd_dmu_objset_disown(o->od_os, B_TRUE, o);
- o->od_os = NULL;
- }
+ RETURN(0);
+
+err:
RETURN(rc);
}
o->od_dt_dev.dd_ops = &osd_dt_ops;
sema_init(&o->od_otable_sem, 1);
INIT_LIST_HEAD(&o->od_ios_list);
- o->od_auto_scrub_interval = AS_DEFAULT;
+ o->od_sync_on_lseek = B_TRUE;
/* ZFS does not support reporting nonrotional status yet, so this flag
* is only set if explicitly set by the user.
}
static struct lu_device *osd_device_fini(const struct lu_env *env,
- struct lu_device *dev);
+ struct lu_device *d)
+{
+ struct osd_device *o = osd_dev(d);
+ int rc;
+
+ ENTRY;
+ osd_index_backup(env, o, false);
+ if (o->od_os) {
+ osd_objset_unregister_callbacks(o);
+ if (!o->od_dt_dev.dd_rdonly) {
+ osd_sync(env, lu2dt_dev(d));
+ txg_wait_callbacks(
+ spa_get_dsl(dmu_objset_spa(o->od_os)));
+ }
+ }
+
+	/* now with all the callbacks completed we can clean up the rest */
+ osd_shutdown(env, o);
+ osd_scrub_cleanup(env, o);
+
+ rc = osd_procfs_fini(o);
+ if (rc) {
+ CERROR("proc fini error %d\n", rc);
+ RETURN(ERR_PTR(rc));
+ }
+
+ if (o->od_os)
+ osd_umount(env, o);
+
+ RETURN(NULL);
+}
+
+
+static struct lu_device *osd_device_free(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct osd_device *o = osd_dev(d);
+
+ ENTRY;
+ /* XXX: make osd top device in order to release reference */
+ if (d->ld_site) {
+ d->ld_site->ls_top_dev = d;
+ lu_site_purge(env, d->ld_site, -1);
+ lu_site_print(env, d->ld_site, &d->ld_site->ls_obj_hash.nelems,
+ D_ERROR, lu_cdebug_printer);
+ }
+ if (o->od_site.ls_bottom_dev)
+ lu_site_fini(&o->od_site);
+ dt_device_fini(&o->od_dt_dev);
+ OBD_FREE_PTR(o);
+
+ RETURN(NULL);
+}
static struct lu_device *osd_device_alloc(const struct lu_env *env,
struct lu_device_type *type,
rc = osd_device_init0(env, dev, cfg);
if (rc == 0) {
rc = osd_mount(env, dev, cfg);
- if (rc)
+ if (rc) {
osd_device_fini(env, osd2lu_dev(dev));
- }
- if (rc)
+ osd_device_free(env, osd2lu_dev(dev));
+ dev = NULL;
+ }
+ } else {
dt_device_fini(&dev->od_dt_dev);
+ }
}
- if (unlikely(rc != 0))
+ if (unlikely(rc != 0) && dev)
OBD_FREE_PTR(dev);
return rc == 0 ? osd2lu_dev(dev) : ERR_PTR(rc);
}
-static struct lu_device *osd_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct osd_device *o = osd_dev(d);
- ENTRY;
-
- /* XXX: make osd top device in order to release reference */
- d->ld_site->ls_top_dev = d;
- lu_site_purge(env, d->ld_site, -1);
- if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
- lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
- }
- lu_site_fini(&o->od_site);
- dt_device_fini(&o->od_dt_dev);
- OBD_FREE_PTR(o);
-
- RETURN (NULL);
-}
-
-static struct lu_device *osd_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- struct osd_device *o = osd_dev(d);
- int rc;
- ENTRY;
-
-
- if (o->od_os) {
- osd_objset_unregister_callbacks(o);
- if (!o->od_dt_dev.dd_rdonly) {
- osd_sync(env, lu2dt_dev(d));
- txg_wait_callbacks(
- spa_get_dsl(dmu_objset_spa(o->od_os)));
- }
- }
-
- /* now with all the callbacks completed we can cleanup the remainings */
- osd_shutdown(env, o);
- osd_scrub_cleanup(env, o);
-
- rc = osd_procfs_fini(o);
- if (rc) {
- CERROR("proc fini error %d\n", rc);
- RETURN(ERR_PTR(rc));
- }
-
- if (o->od_os)
- osd_umount(env, o);
-
- RETURN(NULL);
-}
-
static int osd_device_init(const struct lu_env *env, struct lu_device *d,
const char *name, struct lu_device *next)
{
RETURN(rc);
}
-struct lu_device_operations osd_lu_ops = {
+/**
+ * Implementation of lu_device_operations::ldo_fid_alloc() for OSD
+ *
+ * Allocate FID.
+ *
+ * see include/lu_object.h for the details.
+ */
+static int osd_fid_alloc(const struct lu_env *env, struct lu_device *d,
+ struct lu_fid *fid, struct lu_object *parent,
+ const struct lu_name *name)
+{
+ struct osd_device *osd = osd_dev(d);
+
+ return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
+}
+
+const struct lu_device_operations osd_lu_ops = {
.ldo_object_alloc = osd_object_alloc,
.ldo_process_config = osd_process_config,
.ldo_recovery_complete = osd_recovery_complete,
.ldo_prepare = osd_prepare,
+ .ldo_fid_alloc = osd_fid_alloc,
};
static void osd_type_start(struct lu_device_type *t)
{
}
-int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
- struct lu_fid *fid, struct md_op_data *op_data)
-{
- struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);
-
- return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
-}
-
-static struct lu_device_type_operations osd_device_type_ops = {
+static const struct lu_device_type_operations osd_device_type_ops = {
.ldto_init = osd_type_init,
.ldto_fini = osd_type_fini,
.o_owner = THIS_MODULE,
.o_connect = osd_obd_connect,
.o_disconnect = osd_obd_disconnect,
- .o_fid_alloc = osd_fid_alloc
};
static int __init osd_init(void)
if (rc)
return rc;
- rc = class_register_type(&osd_obd_device_ops, NULL, true, NULL,
+ rc = class_register_type(&osd_obd_device_ops, NULL, true,
LUSTRE_OSD_ZFS_NAME, &osd_device_type);
if (rc)
lu_kmem_fini(osd_caches);