* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2016, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <linux/module.h>
#include <linux/kthread.h>
#include <obd_class.h>
-#include <uapi/linux/lustre_ioctl.h>
+#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_mds.h>
#include <obd_support.h>
#include <lu_object.h>
-#include <uapi/linux/lustre_param.h>
+#include <uapi/linux/lustre/lustre_param.h>
#include <lustre_fid.h>
#include <lustre_nodemap.h>
#include <lustre_barrier.h>
/* LU-8040 Set defaults here, before values configs */
mdd->mdd_cl.mc_flags = 0; /* off by default */
mdd->mdd_cl.mc_mask = CHANGELOG_DEFMASK;
+ mdd->mdd_cl.mc_deniednext = 60; /* 60 secs by default */
dev = lustre_cfg_string(lcfg, 0);
if (dev == NULL)
RETURN(rc);
mdd->mdd_atime_diff = MAX_ATIME_DIFF;
- /* sync permission changes */
- mdd->mdd_sync_permission = 1;
+ /* sync permission changes */
+ mdd->mdd_sync_permission = 1;
+ /* enable changelog garbage collection */
+ mdd->mdd_changelog_gc = 0;
+ /* with a significant amount of idle time */
+ mdd->mdd_changelog_max_idle_time = CHLOG_MAX_IDLE_TIME;
+ /* or a significant amount of late indexes */
+ mdd->mdd_changelog_max_idle_indexes = CHLOG_MAX_IDLE_INDEXES;
+ /* with a reasonable interval between each check */
+ mdd->mdd_changelog_min_gc_interval = CHLOG_MIN_GC_INTERVAL;
+ /* with a very few number of free entries */
+ mdd->mdd_changelog_min_free_cat_entries = CHLOG_MIN_FREE_CAT_ENTRIES;
dt_conf_get(env, mdd->mdd_child, &mdd->mdd_dt_conf);
mdd->mdd_cl.mc_index = 0;
spin_lock_init(&mdd->mdd_cl.mc_lock);
- mdd->mdd_cl.mc_starttime = cfs_time_current_64();
+ mdd->mdd_cl.mc_starttime = ktime_get();
spin_lock_init(&mdd->mdd_cl.mc_user_lock);
mdd->mdd_cl.mc_lastuser = 0;
/* Some records were purged, so reset repeat-access time (so we
record new mtime update records, so users can see a file has been
changed since the last purge) */
- mdd->mdd_cl.mc_starttime = cfs_time_current_64();
+ mdd->mdd_cl.mc_starttime = ktime_get();
rc = llog_cancel(env, ctxt, (struct llog_cookie *)&endrec, 0);
out:
ENTRY;
if (mdd->mdd_cl.mc_mask & (1 << CL_MARK)) {
- mdd->mdd_cl.mc_starttime = cfs_time_current_64();
+ mdd->mdd_cl.mc_starttime = ktime_get();
RETURN(0);
}
llog_ctxt_put(ctxt);
/* assume on or off event; reset repeat-access time */
- mdd->mdd_cl.mc_starttime = cfs_time_current_64();
+ mdd->mdd_cl.mc_starttime = ktime_get();
RETURN(rc);
}
lfsck_degister(env, m->mdd_bottom);
mdd_hsm_actions_llog_fini(env, m);
mdd_changelog_fini(env, m);
- orph_index_fini(env, m);
+ mdd_orphan_index_fini(env, m);
mdd_dot_lustre_cleanup(env, m);
if (mdd2obd_dev(m)->u.obt.obt_nodemap_config_file) {
nm_config_file_deregister_tgt(env,
break;
case LCFG_PRE_CLEANUP:
rc = next->ld_ops->ldo_process_config(env, next, cfg);
- mdd_generic_thread_stop(&m->mdd_orph_cleanup_thread);
+ mdd_generic_thread_stop(&m->mdd_orphan_cleanup_thread);
break;
case LCFG_CLEANUP:
rc = next->ld_ops->ldo_process_config(env, next, cfg);
}
+/*
+ * Recovery-complete callback for the MDD layer: drain the orphan
+ * directory (only when the backing device is writable) and then
+ * propagate the notification to the child device. Returns the child's
+ * ldo_recovery_complete() result.
+ */
static int mdd_recovery_complete(const struct lu_env *env,
-                                 struct lu_device *d)
+				 struct lu_device *d)
{
-        struct mdd_device *mdd = lu2mdd_dev(d);
+	struct mdd_device *mdd = lu2mdd_dev(d);
	struct lu_device *next;
-        int rc;
-        ENTRY;
+	int rc;
+	ENTRY;
-        LASSERT(mdd != NULL);
+	LASSERT(mdd != NULL);
	next = &mdd->mdd_child->dd_lu_dev;
-	/* XXX: orphans handling. */
+	/* skip orphan cleanup on a read-only bottom device: it would write */
	if (!mdd->mdd_bottom->dd_rdonly)
		mdd_orphan_cleanup(env, mdd);
-        rc = next->ld_ops->ldo_recovery_complete(env, next);
+	rc = next->ld_ops->ldo_recovery_complete(env, next);
-        RETURN(rc);
+	RETURN(rc);
}
int mdd_local_file_create(const struct lu_env *env, struct mdd_device *mdd,
mdd->mdd_root_fid = fid;
}
- rc = orph_index_init(env, mdd);
+ rc = mdd_orphan_index_init(env, mdd);
if (rc < 0)
GOTO(out_dot, rc);
out_changelog:
mdd_changelog_fini(env, mdd);
out_orph:
- orph_index_fini(env, mdd);
+ mdd_orphan_index_fini(env, mdd);
out_dot:
if (mdd_seq_site(mdd)->ss_node_id == 0)
mdd_dot_lustre_cleanup(env, mdd);
RETURN(rc);
}
+/*
+ * Replaces mdd_maxeasize_get(): instead of copying out the single
+ * ddp_max_ea_size field, hand callers a pointer to the whole
+ * dt_device_param cached in the mdd device (filled by dt_conf_get()
+ * at setup), so all backend parameters are reachable through one hook.
+ */
-static int mdd_maxeasize_get(const struct lu_env *env, struct md_device *m,
-                             int *easize)
+static const struct dt_device_param *mdd_dtconf_get(const struct lu_env *env,
+						    struct md_device *m)
{
	struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
-        ENTRY;
-        *easize = mdd->mdd_dt_conf.ddp_max_ea_size;
-
-        RETURN(0);
+	/* const view of the cached config; lifetime matches the mdd device */
+	return &mdd->mdd_dt_conf;
}
static int mdd_llog_ctxt_get(const struct lu_env *env, struct md_device *m,
}
*id = rec->cur_id = ++mdd->mdd_cl.mc_lastuser;
rec->cur_endrec = mdd->mdd_cl.mc_index;
+
+ rec->cur_time = (__u32)get_seconds();
+ if (OBD_FAIL_CHECK(OBD_FAIL_TIME_IN_CHLOG_USER))
+ rec->cur_time = 0;
+
spin_unlock(&mdd->mdd_cl.mc_user_lock);
rc = llog_cat_add(env, ctxt->loc_handle, &rec->cur_hdr, NULL);
RETURN(rc);
}
-static int mdd_changelog_user_purge(const struct lu_env *env,
- struct mdd_device *mdd, __u32 id)
+int mdd_changelog_user_purge(const struct lu_env *env,
+ struct mdd_device *mdd, __u32 id)
{
struct mdd_changelog_user_purge mcup = {
.mcup_id = id,
* We now know the record to flush.
*/
rec->cur_endrec = mcuc->mcuc_endrec;
+
+ rec->cur_time = (__u32)get_seconds();
+ if (OBD_FAIL_CHECK(OBD_FAIL_TIME_IN_CHLOG_USER))
+ rec->cur_time = 0;
+
mcuc->mcuc_flush = true;
CDEBUG(D_IOCTL, "Rewriting changelog user %u endrec to %llu\n",
barrier_exit(mdd->mdd_bottom);
RETURN(rc);
}
- case OBD_IOC_GET_MNTOPT: {
- mntopt_t *mntopts = (mntopt_t *)karg;
- *mntopts = mdd->mdd_dt_conf.ddp_mntopts;
- RETURN(0);
- }
case OBD_IOC_START_LFSCK: {
rc = lfsck_start(env, mdd->mdd_bottom,
(struct lfsck_start_param *)karg);
.mdo_root_get = mdd_root_get,
.mdo_llog_ctxt_get = mdd_llog_ctxt_get,
.mdo_iocontrol = mdd_iocontrol,
- .mdo_maxeasize_get = mdd_maxeasize_get,
+ .mdo_dtconf_get = mdd_dtconf_get,
};
static struct lu_device_type_operations mdd_device_type_ops = {