* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
*/
/*
* lustre/osd-ldiskfs/osd_scrub.c
#include "osd_oi.h"
#include "osd_scrub.h"
-#define HALF_SEC (HZ >> 1)
+#define HALF_SEC msecs_to_jiffies(MSEC_PER_SEC >> 1)
#define OSD_OTABLE_MAX_HASH 0x00000000ffffffffULL
{
struct scrub_file *sf = &scrub->os_file;
- CDEBUG(D_LFSCK, "%.16s: reset OI scrub file, flags = "LPX64"\n",
- osd_scrub2name(scrub), flags);
+ CDEBUG(D_LFSCK, "%.16s: reset OI scrub file, old flags = "
+ LPX64", add flags = "LPX64"\n",
+ osd_scrub2name(scrub), sf->sf_flags, flags);
+
memcpy(sf->sf_uuid, uuid, 16);
sf->sf_status = SS_INIT;
sf->sf_flags |= flags;
+ sf->sf_flags &= ~SF_AUTO;
sf->sf_run_time = 0;
sf->sf_time_latest_start = 0;
sf->sf_time_last_checkpoint = 0;
sf->sf_items_checked = 0;
sf->sf_items_updated = 0;
sf->sf_items_failed = 0;
- sf->sf_items_updated_prior = 0;
+ if (!scrub->os_in_join)
+ sf->sf_items_updated_prior = 0;
+
sf->sf_items_noscrub = 0;
sf->sf_items_igif = 0;
}
{
struct filter_fid_old *ff = &info->oti_ff;
struct dentry *dentry = &info->oti_obj_dentry;
+ struct lu_fid *tfid = &info->oti_fid;
handle_t *jh;
int size = 0;
int rc;
if (dev->od_scrub.os_file.sf_param & SP_DRYRUN)
RETURN(0);
+ if (fid_is_idif(fid) && dev->od_index_in_idif == 0) {
+ struct ost_id *oi = &info->oti_ostid;
+
+ fid_to_ostid(fid, oi);
+ ostid_to_fid(tfid, oi, 0);
+ } else {
+ *tfid = *fid;
+ }
+
/* We want the LMA to fit into the 256-byte OST inode, so operate
* as following:
* 1) read old XATTR_NAME_FID and save the parent FID;
if (IS_ERR(jh)) {
rc = PTR_ERR(jh);
CDEBUG(D_LFSCK, "%s: fail to start trans for convert ff "
- DFID": rc = %d\n", osd_name(dev), PFID(fid), rc);
+ DFID": rc = %d\n", osd_name(dev), PFID(tfid), rc);
RETURN(rc);
}
}
/* 3) make new LMA and add it */
- rc = osd_ea_fid_set(info, inode, fid, LMAC_FID_ON_OST, 0);
+ rc = osd_ea_fid_set(info, inode, tfid, LMAC_FID_ON_OST, 0);
if (rc == 0 && reset)
size = sizeof(struct filter_fid);
else if (rc != 0 && removed)
ldiskfs_journal_stop(jh);
if (rc < 0)
CDEBUG(D_LFSCK, "%s: fail to convert ff "DFID": rc = %d\n",
- osd_name(dev), PFID(fid), rc);
+ osd_name(dev), PFID(tfid), rc);
return rc;
}
}
}
- scrub->os_full_speed = 1;
+ if (!scrub->os_partial_scan)
+ scrub->os_full_speed = 1;
+
ops = DTO_INDEX_INSERT;
idx = osd_oi_fid2idx(dev, fid);
switch (val) {
GOTO(out, rc = 0);
} else {
- scrub->os_full_speed = 1;
+ if (!scrub->os_partial_scan)
+ scrub->os_full_speed = 1;
+
sf->sf_flags |= SF_INCONSISTENT;
/* XXX: If the device is restored from file-level backup, then
sf->sf_items_updated_prior++;
else
sf->sf_items_updated++;
-
- /* The target has been changed, need to be re-loaded. */
- lu_object_purge(info->oti_env, osd2lu_dev(dev), fid);
}
GOTO(out, rc);
bool drop_dryrun = false;
ENTRY;
+ CDEBUG(D_LFSCK, "%.16s: OI scrub prep, flags = 0x%x\n",
+ osd_scrub2name(scrub), flags);
+
down_write(&scrub->os_rwsem);
if (flags & SS_SET_FAILOUT)
sf->sf_param |= SP_FAILOUT;
-
- if (flags & SS_CLEAR_FAILOUT)
+ else if (flags & SS_CLEAR_FAILOUT)
sf->sf_param &= ~SP_FAILOUT;
- if (flags & SS_SET_DRYRUN)
+ if (flags & SS_SET_DRYRUN) {
sf->sf_param |= SP_DRYRUN;
-
- if (flags & SS_CLEAR_DRYRUN && sf->sf_param & SP_DRYRUN) {
+ } else if (flags & SS_CLEAR_DRYRUN && sf->sf_param & SP_DRYRUN) {
sf->sf_param &= ~SP_DRYRUN;
drop_dryrun = true;
}
osd_scrub_file_reset(scrub,
LDISKFS_SB(osd_sb(dev))->s_es->s_uuid, 0);
- if (flags & SS_AUTO) {
+ if (flags & SS_AUTO_FULL) {
scrub->os_full_speed = 1;
+ scrub->os_partial_scan = 0;
sf->sf_flags |= SF_AUTO;
- /* For the case of OI scrub auto triggered, NOT dryrun. */
- sf->sf_param &= ~SP_FAILOUT;
+ } else if (flags & SS_AUTO_PARTIAL) {
+ scrub->os_full_speed = 0;
+ scrub->os_partial_scan = 1;
+ sf->sf_flags |= SF_AUTO;
+ } else if (sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
+ SF_UPGRADE)) {
+ scrub->os_full_speed = 1;
+ scrub->os_partial_scan = 0;
} else {
scrub->os_full_speed = 0;
+ scrub->os_partial_scan = 0;
}
- if (sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT | SF_UPGRADE))
- scrub->os_full_speed = 1;
-
- scrub->os_in_prior = 0;
spin_lock(&scrub->os_lock);
+ scrub->os_in_prior = 0;
scrub->os_waiting = 0;
scrub->os_paused = 0;
+ scrub->os_in_join = 0;
+ scrub->os_full_scrub = 0;
spin_unlock(&scrub->os_lock);
scrub->os_new_checked = 0;
if (drop_dryrun && sf->sf_pos_first_inconsistent != 0)
sf->sf_status = SS_SCANNING;
sf->sf_time_latest_start = cfs_time_current_sec();
sf->sf_time_last_checkpoint = sf->sf_time_latest_start;
+ sf->sf_pos_last_checkpoint = sf->sf_pos_latest_start - 1;
rc = osd_scrub_file_store(scrub);
if (rc == 0) {
spin_lock(&scrub->os_lock);
struct scrub_file *sf = &scrub->os_file;
ENTRY;
+ CDEBUG(D_LFSCK, "%.16s: OI scrub post, result = %d\n",
+ osd_scrub2name(scrub), result);
+
down_write(&scrub->os_rwsem);
spin_lock(&scrub->os_lock);
thread_set_flags(&scrub->os_thread, SVC_STOPPING);
struct osd_device *dev,
struct osd_iit_param *param,
struct osd_idmap_cache **oic,
- int noslot);
+ const bool noslot);
typedef int (*osd_iit_exec_policy)(struct osd_thread_info *info,
struct osd_device *dev,
struct osd_iit_param *param,
struct osd_idmap_cache *oic,
- int *noslot, int rc);
+ bool *noslot, int rc);
static int osd_iit_next(struct osd_iit_param *param, __u32 *pos)
{
static int osd_scrub_next(struct osd_thread_info *info, struct osd_device *dev,
struct osd_iit_param *param,
- struct osd_idmap_cache **oic, int noslot)
+ struct osd_idmap_cache **oic, const bool noslot)
{
struct osd_scrub *scrub = &dev->od_scrub;
struct ptlrpc_thread *thread = &scrub->os_thread;
struct l_wait_info lwi;
lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val), NULL, NULL);
- l_wait_event(thread->t_ctl_waitq,
- !list_empty(&scrub->os_inconsistent_items) ||
- !thread_is_running(thread),
- &lwi);
+ if (likely(lwi.lwi_timeout > 0))
+ l_wait_event(thread->t_ctl_waitq,
+ !list_empty(&scrub->os_inconsistent_items) ||
+ !thread_is_running(thread),
+ &lwi);
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
return 0;
}
- if (noslot != 0)
+ if (noslot)
return SCRUB_NEXT_WAIT;
rc = osd_iit_next(param, &scrub->os_pos_current);
static int osd_preload_next(struct osd_thread_info *info,
struct osd_device *dev, struct osd_iit_param *param,
- struct osd_idmap_cache **oic, int noslot)
+ struct osd_idmap_cache **oic, const bool noslot)
{
struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
struct osd_scrub *scrub;
static int osd_scrub_exec(struct osd_thread_info *info, struct osd_device *dev,
struct osd_iit_param *param,
- struct osd_idmap_cache *oic, int *noslot, int rc)
+ struct osd_idmap_cache *oic, bool *noslot, int rc)
{
struct l_wait_info lwi = { 0 };
struct osd_scrub *scrub = &dev->od_scrub;
return 0;
if (ooc != NULL && osd_scrub_has_window(scrub, ooc)) {
- *noslot = 0;
+ *noslot = false;
return 0;
}
&lwi);
if (ooc != NULL && osd_scrub_has_window(scrub, ooc))
- *noslot = 0;
+ *noslot = false;
else
- *noslot = 1;
+ *noslot = true;
return 0;
}
static int osd_preload_exec(struct osd_thread_info *info,
struct osd_device *dev, struct osd_iit_param *param,
- struct osd_idmap_cache *oic, int *noslot, int rc)
+ struct osd_idmap_cache *oic, bool *noslot, int rc)
{
struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
#define SCRUB_IT_ALL 1
#define SCRUB_IT_CRASH 2
+static void osd_scrub_join(struct osd_device *dev, __u32 flags,
+ bool inconsistent)
+{
+ struct osd_scrub *scrub = &dev->od_scrub;
+ struct ptlrpc_thread *thread = &scrub->os_thread;
+ struct scrub_file *sf = &scrub->os_file;
+ int rc;
+ ENTRY;
+
+ LASSERT(!(flags & SS_AUTO_PARTIAL));
+
+ down_write(&scrub->os_rwsem);
+ scrub->os_in_join = 1;
+ if (flags & SS_SET_FAILOUT)
+ sf->sf_param |= SP_FAILOUT;
+ else if (flags & SS_CLEAR_FAILOUT)
+ sf->sf_param &= ~SP_FAILOUT;
+
+ if (flags & SS_SET_DRYRUN)
+ sf->sf_param |= SP_DRYRUN;
+ else if (flags & SS_CLEAR_DRYRUN)
+ sf->sf_param &= ~SP_DRYRUN;
+
+ if (flags & SS_RESET) {
+ osd_scrub_file_reset(scrub,
+ LDISKFS_SB(osd_sb(dev))->s_es->s_uuid,
+ inconsistent ? SF_INCONSISTENT : 0);
+ sf->sf_status = SS_SCANNING;
+ }
+
+ if (flags & SS_AUTO_FULL) {
+ sf->sf_flags |= SF_AUTO;
+ scrub->os_full_speed = 1;
+ }
+
+ if (sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT | SF_UPGRADE))
+ scrub->os_full_speed = 1;
+ else
+ scrub->os_full_speed = 0;
+
+ scrub->os_new_checked = 0;
+ if (sf->sf_pos_last_checkpoint != 0)
+ sf->sf_pos_latest_start = sf->sf_pos_last_checkpoint + 1;
+ else
+ sf->sf_pos_latest_start = LDISKFS_FIRST_INO(osd_sb(dev)) + 1;
+
+ scrub->os_pos_current = sf->sf_pos_latest_start;
+ sf->sf_time_latest_start = cfs_time_current_sec();
+ sf->sf_time_last_checkpoint = sf->sf_time_latest_start;
+ sf->sf_pos_last_checkpoint = sf->sf_pos_latest_start - 1;
+ rc = osd_scrub_file_store(scrub);
+ if (rc != 0)
+ CDEBUG(D_LFSCK, "%.16s: fail to store scrub file when join "
+ "the OI scrub: rc = %d\n", osd_scrub2name(scrub), rc);
+
+ spin_lock(&scrub->os_lock);
+ scrub->os_waiting = 0;
+ scrub->os_paused = 0;
+ scrub->os_partial_scan = 0;
+ scrub->os_in_join = 0;
+ scrub->os_full_scrub = 0;
+ spin_unlock(&scrub->os_lock);
+ wake_up_all(&thread->t_ctl_waitq);
+ up_write(&scrub->os_rwsem);
+
+ EXIT;
+}
+
static int osd_inode_iteration(struct osd_thread_info *info,
struct osd_device *dev, __u32 max, bool preload)
{
+ struct osd_scrub *scrub = &dev->od_scrub;
+ struct ptlrpc_thread *thread = &scrub->os_thread;
+ struct scrub_file *sf = &scrub->os_file;
osd_iit_next_policy next;
osd_iit_exec_policy exec;
__u32 *pos;
__u32 *count;
- struct osd_iit_param param;
+ struct osd_iit_param param = { NULL };
+ struct l_wait_info lwi = { 0 };
__u32 limit;
- int noslot = 0;
int rc;
+ bool noslot = true;
ENTRY;
+ param.sb = osd_sb(dev);
+ if (preload)
+ goto full;
+
+ while (scrub->os_partial_scan && !scrub->os_in_join) {
+ struct osd_idmap_cache *oic = NULL;
+
+ rc = osd_scrub_next(info, dev, ¶m, &oic, noslot);
+ switch (rc) {
+ case SCRUB_NEXT_EXIT:
+ RETURN(0);
+ case SCRUB_NEXT_CRASH:
+ RETURN(SCRUB_IT_CRASH);
+ case SCRUB_NEXT_FATAL:
+ RETURN(-EINVAL);
+ case SCRUB_NEXT_WAIT: {
+ struct kstatfs *ksfs = &info->oti_ksfs;
+ __u64 saved_flags;
+
+ if (dev->od_full_scrub_ratio == OFSR_NEVER ||
+ unlikely(sf->sf_items_updated_prior == 0))
+ goto wait;
+
+ if (dev->od_full_scrub_ratio == OFSR_DIRECTLY ||
+ scrub->os_full_scrub) {
+ osd_scrub_join(dev, SS_AUTO_FULL | SS_RESET,
+ true);
+ goto full;
+ }
+
+ rc = param.sb->s_op->statfs(param.sb->s_root, ksfs);
+ if (rc == 0) {
+ __u64 used = ksfs->f_files - ksfs->f_ffree;
+
+ do_div(used, sf->sf_items_updated_prior);
+				/* If we hit too many inconsistent OI
+				 * mappings during the partial scan,
+				 * then scan the device completely. */
+ if (used < dev->od_full_scrub_ratio) {
+ osd_scrub_join(dev,
+ SS_AUTO_FULL | SS_RESET, true);
+ goto full;
+ }
+ }
+
+wait:
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) &&
+ cfs_fail_val > 0)
+ continue;
+
+ saved_flags = sf->sf_flags;
+ sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
+ SF_UPGRADE | SF_AUTO);
+ sf->sf_status = SS_COMPLETED;
+ l_wait_event(thread->t_ctl_waitq,
+ !thread_is_running(thread) ||
+ !scrub->os_partial_scan ||
+ scrub->os_in_join ||
+ !list_empty(&scrub->os_inconsistent_items),
+ &lwi);
+ sf->sf_flags = saved_flags;
+ sf->sf_status = SS_SCANNING;
+
+ if (unlikely(!thread_is_running(thread)))
+ RETURN(0);
+
+ if (!scrub->os_partial_scan || scrub->os_in_join)
+ goto full;
+
+ continue;
+ }
+ default:
+ LASSERTF(rc == 0, "rc = %d\n", rc);
+
+ osd_scrub_exec(info, dev, ¶m, oic, &noslot, rc);
+ break;
+ }
+ }
+
+full:
if (!preload) {
- struct osd_scrub *scrub = &dev->od_scrub;
+ l_wait_event(thread->t_ctl_waitq,
+ !thread_is_running(thread) || !scrub->os_in_join,
+ &lwi);
+ if (unlikely(!thread_is_running(thread)))
+ RETURN(0);
+ }
+
+ noslot = false;
+ if (!preload) {
next = osd_scrub_next;
exec = osd_scrub_exec;
pos = &scrub->os_pos_current;
pos = &ooc->ooc_pos_preload;
count = &ooc->ooc_cached_items;
}
- param.sb = osd_sb(dev);
limit = le32_to_cpu(LDISKFS_SB(param.sb)->s_es->s_inodes_count);
while (*pos <= limit && *count < max) {
struct osd_idmap_cache *oic = NULL;
+ struct ldiskfs_group_desc *desc;
param.bg = (*pos - 1) / LDISKFS_INODES_PER_GROUP(param.sb);
+ desc = ldiskfs_get_group_desc(param.sb, param.bg, NULL);
+ if (desc == NULL)
+ RETURN(-EIO);
+
+ ldiskfs_lock_group(param.sb, param.bg);
+ if (desc->bg_flags & cpu_to_le16(LDISKFS_BG_INODE_UNINIT)) {
+ ldiskfs_unlock_group(param.sb, param.bg);
+ *pos = 1 + (param.bg + 1) *
+ LDISKFS_INODES_PER_GROUP(param.sb);
+ continue;
+ }
+ ldiskfs_unlock_group(param.sb, param.bg);
+
param.offset = (*pos - 1) % LDISKFS_INODES_PER_GROUP(param.sb);
param.gbase = 1 + param.bg * LDISKFS_INODES_PER_GROUP(param.sb);
param.bitmap = ldiskfs_read_inode_bitmap(param.sb, param.bg);
if (param.bitmap == NULL) {
CDEBUG(D_LFSCK, "%.16s: fail to read bitmap for %u, "
"scrub will stop, urgent mode\n",
- LDISKFS_SB(param.sb)->s_es->s_volume_name,
- (__u32)param.bg);
+ osd_scrub2name(scrub), (__u32)param.bg);
RETURN(-EIO);
}
while (param.offset < LDISKFS_INODES_PER_GROUP(param.sb) &&
*count < max) {
+ if (param.offset +
+ ldiskfs_itable_unused_count(param.sb, desc) >
+ LDISKFS_INODES_PER_GROUP(param.sb))
+ goto next_group;
+
rc = next(info, dev, ¶m, &oic, noslot);
switch (rc) {
case SCRUB_NEXT_BREAK:
GOTO(out, rc);
}
- if (!scrub->os_full_speed) {
+ if (!scrub->os_full_speed && !scrub->os_partial_scan) {
struct l_wait_info lwi = { 0 };
struct osd_otable_it *it = dev->od_otable_it;
struct osd_otable_cache *ooc = &it->ooi_cache;
char *olm_name;
struct lu_fid olm_fid;
__u16 olm_flags;
+ __u16 olm_namelen;
scandir_t olm_scandir;
filldir_t olm_filldir;
};
static const struct osd_lf_map osd_lf_maps[] = {
/* CATALOGS */
{ CATLIST, { FID_SEQ_LOCAL_FILE, LLOG_CATALOGS_OID, 0 }, OLF_SHOW_NAME,
- NULL, NULL },
+ sizeof(CATLIST) - 1, NULL, NULL },
/* CONFIGS */
{ MOUNT_CONFIGS_DIR, { FID_SEQ_LOCAL_FILE, MGS_CONFIGS_OID, 0 },
- OLF_SCAN_SUBITEMS, osd_ios_general_scan,
- osd_ios_varfid_fill },
+ OLF_SCAN_SUBITEMS, sizeof(MOUNT_CONFIGS_DIR) - 1,
+ osd_ios_general_scan, osd_ios_varfid_fill },
/* NIDTBL_VERSIONS */
{ MGS_NIDTBL_DIR, { 0, 0, 0 }, OLF_SCAN_SUBITEMS,
- osd_ios_general_scan, osd_ios_varfid_fill },
+ sizeof(MGS_NIDTBL_DIR) - 1, osd_ios_general_scan,
+ osd_ios_varfid_fill },
/* PENDING */
- { "PENDING", { 0, 0, 0 }, 0, NULL, NULL },
+ { "PENDING", { 0, 0, 0 }, 0, sizeof("PENDING") - 1, NULL, NULL },
/* ROOT */
- { "ROOT", { FID_SEQ_ROOT, 1, 0 },
- OLF_SCAN_SUBITEMS | OLF_HIDE_FID, osd_ios_ROOT_scan, NULL },
+ { "ROOT", { FID_SEQ_ROOT, FID_OID_ROOT, 0 },
+ OLF_SCAN_SUBITEMS | OLF_HIDE_FID, sizeof("ROOT") - 1,
+ osd_ios_ROOT_scan, NULL },
/* changelog_catalog */
- { CHANGELOG_CATALOG, { 0, 0, 0 }, 0, NULL, NULL },
+ { CHANGELOG_CATALOG, { 0, 0, 0 }, 0, sizeof(CHANGELOG_CATALOG) - 1,
+ NULL, NULL },
/* changelog_users */
- { CHANGELOG_USERS, { 0, 0, 0 }, 0, NULL, NULL },
+ { CHANGELOG_USERS, { 0, 0, 0 }, 0, sizeof(CHANGELOG_USERS) - 1,
+ NULL, NULL },
/* fld */
{ "fld", { FID_SEQ_LOCAL_FILE, FLD_INDEX_OID, 0 }, OLF_SHOW_NAME,
- NULL, NULL },
+ sizeof("fld") - 1, NULL, NULL },
/* last_rcvd */
{ LAST_RCVD, { FID_SEQ_LOCAL_FILE, LAST_RECV_OID, 0 }, OLF_SHOW_NAME,
- NULL, NULL },
-
- /* lfsck_bookmark */
- { "lfsck_bookmark", { 0, 0, 0 }, 0, NULL, NULL },
+ sizeof(LAST_RCVD) - 1, NULL, NULL },
/* lov_objid */
{ LOV_OBJID, { FID_SEQ_LOCAL_FILE, MDD_LOV_OBJ_OID, 0 }, OLF_SHOW_NAME,
- NULL, NULL },
+ sizeof(LOV_OBJID) - 1, NULL, NULL },
/* lov_objseq */
{ LOV_OBJSEQ, { FID_SEQ_LOCAL_FILE, MDD_LOV_OBJ_OSEQ, 0 },
- OLF_SHOW_NAME, NULL, NULL },
+ OLF_SHOW_NAME, sizeof(LOV_OBJSEQ) - 1, NULL, NULL },
/* quota_master */
- { QMT_DIR, { 0, 0, 0 }, OLF_SCAN_SUBITEMS,
+ { QMT_DIR, { 0, 0, 0 }, OLF_SCAN_SUBITEMS, sizeof(QMT_DIR) - 1,
osd_ios_general_scan, osd_ios_varfid_fill },
/* quota_slave */
- { QSD_DIR, { 0, 0, 0 }, OLF_SCAN_SUBITEMS,
+ { QSD_DIR, { 0, 0, 0 }, OLF_SCAN_SUBITEMS, sizeof(QSD_DIR) - 1,
osd_ios_general_scan, osd_ios_varfid_fill },
/* seq_ctl */
{ "seq_ctl", { FID_SEQ_LOCAL_FILE, FID_SEQ_CTL_OID, 0 },
- OLF_SHOW_NAME, NULL, NULL },
+ OLF_SHOW_NAME, sizeof("seq_ctl") - 1, NULL, NULL },
/* seq_srv */
{ "seq_srv", { FID_SEQ_LOCAL_FILE, FID_SEQ_SRV_OID, 0 },
- OLF_SHOW_NAME, NULL, NULL },
+ OLF_SHOW_NAME, sizeof("seq_srv") - 1, NULL, NULL },
/* health_check */
{ HEALTH_CHECK, { FID_SEQ_LOCAL_FILE, OFD_HEALTH_CHECK_OID, 0 },
- OLF_SHOW_NAME, NULL, NULL },
+ OLF_SHOW_NAME, sizeof(HEALTH_CHECK) - 1, NULL, NULL },
+
+ /* LFSCK */
+ { LFSCK_DIR, { 0, 0, 0 }, 0, sizeof(LFSCK_DIR) - 1,
+ osd_ios_general_scan, osd_ios_varfid_fill },
+
+ /* lfsck_bookmark */
+ { LFSCK_BOOKMARK, { 0, 0, 0 }, 0, sizeof(LFSCK_BOOKMARK) - 1,
+ NULL, NULL },
+
+ /* lfsck_layout */
+ { LFSCK_LAYOUT, { 0, 0, 0 }, 0, sizeof(LFSCK_LAYOUT) - 1,
+ NULL, NULL },
/* lfsck_namespace */
- { "lfsck_namespace", { 0, 0, 0 }, 0, NULL, NULL },
+ { LFSCK_NAMESPACE, { 0, 0, 0 }, 0, sizeof(LFSCK_NAMESPACE) - 1,
+ NULL, NULL },
/* OBJECTS, upgrade from old device */
- { OBJECTS, { 0, 0, 0 }, OLF_SCAN_SUBITEMS, osd_ios_OBJECTS_scan, NULL },
+ { OBJECTS, { 0, 0, 0 }, OLF_SCAN_SUBITEMS, sizeof(OBJECTS) - 1,
+ osd_ios_OBJECTS_scan, NULL },
/* lquota_v2.user, upgrade from old device */
- { "lquota_v2.user", { 0, 0, 0 }, 0, NULL, NULL },
+ { "lquota_v2.user", { 0, 0, 0 }, 0, sizeof("lquota_v2.user") - 1,
+ NULL, NULL },
/* lquota_v2.group, upgrade from old device */
- { "lquota_v2.group", { 0, 0, 0 }, 0, NULL, NULL },
+ { "lquota_v2.group", { 0, 0, 0 }, 0, sizeof("lquota_v2.group") - 1,
+ NULL, NULL },
/* LAST_GROUP, upgrade from old device */
{ "LAST_GROUP", { FID_SEQ_LOCAL_FILE, OFD_LAST_GROUP_OID, 0 },
- OLF_SHOW_NAME, NULL, NULL },
+ OLF_SHOW_NAME, sizeof("LAST_GROUP") - 1, NULL, NULL },
/* SLAVE_LOG, llog for destroy slave stripes of striped dir */
{ "SLAVE_LOG", { FID_SEQ_LOCAL_FILE, SLAVE_LLOG_CATALOGS_OID, 0 },
- OLF_SHOW_NAME, NULL, NULL },
+ OLF_SHOW_NAME, sizeof("SLAVE_LOG") - 1, NULL, NULL },
/* lost+found */
{ "lost+found", { FID_SEQ_LOCAL_FILE, OSD_LPF_OID, 0 },
- OLF_SCAN_SUBITEMS, osd_ios_general_scan, osd_ios_lf_fill },
+ OLF_SCAN_SUBITEMS, sizeof("lost+found") - 1,
+ osd_ios_general_scan, osd_ios_lf_fill },
- { NULL, { 0, 0, 0 }, 0, NULL, NULL }
+ { NULL, { 0, 0, 0 }, 0, 0, NULL, NULL }
};
/* Add the new introduced files under .lustre/ in the list in the future. */
static const struct osd_lf_map osd_dl_maps[] = {
/* .lustre/fid */
{ "fid", { FID_SEQ_DOT_LUSTRE, FID_OID_DOT_LUSTRE_OBF, 0 }, 0,
- NULL, NULL },
+ sizeof("fid") - 1, NULL, NULL },
/* .lustre/lost+found */
{ "lost+found", { FID_SEQ_DOT_LUSTRE, FID_OID_DOT_LUSTRE_LPF, 0 }, 0,
- NULL, NULL },
+ sizeof("lost+found") - 1, NULL, NULL },
- { NULL, { 0, 0, 0 }, 0, NULL, NULL }
+ { NULL, { 0, 0, 0 }, 0, 0, NULL, NULL }
};
struct osd_ios_item {
RETURN(0);
for (map = osd_dl_maps; map->olm_name != NULL; map++) {
- if (strlen(map->olm_name) != namelen)
+ if (map->olm_namelen != namelen)
continue;
if (strncmp(map->olm_name, name, namelen) == 0)
RETURN(0);
for (map = osd_lf_maps; map->olm_name != NULL; map++) {
- if (strlen(map->olm_name) != namelen)
+ if (map->olm_namelen != namelen)
continue;
if (strncmp(map->olm_name, name, namelen) == 0)
.oifb_info = info,
.oifb_dev = dev,
.oifb_dentry = dentry };
- struct file *filp = &info->oti_it_ea.oie_file;
+ struct file *filp = &info->oti_file;
struct inode *inode = dentry->d_inode;
const struct file_operations *fops = inode->i_fop;
int rc;
child = osd_ios_lookup_one_len(map->olm_name,
osd_sb(dev)->s_root,
- strlen(map->olm_name));
+ map->olm_namelen);
if (!IS_ERR(child))
dput(child);
else if (PTR_ERR(child) == -ENOENT)
int rc;
ENTRY;
-again:
/* os_lock: sync status between stop and scrub thread */
spin_lock(&scrub->os_lock);
+
+again:
if (thread_is_running(thread)) {
spin_unlock(&scrub->os_lock);
- RETURN(-EALREADY);
- } else if (unlikely(thread_is_stopping(thread))) {
+ if (!(scrub->os_file.sf_flags & SF_AUTO) ||
+ (flags & (SS_AUTO_FULL | SS_AUTO_PARTIAL)))
+ RETURN(-EALREADY);
+
+ osd_scrub_join(dev, flags, false);
+ spin_lock(&scrub->os_lock);
+ if (!thread_is_running(thread))
+ goto again;
+
+ spin_unlock(&scrub->os_lock);
+ RETURN(0);
+ }
+
+ if (unlikely(thread_is_stopping(thread))) {
spin_unlock(&scrub->os_lock);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread),
&lwi);
+ spin_lock(&scrub->os_lock);
goto again;
}
spin_unlock(&scrub->os_lock);
RETURN(0);
}
-int osd_scrub_start(struct osd_device *dev)
+int osd_scrub_start(struct osd_device *dev, __u32 flags)
{
int rc;
ENTRY;
/* od_otable_mutex: prevent curcurrent start/stop */
mutex_lock(&dev->od_otable_mutex);
- rc = do_osd_scrub_start(dev, SS_AUTO);
+ rc = do_osd_scrub_start(dev, flags);
mutex_unlock(&dev->od_otable_mutex);
RETURN(rc == -EALREADY ? 0 : rc);
lu_igif_build(fid, inode->i_ino, inode->i_generation);
rc = osd_ea_fid_set(info, inode, fid, LMAC_NOT_IN_OI, 0);
if (rc != 0) {
- filp_close(filp, 0);
+ filp_close(filp, NULL);
pop_ctxt(&saved, ctxt);
RETURN(rc);
}
scrub->os_inode = igrab(inode);
- filp_close(filp, 0);
+ filp_close(filp, NULL);
pop_ctxt(&saved, ctxt);
rc = osd_scrub_file_load(scrub);
sf->sf_internal_flags = SIF_NO_HANDLE_OLD_FID;
dirty = 1;
} else if (rc != 0) {
- RETURN(rc);
+ GOTO(cleanup_inode, rc);
} else {
if (memcmp(sf->sf_uuid, es->s_uuid, 16) != 0) {
osd_scrub_file_reset(scrub, es->s_uuid,SF_INCONSISTENT);
if (dirty != 0) {
rc = osd_scrub_file_store(scrub);
if (rc != 0)
- RETURN(rc);
+ GOTO(cleanup_inode, rc);
}
/* Initialize OI files. */
rc = osd_oi_init(info, dev);
if (rc < 0)
- RETURN(rc);
+ GOTO(cleanup_inode, rc);
rc = osd_initial_OI_scrub(info, dev);
- if (rc == 0) {
- if (sf->sf_flags & SF_UPGRADE ||
- !(sf->sf_internal_flags & SIF_NO_HANDLE_OLD_FID ||
- sf->sf_success_count > 0)) {
- dev->od_igif_inoi = 0;
- dev->od_check_ff = dev->od_is_ost;
- } else {
- dev->od_igif_inoi = 1;
- dev->od_check_ff = 0;
- }
-
- if (sf->sf_flags & SF_INCONSISTENT)
- /* The 'od_igif_inoi' will be set under the
- * following cases:
- * 1) new created system, or
- * 2) restored from file-level backup, or
- * 3) the upgrading completed.
- *
- * The 'od_igif_inoi' may be cleared by OI scrub
- * later if found that the system is upgrading. */
- dev->od_igif_inoi = 1;
+ if (rc != 0)
+ GOTO(cleanup_oi, rc);
- if (!dev->od_noscrub &&
- ((sf->sf_status == SS_PAUSED) ||
- (sf->sf_status == SS_CRASHED &&
- sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
- SF_UPGRADE | SF_AUTO)) ||
- (sf->sf_status == SS_INIT &&
- sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
- SF_UPGRADE))))
- rc = osd_scrub_start(dev);
+ if (sf->sf_flags & SF_UPGRADE ||
+ !(sf->sf_internal_flags & SIF_NO_HANDLE_OLD_FID ||
+ sf->sf_success_count > 0)) {
+ dev->od_igif_inoi = 0;
+ dev->od_check_ff = dev->od_is_ost;
+ } else {
+ dev->od_igif_inoi = 1;
+ dev->od_check_ff = 0;
}
+ if (sf->sf_flags & SF_INCONSISTENT)
+ /* The 'od_igif_inoi' will be set under the
+ * following cases:
+ * 1) new created system, or
+ * 2) restored from file-level backup, or
+ * 3) the upgrading completed.
+ *
+ * The 'od_igif_inoi' may be cleared by OI scrub
+ * later if found that the system is upgrading. */
+ dev->od_igif_inoi = 1;
+
+ if (!dev->od_noscrub &&
+ ((sf->sf_status == SS_PAUSED) ||
+ (sf->sf_status == SS_CRASHED &&
+ sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
+ SF_UPGRADE | SF_AUTO)) ||
+ (sf->sf_status == SS_INIT &&
+ sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
+ SF_UPGRADE))))
+ rc = osd_scrub_start(dev, SS_AUTO_FULL);
+
+ if (rc != 0)
+ GOTO(cleanup_oi, rc);
+
/* it is possible that dcache entries may keep objects after they are
* deleted by OSD. While it looks safe this can cause object data to
* stay until umount causing failures in tests calculating free space,
* anymore let's just free them after use here */
shrink_dcache_sb(sb);
- RETURN(rc);
+ RETURN(0);
+cleanup_oi:
+ osd_oi_fini(info, dev);
+cleanup_inode:
+ iput(scrub->os_inode);
+ scrub->os_inode = NULL;
+
+ return rc;
}
void osd_scrub_cleanup(const struct lu_env *env, struct osd_device *dev)
start |= SS_CLEAR_DRYRUN;
}
- rc = do_osd_scrub_start(dev, start);
+ rc = do_osd_scrub_start(dev, start & ~SS_AUTO_PARTIAL);
if (rc < 0 && rc != -EALREADY) {
dev->od_otable_it = NULL;
OBD_FREE_PTR(it);
/* high priority inconsistent items list APIs */
+#define SCRUB_BAD_OIMAP_DECAY_INTERVAL 60
+
int osd_oii_insert(struct osd_device *dev, struct osd_idmap_cache *oic,
int insert)
{
oii->oii_cache = *oic;
oii->oii_insert = insert;
+ if (scrub->os_partial_scan) {
+ __u64 now = cfs_time_current_sec();
+
+ /* If there haven't been errors in a long time,
+ * decay old count until either the errors are
+ * gone or we reach the current interval. */
+ while (unlikely(scrub->os_bad_oimap_count > 0 &&
+ scrub->os_bad_oimap_time +
+ SCRUB_BAD_OIMAP_DECAY_INTERVAL < now)) {
+ scrub->os_bad_oimap_count >>= 1;
+ scrub->os_bad_oimap_time +=
+ SCRUB_BAD_OIMAP_DECAY_INTERVAL;
+ }
+
+ scrub->os_bad_oimap_time = now;
+ if (++scrub->os_bad_oimap_count >
+ dev->od_full_scrub_threshold_rate)
+ scrub->os_full_scrub = 1;
+ }
+
spin_lock(&scrub->os_lock);
if (unlikely(!thread_is_running(thread))) {
spin_unlock(&scrub->os_lock);
if (thread_is_running(&scrub->os_thread)) {
cfs_duration_t duration = cfs_time_current() -
scrub->os_time_last_checkpoint;
- __u64 new_checked = scrub->os_new_checked * HZ;
+ __u64 new_checked = msecs_to_jiffies(scrub->os_new_checked *
+ MSEC_PER_SEC);
__u32 rtime = sf->sf_run_time +
cfs_duration_sec(duration + HALF_SEC);