* GPL HEADER END
*/
/*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2013, Intel Corporation.
*/
/*
* lustre/osd-ldiskfs/osd_scrub.c
#define HALF_SEC (CFS_HZ >> 1)
+#define OSD_OTABLE_MAX_HASH 0x00000000ffffffffULL
+
#define SCRUB_NEXT_BREAK 1 /* exit current loop and process next group */
#define SCRUB_NEXT_CONTINUE 2 /* skip current object and process next bit */
#define SCRUB_NEXT_EXIT 3 /* exit all the loops */
return scrub->os_pos_current < ooc->ooc_pos_preload + SCRUB_WINDOW_SIZE;
}
+/**
+ * update/insert/delete the specified OI mapping (@fid @id) according to the ops
+ *
+ * \retval 1, changed nothing
+ * \retval 0, changed successfully
+ * \retval -ve, on error
+ */
static int osd_scrub_refresh_mapping(struct osd_thread_info *info,
struct osd_device *dev,
const struct lu_fid *fid,
RETURN(-ENOMEM);
}
- if (ops == DTO_INDEX_UPDATE) {
+ switch (ops) {
+ case DTO_INDEX_UPDATE:
rc = iam_update(jh, bag, (const struct iam_key *)oi_fid,
(struct iam_rec *)oi_id, ipd);
- } else {
+ if (unlikely(rc == -ENOENT)) {
+ /* Some unlink thread may have removed the OI mapping. */
+ rc = 1;
+ }
+ break;
+ case DTO_INDEX_INSERT:
rc = iam_insert(jh, bag, (const struct iam_key *)oi_fid,
(struct iam_rec *)oi_id, ipd);
- if (rc == -EEXIST) {
+ if (unlikely(rc == -EEXIST)) {
rc = 1;
/* XXX: There are trouble things when adding OI
* mapping for IGIF object, which may cause
*
* Anyway, it is rare, only exists in theory. */
}
+ break;
+ case DTO_INDEX_DELETE:
+ rc = iam_delete(jh, bag, (const struct iam_key *)oi_fid, ipd);
+ if (rc == -ENOENT) {
+ /* It is normal that the unlink thread has removed the
+ * OI mapping already. */
+ rc = 1;
+ }
+ break;
+ default:
+ LASSERTF(0, "Unexpected ops %d\n", ops);
+ break;
}
osd_ipd_put(info->oti_env, bag, ipd);
ldiskfs_journal_stop(jh);
if (fid_is_igif(fid))
sf->sf_items_igif++;
- if (val == SCRUB_NEXT_NOLMA && !dev->od_handle_nolma)
+ if ((val == SCRUB_NEXT_NOLMA) &&
+ (!dev->od_handle_nolma || OBD_FAIL_CHECK(OBD_FAIL_FID_NOLMA)))
GOTO(out, rc = 0);
if ((oii != NULL && oii->oii_insert) || (val == SCRUB_NEXT_NOLMA))
GOTO(out, rc);
}
- /* Prevent the inode to be unlinked during OI scrub. */
- mutex_lock(&inode->i_mutex);
+ /* Check whether the inode was unlinked during the OI scrub. */
if (unlikely(inode->i_nlink == 0)) {
- mutex_unlock(&inode->i_mutex);
iput(inode);
GOTO(out, rc = 0);
}
ops = DTO_INDEX_INSERT;
idx = osd_oi_fid2idx(dev, fid);
if (val == SCRUB_NEXT_NOLMA) {
- rc = osd_ea_fid_set(info, inode, fid);
+ sf->sf_flags |= SF_UPGRADE;
+ scrub->os_full_speed = 1;
+ rc = osd_ea_fid_set(info, inode, fid, 0);
if (rc != 0)
GOTO(out, rc);
+
+ if (!(sf->sf_flags & SF_INCONSISTENT))
+ dev->od_igif_inoi = 0;
} else {
- sf->sf_flags |= SF_RECREATED | SF_INCONSISTENT;
+ sf->sf_flags |= SF_RECREATED;
+ scrub->os_full_speed = 1;
if (unlikely(!ldiskfs_test_bit(idx, sf->sf_oi_bitmap)))
ldiskfs_set_bit(idx, sf->sf_oi_bitmap);
}
GOTO(out, rc = 0);
} else {
sf->sf_flags |= SF_INCONSISTENT;
+ scrub->os_full_speed = 1;
+
+ /* XXX: If the device is restored from file-level backup, then
+ * some IGIFs may have been already in OI files, and some
+ * may be not yet. Means upgrading from 1.8 may be partly
+ * processed, but some clients may hold some immobilized
+ * IGIFs, and use them to access related objects. Under
+ * such case, OSD does not know whether a given IGIF has
+ * been processed or to be processed, and it also cannot
+ * generate local ino#/gen# directly from the immobilized
+ * IGIF because of the backup/restore. Then force OSD to
+ * lookup the given IGIF in OI files, and if no entry,
+ * then ask the client to retry after upgrading completed.
+ * No better choice. */
+ dev->od_igif_inoi = 1;
}
rc = osd_scrub_refresh_mapping(info, dev, fid, lid, ops);
}
if (ops == DTO_INDEX_INSERT) {
- mutex_unlock(&inode->i_mutex);
+ /* A conflicting unlink may happen during the OI scrub;
+ * if it did, then remove the newly added OI mapping. */
+ if (unlikely(inode->i_nlink == 0))
+ osd_scrub_refresh_mapping(info, dev, fid, lid,
+ DTO_INDEX_DELETE);
iput(inode);
}
up_write(&scrub->os_rwsem);
}
sf->sf_time_last_checkpoint = cfs_time_current_sec();
if (result > 0) {
+ struct osd_device *dev =
+ container_of0(scrub, struct osd_device, od_scrub);
+
+ dev->od_igif_inoi = 1;
sf->sf_status = SS_COMPLETED;
memset(sf->sf_oi_bitmap, 0, SCRUB_OI_BITMAP_SIZE);
sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
if (rc == 0) {
- if (!scrub) {
- if (!fid_is_client_visible(&lma->lma_self_fid))
- rc = SCRUB_NEXT_CONTINUE;
- else
- *fid = lma->lma_self_fid;
- }
+ if (fid_is_llog(&lma->lma_self_fid) ||
+ (!scrub && fid_is_internal(&lma->lma_self_fid)))
+ rc = SCRUB_NEXT_CONTINUE;
+ else
+ *fid = lma->lma_self_fid;
} else if (rc == -ENODATA) {
lu_igif_build(fid, inode->i_ino, inode->i_generation);
if (scrub)
return rc;
}
+/**
+ * Wakeup condition for the scrub thread's l_wait_event().
+ *
+ * Evaluated with os_lock held so that os_waiting is updated atomically
+ * with the condition check: stop waiting when there is room in the scan
+ * window, there are pending inconsistent items, the iterator consumer is
+ * waiting, or the scrub thread is no longer running.
+ *
+ * \retval 1, stop waiting
+ * \retval 0, keep waiting
+ */
+static inline int
+osd_scrub_wakeup(struct osd_scrub *scrub, struct osd_otable_it *it)
+{
+ spin_lock(&scrub->os_lock);
+ if (osd_scrub_has_window(scrub, &it->ooi_cache) ||
+ !cfs_list_empty(&scrub->os_inconsistent_items) ||
+ it->ooi_waiting || !thread_is_running(&scrub->os_thread))
+ scrub->os_waiting = 0;
+ else
+ scrub->os_waiting = 1;
+ spin_unlock(&scrub->os_lock);
+
+ return !scrub->os_waiting;
+}
+
static int osd_scrub_exec(struct osd_thread_info *info, struct osd_device *dev,
struct osd_iit_param *param,
struct osd_idmap_cache *oic, int *noslot, int rc)
next:
scrub->os_pos_current = param->gbase + ++(param->offset);
+
+wait:
if (it != NULL && it->ooi_waiting &&
ooc->ooc_pos_preload < scrub->os_pos_current) {
+ spin_lock(&scrub->os_lock);
it->ooi_waiting = 0;
cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ spin_unlock(&scrub->os_lock);
}
if (scrub->os_full_speed || rc == SCRUB_NEXT_CONTINUE)
return 0;
-wait:
if (osd_scrub_has_window(scrub, ooc)) {
*noslot = 0;
return 0;
}
- scrub->os_waiting = 1;
l_wait_event(thread->t_ctl_waitq,
- osd_scrub_has_window(scrub, ooc) ||
- !cfs_list_empty(&scrub->os_inconsistent_items) ||
- !thread_is_running(thread),
+ osd_scrub_wakeup(scrub, it),
&lwi);
- scrub->os_waiting = 0;
if (osd_scrub_has_window(scrub, ooc))
*noslot = 0;
#define SCRUB_IT_CRASH 2
static int osd_inode_iteration(struct osd_thread_info *info,
- struct osd_device *dev, __u32 max, int preload)
+ struct osd_device *dev, __u32 max, bool preload)
{
osd_iit_next_policy next;
osd_iit_exec_policy exec;
int rc;
ENTRY;
- if (preload == 0) {
+ if (!preload) {
struct osd_scrub *scrub = &dev->od_scrub;
next = osd_scrub_next;
brelse(param.bitmap);
RETURN(rc);
}
+
+ if (preload && dev->od_otable_it->ooi_stopping) {
+ brelse(param.bitmap);
+ RETURN(0);
+ }
}
next_group:
brelse(param.bitmap);
+
+ if (preload && dev->od_otable_it->ooi_stopping)
+ RETURN(0);
}
if (*pos > limit)
ENTRY;
rc = osd_inode_iteration(osd_oti_get(env), dev,
- OSD_OTABLE_IT_CACHE_SIZE, 1);
+ OSD_OTABLE_IT_CACHE_SIZE, true);
if (rc == SCRUB_IT_ALL)
it->ooi_all_cached = 1;
if (unlikely(!thread_is_running(thread)))
GOTO(post, rc = 0);
- LASSERT(scrub->os_pos_current >= ooc->ooc_pos_preload);
scrub->os_pos_current = ooc->ooc_pos_preload;
}
CDEBUG(D_LFSCK, "OI scrub: flags = 0x%x, pos = %u\n",
scrub->os_start_flags, scrub->os_pos_current);
- rc = osd_inode_iteration(osd_oti_get(&env), dev, ~0U, 0);
+ rc = osd_inode_iteration(osd_oti_get(&env), dev, ~0U, false);
if (unlikely(rc == SCRUB_IT_CRASH))
GOTO(out, rc = -EINVAL);
GOTO(post, rc);
{ "PENDING", { FID_SEQ_LOCAL_FILE, MDD_ORPHAN_OID, 0 }, 0, NULL, NULL },
/* ROOT */
- { "ROOT", { FID_SEQ_LOCAL_FILE, MDD_ROOT_INDEX_OID, 0 },
+ { "ROOT", { FID_SEQ_ROOT, 1, 0 },
OLF_SCAN_SUBITEMS | OLF_HIDE_FID, osd_ios_ROOT_scan, NULL },
- /* capa_keys */
- { CAPA_KEYS, { FID_SEQ_LOCAL_FILE, MDD_CAPA_KEYS_OID, 0 }, 0,
- NULL, NULL },
-
/* changelog_catalog */
{ CHANGELOG_CATALOG, { 0, 0, 0 }, 0, NULL, NULL },
{ QSD_DIR, { 0, 0, 0 }, OLF_SCAN_SUBITEMS,
osd_ios_general_scan, osd_ios_varfid_fill },
- /* seq-200000003-lastid */
- { "seq-200000003-lastid", { FID_SEQ_LOCAL_NAME, 1, 0 }, 0,
- NULL, NULL },
-
/* seq_ctl */
{ "seq_ctl", { FID_SEQ_LOCAL_FILE, FID_SEQ_CTL_OID, 0 },
OLF_SHOW_NAME, NULL, NULL },
{ HEALTH_CHECK, { FID_SEQ_LOCAL_FILE, OFD_HEALTH_CHECK_OID, 0 },
OLF_SHOW_NAME, NULL, NULL },
+ /* lfsck_namespace */
+ { "lfsck_namespace", { FID_SEQ_LOCAL_FILE, LFSCK_BOOKMARK_OID, 0 }, 0,
+ NULL, NULL },
+
/* OBJECTS, upgrade from old device */
{ OBJECTS, { 0, 0, 0 }, OLF_SCAN_SUBITEMS, osd_ios_OBJECTS_scan, NULL },
lu_igif_build(&tfid, inode->i_ino, inode->i_generation);
else
tfid = *fid;
- rc = osd_ea_fid_set(info, inode, &tfid);
+ rc = osd_ea_fid_set(info, inode, &tfid, 0);
if (rc != 0)
RETURN(rc);
} else {
int rc;
ENTRY;
- /* XXX: This is a temporary flag to disable initial OI scrub until
- * the patcp for handling special FIDs in the OI files ready.
- * We do not want to merge the two patches together, because
- * it makes the patch too large to be reviewed. */
- if (!dev->od_init_scrub)
- RETURN(0);
-
while (1) {
rc = scandir(info, dev, dentry, filldir);
if (item != NULL) {
RETURN(rc);
rc = osd_initial_OI_scrub(info, dev);
- if (rc == 0 && !dev->od_noscrub &&
- ((sf->sf_status == SS_PAUSED) ||
- (sf->sf_status == SS_CRASHED &&
- sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT | SF_UPGRADE |
- SF_AUTO)) ||
- (sf->sf_status == SS_INIT &&
- sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT | SF_UPGRADE))))
- rc = osd_scrub_start(dev);
+ if (rc == 0) {
+ if ((sf->sf_flags & SF_UPGRADE) &&
+ !(sf->sf_flags & SF_INCONSISTENT))
+ /* The 'od_igif_inoi' will be set after the
+ * upgrade completes; no remount is needed. */
+ dev->od_igif_inoi = 0;
+ else
+ /* The 'od_igif_inoi' will be set under the
+ * following cases:
+ * 1) new created system, or
+ * 2) restored from file-level backup, or
+ * 3) the upgrading completed.
+ *
+ * The 'od_igif_inoi' may be cleared by OI scrub
+ * later if found that the system is upgrading. */
+ dev->od_igif_inoi = 1;
+
+ if (!dev->od_noscrub &&
+ ((sf->sf_status == SS_PAUSED) ||
+ (sf->sf_status == SS_CRASHED &&
+ sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
+ SF_UPGRADE | SF_AUTO)) ||
+ (sf->sf_status == SS_INIT &&
+ sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
+ SF_UPGRADE))))
+ rc = osd_scrub_start(dev);
+ }
RETURN(rc);
}
dev->od_otable_it = it;
it->ooi_dev = dev;
+ it->ooi_pid = cfs_curproc_pid();
it->ooi_cache.ooc_consumer_idx = -1;
if (flags & DOIF_OUTUSED)
it->ooi_used_outside = 1;
}
rc = do_osd_scrub_start(dev, start);
- if (rc == -EALREADY) {
- it->ooi_cache.ooc_pos_preload = scrub->os_pos_current - 1;
- } else if (rc < 0) {
+ if (rc < 0 && rc != -EALREADY) {
dev->od_otable_it = NULL;
OBD_FREE_PTR(it);
- GOTO(out, it = ERR_PTR(-EALREADY));
- } else {
- it->ooi_cache.ooc_pos_preload = scrub->os_pos_current;
+ GOTO(out, it = ERR_PTR(rc));
}
+ it->ooi_cache.ooc_pos_preload = scrub->os_pos_current;
+
GOTO(out, it);
out:
OBD_FREE_PTR(it);
}
+/**
+ * ::get() is intentionally a no-op for the otable iterator: the iteration
+ * position is set through ::load() with a 64-bit hash, not via key lookup.
+ */
+static int osd_otable_it_get(const struct lu_env *env,
+ struct dt_it *di, const struct dt_key *key)
+{
+ return 0;
+}
+
/**
- * XXX: Temporary used to notify otable iteration to be paused.
+ * This is a hack:
+ *
+ * Sometimes the otable-based iteration driver (LFSCK) may be blocked in OSD
+ * layer when someone wants to stop/pause the iteration. Under such case, we
+ * need some mechanism to notify the event and wakeup the blocker.
*/
static void osd_otable_it_put(const struct lu_env *env, struct dt_it *di)
{
- struct osd_device *dev = ((struct osd_otable_it *)di)->ooi_dev;
+ struct osd_otable_it *it = (struct osd_otable_it *)di;
+ struct osd_device *dev = it->ooi_dev;
/* od_otable_mutex: prevent curcurrent init/fini */
mutex_lock(&dev->od_otable_mutex);
- dev->od_scrub.os_paused = 1;
+ if (it->ooi_pid == cfs_curproc_pid()) {
+ dev->od_scrub.os_paused = 1;
+ } else {
+ struct ptlrpc_thread *thread = &dev->od_scrub.os_thread;
+
+ it->ooi_stopping = 1;
+ if (it->ooi_waiting)
+ cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ }
mutex_unlock(&dev->od_otable_mutex);
}
-/**
- * Set the OSD layer iteration start position as the specified key.
- *
- * The LFSCK out of OSD layer does not know the detail of the key, so if there
- * are several keys, they cannot be compared out of OSD, so call "::get()" for
- * each key, and OSD will select the smallest one by itself.
- */
-static int osd_otable_it_get(const struct lu_env *env,
- struct dt_it *di, const struct dt_key *key)
+/**
+ * Wakeup condition for the otable iterator's l_wait_event().
+ *
+ * Evaluated with os_lock held so ooi_waiting stays consistent with the
+ * condition: stop waiting when the scrub position has moved past the
+ * preload position, the scrub thread itself is waiting, the iterator is
+ * being stopped, or the scrub thread is no longer running.
+ *
+ * \retval 1, stop waiting
+ * \retval 0, keep waiting
+ */
+static inline int
+osd_otable_it_wakeup(struct osd_scrub *scrub, struct osd_otable_it *it)
{
- struct osd_otable_it *it = (struct osd_otable_it *)di;
- struct osd_otable_cache *ooc = &it->ooi_cache;
- const char *str = (const char *)key;
- __u32 ino;
- ENTRY;
-
- /* Forbid to set iteration position after iteration started. */
- if (it->ooi_user_ready)
- RETURN(-EPERM);
-
- if (str[0] == '\0')
- RETURN(-EINVAL);
-
- if (sscanf(str, "%u", &ino) <= 0)
- RETURN(-EINVAL);
-
- /* Skip the one that has been processed last time. */
- if (ooc->ooc_pos_preload > ++ino)
- ooc->ooc_pos_preload = ino;
+ spin_lock(&scrub->os_lock);
+ if (it->ooi_cache.ooc_pos_preload < scrub->os_pos_current ||
+ scrub->os_waiting || it->ooi_stopping ||
+ !thread_is_running(&scrub->os_thread))
+ it->ooi_waiting = 0;
+ else
+ it->ooi_waiting = 1;
+ spin_unlock(&scrub->os_lock);
- RETURN(0);
+ return !it->ooi_waiting;
}
static int osd_otable_it_next(const struct lu_env *env, struct dt_it *di)
RETURN(1);
}
- it->ooi_waiting = 1;
- l_wait_event(thread->t_ctl_waitq,
- ooc->ooc_pos_preload < scrub->os_pos_current ||
- !thread_is_running(thread),
- &lwi);
- it->ooi_waiting = 0;
+ if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
+ spin_lock(&scrub->os_lock);
+ scrub->os_waiting = 0;
+ cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
+ spin_unlock(&scrub->os_lock);
+ }
+
+ if (it->ooi_cache.ooc_pos_preload >= scrub->os_pos_current)
+ l_wait_event(thread->t_ctl_waitq,
+ osd_otable_it_wakeup(scrub, it),
+ &lwi);
if (!thread_is_running(thread) && !it->ooi_used_outside)
RETURN(1);
+ if (it->ooi_stopping)
+ RETURN(0);
+
rc = osd_otable_it_preload(env, it);
if (rc >= 0)
goto again;
static struct dt_key *osd_otable_it_key(const struct lu_env *env,
const struct dt_it *di)
{
- struct osd_otable_it *it = (struct osd_otable_it *)di;
- struct osd_otable_cache *ooc = &it->ooi_cache;
-
- sprintf(it->ooi_key, "%u",
- ooc->ooc_cache[ooc->ooc_consumer_idx].oic_lid.oii_ino);
- return (struct dt_key *)it->ooi_key;
+ /* No string key any more: the position is exposed via ::store()
+ * as a 64-bit hash instead. */
+ return NULL;
}
static int osd_otable_it_key_size(const struct lu_env *env,
const struct dt_it *di)
{
- return sizeof(((struct osd_otable_it *)di)->ooi_key);
+ /* The "key" is now the 64-bit hash used by ::store()/::load(). */
+ return sizeof(__u64);
}
static int osd_otable_it_rec(const struct lu_env *env, const struct dt_it *di,
struct osd_otable_cache *ooc = &it->ooi_cache;
*(struct lu_fid *)rec = ooc->ooc_cache[ooc->ooc_consumer_idx].oic_fid;
+
+ /* Invalid FIDs should have been filtered out already. */
+ LASSERTF(fid_is_sane((struct lu_fid *)rec),
+ "Invalid FID "DFID", p_idx = %d, c_idx = %d\n",
+ PFID((struct lu_fid *)rec),
+ ooc->ooc_producer_idx, ooc->ooc_consumer_idx);
+
return 0;
}
+/**
+ * Return the current iterator position as a 64-bit hash: the ino# of the
+ * entry being consumed once the user has started iterating, otherwise
+ * the preload position.
+ */
+static __u64 osd_otable_it_store(const struct lu_env *env,
+ const struct dt_it *di)
+{
+ struct osd_otable_it *it = (struct osd_otable_it *)di;
+ struct osd_otable_cache *ooc = &it->ooi_cache;
+ __u64 hash;
+
+ if (it->ooi_user_ready && ooc->ooc_consumer_idx != -1)
+ hash = ooc->ooc_cache[ooc->ooc_consumer_idx].oic_lid.oii_ino;
+ else
+ hash = ooc->ooc_pos_preload;
+ return hash;
+}
+
+/**
+ * Set the OSD layer iteration start position as the specified hash.
+ */
static int osd_otable_it_load(const struct lu_env *env,
const struct dt_it *di, __u64 hash)
{
struct osd_device *dev = it->ooi_dev;
struct osd_otable_cache *ooc = &it->ooi_cache;
struct osd_scrub *scrub = &dev->od_scrub;
+ int rc;
+ ENTRY;
+ /* Forbid to set iteration position after iteration started. */
if (it->ooi_user_ready)
- return 0;
+ RETURN(-EPERM);
+
+ if (hash > OSD_OTABLE_MAX_HASH)
+ hash = OSD_OTABLE_MAX_HASH;
+ ooc->ooc_pos_preload = hash;
if (ooc->ooc_pos_preload <= LDISKFS_FIRST_INO(osd_sb(dev)))
ooc->ooc_pos_preload = LDISKFS_FIRST_INO(osd_sb(dev)) + 1;
+
it->ooi_user_ready = 1;
if (!scrub->os_full_speed)
cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
/* Unplug OSD layer iteration by the first next() call. */
- return osd_otable_it_next(env, (struct dt_it *)it);
+ rc = osd_otable_it_next(env, (struct dt_it *)it);
+
+ RETURN(rc);
+}
+
+/* Stub: the otable iterator exposes no key_rec; always returns 0. */
+static int osd_otable_it_key_rec(const struct lu_env *env,
+ const struct dt_it *di, void *key_rec)
+{
+ return 0;
+}
const struct dt_index_operations osd_otable_ops = {
.dio_it = {
.init = osd_otable_it_init,
.fini = osd_otable_it_fini,
- .put = osd_otable_it_put,
.get = osd_otable_it_get,
+ .put = osd_otable_it_put,
.next = osd_otable_it_next,
- .key = osd_otable_it_key,
+ .key = osd_otable_it_key,
.key_size = osd_otable_it_key_size,
.rec = osd_otable_it_rec,
+ .store = osd_otable_it_store,
.load = osd_otable_it_load,
+ .key_rec = osd_otable_it_key_rec,
}
};