* GPL HEADER END
*/
/*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2013, Intel Corporation.
*/
/*
* lustre/osd-ldiskfs/osd_scrub.c
* Author: Fan Yong <yong.fan@whamcloud.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_MDS
#include <lustre/lustre_idl.h>
return scrub->os_pos_current < ooc->ooc_pos_preload + SCRUB_WINDOW_SIZE;
}
+/**
+ * update/insert/delete the specified OI mapping (@fid @id) according to the ops
+ *
+ * \retval 1, changed nothing
+ * \retval 0, changed successfully
+ * \retval -ve, on error
+ */
static int osd_scrub_refresh_mapping(struct osd_thread_info *info,
struct osd_device *dev,
const struct lu_fid *fid,
ENTRY;
fid_cpu_to_be(oi_fid, fid);
- osd_id_pack(oi_id, id);
+ if (id != NULL)
+ osd_id_pack(oi_id, id);
jh = ldiskfs_journal_start_sb(osd_sb(dev),
osd_dto_credits_noquota[ops]);
if (IS_ERR(jh)) {
RETURN(-ENOMEM);
}
- if (ops == DTO_INDEX_UPDATE) {
+ switch (ops) {
+ case DTO_INDEX_UPDATE:
rc = iam_update(jh, bag, (const struct iam_key *)oi_fid,
(struct iam_rec *)oi_id, ipd);
- } else {
+ if (unlikely(rc == -ENOENT)) {
+ /* Some unlink thread may have removed the OI mapping. */
+ rc = 1;
+ }
+ break;
+ case DTO_INDEX_INSERT:
rc = iam_insert(jh, bag, (const struct iam_key *)oi_fid,
(struct iam_rec *)oi_id, ipd);
- if (rc == -EEXIST) {
+ if (unlikely(rc == -EEXIST)) {
rc = 1;
/* XXX: There are trouble things when adding OI
* mapping for IGIF object, which may cause
*
* Anyway, it is rare, only exists in theory. */
}
+ break;
+ case DTO_INDEX_DELETE:
+ rc = iam_delete(jh, bag, (const struct iam_key *)oi_fid, ipd);
+ if (rc == -ENOENT) {
+ /* It is normal that the unlink thread has removed the
+ * OI mapping already. */
+ rc = 1;
+ }
+ break;
+ default:
+ LASSERTF(0, "Unexpected ops %d\n", ops);
+ break;
}
osd_ipd_put(info->oti_env, bag, ipd);
ldiskfs_journal_stop(jh);
GOTO(out, rc);
}
- /* Prevent the inode to be unlinked during OI scrub. */
- mutex_lock(&inode->i_mutex);
+ /* Check whether the inode has been unlinked during OI scrub. */
if (unlikely(inode->i_nlink == 0)) {
- mutex_unlock(&inode->i_mutex);
iput(inode);
GOTO(out, rc = 0);
}
ops = DTO_INDEX_INSERT;
idx = osd_oi_fid2idx(dev, fid);
if (val == SCRUB_NEXT_NOLMA) {
+ sf->sf_flags |= SF_UPGRADE;
+ scrub->os_full_speed = 1;
rc = osd_ea_fid_set(info, inode, fid, 0);
if (rc != 0)
GOTO(out, rc);
+
+ if (!(sf->sf_flags & SF_INCONSISTENT))
+ dev->od_igif_inoi = 0;
} else {
- sf->sf_flags |= SF_RECREATED | SF_INCONSISTENT;
+ sf->sf_flags |= SF_RECREATED;
+ scrub->os_full_speed = 1;
if (unlikely(!ldiskfs_test_bit(idx, sf->sf_oi_bitmap)))
ldiskfs_set_bit(idx, sf->sf_oi_bitmap);
}
GOTO(out, rc = 0);
} else {
sf->sf_flags |= SF_INCONSISTENT;
+ scrub->os_full_speed = 1;
+
+ /* XXX: If the device is restored from file-level backup, then
+ * some IGIFs may have been already in OI files, and some
+ * may not be yet. This means upgrading from 1.8 may be partly
+ * processed, but some clients may hold some immobilized
+ * IGIFs, and use them to access related objects. Under
+ * such case, OSD does not know whether a given IGIF has
+ * been processed or to be processed, and it also cannot
+ * generate local ino#/gen# directly from the immobilized
+ * IGIF because of the backup/restore. Then force OSD to
+ * lookup the given IGIF in OI files, and if no entry,
+ * then ask the client to retry after upgrading completed.
+ * No better choice. */
+ dev->od_igif_inoi = 1;
}
rc = osd_scrub_refresh_mapping(info, dev, fid, lid, ops);
sf->sf_items_updated_prior++;
else
sf->sf_items_updated++;
+
+ /* The target has been changed, need to be re-loaded. */
+ lu_object_purge(info->oti_env, osd2lu_dev(dev), fid);
}
GOTO(out, rc);
}
if (ops == DTO_INDEX_INSERT) {
- mutex_unlock(&inode->i_mutex);
+ /* There may be a conflicting unlink during the OI scrub;
+ * if that happened, then remove the newly added OI mapping. */
+ if (unlikely(inode->i_nlink == 0))
+ osd_scrub_refresh_mapping(info, dev, fid, lid,
+ DTO_INDEX_DELETE);
iput(inode);
}
up_write(&scrub->os_rwsem);
}
sf->sf_time_last_checkpoint = cfs_time_current_sec();
if (result > 0) {
+ struct osd_device *dev =
+ container_of0(scrub, struct osd_device, od_scrub);
+
+ dev->od_igif_inoi = 1;
sf->sf_status = SS_COMPLETED;
memset(sf->sf_oi_bitmap, 0, SCRUB_OI_BITMAP_SIZE);
sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
if (rc == 0) {
- if (!scrub) {
- if (!fid_is_client_visible(&lma->lma_self_fid))
- rc = SCRUB_NEXT_CONTINUE;
- else
- *fid = lma->lma_self_fid;
- }
+ if (fid_is_llog(&lma->lma_self_fid) ||
+ (!scrub && fid_is_internal(&lma->lma_self_fid)) ||
+ (scrub && (lma->lma_incompat & LMAI_AGENT)))
+ rc = SCRUB_NEXT_CONTINUE;
+ else
+ *fid = lma->lma_self_fid;
} else if (rc == -ENODATA) {
lu_igif_build(fid, inode->i_ino, inode->i_generation);
if (scrub)
return rc;
}
+static inline int
+osd_scrub_wakeup(struct osd_scrub *scrub, struct osd_otable_it *it)
+{
+ spin_lock(&scrub->os_lock);
+ if (osd_scrub_has_window(scrub, &it->ooi_cache) ||
+ !cfs_list_empty(&scrub->os_inconsistent_items) ||
+ it->ooi_waiting || !thread_is_running(&scrub->os_thread))
+ scrub->os_waiting = 0;
+ else
+ scrub->os_waiting = 1;
+ spin_unlock(&scrub->os_lock);
+
+ return !scrub->os_waiting;
+}
+
static int osd_scrub_exec(struct osd_thread_info *info, struct osd_device *dev,
struct osd_iit_param *param,
struct osd_idmap_cache *oic, int *noslot, int rc)
next:
scrub->os_pos_current = param->gbase + ++(param->offset);
+
+wait:
if (it != NULL && it->ooi_waiting &&
ooc->ooc_pos_preload < scrub->os_pos_current) {
+ spin_lock(&scrub->os_lock);
it->ooi_waiting = 0;
cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ spin_unlock(&scrub->os_lock);
}
if (scrub->os_full_speed || rc == SCRUB_NEXT_CONTINUE)
return 0;
-wait:
if (osd_scrub_has_window(scrub, ooc)) {
*noslot = 0;
return 0;
}
- scrub->os_waiting = 1;
l_wait_event(thread->t_ctl_waitq,
- osd_scrub_has_window(scrub, ooc) ||
- !cfs_list_empty(&scrub->os_inconsistent_items) ||
- !thread_is_running(thread),
+ osd_scrub_wakeup(scrub, it),
&lwi);
- scrub->os_waiting = 0;
if (osd_scrub_has_window(scrub, ooc))
*noslot = 0;
brelse(param.bitmap);
RETURN(rc);
}
-
- if (preload && dev->od_otable_it->ooi_stopping) {
- brelse(param.bitmap);
- RETURN(0);
- }
}
next_group:
brelse(param.bitmap);
-
- if (preload && dev->od_otable_it->ooi_stopping)
- RETURN(0);
}
if (*pos > limit)
int rc;
ENTRY;
- cfs_daemonize("OI_scrub");
- rc = lu_env_init(&env, LCT_DT_THREAD);
+ rc = lu_env_init(&env, LCT_LOCAL);
if (rc != 0) {
CERROR("%.16s: OI scrub, fail to init env, rc = %d\n",
LDISKFS_SB(sb)->s_es->s_volume_name, rc);
if (unlikely(!thread_is_running(thread)))
GOTO(post, rc = 0);
- LASSERT(scrub->os_pos_current >= ooc->ooc_pos_preload);
scrub->os_pos_current = ooc->ooc_pos_preload;
}
osd_ios_general_scan, osd_ios_varfid_fill },
/* PENDING */
- { "PENDING", { FID_SEQ_LOCAL_FILE, MDD_ORPHAN_OID, 0 }, 0, NULL, NULL },
+ { "PENDING", { 0, 0, 0 }, 0, NULL, NULL },
/* ROOT */
- { "ROOT", { FID_SEQ_LOCAL_FILE, MDD_ROOT_INDEX_OID, 0 },
+ { "ROOT", { FID_SEQ_ROOT, 1, 0 },
OLF_SCAN_SUBITEMS | OLF_HIDE_FID, osd_ios_ROOT_scan, NULL },
- /* capa_keys */
- { CAPA_KEYS, { FID_SEQ_LOCAL_FILE, MDD_CAPA_KEYS_OID, 0 }, 0,
- NULL, NULL },
-
/* changelog_catalog */
{ CHANGELOG_CATALOG, { 0, 0, 0 }, 0, NULL, NULL },
NULL, NULL },
/* lfsck_bookmark */
- { "lfsck_bookmark", { FID_SEQ_LOCAL_FILE, LFSCK_BOOKMARK_OID, 0 }, 0,
- NULL, NULL },
+ { "lfsck_bookmark", { 0, 0, 0 }, 0, NULL, NULL },
/* lov_objid */
{ LOV_OBJID, { FID_SEQ_LOCAL_FILE, MDD_LOV_OBJ_OID, 0 }, OLF_SHOW_NAME,
{ QSD_DIR, { 0, 0, 0 }, OLF_SCAN_SUBITEMS,
osd_ios_general_scan, osd_ios_varfid_fill },
- /* seq-200000003-lastid */
- { "seq-200000003-lastid", { FID_SEQ_LOCAL_NAME, 1, 0 }, 0,
- NULL, NULL },
-
/* seq_ctl */
{ "seq_ctl", { FID_SEQ_LOCAL_FILE, FID_SEQ_CTL_OID, 0 },
OLF_SHOW_NAME, NULL, NULL },
{ "seq_srv", { FID_SEQ_LOCAL_FILE, FID_SEQ_SRV_OID, 0 },
OLF_SHOW_NAME, NULL, NULL },
- /* LAST_GROUP */
- { "LAST_GROUP", { FID_SEQ_LOCAL_FILE, OFD_LAST_GROUP_OID, 0 },
- OLF_SHOW_NAME, NULL, NULL },
-
/* health_check */
{ HEALTH_CHECK, { FID_SEQ_LOCAL_FILE, OFD_HEALTH_CHECK_OID, 0 },
OLF_SHOW_NAME, NULL, NULL },
/* lfsck_namespace */
- { "lfsck_namespace", { FID_SEQ_LOCAL_FILE, LFSCK_NAMESPACE_OID, 0 }, 0,
- NULL, NULL },
+ { "lfsck_namespace", { 0, 0, 0 }, 0, NULL, NULL },
/* OBJECTS, upgrade from old device */
{ OBJECTS, { 0, 0, 0 }, OLF_SCAN_SUBITEMS, osd_ios_OBJECTS_scan, NULL },
/* lquota_v2.group, upgrade from old device */
{ "lquota_v2.group", { 0, 0, 0 }, 0, NULL, NULL },
+ /* LAST_GROUP, upgrade from old device */
+ { "LAST_GROUP", { FID_SEQ_LOCAL_FILE, OFD_LAST_GROUP_OID, 0 },
+ OLF_SHOW_NAME, NULL, NULL },
+
{ NULL, { 0, 0, 0 }, 0, NULL, NULL }
};
}
}
} else {
- rc = osd_ios_scan_one(info, dev, child->d_inode, NULL, 0);
+ /* For lustre-2.x (x <= 3), the ".lustre" has NO FID-in-LMA,
+ * so the client will get IGIF for the ".lustre" object when
+ * the MDT restarts.
+ *
+ * From the OI scrub view, when the MDT upgrades to Lustre-2.4,
+ * it does not know whether there are some old clients cached
+ * the ".lustre" IGIF during the upgrading. Two choices:
+ *
+ * 1) Generate IGIF-in-LMA and IGIF-in-OI for the ".lustre".
+ * It will allow the old connected clients to access the
+ * ".lustre" with cached IGIF. But it will cause others
+ * on the MDT to fail the "fid_is_dot_lustre()" check.
+ *
+ * 2) Use fixed FID {FID_SEQ_DOT_LUSTRE, FID_OID_DOT_LUSTRE, 0}
+ * for ".lustre" in spite of whether there are some clients
+ * cached the ".lustre" IGIF or not. It enables the check
+ * "fid_is_dot_lustre()" on the MDT, although it will cause
+ * that the old connected clients cannot access the ".lustre"
+ * with the cached IGIF.
+ *
+ * Usually, it is a rare case for the old connected clients
+ * to access the ".lustre" with cached IGIF. So we prefer
+ * solution 2). */
+ rc = osd_ios_scan_one(info, dev, child->d_inode,
+ &LU_DOT_LUSTRE_FID, 0);
dput(child);
}
static int osd_initial_OI_scrub(struct osd_thread_info *info,
struct osd_device *dev)
{
- struct osd_ios_item *item = NULL;
- scandir_t scandir = osd_ios_general_scan;
- filldir_t filldir = osd_ios_root_fill;
- struct dentry *dentry = osd_sb(dev)->s_root;
- int rc;
+ struct osd_ios_item *item = NULL;
+ scandir_t scandir = osd_ios_general_scan;
+ filldir_t filldir = osd_ios_root_fill;
+ struct dentry *dentry = osd_sb(dev)->s_root;
+ const struct osd_lf_map *map = osd_lf_maps;
+ int rc;
ENTRY;
while (1) {
OBD_FREE_PTR(item);
}
- RETURN(rc);
+ if (rc != 0)
+ RETURN(rc);
+
+ /* There may be the case that the object has been removed, but its OI
+ * mapping is still in the OI file, such as the "CATALOGS" after MDT
+ * file-level backup/restore. So here cleanup the stale OI mappings. */
+ while (map->olm_name != NULL) {
+ struct dentry *child;
+
+ if (fid_is_zero(&map->olm_fid)) {
+ map++;
+ continue;
+ }
+
+ child = osd_ios_lookup_one_len(map->olm_name,
+ osd_sb(dev)->s_root,
+ strlen(map->olm_name));
+ if (!IS_ERR(child))
+ dput(child);
+ else if (PTR_ERR(child) == -ENOENT)
+ osd_scrub_refresh_mapping(info, dev, &map->olm_fid,
+ NULL, DTO_INDEX_DELETE);
+ map++;
+ }
+
+ RETURN(0);
}
char *osd_lf_fid2name(const struct lu_fid *fid)
scrub->os_start_flags = flags;
thread_set_flags(thread, 0);
- rc = cfs_create_thread(osd_scrub_main, dev, 0);
- if (rc < 0) {
+ rc = PTR_ERR(kthread_run(osd_scrub_main, dev, "OI_scrub"));
+ if (IS_ERR_VALUE(rc)) {
CERROR("%.16s: cannot start iteration thread, rc = %d\n",
LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, rc);
RETURN(rc);
RETURN(rc);
rc = osd_initial_OI_scrub(info, dev);
- if (rc == 0 && !dev->od_noscrub &&
- ((sf->sf_status == SS_PAUSED) ||
- (sf->sf_status == SS_CRASHED &&
- sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT | SF_UPGRADE |
- SF_AUTO)) ||
- (sf->sf_status == SS_INIT &&
- sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT | SF_UPGRADE))))
- rc = osd_scrub_start(dev);
+ if (rc == 0) {
+ if ((sf->sf_flags & SF_UPGRADE) &&
+ !(sf->sf_flags & SF_INCONSISTENT))
+ /* The 'od_igif_inoi' will be set after the
+ * upgrading completes; remount is NOT needed. */
+ dev->od_igif_inoi = 0;
+ else
+ /* The 'od_igif_inoi' will be set under the
+ * following cases:
+ * 1) new created system, or
+ * 2) restored from file-level backup, or
+ * 3) the upgrading completed.
+ *
+ * The 'od_igif_inoi' may be cleared by OI scrub
+ * later if found that the system is upgrading. */
+ dev->od_igif_inoi = 1;
+
+ if (!dev->od_noscrub &&
+ ((sf->sf_status == SS_PAUSED) ||
+ (sf->sf_status == SS_CRASHED &&
+ sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
+ SF_UPGRADE | SF_AUTO)) ||
+ (sf->sf_status == SS_INIT &&
+ sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
+ SF_UPGRADE))))
+ rc = osd_scrub_start(dev);
+ }
+
+ /* it is possible that dcache entries may keep objects after they are
+ * deleted by OSD. While it looks safe this can cause object data to
+ * stay until umount causing failures in tests calculating free space,
+ * e.g. replay-ost-single. Since those dcache entries are not used
+ * anymore let's just free them after use here */
+ shrink_dcache_sb(sb);
RETURN(rc);
}
dev->od_otable_it = it;
it->ooi_dev = dev;
- it->ooi_pid = cfs_curproc_pid();
it->ooi_cache.ooc_consumer_idx = -1;
if (flags & DOIF_OUTUSED)
it->ooi_used_outside = 1;
return 0;
}
-/**
- * It is hack here:
- *
- * Sometimes the otable-based iteration driver (LFSCK) may be blocked in OSD
- * layer when someone wants to stop/pause the iteration. Under such case, we
- * need some mechanism to notify the event and wakeup the blocker.
- */
static void osd_otable_it_put(const struct lu_env *env, struct dt_it *di)
{
- struct osd_otable_it *it = (struct osd_otable_it *)di;
- struct osd_device *dev = it->ooi_dev;
+}
- /* od_otable_mutex: prevent curcurrent init/fini */
- mutex_lock(&dev->od_otable_mutex);
- if (it->ooi_pid == cfs_curproc_pid()) {
- dev->od_scrub.os_paused = 1;
- } else {
- struct ptlrpc_thread *thread = &dev->od_scrub.os_thread;
+static inline int
+osd_otable_it_wakeup(struct osd_scrub *scrub, struct osd_otable_it *it)
+{
+ spin_lock(&scrub->os_lock);
+ if (it->ooi_cache.ooc_pos_preload < scrub->os_pos_current ||
+ scrub->os_waiting ||
+ !thread_is_running(&scrub->os_thread))
+ it->ooi_waiting = 0;
+ else
+ it->ooi_waiting = 1;
+ spin_unlock(&scrub->os_lock);
- it->ooi_stopping = 1;
- if (it->ooi_waiting)
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
- }
- mutex_unlock(&dev->od_otable_mutex);
+ return !it->ooi_waiting;
}
static int osd_otable_it_next(const struct lu_env *env, struct dt_it *di)
RETURN(1);
}
- it->ooi_waiting = 1;
- l_wait_event(thread->t_ctl_waitq,
- ooc->ooc_pos_preload < scrub->os_pos_current ||
- !thread_is_running(thread) ||
- it->ooi_stopping,
- &lwi);
- it->ooi_waiting = 0;
+ if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
+ spin_lock(&scrub->os_lock);
+ scrub->os_waiting = 0;
+ cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
+ spin_unlock(&scrub->os_lock);
+ }
+
+ if (it->ooi_cache.ooc_pos_preload >= scrub->os_pos_current)
+ l_wait_event(thread->t_ctl_waitq,
+ osd_otable_it_wakeup(scrub, it),
+ &lwi);
if (!thread_is_running(thread) && !it->ooi_used_outside)
RETURN(1);
- if (it->ooi_stopping)
- RETURN(0);
-
rc = osd_otable_it_preload(env, it);
if (rc >= 0)
goto again;
struct osd_otable_cache *ooc = &it->ooi_cache;
__u64 hash;
- if (it->ooi_user_ready)
- hash = ooc->ooc_pos_preload;
- else
+ if (it->ooi_user_ready && ooc->ooc_consumer_idx != -1)
hash = ooc->ooc_cache[ooc->ooc_consumer_idx].oic_lid.oii_ino;
+ else
+ hash = ooc->ooc_pos_preload;
return hash;
}
if (hash > OSD_OTABLE_MAX_HASH)
hash = OSD_OTABLE_MAX_HASH;
- /* Skip the one that has been processed last time. */
- if (ooc->ooc_pos_preload > hash)
- ooc->ooc_pos_preload = hash;
-
+ ooc->ooc_pos_preload = hash;
if (ooc->ooc_pos_preload <= LDISKFS_FIRST_INO(osd_sb(dev)))
ooc->ooc_pos_preload = LDISKFS_FIRST_INO(osd_sb(dev)) + 1;