* GPL HEADER END
*/
/*
- * Copyright (c) 2012 Whamcloud, Inc.
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* lustre/mdd/mdd_lfsck.c
struct mdd_device *mdd;
mdd = container_of0(lfsck, struct mdd_device, mdd_lfsck);
- return mdd->mdd_obd_dev->obd_name;
+ return mdd2obd_dev(mdd)->obd_name;
}
void mdd_lfsck_set_speed(struct md_lfsck *lfsck, __u32 limit)
{
- cfs_spin_lock(&lfsck->ml_lock);
+ spin_lock(&lfsck->ml_lock);
lfsck->ml_speed_limit = limit;
if (limit != LFSCK_SPEED_NO_LIMIT) {
if (limit > CFS_HZ) {
lfsck->ml_sleep_jif = 0;
lfsck->ml_sleep_rate = 0;
}
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
}
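/* Illustrative sketch (not part of this patch): one way the speed limit
 * above could translate into the (ml_sleep_rate, ml_sleep_jif) throttle
 * pair, assuming CFS_HZ jiffies per second.  The helper name and both
 * branches are assumptions for illustration only. */
static void lfsck_speed_to_sleep(__u32 limit, __u32 *sleep_rate,
				 __u32 *sleep_jif)
{
	if (limit == LFSCK_SPEED_NO_LIMIT) {
		/* No throttling at all. */
		*sleep_rate = 0;
		*sleep_jif = 0;
	} else if (limit > CFS_HZ) {
		/* More than one object per jiffy: sleep one jiffy after
		 * every (limit / CFS_HZ) objects scanned. */
		*sleep_rate = limit / CFS_HZ;
		*sleep_jif = 1;
	} else {
		/* At most one object per jiffy: sleep (CFS_HZ / limit)
		 * jiffies after every single object scanned. */
		*sleep_rate = 1;
		*sleep_jif = CFS_HZ / limit;
	}
}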
static void mdd_lfsck_control_speed(struct md_lfsck *lfsck)
if (lfsck->ml_sleep_jif > 0 &&
lfsck->ml_new_scanned >= lfsck->ml_sleep_rate) {
- cfs_spin_lock(&lfsck->ml_lock);
+ spin_lock(&lfsck->ml_lock);
if (likely(lfsck->ml_sleep_jif > 0 &&
lfsck->ml_new_scanned >= lfsck->ml_sleep_rate)) {
lwi = LWI_TIMEOUT_INTR(lfsck->ml_sleep_jif, NULL,
LWI_ON_SIGNAL_NOOP, NULL);
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
l_wait_event(thread->t_ctl_waitq,
!thread_is_running(thread),
&lwi);
lfsck->ml_new_scanned = 0;
} else {
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
}
}
}
* every bookmark, then the low layer module can decide the
* start point for the current iteration. */
- cfs_spin_lock(&lfsck->ml_lock);
+ spin_lock(&lfsck->ml_lock);
thread_set_flags(thread, SVC_RUNNING);
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
cfs_waitq_broadcast(&thread->t_ctl_waitq);
/* Call iops->load() to finish choosing the start point. */
GOTO(out, rc);
out:
- CDEBUG(D_LFSCK, "LFSCK: iteration stop: pos = %s, rc = %d\n",
- (char *)iops->key(&env, di), rc);
+ if (lfsck->ml_paused) {
+ /* XXX: This is a hack: if the lfsck is still running when the MDS
+ * umounts, it should be restarted automatically after the MDS
+ * remounts.
+ *
+ * To support that, we need to record the lfsck status in
+ * the lfsck on-disk bookmark file. But for now there is no
+ * lfsck component under the lfsck framework. To avoid
+ * introducing unnecessary bookmark incompatibility issues,
+ * we write nothing to the lfsck bookmark file for now.
+ *
+ * Instead, we reuse the dt_it_ops::put() method to notify the
+ * low layer iterator to handle such a case.
+ *
+ * This is just a temporary solution, and will be replaced when
+ * some lfsck component is introduced in the future. */
+ iops->put(&env, di);
+ CDEBUG(D_LFSCK, "LFSCK: iteration pasued: pos = %s, rc = %d\n",
+ (char *)iops->key(&env, di), rc);
+ } else {
+ CDEBUG(D_LFSCK, "LFSCK: iteration stop: pos = %s, rc = %d\n",
+ (char *)iops->key(&env, di), rc);
+ }
iops->fini(&env, di);
fini_env:
lu_env_fini(&env);
noenv:
- cfs_spin_lock(&lfsck->ml_lock);
+ spin_lock(&lfsck->ml_lock);
thread_set_flags(thread, SVC_STOPPED);
cfs_waitq_broadcast(&thread->t_ctl_waitq);
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
return rc;
}
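/* Illustrative sketch (not part of this patch): the paused case above
 * relies on dt_it_ops::put() as a "paused" notification to the low layer
 * iterator.  A low layer implementation could react roughly as below;
 * the structure, field names and function name here are hypothetical. */
struct demo_otable_it {
	__u64	doi_pos;	/* current scan position */
	__u64	doi_resume_pos;	/* position to resume from after remount */
};

static void demo_otable_it_put(const struct lu_env *env, struct dt_it *di)
{
	struct demo_otable_it *it = (struct demo_otable_it *)di;

	/* The scan was paused (e.g. MDS umount) rather than completed:
	 * remember where we were, so that a later ->load() can resume
	 * from this position instead of restarting from the beginning. */
	it->doi_resume_pos = it->doi_pos;
}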
__u16 flags = 0;
ENTRY;
- cfs_mutex_lock(&lfsck->ml_mutex);
- cfs_spin_lock(&lfsck->ml_lock);
+ if (lfsck->ml_it_obj == NULL)
+ RETURN(-ENOTSUPP);
+
+ mutex_lock(&lfsck->ml_mutex);
+ spin_lock(&lfsck->ml_lock);
if (thread_is_running(thread)) {
- cfs_spin_unlock(&lfsck->ml_lock);
- cfs_mutex_unlock(&lfsck->ml_mutex);
+ spin_unlock(&lfsck->ml_lock);
+ mutex_unlock(&lfsck->ml_mutex);
RETURN(-EALREADY);
}
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
if (start->ls_valid & LSV_SPEED_LIMIT)
mdd_lfsck_set_speed(lfsck, start->ls_speed_limit);
- if (start->ls_valid & LSV_METHOD && start->ls_method != LM_OTABLE) {
- cfs_mutex_unlock(&lfsck->ml_mutex);
- RETURN(-EOPNOTSUPP);
- }
-
if (start->ls_valid & LSV_ERROR_HANDLE) {
valid |= DOIV_ERROR_HANDLE;
if (start->ls_flags & LPF_FAILOUT)
thread_is_running(thread) ||
thread_is_stopped(thread),
&lwi);
- cfs_mutex_unlock(&lfsck->ml_mutex);
+ mutex_unlock(&lfsck->ml_mutex);
RETURN(rc < 0 ? rc : 0);
}
struct l_wait_info lwi = { 0 };
ENTRY;
- cfs_mutex_lock(&lfsck->ml_mutex);
- cfs_spin_lock(&lfsck->ml_lock);
+ mutex_lock(&lfsck->ml_mutex);
+ spin_lock(&lfsck->ml_lock);
if (thread_is_init(thread) || thread_is_stopped(thread)) {
- cfs_spin_unlock(&lfsck->ml_lock);
- cfs_mutex_unlock(&lfsck->ml_mutex);
+ spin_unlock(&lfsck->ml_lock);
+ mutex_unlock(&lfsck->ml_mutex);
RETURN(-EALREADY);
}
thread_set_flags(thread, SVC_STOPPING);
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
cfs_waitq_broadcast(&thread->t_ctl_waitq);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread),
&lwi);
- cfs_mutex_unlock(&lfsck->ml_mutex);
+ mutex_unlock(&lfsck->ml_mutex);
RETURN(0);
}
struct dt_object *obj;
int rc;
+ memset(lfsck, 0, sizeof(*lfsck));
lfsck->ml_version = LFSCK_VERSION_V1;
cfs_waitq_init(&lfsck->ml_thread.t_ctl_waitq);
- cfs_mutex_init(&lfsck->ml_mutex);
- cfs_spin_lock_init(&lfsck->ml_lock);
+ mutex_init(&lfsck->ml_mutex);
+ spin_lock_init(&lfsck->ml_lock);
obj = dt_store_open(env, mdd->mdd_child, "", lfsck_bookmark_name,
&mdd_env_info(env)->mti_fid);
rc = obj->do_ops->do_index_try(env, obj, &dt_otable_features);
if (rc != 0) {
lu_object_put(env, &obj->do_lu);
+ if (rc == -ENOTSUPP)
+ rc = 0;
return rc;
}
struct md_lfsck *lfsck = &mdd->mdd_lfsck;
if (lfsck->ml_it_obj != NULL) {
+ lfsck->ml_paused = 1;
mdd_lfsck_stop(env, lfsck);
lu_object_put(env, &lfsck->ml_it_obj->do_lu);
lfsck->ml_it_obj = NULL;