"failout",
"dryrun",
"all_targets",
+ "broadcast",
+ "orphan",
+ "create_ostobj",
NULL
};
ltd = lfsck_tgt_get(&lfsck->li_mdt_descs, 0);
if (unlikely(ltd == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
parent = lfsck_object_find_by_dev(env, ltd->ltd_tgt,
&LU_LPF_FID);
ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
if (unlikely(ss == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
OBD_ALLOC_PTR(lfsck->li_seq);
if (lfsck->li_seq == NULL)
int flag;
int rc;
int i;
+ bool newline = (bits != 0 ? false : true);
- rc = snprintf(*buf, *len, "%s:%c", prefix, bits != 0 ? ' ' : '\n');
+ rc = snprintf(*buf, *len, "%s:%c", prefix, newline ? '\n' : ' ');
if (rc <= 0)
return -ENOSPC;
if (flag & bits) {
bits &= ~flag;
if (names[i] != NULL) {
+ if (bits == 0)
+ newline = true;
+
rc = snprintf(*buf, *len, "%s%c", names[i],
- bits != 0 ? ',' : '\n');
+ newline ? '\n' : ',');
if (rc <= 0)
return -ENOSPC;
}
}
}
+
+ if (!newline) {
+ rc = snprintf(*buf, *len, "\n");
+ if (rc <= 0)
+ return -ENOSPC;
+
+ *buf += rc;
+ *len -= rc;
+ }
+
return save - *len;
}
}
}
-static void __lfsck_set_speed(struct lfsck_instance *lfsck, __u32 limit)
+bool __lfsck_set_speed(struct lfsck_instance *lfsck, __u32 limit)
{
- lfsck->li_bookmark_ram.lb_speed_limit = limit;
+ bool dirty = false;
+
if (limit != LFSCK_SPEED_NO_LIMIT) {
if (limit > HZ) {
lfsck->li_sleep_rate = limit / HZ;
lfsck->li_sleep_jif = 0;
lfsck->li_sleep_rate = 0;
}
+
+ if (lfsck->li_bookmark_ram.lb_speed_limit != limit) {
+ lfsck->li_bookmark_ram.lb_speed_limit = limit;
+ dirty = true;
+ }
+
+ return dirty;
}
void lfsck_control_speed(struct lfsck_instance *lfsck)
return 1;
}
- /* .lustre doesn't contain "real" user objects, no need lfsck */
- if (fid_is_dot_lustre(lfsck_dto2fid(obj))) {
+ /* No need to check .lustre and its children. */
+ if (fid_seq_is_dot_lustre(fid_seq(lfsck_dto2fid(obj)))) {
if (depth > 0)
lfsck_object_put(env, obj);
return 0;
return 0;
}
- /* Currently, only client visible directory can be remote. */
if (dt_object_remote(obj)) {
+		/* .lustre/lost+found/MDTxxx can be a remote directory. */
+ if (fid_seq_is_dot_lustre(fid_seq(lfsck_dto2fid(obj))))
+ rc = 0;
+ else
+			/* Any other remote directory should be client
+			 * visible and needs to be checked. */
+ rc = 1;
lfsck_object_put(env, obj);
- return 1;
+ return rc;
}
depth++;
struct lfsck_async_interpret_args *laia = args;
struct lfsck_component *com;
+ LASSERT(laia->laia_com == NULL);
LASSERT(laia->laia_shared);
spin_lock(&lfsck->li_lock);
int rc = 0;
int rc1 = 0;
- cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
- lc_link) {
+ list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
com->lc_journal = 0;
atomic_read(&lfsck->li_double_scan_count) == 0,
&lwi);
+ if (lfsck->li_status != LS_PAUSED &&
+ lfsck->li_status != LS_CO_PAUSED) {
+ list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
+ lc_link) {
+ spin_lock(&lfsck->li_lock);
+ list_del_init(&com->lc_link);
+ list_add_tail(&com->lc_link, &lfsck->li_list_idle);
+ spin_unlock(&lfsck->li_lock);
+ }
+ }
+
return rc1 != 0 ? rc1 : rc;
}
lc_link) {
if (com->lc_ops->lfsck_quit != NULL)
com->lc_ops->lfsck_quit(env, com);
+
+ spin_lock(&lfsck->li_lock);
+ list_del_init(&com->lc_link);
+ list_del_init(&com->lc_link_dir);
+ list_add_tail(&com->lc_link, &lfsck->li_list_idle);
+ spin_unlock(&lfsck->li_lock);
}
list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
lc_link) {
if (com->lc_ops->lfsck_quit != NULL)
com->lc_ops->lfsck_quit(env, com);
+
+ spin_lock(&lfsck->li_lock);
+ list_del_init(&com->lc_link);
+ list_add_tail(&com->lc_link, &lfsck->li_list_idle);
+ spin_unlock(&lfsck->li_lock);
}
}
struct req_format *format;
int rc;
- if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
- return -EOPNOTSUPP;
-
switch (request) {
case LFSCK_NOTIFY:
format = &RQF_LFSCK_NOTIFY;
lfsck->li_bookmark_ram.lb_speed_limit);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
mutex_lock(&lfsck->li_mutex);
- __lfsck_set_speed(lfsck, val);
- rc = lfsck_bookmark_store(&env, lfsck);
+ if (__lfsck_set_speed(lfsck, val))
+ rc = lfsck_bookmark_store(&env, lfsck);
mutex_unlock(&lfsck->li_mutex);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck->li_bookmark_ram.lb_async_windows);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
if (val > LFSCK_ASYNC_WIN_MAX) {
- CERROR("%s: Too large async windows size, which "
- "may cause memory issues. The valid range "
- "is [0 - %u]. If you do not want to restrict "
- "the windows size for async requests pipeline, "
- "just set it as 0.\n",
- lfsck_lfsck2name(lfsck), LFSCK_ASYNC_WIN_MAX);
+ CWARN("%s: Too large async window size, which "
+ "may cause memory issues. The valid range "
+ "is [0 - %u]. If you do not want to restrict "
+ "the window size for async requests pipeline, "
+ "just set it as 0.\n",
+ lfsck_lfsck2name(lfsck), LFSCK_ASYNC_WIN_MAX);
rc = -EINVAL;
} else if (lfsck->li_bookmark_ram.lb_async_windows != val) {
mutex_lock(&lfsck->li_mutex);
}
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
struct lfsck_component *com;
struct l_wait_info lwi = { 0 };
struct lfsck_thread_args *lta;
- bool dirty = false;
long rc = 0;
__u16 valid = 0;
__u16 flags = 0;
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
/* System is not ready, try again later. */
if (unlikely(lfsck->li_namespace == NULL))
}
start->ls_version = bk->lb_version;
- if (start->ls_valid & LSV_SPEED_LIMIT) {
- __lfsck_set_speed(lfsck, start->ls_speed_limit);
- dirty = true;
- }
-
- if (start->ls_valid & LSV_ASYNC_WINDOWS &&
- bk->lb_async_windows != start->ls_async_windows) {
- bk->lb_async_windows = start->ls_async_windows;
- dirty = true;
- }
-
- if (start->ls_valid & LSV_ERROR_HANDLE) {
- valid |= DOIV_ERROR_HANDLE;
- if (start->ls_flags & LPF_FAILOUT)
- flags |= DOIF_FAILOUT;
-
- if ((start->ls_flags & LPF_FAILOUT) &&
- !(bk->lb_param & LPF_FAILOUT)) {
- bk->lb_param |= LPF_FAILOUT;
- dirty = true;
- } else if (!(start->ls_flags & LPF_FAILOUT) &&
- (bk->lb_param & LPF_FAILOUT)) {
- bk->lb_param &= ~LPF_FAILOUT;
- dirty = true;
- }
- }
-
- if (start->ls_valid & LSV_DRYRUN) {
- valid |= DOIV_DRYRUN;
- if (start->ls_flags & LPF_DRYRUN)
- flags |= DOIF_DRYRUN;
-
- if ((start->ls_flags & LPF_DRYRUN) &&
- !(bk->lb_param & LPF_DRYRUN)) {
- bk->lb_param |= LPF_DRYRUN;
- dirty = true;
- } else if (!(start->ls_flags & LPF_DRYRUN) &&
- (bk->lb_param & LPF_DRYRUN)) {
- bk->lb_param &= ~LPF_DRYRUN;
- lfsck->li_drop_dryrun = 1;
- dirty = true;
- }
- }
-
- if (bk->lb_param & LPF_ALL_TGT &&
- !(start->ls_flags & LPF_ALL_TGT)) {
- bk->lb_param &= ~LPF_ALL_TGT;
- dirty = true;
- } else if (!(bk->lb_param & LPF_ALL_TGT) &&
- start->ls_flags & LPF_ALL_TGT) {
- bk->lb_param |= LPF_ALL_TGT;
- dirty = true;
- }
-
- if (bk->lb_param & LPF_ORPHAN &&
- !(start->ls_flags & LPF_ORPHAN)) {
- bk->lb_param &= ~LPF_ORPHAN;
- dirty = true;
- } else if (!(bk->lb_param & LPF_ORPHAN) &&
- start->ls_flags & LPF_ORPHAN) {
- bk->lb_param |= LPF_ORPHAN;
- dirty = true;
- }
-
- if (dirty) {
- rc = lfsck_bookmark_store(env, lfsck);
- if (rc != 0)
- GOTO(out, rc);
- }
-
- if (start->ls_flags & LPF_RESET)
- flags |= DOIF_RESET;
if (start->ls_active != 0) {
struct lfsck_component *next;
GOTO(out, rc = -ENOTSUPP);
}
- cfs_list_for_each_entry_safe(com, next,
- &lfsck->li_list_scan, lc_link) {
+ list_for_each_entry_safe(com, next,
+ &lfsck->li_list_scan, lc_link) {
if (!(com->lc_type & start->ls_active)) {
rc = com->lc_ops->lfsck_post(env, com, 0,
false);
/* The component status will be updated
* when its prep() is called later by
* the LFSCK main engine. */
- cfs_list_del_init(&com->lc_link);
- cfs_list_add_tail(&com->lc_link,
- &lfsck->li_list_scan);
+ list_del_init(&com->lc_link);
+ list_add_tail(&com->lc_link,
+ &lfsck->li_list_scan);
}
start->ls_active &= ~type;
}
}
}
- cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
+ if (list_empty(&lfsck->li_list_scan)) {
+		/* The speed limit is used to control both the LFSCK and the
+		 * low layer scrub (if applied), so it needs to be handled first. */
+ if (start->ls_valid & LSV_SPEED_LIMIT) {
+ if (__lfsck_set_speed(lfsck, start->ls_speed_limit)) {
+ rc = lfsck_bookmark_store(env, lfsck);
+ if (rc != 0)
+ GOTO(out, rc);
+ }
+ }
+
+ goto trigger;
+ }
+
+ if (start->ls_flags & LPF_RESET)
+ flags |= DOIF_RESET;
+
+ rc = lfsck_set_param(env, lfsck, start, !!(flags & DOIF_RESET));
+ if (rc != 0)
+ GOTO(out, rc);
+
+ list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
start->ls_active |= com->lc_type;
if (flags & DOIF_RESET) {
rc = com->lc_ops->lfsck_reset(env, com, false);
trigger:
lfsck->li_args_dir = LUDA_64BITHASH | LUDA_VERIFY;
- if (bk->lb_param & LPF_DRYRUN) {
+ if (bk->lb_param & LPF_DRYRUN)
lfsck->li_args_dir |= LUDA_VERIFY_DRYRUN;
- valid |= DOIV_DRYRUN;
- flags |= DOIF_DRYRUN;
- }
- if (bk->lb_param & LPF_FAILOUT) {
+ if (start != NULL && start->ls_valid & LSV_ERROR_HANDLE) {
valid |= DOIV_ERROR_HANDLE;
- flags |= DOIF_FAILOUT;
+ if (start->ls_flags & LPF_FAILOUT)
+ flags |= DOIF_FAILOUT;
+ }
+
+ if (start != NULL && start->ls_valid & LSV_DRYRUN) {
+ valid |= DOIV_DRYRUN;
+ if (start->ls_flags & LPF_DRYRUN)
+ flags |= DOIF_DRYRUN;
}
- if (!cfs_list_empty(&lfsck->li_list_scan))
+ if (!list_empty(&lfsck->li_list_scan))
flags |= DOIF_OUTUSED;
lfsck->li_args_oit = (flags << DT_OTABLE_IT_FLAGS_SHIFT) | valid;
if (IS_ERR(lta))
GOTO(out, rc = PTR_ERR(lta));
+ __lfsck_set_speed(lfsck, bk->lb_speed_limit);
rc = PTR_ERR(kthread_run(lfsck_master_engine, lta, "lfsck"));
if (IS_ERR_VALUE(rc)) {
CERROR("%s: cannot start LFSCK thread: rc = %ld\n",
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
thread = &lfsck->li_thread;
/* release lfsck::li_mutex to avoid deadlock. */
case LE_PHASE2_DONE:
case LE_FID_ACCESSED:
case LE_PEER_EXIT:
- case LE_CONDITIONAL_DESTROY: {
+ case LE_CONDITIONAL_DESTROY:
+ case LE_PAIRS_VERIFY: {
struct lfsck_instance *lfsck;
struct lfsck_component *com;
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
com = lfsck_component_find(lfsck, lr->lr_active);
if (likely(com != NULL)) {
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
com = lfsck_component_find(lfsck, lr->lr_active);
if (likely(com != NULL)) {
struct ldlm_namespace *ns)
{
struct lfsck_instance *lfsck;
- int rc = -ENODEV;
+ int rc = -ENXIO;
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {