struct lfsck_thread_info *info = data;
lu_buf_free(&info->lti_linkea_buf);
+ lu_buf_free(&info->lti_big_buf);
OBD_FREE_PTR(info);
}
NULL,
"failout",
"dryrun",
+ "all_targets",
+ "broadcast",
+ "orphan",
+ "create_ostobj",
NULL
};
return NULL;
}
-static struct lfsck_component *
+struct lfsck_component *
lfsck_component_find(struct lfsck_instance *lfsck, __u16 type)
{
struct lfsck_component *com;
lfsck_component_put(env, com);
}
+/**
+ * Allocate a new FID from the LFSCK-private sequence and remember it in
+ * the in-memory bookmark (lfsck_bookmark::lb_last_fid), then persist the
+ * bookmark so allocation can resume correctly after a restart.
+ *
+ * \param[in]  env     execution environment
+ * \param[in]  lfsck   LFSCK instance that owns the seq client (li_seq)
+ * \param[out] fid     buffer to hold the newly allocated FID
+ * \param[in]  locked  true if the caller already holds lfsck::li_mutex
+ *
+ * \retval     result of seq_client_alloc_fid()/lfsck_bookmark_store():
+ *             >= 0 on success, negative errno on failure
+ */
+int lfsck_fid_alloc(const struct lu_env *env, struct lfsck_instance *lfsck,
+		    struct lu_fid *fid, bool locked)
+{
+	struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+	int rc = 0;
+	ENTRY;
+
+	/* Serialize the FID allocation with the bookmark update. */
+	if (!locked)
+		mutex_lock(&lfsck->li_mutex);
+
+	rc = seq_client_alloc_fid(env, lfsck->li_seq, fid);
+	if (rc >= 0) {
+		bk->lb_last_fid = *fid;
+		/* We do not care about whether the subsequent sub-operations
+		 * failed or not. The worst case is that one FID is lost that
+		 * is not a big issue for the LFSCK since it is relatively
+		 * rare for LFSCK create. */
+		rc = lfsck_bookmark_store(env, lfsck);
+	}
+
+	if (!locked)
+		mutex_unlock(&lfsck->li_mutex);
+
+	RETURN(rc);
+}
+
+static const char dot[] = ".";
+static const char dotdot[] = "..";
+
+/**
+ * Create the ".lustre/lost+found/MDTxxxx" directory when the parent
+ * ".lustre/lost+found" resides on the local device (MDT0 case).
+ *
+ * All sub-operations — create the child, insert "." and "..", link the
+ * child into the parent, bump both nlinks, and persist the bookmark with
+ * the new lb_lpf_fid — are packed into one local transaction, so they
+ * either all succeed or all fail together.  The "Na"/"Nb" comments pair
+ * each declaration step with its execution step.
+ *
+ * \param[in] env	execution environment
+ * \param[in] lfsck	LFSCK instance
+ * \param[in] parent	the ".lustre/lost+found" directory object
+ * \param[in] child	object for the new MDTxxxx directory
+ * \param[in] la	attributes for the new directory
+ * \param[in] dof	dt object format (directory)
+ * \param[in] name	entry name, "MDTxxxx"
+ *
+ * \retval 0 on success, negative errno on failure
+ */
+static int lfsck_create_lpf_local(const struct lu_env *env,
+				  struct lfsck_instance *lfsck,
+				  struct dt_object *parent,
+				  struct dt_object *child,
+				  struct lu_attr *la,
+				  struct dt_object_format *dof,
+				  const char *name)
+{
+	struct dt_device *dev = lfsck->li_bottom;
+	struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+	struct dt_object *bk_obj = lfsck->li_bookmark_obj;
+	const struct lu_fid *cfid = lu_object_fid(&child->do_lu);
+	struct thandle *th = NULL;
+	loff_t pos = 0;
+	int len = sizeof(struct lfsck_bookmark);
+	int rc = 0;
+	ENTRY;
+
+	th = dt_trans_create(env, dev);
+	if (IS_ERR(th))
+		RETURN(PTR_ERR(th));
+
+	/* 1a. create child */
+	rc = dt_declare_create(env, child, la, NULL, dof, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 2a. increase child nlink */
+	rc = dt_declare_ref_add(env, child, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 3a. insert name into parent dir */
+	rc = dt_declare_insert(env, parent, (const struct dt_rec *)cfid,
+			       (const struct dt_key *)name, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 4a. increase parent nlink */
+	rc = dt_declare_ref_add(env, parent, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 5a. update bookmark */
+	rc = dt_declare_record_write(env, bk_obj,
+				     lfsck_buf_get(env, bk, len), 0, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	rc = dt_trans_start_local(env, dev, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	dt_write_lock(env, child, 0);
+	/* 1b.1 create child */
+	rc = dt_create(env, child, la, NULL, dof, th);
+	if (rc != 0)
+		GOTO(unlock, rc);
+
+	if (unlikely(!dt_try_as_dir(env, child)))
+		GOTO(unlock, rc = -ENOTDIR);
+
+	/* 1b.2 insert dot into child dir */
+	rc = dt_insert(env, child, (const struct dt_rec *)cfid,
+		       (const struct dt_key *)dot, th, BYPASS_CAPA, 1);
+	if (rc != 0)
+		GOTO(unlock, rc);
+
+	/* 1b.3 insert dotdot into child dir */
+	rc = dt_insert(env, child, (const struct dt_rec *)&LU_LPF_FID,
+		       (const struct dt_key *)dotdot, th, BYPASS_CAPA, 1);
+	if (rc != 0)
+		GOTO(unlock, rc);
+
+	/* 2b. increase child nlink */
+	rc = dt_ref_add(env, child, th);
+	/* Drop the child lock before the parent is modified to keep the
+	 * locking single-object at a time. */
+	dt_write_unlock(env, child);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 3b. insert name into parent dir */
+	rc = dt_insert(env, parent, (const struct dt_rec *)cfid,
+		       (const struct dt_key *)name, th, BYPASS_CAPA, 1);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	dt_write_lock(env, parent, 0);
+	/* 4b. increase parent nlink */
+	rc = dt_ref_add(env, parent, th);
+	dt_write_unlock(env, parent);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	bk->lb_lpf_fid = *cfid;
+	lfsck_bookmark_cpu_to_le(&lfsck->li_bookmark_disk, bk);
+
+	/* 5b. update bookmark */
+	rc = dt_record_write(env, bk_obj,
+			     lfsck_buf_get(env, bk, len), &pos, th);
+
+	GOTO(stop, rc);
+
+unlock:
+	dt_write_unlock(env, child);
+
+stop:
+	dt_trans_stop(env, dev, th);
+
+	return rc;
+}
+
+/**
+ * Create the ".lustre/lost+found/MDTxxxx" directory when the parent
+ * ".lustre/lost+found" resides on a remote MDT.
+ *
+ * The create is split into two transactions (see the XXX comment below):
+ * transaction I builds the child locally; transaction II inserts the
+ * name into the remote parent, with compensating ref_del/destroy of the
+ * local child declared in case the remote update fails.
+ *
+ * \param[in] env	execution environment
+ * \param[in] lfsck	LFSCK instance
+ * \param[in] parent	the remote ".lustre/lost+found" directory object
+ * \param[in] child	object for the new local MDTxxxx directory
+ * \param[in] la	attributes for the new directory
+ * \param[in] dof	dt object format (directory)
+ * \param[in] name	entry name, "MDTxxxx"
+ *
+ * \retval 0 on success, negative errno on failure
+ */
+static int lfsck_create_lpf_remote(const struct lu_env *env,
+				   struct lfsck_instance *lfsck,
+				   struct dt_object *parent,
+				   struct dt_object *child,
+				   struct lu_attr *la,
+				   struct dt_object_format *dof,
+				   const char *name)
+{
+	struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+	struct dt_object *bk_obj = lfsck->li_bookmark_obj;
+	const struct lu_fid *cfid = lu_object_fid(&child->do_lu);
+	struct thandle *th = NULL;
+	struct dt_device *dev;
+	loff_t pos = 0;
+	int len = sizeof(struct lfsck_bookmark);
+	int rc = 0;
+	ENTRY;
+
+	/* Create .lustre/lost+found/MDTxxxx. */
+
+	/* XXX: Currently, cross-MDT create operation needs to create the child
+	 *	object firstly, then insert name into the parent directory. For
+	 *	this case, the child object resides on current MDT (local), but
+	 *	the parent ".lustre/lost+found" may be on remote MDT. It is not
+	 *	easy to contain all the sub-modifications orderly within single
+	 *	transaction.
+	 *
+	 *	To avoid more inconsistency, we split the create operation into
+	 *	two transactions:
+	 *
+	 *	1) create the child locally.
+	 *	2) insert the name "MDTXXXX" in the parent ".lustre/lost+found"
+	 *	   remotely and update the lfsck_bookmark::lb_lpf_fid locally.
+	 *
+	 *	If 1) done but 2) failed, then the worst case is that we lose
+	 *	one object locally, which is not a big issue. (can be repaired
+	 *	by LFSCK phase III) */
+
+	/* Transaction I: */
+
+	dev = lfsck->li_bottom;
+	th = dt_trans_create(env, dev);
+	if (IS_ERR(th))
+		RETURN(PTR_ERR(th));
+
+	/* 1a. create child locally. */
+	rc = dt_declare_create(env, child, la, NULL, dof, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 2a. increase child nlink locally. */
+	rc = dt_declare_ref_add(env, child, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	rc = dt_trans_start_local(env, dev, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	dt_write_lock(env, child, 0);
+	/* 1b. create child locally. */
+	rc = dt_create(env, child, la, NULL, dof, th);
+	if (rc != 0)
+		GOTO(unlock, rc);
+
+	if (unlikely(!dt_try_as_dir(env, child)))
+		GOTO(unlock, rc = -ENOTDIR);
+
+	/* 2b.1 insert dot into child dir locally. */
+	rc = dt_insert(env, child, (const struct dt_rec *)cfid,
+		       (const struct dt_key *)dot, th, BYPASS_CAPA, 1);
+	if (rc != 0)
+		GOTO(unlock, rc);
+
+	/* 2b.2 insert dotdot into child dir locally. */
+	rc = dt_insert(env, child, (const struct dt_rec *)&LU_LPF_FID,
+		       (const struct dt_key *)dotdot, th, BYPASS_CAPA, 1);
+	if (rc != 0)
+		GOTO(unlock, rc);
+
+	/* 2b.3 increase child nlink locally. */
+	rc = dt_ref_add(env, child, th);
+	dt_write_unlock(env, child);
+	/* Transaction I ends here; on failure the child may remain as a
+	 * local orphan to be cleaned by a later LFSCK run. */
+	dt_trans_stop(env, dev, th);
+	if (rc != 0)
+		RETURN(rc);
+
+	/* Transaction II: */
+
+	dev = lfsck->li_next;
+	th = dt_trans_create(env, dev);
+	if (IS_ERR(th))
+		RETURN(PTR_ERR(th));
+
+	/* 3a. insert name into parent dir remotely. */
+	rc = dt_declare_insert(env, parent, (const struct dt_rec *)cfid,
+			       (const struct dt_key *)name, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 4a. increase parent nlink remotely. */
+	rc = dt_declare_ref_add(env, parent, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 5a. decrease child nlink for dotdot locally if former remote
+	 * update failed. */
+	rc = dt_declare_ref_del(env, child, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 6a. decrease child nlink for dot locally if former remote
+	 * update failed. */
+	rc = dt_declare_ref_del(env, child, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 7a. destroy child locally if former remote update failed. */
+	rc = dt_declare_destroy(env, child, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 8a. update bookmark locally. */
+	rc = dt_declare_record_write(env, bk_obj,
+				     lfsck_buf_get(env, bk, len), 0, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	rc = dt_trans_start(env, dev, th);
+	if (rc != 0)
+		GOTO(stop, rc);
+
+	/* 3b. insert name into parent dir remotely. */
+	rc = dt_insert(env, parent, (const struct dt_rec *)cfid,
+		       (const struct dt_key *)name, th, BYPASS_CAPA, 1);
+	if (rc == 0) {
+		dt_write_lock(env, parent, 0);
+		/* 4b. increase parent nlink remotely. */
+		rc = dt_ref_add(env, parent, th);
+		dt_write_unlock(env, parent);
+	}
+	if (rc != 0) {
+		/* Remote update failed: roll back the local child created
+		 * in transaction I. */
+		/* 5b. decrease child nlink for dotdot locally. */
+		dt_ref_del(env, child, th);
+		/* 6b. decrease child nlink for dot locally. */
+		dt_ref_del(env, child, th);
+		/* 7b. destroy child locally. */
+		dt_destroy(env, child, th);
+		GOTO(stop, rc);
+	}
+
+	bk->lb_lpf_fid = *cfid;
+	lfsck_bookmark_cpu_to_le(&lfsck->li_bookmark_disk, bk);
+
+	/* 8b. update bookmark locally. */
+	rc = dt_record_write(env, bk_obj,
+			     lfsck_buf_get(env, bk, len), &pos, th);
+
+	GOTO(stop, rc);
+
+unlock:
+	dt_write_unlock(env, child);
+stop:
+	dt_trans_stop(env, dev, th);
+
+	return rc;
+}
+
+/* Do NOT create .lustre/lost+found/MDTxxxx when registering the lfsck
+ * instance, because MDT0 may not be ready for sequence allocation yet.
+ * We do that only when it is required, such as for orphan OST-objects
+ * repairing. */
+
+/**
+ * Find or create the ".lustre/lost+found/MDTxxxx" directory for this
+ * LFSCK instance and cache it as lfsck::li_lpf_obj.
+ *
+ * Resolves the FID from the bookmark, from a lookup in the parent (to
+ * cover a former partially-completed create), or by fresh allocation,
+ * then delegates to the local/remote create helper depending on whether
+ * this node is MDT0.
+ *
+ * \retval 0 on success (li_lpf_obj set), negative errno on failure
+ */
+int lfsck_create_lpf(const struct lu_env *env, struct lfsck_instance *lfsck)
+{
+	struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+	struct lfsck_thread_info *info = lfsck_env_info(env);
+	struct lu_fid *cfid = &info->lti_fid2;
+	struct lu_attr *la = &info->lti_la;
+	struct dt_object_format *dof = &info->lti_dof;
+	struct dt_object *parent = NULL;
+	struct dt_object *child = NULL;
+	char name[8];
+	int node = lfsck_dev_idx(lfsck->li_bottom);
+	int rc = 0;
+	ENTRY;
+
+	LASSERT(lfsck->li_master);
+
+	/* Use snprintf: "MDT%04x" fills name[8] exactly for a 16-bit
+	 * index, and snprintf guarantees no overflow and NUL-termination
+	 * even if the index were ever wider than expected. */
+	snprintf(name, sizeof(name), "MDT%04x", node);
+	if (node == 0) {
+		parent = lfsck_object_find_by_dev(env, lfsck->li_bottom,
+						  &LU_LPF_FID);
+	} else {
+		struct lfsck_tgt_desc *ltd;
+
+		ltd = lfsck_tgt_get(&lfsck->li_mdt_descs, 0);
+		if (unlikely(ltd == NULL))
+			RETURN(-ENXIO);
+
+		parent = lfsck_object_find_by_dev(env, ltd->ltd_tgt,
+						  &LU_LPF_FID);
+		lfsck_tgt_put(ltd);
+	}
+	if (IS_ERR(parent))
+		RETURN(PTR_ERR(parent));
+
+	if (unlikely(!dt_try_as_dir(env, parent)))
+		GOTO(out, rc = -ENOTDIR);
+
+	mutex_lock(&lfsck->li_mutex);
+	/* Another racer may have created it already. */
+	if (lfsck->li_lpf_obj != NULL)
+		GOTO(unlock, rc = 0);
+
+	if (fid_is_zero(&bk->lb_lpf_fid)) {
+		/* There is corner case that: in former LFSCK scanning we have
+		 * created the .lustre/lost+found/MDTxxxx but failed to update
+		 * the lfsck_bookmark::lb_lpf_fid successfully. So need lookup
+		 * it from MDT0 firstly. */
+		rc = dt_lookup(env, parent, (struct dt_rec *)cfid,
+			       (const struct dt_key *)name, BYPASS_CAPA);
+		if (rc != 0 && rc != -ENOENT)
+			GOTO(unlock, rc);
+
+		if (rc == 0) {
+			bk->lb_lpf_fid = *cfid;
+			rc = lfsck_bookmark_store(env, lfsck);
+		} else {
+			/* li_mutex is already held, so locked = true. */
+			rc = lfsck_fid_alloc(env, lfsck, cfid, true);
+		}
+		if (rc != 0)
+			GOTO(unlock, rc);
+	} else {
+		*cfid = bk->lb_lpf_fid;
+	}
+
+	child = lfsck_object_find_by_dev(env, lfsck->li_bottom, cfid);
+	if (IS_ERR(child))
+		GOTO(unlock, rc = PTR_ERR(child));
+
+	if (dt_object_exists(child) != 0) {
+		if (unlikely(!dt_try_as_dir(env, child)))
+			GOTO(unlock, rc = -ENOTDIR);
+
+		lfsck->li_lpf_obj = child;
+		GOTO(unlock, rc = 0);
+	}
+
+	memset(la, 0, sizeof(*la));
+	la->la_atime = la->la_mtime = la->la_ctime = cfs_time_current_sec();
+	la->la_mode = S_IFDIR | S_IRWXU;
+	la->la_valid = LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
+		       LA_UID | LA_GID;
+	memset(dof, 0, sizeof(*dof));
+	dof->dof_type = dt_mode_to_dft(S_IFDIR);
+
+	if (node == 0)
+		rc = lfsck_create_lpf_local(env, lfsck, parent, child, la,
+					    dof, name);
+	else
+		rc = lfsck_create_lpf_remote(env, lfsck, parent, child, la,
+					     dof, name);
+	if (rc == 0)
+		lfsck->li_lpf_obj = child;
+
+	GOTO(unlock, rc);
+
+unlock:
+	mutex_unlock(&lfsck->li_mutex);
+	if (rc != 0 && child != NULL && !IS_ERR(child))
+		lu_object_put(env, &child->do_lu);
+out:
+	if (parent != NULL && !IS_ERR(parent))
+		lu_object_put(env, &parent->do_lu);
+
+	return rc;
+}
+
+/**
+ * Initialize the LFSCK-private FID sequence client on top of the local
+ * sequence server, named "lfsck-<device name>".  If the bookmark holds
+ * a sane last-allocated FID, resume allocation from there.
+ *
+ * \retval 0 on success, negative errno on failure (li_seq stays NULL)
+ */
+static int lfsck_fid_init(struct lfsck_instance *lfsck)
+{
+	struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+	struct seq_server_site *ss;
+	char *prefix;
+	int rc = 0;
+	ENTRY;
+
+	ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
+	if (unlikely(ss == NULL))
+		RETURN(-ENXIO);
+
+	OBD_ALLOC_PTR(lfsck->li_seq);
+	if (lfsck->li_seq == NULL)
+		RETURN(-ENOMEM);
+
+	/* "+ 7" covers the "lfsck-" prefix (6 chars) plus the NUL. */
+	OBD_ALLOC(prefix, MAX_OBD_NAME + 7);
+	if (prefix == NULL)
+		GOTO(out, rc = -ENOMEM);
+
+	snprintf(prefix, MAX_OBD_NAME + 7, "lfsck-%s", lfsck_lfsck2name(lfsck));
+	rc = seq_client_init(lfsck->li_seq, NULL, LUSTRE_SEQ_METADATA, prefix,
+			     ss->ss_server_seq);
+	OBD_FREE(prefix, MAX_OBD_NAME + 7);
+	if (rc != 0)
+		GOTO(out, rc);
+
+	if (fid_is_sane(&bk->lb_last_fid))
+		lfsck->li_seq->lcs_fid = bk->lb_last_fid;
+
+	RETURN(0);
+
+out:
+	OBD_FREE_PTR(lfsck->li_seq);
+	lfsck->li_seq = NULL;
+
+	return rc;
+}
+
+/* Tear down the LFSCK-private FID sequence client, if one was set up.
+ * Safe to call when lfsck_fid_init() failed or was never run. */
+static void lfsck_fid_fini(struct lfsck_instance *lfsck)
+{
+	if (lfsck->li_seq == NULL)
+		return;
+
+	seq_client_fini(lfsck->li_seq);
+	OBD_FREE_PTR(lfsck->li_seq);
+	lfsck->li_seq = NULL;
+}
+
void lfsck_instance_cleanup(const struct lu_env *env,
struct lfsck_instance *lfsck)
{
LASSERT(list_empty(&lfsck->li_link));
LASSERT(thread_is_init(thread) || thread_is_stopped(thread));
- lfsck_tgt_descs_fini(&lfsck->li_ost_descs);
- lfsck_tgt_descs_fini(&lfsck->li_mdt_descs);
-
if (lfsck->li_obj_oit != NULL) {
lu_object_put_nocache(env, &lfsck->li_obj_oit->do_lu);
lfsck->li_obj_oit = NULL;
lfsck_component_cleanup(env, com);
}
+ lfsck_tgt_descs_fini(&lfsck->li_ost_descs);
+ lfsck_tgt_descs_fini(&lfsck->li_mdt_descs);
+
if (lfsck->li_bookmark_obj != NULL) {
lu_object_put_nocache(env, &lfsck->li_bookmark_obj->do_lu);
lfsck->li_bookmark_obj = NULL;
}
+ if (lfsck->li_lpf_obj != NULL) {
+ lu_object_put(env, &lfsck->li_lpf_obj->do_lu);
+ lfsck->li_lpf_obj = NULL;
+ }
+
if (lfsck->li_los != NULL) {
local_oid_storage_fini(env, lfsck->li_los);
lfsck->li_los = NULL;
}
+ lfsck_fid_fini(lfsck);
+
OBD_FREE_PTR(lfsck);
}
return NULL;
}
-static inline struct lfsck_instance *lfsck_instance_find(struct dt_device *key,
- bool ref, bool unlink)
+struct lfsck_instance *lfsck_instance_find(struct dt_device *key, bool ref,
+ bool unlink)
{
struct lfsck_instance *lfsck;
int flag;
int rc;
int i;
+ bool newline = (bits != 0 ? false : true);
- rc = snprintf(*buf, *len, "%s:%c", prefix, bits != 0 ? ' ' : '\n');
+ rc = snprintf(*buf, *len, "%s:%c", prefix, newline ? '\n' : ' ');
if (rc <= 0)
return -ENOSPC;
if (flag & bits) {
bits &= ~flag;
if (names[i] != NULL) {
+ if (bits == 0)
+ newline = true;
+
rc = snprintf(*buf, *len, "%s%c", names[i],
- bits != 0 ? ',' : '\n');
+ newline ? '\n' : ',');
if (rc <= 0)
return -ENOSPC;
}
}
}
+
+ if (!newline) {
+ rc = snprintf(*buf, *len, "\n");
+ if (rc <= 0)
+ return -ENOSPC;
+
+ *buf += rc;
+ *len -= rc;
+ }
+
return save - *len;
}
}
}
-static void __lfsck_set_speed(struct lfsck_instance *lfsck, __u32 limit)
+bool __lfsck_set_speed(struct lfsck_instance *lfsck, __u32 limit)
{
- lfsck->li_bookmark_ram.lb_speed_limit = limit;
+ bool dirty = false;
+
if (limit != LFSCK_SPEED_NO_LIMIT) {
if (limit > HZ) {
lfsck->li_sleep_rate = limit / HZ;
lfsck->li_sleep_jif = 0;
lfsck->li_sleep_rate = 0;
}
+
+ if (lfsck->li_bookmark_ram.lb_speed_limit != limit) {
+ lfsck->li_bookmark_ram.lb_speed_limit = limit;
+ dirty = true;
+ }
+
+ return dirty;
}
void lfsck_control_speed(struct lfsck_instance *lfsck)
return 1;
}
- /* .lustre doesn't contain "real" user objects, no need lfsck */
- if (fid_is_dot_lustre(lfsck_dto2fid(obj))) {
+ /* No need to check .lustre and its children. */
+ if (fid_seq_is_dot_lustre(fid_seq(lfsck_dto2fid(obj)))) {
if (depth > 0)
lfsck_object_put(env, obj);
return 0;
return 0;
}
- /* Currently, only client visible directory can be remote. */
if (dt_object_remote(obj)) {
+ /* .lustre/lost+found/MDTxxx can be remote directory. */
+ if (fid_seq_is_dot_lustre(fid_seq(lfsck_dto2fid(obj))))
+ rc = 0;
+ else
+ /* Other remote directory should be client
+ * visible and need to be checked. */
+ rc = 1;
lfsck_object_put(env, obj);
- return 1;
+ return rc;
}
depth++;
return result;
}
+/**
+ * Fan an async RPC result out to every registered LFSCK component.
+ *
+ * Walks both the scan and double-scan component lists and invokes each
+ * component's ->lfsck_interpret() hook with \a laia (laia->laia_com is
+ * set to the current component before each call).
+ *
+ * \param[in] req	the RPC reply, or NULL when the send itself failed
+ * \param[in] args	the shared lfsck_async_interpret_args
+ * \param[in] result	the RPC (or send) result code
+ *
+ * NOTE(review): the component hooks run while holding lfsck::li_lock
+ * (a spinlock), so they must not sleep — confirm with each component's
+ * lfsck_interpret implementation.
+ */
+static void lfsck_interpret(const struct lu_env *env,
+			    struct lfsck_instance *lfsck,
+			    struct ptlrpc_request *req, void *args, int result)
+{
+	struct lfsck_async_interpret_args *laia = args;
+	struct lfsck_component *com;
+
+	/* Only shared (broadcast) requests, not bound to one component. */
+	LASSERT(laia->laia_com == NULL);
+	LASSERT(laia->laia_shared);
+
+	spin_lock(&lfsck->li_lock);
+	list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
+		if (com->lc_ops->lfsck_interpret != NULL) {
+			laia->laia_com = com;
+			com->lc_ops->lfsck_interpret(env, req, laia, result);
+		}
+	}
+
+	list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
+		if (com->lc_ops->lfsck_interpret != NULL) {
+			laia->laia_com = com;
+			com->lc_ops->lfsck_interpret(env, req, laia, result);
+		}
+	}
+	spin_unlock(&lfsck->li_lock);
+}
+
int lfsck_double_scan(const struct lu_env *env, struct lfsck_instance *lfsck)
{
struct lfsck_component *com;
int rc = 0;
int rc1 = 0;
- cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
- lc_link) {
+ list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
com->lc_journal = 0;
atomic_read(&lfsck->li_double_scan_count) == 0,
&lwi);
+ if (lfsck->li_status != LS_PAUSED &&
+ lfsck->li_status != LS_CO_PAUSED) {
+ list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
+ lc_link) {
+ spin_lock(&lfsck->li_lock);
+ list_del_init(&com->lc_link);
+ list_add_tail(&com->lc_link, &lfsck->li_list_idle);
+ spin_unlock(&lfsck->li_lock);
+ }
+ }
+
return rc1 != 0 ? rc1 : rc;
}
+/**
+ * Notify the single component identified by \a type that target \a ltd
+ * is stopping, and synchronously wait for the resulting RPC set.
+ *
+ * \retval 0 on success or when the component has no stop_notify hook,
+ *	   negative errno on failure
+ */
-int lfsck_stop_notify(const struct lu_env *env, struct lfsck_instance *lfsck,
-		      struct lfsck_tgt_descs *ltds, struct lfsck_tgt_desc *ltd)
+static int lfsck_stop_notify(const struct lu_env *env,
+			     struct lfsck_instance *lfsck,
+			     struct lfsck_tgt_descs *ltds,
+			     struct lfsck_tgt_desc *ltd, __u16 type)
 {
 	struct ptlrpc_request_set *set;
 	struct lfsck_component *com;
-	int cnt = 0;
 	int rc = 0;
-	int rc1 = 0;
+	ENTRY;
 
-	set = ptlrpc_prep_set();
-	if (set == NULL)
-		return -ENOMEM;
+	spin_lock(&lfsck->li_lock);
+	com = __lfsck_component_find(lfsck, type, &lfsck->li_list_scan);
+	if (com == NULL)
+		com = __lfsck_component_find(lfsck, type,
+					     &lfsck->li_list_double_scan);
+	if (com != NULL)
+		lfsck_component_get(com);
+	/* Must be spin_unlock here (not a second spin_lock, which would
+	 * self-deadlock); the reference taken above keeps "com" alive
+	 * after the lock is dropped. */
+	spin_unlock(&lfsck->li_lock);
 
-	list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
+	if (com != NULL) {
 		if (com->lc_ops->lfsck_stop_notify != NULL) {
-			rc = com->lc_ops->lfsck_stop_notify(env, com, ltds,
-							    ltd, set);
-			if (rc != 0)
-				rc1 = rc;
-			else
-				cnt++;
-		}
-	}
+			set = ptlrpc_prep_set();
+			if (set == NULL) {
+				lfsck_component_put(env, com);
+
+				RETURN(-ENOMEM);
+			}
 
-	list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
-		if (com->lc_ops->lfsck_stop_notify != NULL) {
 			rc = com->lc_ops->lfsck_stop_notify(env, com, ltds,
 							    ltd, set);
-			if (rc != 0)
-				rc1 = rc;
-			else
-				cnt++;
+			if (rc == 0)
+				rc = ptlrpc_set_wait(set);
+
+			ptlrpc_set_destroy(set);
 		}
-	}
 
-	if (cnt > 0)
-		rc = ptlrpc_set_wait(set);
-	ptlrpc_set_destroy(set);
+		lfsck_component_put(env, com);
+	}
 
-	return rc1 != 0 ? rc1 : rc;
+	RETURN(rc);
 }
void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
lc_link) {
if (com->lc_ops->lfsck_quit != NULL)
com->lc_ops->lfsck_quit(env, com);
+
+ spin_lock(&lfsck->li_lock);
+ list_del_init(&com->lc_link);
+ list_del_init(&com->lc_link_dir);
+ list_add_tail(&com->lc_link, &lfsck->li_list_idle);
+ spin_unlock(&lfsck->li_lock);
}
list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
lc_link) {
if (com->lc_ops->lfsck_quit != NULL)
com->lc_ops->lfsck_quit(env, com);
+
+ spin_lock(&lfsck->li_lock);
+ list_del_init(&com->lc_link);
+ list_add_tail(&com->lc_link, &lfsck->li_list_idle);
+ spin_unlock(&lfsck->li_lock);
}
}
+/**
+ * ptlrpc interpret callback for broadcast LFSCK requests sent to MDTs.
+ *
+ * Recovers the lfsck instance from the embedded li_mdt_descs pointer,
+ * fans the reply out to all components via lfsck_interpret(), releases
+ * the target reference taken by the sender, and records the first real
+ * error (anything but -EALREADY) in laia->laia_result.
+ *
+ * \retval 0 always (per-request errors are folded into laia_result)
+ */
+static int lfsck_async_interpret(const struct lu_env *env,
+				 struct ptlrpc_request *req,
+				 void *args, int rc)
+{
+	struct lfsck_async_interpret_args *laia = args;
+	struct lfsck_instance *lfsck;
+
+	lfsck = container_of0(laia->laia_ltds, struct lfsck_instance,
+			      li_mdt_descs);
+	lfsck_interpret(env, lfsck, req, laia, rc);
+	/* Balance the lfsck_tgt_get() done when the request was queued. */
+	lfsck_tgt_put(laia->laia_ltd);
+	if (rc != 0 && laia->laia_result != -EALREADY)
+		laia->laia_result = rc;
+
+	return 0;
+}
+
int lfsck_async_request(const struct lu_env *env, struct obd_export *exp,
struct lfsck_request *lr,
struct ptlrpc_request_set *set,
struct req_format *format;
int rc;
- if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
- return -EOPNOTSUPP;
-
switch (request) {
case LFSCK_NOTIFY:
format = &RQF_LFSCK_NOTIFY;
laia = ptlrpc_req_async_args(req);
*laia = *(struct lfsck_async_interpret_args *)args;
+ if (laia->laia_com != NULL)
+ lfsck_component_get(laia->laia_com);
req->rq_interpret_reply = interpreter;
ptlrpc_set_add_req(set, req);
lfsck->li_bookmark_ram.lb_speed_limit);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
mutex_lock(&lfsck->li_mutex);
- __lfsck_set_speed(lfsck, val);
- rc = lfsck_bookmark_store(&env, lfsck);
+ if (__lfsck_set_speed(lfsck, val))
+ rc = lfsck_bookmark_store(&env, lfsck);
mutex_unlock(&lfsck->li_mutex);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck->li_bookmark_ram.lb_async_windows);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
if (val > LFSCK_ASYNC_WIN_MAX) {
- CERROR("%s: Too large async windows size, which "
- "may cause memory issues. The valid range "
- "is [0 - %u]. If you do not want to restrict "
- "the windows size for async requests pipeline, "
- "just set it as 0.\n",
- lfsck_lfsck2name(lfsck), LFSCK_ASYNC_WIN_MAX);
+ CWARN("%s: Too large async window size, which "
+ "may cause memory issues. The valid range "
+ "is [0 - %u]. If you do not want to restrict "
+ "the window size for async requests pipeline, "
+ "just set it as 0.\n",
+ lfsck_lfsck2name(lfsck), LFSCK_ASYNC_WIN_MAX);
rc = -EINVAL;
} else if (lfsck->li_bookmark_ram.lb_async_windows != val) {
mutex_lock(&lfsck->li_mutex);
}
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
}
EXPORT_SYMBOL(lfsck_dump);
+/**
+ * Broadcast an LE_STOP event to every MDT in li_mdt_descs and wait for
+ * all replies.  Used when the user requested "-A" (LPF_BROADCAST).
+ *
+ * \param[in] stop	stop parameters; ls_flags must carry LPF_BROADCAST
+ *
+ * \retval 0 on success (-EALREADY from targets is treated as success),
+ *	   negative errno if the broadcast or any target failed
+ */
+static int lfsck_stop_all(const struct lu_env *env,
+			  struct lfsck_instance *lfsck,
+			  struct lfsck_stop *stop)
+{
+	struct lfsck_thread_info	  *info	= lfsck_env_info(env);
+	struct lfsck_request		  *lr	= &info->lti_lr;
+	struct lfsck_async_interpret_args *laia = &info->lti_laia;
+	struct ptlrpc_request_set	  *set;
+	struct lfsck_tgt_descs		  *ltds = &lfsck->li_mdt_descs;
+	struct lfsck_tgt_desc		  *ltd;
+	struct lfsck_bookmark		  *bk	= &lfsck->li_bookmark_ram;
+	__u32				   idx;
+	int				   rc	= 0;
+	int				   rc1	= 0;
+	ENTRY;
+
+	LASSERT(stop->ls_flags & LPF_BROADCAST);
+
+	set = ptlrpc_prep_set();
+	if (unlikely(set == NULL)) {
+		CERROR("%s: cannot allocate memory for stop LFSCK on "
+		       "all targets\n", lfsck_lfsck2name(lfsck));
+
+		RETURN(-ENOMEM);
+	}
+
+	/* Build the shared LE_STOP request once; it is copied into each
+	 * queued RPC by lfsck_async_request(). */
+	memset(lr, 0, sizeof(*lr));
+	lr->lr_event = LE_STOP;
+	lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+	lr->lr_status = stop->ls_status;
+	lr->lr_version = bk->lb_version;
+	lr->lr_active = LFSCK_TYPES_ALL;
+	lr->lr_param = stop->ls_flags;
+
+	laia->laia_com = NULL;
+	laia->laia_ltds = ltds;
+	laia->laia_lr = lr;
+	laia->laia_result = 0;
+	laia->laia_shared = 1;
+
+	down_read(&ltds->ltd_rw_sem);
+	cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
+		ltd = lfsck_tgt_get(ltds, idx);
+		LASSERT(ltd != NULL);
+
+		laia->laia_ltd = ltd;
+		rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
+					 lfsck_async_interpret, laia,
+					 LFSCK_NOTIFY);
+		if (rc != 0) {
+			/* Send failed: interpret locally and drop the target
+			 * reference ourselves, then keep going so the other
+			 * MDTs are still notified. */
+			lfsck_interpret(env, lfsck, NULL, laia, rc);
+			lfsck_tgt_put(ltd);
+			CWARN("%s: cannot notify MDT %x for LFSCK stop: "
+			      "rc = %d\n", lfsck_lfsck2name(lfsck), idx, rc);
+			rc1 = rc;
+		}
+	}
+	up_read(&ltds->ltd_rw_sem);
+
+	rc = ptlrpc_set_wait(set);
+	ptlrpc_set_destroy(set);
+
+	if (rc == 0)
+		rc = laia->laia_result;
+
+	/* A target that already stopped is not an error. */
+	if (rc == -EALREADY)
+		rc = 0;
+
+	if (rc != 0)
+		CWARN("%s: fail to stop LFSCK on some MDTs: rc = %d\n",
+		      lfsck_lfsck2name(lfsck), rc);
+
+	RETURN(rc != 0 ? rc : rc1);
+}
+
+/**
+ * Broadcast an LE_START event to every MDT in li_mdt_descs and wait for
+ * all replies.  Used when the user requested "-A" (LPF_BROADCAST).
+ *
+ * Failure handling depends on the bookmark's LPF_FAILOUT flag: with
+ * failout, any start failure aborts and broadcasts a compensating stop;
+ * without it, failures are logged and the scan proceeds partially.
+ *
+ * \param[in] start	start parameters; ls_flags must carry LPF_BROADCAST
+ *
+ * \retval 0 on success (or partial success without LPF_FAILOUT),
+ *	   negative errno on failout-mode failure
+ */
+static int lfsck_start_all(const struct lu_env *env,
+			   struct lfsck_instance *lfsck,
+			   struct lfsck_start *start)
+{
+	struct lfsck_thread_info	  *info	= lfsck_env_info(env);
+	struct lfsck_request		  *lr	= &info->lti_lr;
+	struct lfsck_async_interpret_args *laia = &info->lti_laia;
+	struct ptlrpc_request_set	  *set;
+	struct lfsck_tgt_descs		  *ltds = &lfsck->li_mdt_descs;
+	struct lfsck_tgt_desc		  *ltd;
+	struct lfsck_bookmark		  *bk	= &lfsck->li_bookmark_ram;
+	__u32				   idx;
+	int				   rc	= 0;
+	ENTRY;
+
+	LASSERT(start->ls_flags & LPF_BROADCAST);
+
+	set = ptlrpc_prep_set();
+	if (unlikely(set == NULL)) {
+		if (bk->lb_param & LPF_FAILOUT) {
+			CERROR("%s: cannot allocate memory for start LFSCK on "
+			       "all targets, failout.\n",
+			       lfsck_lfsck2name(lfsck));
+
+			RETURN(-ENOMEM);
+		} else {
+			CWARN("%s: cannot allocate memory for start LFSCK on "
+			      "all targets, partly scan.\n",
+			      lfsck_lfsck2name(lfsck));
+
+			RETURN(0);
+		}
+	}
+
+	/* Build the shared LE_START request carrying the local tuning
+	 * (speed limit, async window, dryrun/failout flags) so remote
+	 * MDTs start with matching parameters. */
+	memset(lr, 0, sizeof(*lr));
+	lr->lr_event = LE_START;
+	lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+	lr->lr_speed = bk->lb_speed_limit;
+	lr->lr_version = bk->lb_version;
+	lr->lr_active = start->ls_active;
+	lr->lr_param = start->ls_flags;
+	lr->lr_async_windows = bk->lb_async_windows;
+	lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
+		       LSV_ASYNC_WINDOWS;
+
+	laia->laia_com = NULL;
+	laia->laia_ltds = ltds;
+	laia->laia_lr = lr;
+	laia->laia_result = 0;
+	laia->laia_shared = 1;
+
+	down_read(&ltds->ltd_rw_sem);
+	cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
+		ltd = lfsck_tgt_get(ltds, idx);
+		LASSERT(ltd != NULL);
+
+		laia->laia_ltd = ltd;
+		ltd->ltd_layout_done = 0;
+		rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
+					 lfsck_async_interpret, laia,
+					 LFSCK_NOTIFY);
+		if (rc != 0) {
+			lfsck_interpret(env, lfsck, NULL, laia, rc);
+			lfsck_tgt_put(ltd);
+			if (bk->lb_param & LPF_FAILOUT) {
+				CERROR("%s: cannot notify MDT %x for LFSCK "
+				       "start, failout: rc = %d\n",
+				       lfsck_lfsck2name(lfsck), idx, rc);
+				break;
+			} else {
+				CWARN("%s: cannot notify MDT %x for LFSCK "
+				      "start, partly scan: rc = %d\n",
+				      lfsck_lfsck2name(lfsck), idx, rc);
+				rc = 0;
+			}
+		}
+	}
+	up_read(&ltds->ltd_rw_sem);
+
+	if (rc != 0) {
+		ptlrpc_set_destroy(set);
+
+		RETURN(rc);
+	}
+
+	rc = ptlrpc_set_wait(set);
+	ptlrpc_set_destroy(set);
+
+	if (rc == 0)
+		rc = laia->laia_result;
+
+	if (rc != 0) {
+		if (bk->lb_param & LPF_FAILOUT) {
+			struct lfsck_stop *stop = &info->lti_stop;
+
+			CERROR("%s: cannot start LFSCK on some MDTs, "
+			       "stop all: rc = %d\n",
+			       lfsck_lfsck2name(lfsck), rc);
+			/* -EALREADY means a target was already running;
+			 * do not tear everything down for that. */
+			if (rc != -EALREADY) {
+				stop->ls_status = LS_FAILED;
+				stop->ls_flags = LPF_ALL_TGT | LPF_BROADCAST;
+				lfsck_stop_all(env, lfsck, stop);
+			}
+		} else {
+			CWARN("%s: cannot start LFSCK on some MDTs, "
+			      "partly scan: rc = %d\n",
+			      lfsck_lfsck2name(lfsck), rc);
+			rc = 0;
+		}
+	}
+
+	RETURN(rc);
+}
+
int lfsck_start(const struct lu_env *env, struct dt_device *key,
struct lfsck_start_param *lsp)
{
struct lfsck_component *com;
struct l_wait_info lwi = { 0 };
struct lfsck_thread_args *lta;
- bool dirty = false;
long rc = 0;
__u16 valid = 0;
__u16 flags = 0;
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
+
+ /* System is not ready, try again later. */
+ if (unlikely(lfsck->li_namespace == NULL))
+ GOTO(put, rc = -EAGAIN);
/* start == NULL means auto trigger paused LFSCK. */
if ((start == NULL) &&
if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
rc = -EALREADY;
while (start->ls_active != 0) {
- if (type & start->ls_active) {
+ if (!(type & start->ls_active)) {
+ type <<= 1;
+ continue;
+ }
+
+ com = __lfsck_component_find(lfsck, type,
+ &lfsck->li_list_scan);
+ if (com == NULL)
com = __lfsck_component_find(lfsck, type,
- &lfsck->li_list_scan);
- if (com == NULL)
- com = __lfsck_component_find(lfsck,
- type,
&lfsck->li_list_double_scan);
- if (com == NULL) {
- rc = -EBUSY;
+ if (com == NULL) {
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ if (com->lc_ops->lfsck_join != NULL) {
+ rc = com->lc_ops->lfsck_join( env, com, lsp);
+ if (rc != 0 && rc != -EALREADY)
break;
- } else {
- start->ls_active &= ~type;
- }
}
+ start->ls_active &= ~type;
type <<= 1;
}
spin_unlock(&lfsck->li_lock);
}
spin_unlock(&lfsck->li_lock);
- lfsck->li_namespace = lsp->lsp_namespace;
lfsck->li_status = 0;
lfsck->li_oit_over = 0;
+ lfsck->li_start_unplug = 0;
lfsck->li_drop_dryrun = 0;
lfsck->li_new_scanned = 0;
if (start == NULL)
goto trigger;
- start->ls_version = bk->lb_version;
- if (start->ls_valid & LSV_SPEED_LIMIT) {
- __lfsck_set_speed(lfsck, start->ls_speed_limit);
- dirty = true;
- }
+ if (start->ls_flags & LPF_BROADCAST && !lfsck->li_master) {
+ CERROR("%s: only allow to specify '-A | -o' via MDS\n",
+ lfsck_lfsck2name(lfsck));
- if (start->ls_valid & LSV_ASYNC_WINDOWS &&
- bk->lb_async_windows != start->ls_async_windows) {
- bk->lb_async_windows = start->ls_async_windows;
- dirty = true;
+ GOTO(out, rc = -EPERM);
}
- if (start->ls_valid & LSV_ERROR_HANDLE) {
- valid |= DOIV_ERROR_HANDLE;
- if (start->ls_flags & LPF_FAILOUT)
- flags |= DOIF_FAILOUT;
-
- if ((start->ls_flags & LPF_FAILOUT) &&
- !(bk->lb_param & LPF_FAILOUT)) {
- bk->lb_param |= LPF_FAILOUT;
- dirty = true;
- } else if (!(start->ls_flags & LPF_FAILOUT) &&
- (bk->lb_param & LPF_FAILOUT)) {
- bk->lb_param &= ~LPF_FAILOUT;
- dirty = true;
- }
- }
-
- if (start->ls_valid & LSV_DRYRUN) {
- valid |= DOIV_DRYRUN;
- if (start->ls_flags & LPF_DRYRUN)
- flags |= DOIF_DRYRUN;
-
- if ((start->ls_flags & LPF_DRYRUN) &&
- !(bk->lb_param & LPF_DRYRUN)) {
- bk->lb_param |= LPF_DRYRUN;
- dirty = true;
- } else if (!(start->ls_flags & LPF_DRYRUN) &&
- (bk->lb_param & LPF_DRYRUN)) {
- bk->lb_param &= ~LPF_DRYRUN;
- lfsck->li_drop_dryrun = 1;
- dirty = true;
- }
- }
-
- if (dirty) {
- rc = lfsck_bookmark_store(env, lfsck);
- if (rc != 0)
- GOTO(out, rc);
- }
-
- if (start->ls_flags & LPF_RESET)
- flags |= DOIF_RESET;
+ start->ls_version = bk->lb_version;
if (start->ls_active != 0) {
struct lfsck_component *next;
GOTO(out, rc = -ENOTSUPP);
}
- cfs_list_for_each_entry_safe(com, next,
- &lfsck->li_list_scan, lc_link) {
+ list_for_each_entry_safe(com, next,
+ &lfsck->li_list_scan, lc_link) {
if (!(com->lc_type & start->ls_active)) {
rc = com->lc_ops->lfsck_post(env, com, 0,
false);
/* The component status will be updated
* when its prep() is called later by
* the LFSCK main engine. */
- cfs_list_del_init(&com->lc_link);
- cfs_list_add_tail(&com->lc_link,
- &lfsck->li_list_scan);
+ list_del_init(&com->lc_link);
+ list_add_tail(&com->lc_link,
+ &lfsck->li_list_scan);
}
start->ls_active &= ~type;
}
}
}
- cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
+ if (list_empty(&lfsck->li_list_scan)) {
+ /* The speed limit will be used to control both the LFSCK and
+ * low layer scrub (if applied), need to be handled firstly. */
+ if (start->ls_valid & LSV_SPEED_LIMIT) {
+ if (__lfsck_set_speed(lfsck, start->ls_speed_limit)) {
+ rc = lfsck_bookmark_store(env, lfsck);
+ if (rc != 0)
+ GOTO(out, rc);
+ }
+ }
+
+ goto trigger;
+ }
+
+ if (start->ls_flags & LPF_RESET)
+ flags |= DOIF_RESET;
+
+ rc = lfsck_set_param(env, lfsck, start, !!(flags & DOIF_RESET));
+ if (rc != 0)
+ GOTO(out, rc);
+
+ list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
start->ls_active |= com->lc_type;
if (flags & DOIF_RESET) {
rc = com->lc_ops->lfsck_reset(env, com, false);
trigger:
lfsck->li_args_dir = LUDA_64BITHASH | LUDA_VERIFY;
- if (bk->lb_param & LPF_DRYRUN) {
+ if (bk->lb_param & LPF_DRYRUN)
lfsck->li_args_dir |= LUDA_VERIFY_DRYRUN;
- valid |= DOIV_DRYRUN;
- flags |= DOIF_DRYRUN;
- }
- if (bk->lb_param & LPF_FAILOUT) {
+ if (start != NULL && start->ls_valid & LSV_ERROR_HANDLE) {
valid |= DOIV_ERROR_HANDLE;
- flags |= DOIF_FAILOUT;
+ if (start->ls_flags & LPF_FAILOUT)
+ flags |= DOIF_FAILOUT;
}
- if (!cfs_list_empty(&lfsck->li_list_scan))
+ if (start != NULL && start->ls_valid & LSV_DRYRUN) {
+ valid |= DOIV_DRYRUN;
+ if (start->ls_flags & LPF_DRYRUN)
+ flags |= DOIF_DRYRUN;
+ }
+
+ if (!list_empty(&lfsck->li_list_scan))
flags |= DOIF_OUTUSED;
lfsck->li_args_oit = (flags << DT_OTABLE_IT_FLAGS_SHIFT) | valid;
if (IS_ERR(lta))
GOTO(out, rc = PTR_ERR(lta));
+ __lfsck_set_speed(lfsck, bk->lb_speed_limit);
rc = PTR_ERR(kthread_run(lfsck_master_engine, lta, "lfsck"));
if (IS_ERR_VALUE(rc)) {
CERROR("%s: cannot start LFSCK thread: rc = %ld\n",
lfsck_lfsck2name(lfsck), rc);
lfsck_thread_args_fini(lta);
+
+ GOTO(out, rc);
+ }
+
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_running(thread) ||
+ thread_is_stopped(thread),
+ &lwi);
+ if (start == NULL || !(start->ls_flags & LPF_BROADCAST)) {
+ lfsck->li_start_unplug = 1;
+ wake_up_all(&thread->t_ctl_waitq);
+
+ GOTO(out, rc = 0);
+ }
+
+ /* release lfsck::li_mutex to avoid deadlock. */
+ mutex_unlock(&lfsck->li_mutex);
+ rc = lfsck_start_all(env, lfsck, start);
+ if (rc != 0) {
+ spin_lock(&lfsck->li_lock);
+ if (thread_is_stopped(thread)) {
+ spin_unlock(&lfsck->li_lock);
+ } else {
+ lfsck->li_status = LS_FAILED;
+ lfsck->li_flags = 0;
+ thread_set_flags(thread, SVC_STOPPING);
+ spin_unlock(&lfsck->li_lock);
+
+ lfsck->li_start_unplug = 1;
+ wake_up_all(&thread->t_ctl_waitq);
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopped(thread),
+ &lwi);
+ }
} else {
- rc = 0;
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) ||
- thread_is_stopped(thread),
- &lwi);
+ lfsck->li_start_unplug = 1;
+ wake_up_all(&thread->t_ctl_waitq);
}
- GOTO(out, rc);
+ GOTO(put, rc);
out:
mutex_unlock(&lfsck->li_mutex);
+
put:
lfsck_instance_put(env, lfsck);
- return (rc < 0 ? rc : 0);
+
+ return rc < 0 ? rc : 0;
}
EXPORT_SYMBOL(lfsck_start);
struct ptlrpc_thread *thread;
struct l_wait_info lwi = { 0 };
int rc = 0;
+ int rc1 = 0;
ENTRY;
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
thread = &lfsck->li_thread;
+ /* Handle the broadcast stop before taking lfsck::li_mutex to avoid
+ * deadlock: lfsck_stop_all() must run without li_mutex held. */
+ if (stop != NULL && stop->ls_flags & LPF_BROADCAST) {
+ if (!lfsck->li_master) {
+ CERROR("%s: only allow to specify '-A' via MDS\n",
+ lfsck_lfsck2name(lfsck));
+
+ GOTO(out, rc = -EPERM);
+ }
+
+ rc1 = lfsck_stop_all(env, lfsck, stop);
+ }
+
mutex_lock(&lfsck->li_mutex);
spin_lock(&lfsck->li_lock);
if (thread_is_init(thread) || thread_is_stopped(thread)) {
GOTO(out, rc = -EALREADY);
}
- if (stop != NULL)
+ if (stop != NULL) {
lfsck->li_status = stop->ls_status;
- else
+ lfsck->li_flags = stop->ls_flags;
+ } else {
lfsck->li_status = LS_STOPPED;
+ lfsck->li_flags = 0;
+ }
thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&lfsck->li_lock);
mutex_unlock(&lfsck->li_mutex);
lfsck_instance_put(env, lfsck);
- return rc;
+ return rc != 0 ? rc : rc1;
}
EXPORT_SYMBOL(lfsck_stop);
+/*
+ * Handle an LFSCK event notification received from a peer target.
+ *
+ * LE_START and LE_STOP are translated into local lfsck_start()/lfsck_stop()
+ * calls; LPF_BROADCAST is masked out of the flags so the receiver does not
+ * re-broadcast a request that was itself broadcast to it. All other known
+ * events are forwarded to the matching component's lfsck_in_notify() hook.
+ *
+ * \param[in] env	thread environment
+ * \param[in] key	dt_device used to look up the local lfsck instance
+ * \param[in] lr	incoming LFSCK request/event descriptor
+ *
+ * \retval		result of the dispatched operation on success
+ * \retval		-ENXIO if no lfsck instance is bound to \a key
+ * \retval		-EOPNOTSUPP for events with no handler
+ */
int lfsck_in_notify(const struct lu_env *env, struct dt_device *key,
struct lfsck_request *lr)
{
- struct lfsck_instance *lfsck;
- struct lfsck_component *com;
- int rc;
+ int rc = -EOPNOTSUPP;
ENTRY;
switch (lr->lr_event) {
- case LE_STOP:
- case LE_PHASE1_DONE:
- case LE_PHASE2_DONE:
+ case LE_START: {
+ struct lfsck_start *start = &lfsck_env_info(env)->lti_start;
+ struct lfsck_start_param lsp;
+
+ /* Rebuild a lfsck_start descriptor from the wire request fields. */
+ memset(start, 0, sizeof(*start));
+ start->ls_valid = lr->lr_valid;
+ start->ls_speed_limit = lr->lr_speed;
+ start->ls_version = lr->lr_version;
+ start->ls_active = lr->lr_active;
+ start->ls_flags = lr->lr_param & ~LPF_BROADCAST;
+ start->ls_async_windows = lr->lr_async_windows;
+
+ lsp.lsp_start = start;
+ lsp.lsp_index = lr->lr_index;
+ lsp.lsp_index_valid = 1;
+ rc = lfsck_start(env, key, &lsp);
break;
- default:
- RETURN(-EOPNOTSUPP);
}
+ case LE_STOP: {
+ struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
- lfsck = lfsck_instance_find(key, true, false);
- if (unlikely(lfsck == NULL))
- RETURN(-ENODV);
-
- com = lfsck_component_find(lfsck, lr->lr_active);
- if (likely(com != NULL)) {
- rc = com->lc_ops->lfsck_in_notify(env, com, lr);
- lfsck_component_put(env, com);
- } else {
- rc = -ENOTSUPP;
+ memset(stop, 0, sizeof(*stop));
+ stop->ls_status = lr->lr_status;
+ stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
+ rc = lfsck_stop(env, key, stop);
+ break;
}
+ /* Component-level events: forward to the component named by lr_active. */
+ case LE_PHASE1_DONE:
+ case LE_PHASE2_DONE:
+ case LE_FID_ACCESSED:
+ case LE_PEER_EXIT:
+ case LE_CONDITIONAL_DESTROY:
+ case LE_PAIRS_VERIFY: {
+ struct lfsck_instance *lfsck;
+ struct lfsck_component *com;
+
+ lfsck = lfsck_instance_find(key, true, false);
+ if (unlikely(lfsck == NULL))
+ RETURN(-ENXIO);
+
+ /* If no matching component is registered, rc keeps -EOPNOTSUPP. */
+ com = lfsck_component_find(lfsck, lr->lr_active);
+ if (likely(com != NULL)) {
+ rc = com->lc_ops->lfsck_in_notify(env, com, lr);
+ lfsck_component_put(env, com);
+ }
- lfsck_instance_put(env, lfsck);
+ lfsck_instance_put(env, lfsck);
+ break;
+ }
+ default:
+ break;
+ }
RETURN(rc);
}
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
com = lfsck_component_find(lfsck, lr->lr_active);
if (likely(com != NULL)) {
}
EXPORT_SYMBOL(lfsck_query);
+/*
+ * Attach an LDLM namespace to the LFSCK instance bound to \a key.
+ *
+ * NOTE(review): li_namespace is assigned without additional locking here;
+ * presumably registration happens before the namespace is used concurrently
+ * — confirm against the caller's setup ordering.
+ *
+ * \param[in] env	thread environment
+ * \param[in] key	dt_device used to look up the lfsck instance
+ * \param[in] ns	LDLM namespace to record in the instance
+ *
+ * \retval 0		on success
+ * \retval -ENXIO	if no lfsck instance is registered for \a key
+ */
+int lfsck_register_namespace(const struct lu_env *env, struct dt_device *key,
+ struct ldlm_namespace *ns)
+{
+ struct lfsck_instance *lfsck;
+ int rc = -ENXIO;
+
+ lfsck = lfsck_instance_find(key, true, false);
+ if (likely(lfsck != NULL)) {
+ lfsck->li_namespace = ns;
+ lfsck_instance_put(env, lfsck);
+ rc = 0;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(lfsck_register_namespace);
+
int lfsck_register(const struct lu_env *env, struct dt_device *key,
struct dt_device *next, struct obd_device *obd,
lfsck_out_notify notify, void *notify_data, bool master)
GOTO(out, rc);
if (master) {
+ rc = lfsck_fid_init(lfsck);
+ if (rc < 0)
+ GOTO(out, rc);
+
rc = lfsck_namespace_setup(env, lfsck);
if (rc < 0)
GOTO(out, rc);
{
struct lfsck_instance *lfsck;
struct lfsck_tgt_descs *ltds;
- struct lfsck_tgt_desc *ltd;
+ struct lfsck_tgt_desc *ltd = NULL;
struct list_head *head;
- bool found = false;
- bool stop = false;
if (for_ost)
head = &lfsck_ost_orphan_list;
ltds = &lfsck->li_mdt_descs;
down_write(<ds->ltd_rw_sem);
-
LASSERT(ltds->ltd_tgts_bitmap != NULL);
if (unlikely(index >= ltds->ltd_tgts_bitmap->size))
if (unlikely(ltd == NULL))
goto unlock;
- found = true;
- spin_lock(<ds->ltd_lock);
- ltd->ltd_dead = 1;
- if (!list_empty(<d->ltd_layout_list)) {
- list_del_init(<d->ltd_layout_list);
- stop = true;
- } else {
- LASSERT(list_empty(<d->ltd_layout_phase_list));
- }
- spin_unlock(<ds->ltd_lock);
-
- if (stop && lfsck->li_master)
- lfsck_stop_notify(env, lfsck, ltds, ltd);
-
LASSERT(ltds->ltd_tgtnr > 0);
ltds->ltd_tgtnr--;
cfs_bitmap_clear(ltds->ltd_tgts_bitmap, index);
LTD_TGT(ltds, index) = NULL;
- lfsck_tgt_put(ltd);
unlock:
- if (!found) {
+ if (ltd == NULL) {
if (for_ost)
head = &lfsck->li_ost_descs.ltd_orphan;
else
list_for_each_entry(ltd, head, ltd_orphan_list) {
if (ltd->ltd_tgt == tgt) {
list_del_init(<d->ltd_orphan_list);
- lfsck_tgt_put(ltd);
break;
}
}
}
up_write(<ds->ltd_rw_sem);
+ if (ltd != NULL) {
+ spin_lock(<ds->ltd_lock);
+ ltd->ltd_dead = 1;
+ spin_unlock(<ds->ltd_lock);
+ lfsck_stop_notify(env, lfsck, ltds, ltd, LT_LAYOUT);
+ lfsck_tgt_put(ltd);
+ }
+
lfsck_instance_put(env, lfsck);
}
EXPORT_SYMBOL(lfsck_del_target);
lfsck_key_init_generic(&lfsck_thread_key, NULL);
rc = lu_context_key_register(&lfsck_thread_key);
if (rc == 0) {
- tgt_register_lfsck_start(lfsck_start);
tgt_register_lfsck_in_notify(lfsck_in_notify);
tgt_register_lfsck_query(lfsck_query);
}