struct lfsck_thread_info *info = data;
lu_buf_free(&info->lti_linkea_buf);
+ lu_buf_free(&info->lti_big_buf);
OBD_FREE_PTR(info);
}
LU_KEY_INIT_GENERIC(lfsck);
static CFS_LIST_HEAD(lfsck_instance_list);
+static struct list_head lfsck_ost_orphan_list;
+static struct list_head lfsck_mdt_orphan_list;
static DEFINE_SPINLOCK(lfsck_instance_lock);
static const char *lfsck_status_names[] = {
[LS_STOPPED] = "stopped",
[LS_PAUSED] = "paused",
[LS_CRASHED] = "crashed",
- [LS_PARTIAL] = "partial"
+ [LS_PARTIAL] = "partial",
+ [LS_CO_FAILED] = "co-failed",
+ [LS_CO_STOPPED] = "co-stopped",
+ [LS_CO_PAUSED] = "co-paused"
};
const char *lfsck_flags_names[] = {
NULL,
"failout",
"dryrun",
+ "all_targets",
NULL
};
return lfsck_status_names[status];
}
+/**
+ * Initialize a target-descriptor table: locks, orphan list and the bitmap
+ * of registered target indices.
+ *
+ * \retval 0		on success
+ * \retval -ENOMEM	if the bitmap cannot be allocated
+ */
+static int lfsck_tgt_descs_init(struct lfsck_tgt_descs *ltds)
+{
+	spin_lock_init(&ltds->ltd_lock);
+	init_rwsem(&ltds->ltd_rw_sem);
+	INIT_LIST_HEAD(&ltds->ltd_orphan);
+	/* Start small; __lfsck_add_target() grows the bitmap on demand
+	 * when a larger target index shows up. */
+	ltds->ltd_tgts_bitmap = CFS_ALLOCATE_BITMAP(BITS_PER_LONG);
+	if (ltds->ltd_tgts_bitmap == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * Tear down a target-descriptor table: drop any orphan targets, release
+ * every registered target, free the per-index pointer blocks and the
+ * bitmap itself.
+ */
+static void lfsck_tgt_descs_fini(struct lfsck_tgt_descs *ltds)
+{
+	struct lfsck_tgt_desc *ltd;
+	struct lfsck_tgt_desc *next;
+	int idx;
+
+	down_write(&ltds->ltd_rw_sem);
+
+	/* Targets parked here but never registered in the table. */
+	list_for_each_entry_safe(ltd, next, &ltds->ltd_orphan,
+				 ltd_orphan_list) {
+		list_del_init(&ltd->ltd_orphan_list);
+		lfsck_tgt_put(ltd);
+	}
+
+	if (unlikely(ltds->ltd_tgts_bitmap == NULL)) {
+		up_write(&ltds->ltd_rw_sem);
+
+		return;
+	}
+
+	cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
+		ltd = LTD_TGT(ltds, idx);
+		if (likely(ltd != NULL)) {
+			/* A target still linked on a layout list would mean
+			 * a scan is in flight -- must not happen here. */
+			LASSERT(list_empty(&ltd->ltd_layout_list));
+			LASSERT(list_empty(&ltd->ltd_layout_phase_list));
+
+			ltds->ltd_tgtnr--;
+			cfs_bitmap_clear(ltds->ltd_tgts_bitmap, idx);
+			LTD_TGT(ltds, idx) = NULL;
+			lfsck_tgt_put(ltd);
+		}
+	}
+
+	LASSERTF(ltds->ltd_tgtnr == 0, "tgt count unmatched: %d\n",
+		 ltds->ltd_tgtnr);
+
+	for (idx = 0; idx < TGT_PTRS; idx++) {
+		if (ltds->ltd_tgts_idx[idx] != NULL) {
+			OBD_FREE_PTR(ltds->ltd_tgts_idx[idx]);
+			ltds->ltd_tgts_idx[idx] = NULL;
+		}
+	}
+
+	CFS_FREE_BITMAP(ltds->ltd_tgts_bitmap);
+	ltds->ltd_tgts_bitmap = NULL;
+	up_write(&ltds->ltd_rw_sem);
+}
+
+/**
+ * Register \a ltd in the OST or MDT target table of \a lfsck.
+ *
+ * Doubles the index bitmap when \a ltd->ltd_index does not fit, allocates
+ * the per-index pointer block on demand, then installs the target and
+ * marks its bit.
+ *
+ * \param[in] locked	true if the caller already holds ltd_rw_sem for write
+ *
+ * \retval 0		on success
+ * \retval -ENOMEM	on allocation failure
+ * \retval -EEXIST	if the index is registered already
+ */
+static int __lfsck_add_target(const struct lu_env *env,
+			      struct lfsck_instance *lfsck,
+			      struct lfsck_tgt_desc *ltd,
+			      bool for_ost, bool locked)
+{
+	struct lfsck_tgt_descs *ltds;
+	__u32 index = ltd->ltd_index;
+	int rc = 0;
+	ENTRY;
+
+	if (for_ost)
+		ltds = &lfsck->li_ost_descs;
+	else
+		ltds = &lfsck->li_mdt_descs;
+
+	if (!locked)
+		down_write(&ltds->ltd_rw_sem);
+
+	LASSERT(ltds->ltd_tgts_bitmap != NULL);
+
+	if (index >= ltds->ltd_tgts_bitmap->size) {
+		__u32 newsize = max((__u32)ltds->ltd_tgts_bitmap->size,
+				    (__u32)BITS_PER_LONG);
+		cfs_bitmap_t *old_bitmap = ltds->ltd_tgts_bitmap;
+		cfs_bitmap_t *new_bitmap;
+
+		while (newsize < index + 1)
+			newsize <<= 1;
+
+		new_bitmap = CFS_ALLOCATE_BITMAP(newsize);
+		if (new_bitmap == NULL)
+			GOTO(unlock, rc = -ENOMEM);
+
+		if (ltds->ltd_tgtnr > 0)
+			cfs_bitmap_copy(new_bitmap, old_bitmap);
+		ltds->ltd_tgts_bitmap = new_bitmap;
+		CFS_FREE_BITMAP(old_bitmap);
+	}
+
+	if (cfs_bitmap_check(ltds->ltd_tgts_bitmap, index)) {
+		CERROR("%s: the device %s (%u) is registered already\n",
+		       lfsck_lfsck2name(lfsck),
+		       ltd->ltd_tgt->dd_lu_dev.ld_obd->obd_name, index);
+		GOTO(unlock, rc = -EEXIST);
+	}
+
+	if (ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK] == NULL) {
+		OBD_ALLOC_PTR(ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK]);
+		if (ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK] == NULL)
+			GOTO(unlock, rc = -ENOMEM);
+	}
+
+	LTD_TGT(ltds, index) = ltd;
+	cfs_bitmap_set(ltds->ltd_tgts_bitmap, index);
+	ltds->ltd_tgtnr++;
+
+	GOTO(unlock, rc = 0);
+
+unlock:
+	if (!locked)
+		up_write(&ltds->ltd_rw_sem);
+
+	return rc;
+}
+
+/**
+ * Adopt targets that were announced (via lfsck_add_target()) before this
+ * lfsck instance existed: move matching entries from the global OST/MDT
+ * orphan lists onto the instance, then register them one by one.
+ */
+static int lfsck_add_target_from_orphan(const struct lu_env *env,
+					struct lfsck_instance *lfsck)
+{
+	struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
+	struct lfsck_tgt_desc *ltd;
+	struct lfsck_tgt_desc *next;
+	struct list_head *head = &lfsck_ost_orphan_list;
+	int rc;
+	bool for_ost = true;
+
+again:
+	/* Steal our orphans from the global list under the global lock. */
+	spin_lock(&lfsck_instance_lock);
+	list_for_each_entry_safe(ltd, next, head, ltd_orphan_list) {
+		if (ltd->ltd_key == lfsck->li_bottom) {
+			list_del_init(&ltd->ltd_orphan_list);
+			list_add_tail(&ltd->ltd_orphan_list,
+				      &ltds->ltd_orphan);
+		}
+	}
+	spin_unlock(&lfsck_instance_lock);
+
+	down_write(&ltds->ltd_rw_sem);
+	while (!list_empty(&ltds->ltd_orphan)) {
+		ltd = list_entry(ltds->ltd_orphan.next,
+				 struct lfsck_tgt_desc,
+				 ltd_orphan_list);
+		list_del_init(&ltd->ltd_orphan_list);
+		rc = __lfsck_add_target(env, lfsck, ltd, for_ost, true);
+		/* Do not hold the semaphore for too long time. */
+		up_write(&ltds->ltd_rw_sem);
+		if (rc != 0)
+			return rc;
+
+		down_write(&ltds->ltd_rw_sem);
+	}
+	up_write(&ltds->ltd_rw_sem);
+
+	if (for_ost) {
+		/* Second pass: repeat for the MDT orphans. */
+		ltds = &lfsck->li_mdt_descs;
+		head = &lfsck_mdt_orphan_list;
+		for_ost = false;
+		goto again;
+	}
+
+	return 0;
+}
+
static inline struct lfsck_component *
__lfsck_component_find(struct lfsck_instance *lfsck, __u16 type, cfs_list_t *list)
{
lfsck_component_cleanup(env, com);
}
+ lfsck_tgt_descs_fini(&lfsck->li_ost_descs);
+ lfsck_tgt_descs_fini(&lfsck->li_mdt_descs);
+
if (lfsck->li_bookmark_obj != NULL) {
lu_object_put_nocache(env, &lfsck->li_bookmark_obj->do_lu);
lfsck->li_bookmark_obj = NULL;
OBD_FREE_PTR(lfsck);
}
-static inline struct lfsck_instance *lfsck_instance_find(struct dt_device *key,
-							 bool ref, bool unlink)
+/* Lockless lookup of the lfsck instance bound to \a key.  The caller must
+ * hold lfsck_instance_lock.  If \a ref is set, a reference is taken on the
+ * found instance; if \a unlink is set, it is removed from
+ * lfsck_instance_list. */
+static inline struct lfsck_instance *
+__lfsck_instance_find(struct dt_device *key, bool ref, bool unlink)
{
	struct lfsck_instance *lfsck;
-	spin_lock(&lfsck_instance_lock);
	cfs_list_for_each_entry(lfsck, &lfsck_instance_list, li_link) {
		if (lfsck->li_bottom == key) {
			if (ref)
				lfsck_instance_get(lfsck);
			if (unlink)
				list_del_init(&lfsck->li_link);
-			spin_unlock(&lfsck_instance_lock);
+
			return lfsck;
		}
	}
-	spin_unlock(&lfsck_instance_lock);
+
	return NULL;
}
+/* Locked wrapper around __lfsck_instance_find(). */
+static inline struct lfsck_instance *lfsck_instance_find(struct dt_device *key,
+							 bool ref, bool unlink)
+{
+	struct lfsck_instance *instance;
+
+	spin_lock(&lfsck_instance_lock);
+	instance = __lfsck_instance_find(key, ref, unlink);
+	spin_unlock(&lfsck_instance_lock);
+
+	return instance;
+}
+
static inline int lfsck_instance_add(struct lfsck_instance *lfsck)
{
struct lfsck_instance *tmp;
}
struct lfsck_thread_args *lfsck_thread_args_init(struct lfsck_instance *lfsck,
- struct lfsck_component *com)
+ struct lfsck_component *com,
+ struct lfsck_start_param *lsp)
{
struct lfsck_thread_args *lta;
int rc;
if (com != NULL)
lta->lta_com = lfsck_component_get(com);
+ lta->lta_lsp = lsp;
+
return lta;
}
return rc1 != 0 ? rc1 : rc;
}
-int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck)
+int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck,
+ struct lfsck_start_param *lsp)
{
struct dt_object *obj = NULL;
struct lfsck_component *com;
if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
com->lc_journal = 0;
- rc = com->lc_ops->lfsck_prep(env, com);
+ rc = com->lc_ops->lfsck_prep(env, com, lsp);
if (rc != 0)
GOTO(out, rc);
{
struct lfsck_component *com;
struct lfsck_component *next;
- int rc;
+ struct l_wait_info lwi = { 0 };
+ int rc = 0;
+ int rc1 = 0;
cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
lc_link) {
rc = com->lc_ops->lfsck_double_scan(env, com);
if (rc != 0)
- return rc;
+ rc1 = rc;
}
+
+ l_wait_event(lfsck->li_thread.t_ctl_waitq,
+ atomic_read(&lfsck->li_double_scan_count) == 0,
+ &lwi);
+
+ return rc1 != 0 ? rc1 : rc;
+}
+
+/**
+ * Broadcast a stop notification for target \a ltd to every component on
+ * both the scan and double-scan lists, then wait for the queued RPCs.
+ *
+ * \retval 0 on success; otherwise the last component error, or the
+ * ptlrpc_set_wait() result when all components queued successfully.
+ */
+int lfsck_stop_notify(const struct lu_env *env, struct lfsck_instance *lfsck,
+		      struct lfsck_tgt_descs *ltds, struct lfsck_tgt_desc *ltd)
+{
+	struct ptlrpc_request_set *set;
+	struct lfsck_component *com;
+	int cnt = 0;
+	int rc  = 0;
+	int rc1 = 0;
+
+	set = ptlrpc_prep_set();
+	if (set == NULL)
+		return -ENOMEM;
+
+	list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
+		if (com->lc_ops->lfsck_stop_notify != NULL) {
+			rc = com->lc_ops->lfsck_stop_notify(env, com, ltds,
+							    ltd, set);
+			if (rc != 0)
+				rc1 = rc;
+			else
+				cnt++;
+		}
+	}
+
+	list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
+		if (com->lc_ops->lfsck_stop_notify != NULL) {
+			rc = com->lc_ops->lfsck_stop_notify(env, com, ltds,
+							    ltd, set);
+			if (rc != 0)
+				rc1 = rc;
+			else
+				cnt++;
+		}
+	}
+
+	/* Only wait if at least one RPC was actually queued. */
+	if (cnt > 0)
+		rc = ptlrpc_set_wait(set);
+	ptlrpc_set_destroy(set);
+
+	return rc1 != 0 ? rc1 : rc;
+}
+
+/* Ask every registered component -- on both the scan and the double-scan
+ * lists -- to quit. */
+void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
+{
+	struct list_head *heads[2];
+	struct lfsck_component *com;
+	struct lfsck_component *tmp;
+	int i;
+
+	heads[0] = &lfsck->li_list_scan;
+	heads[1] = &lfsck->li_list_double_scan;
+
+	for (i = 0; i < 2; i++) {
+		list_for_each_entry_safe(com, tmp, heads[i], lc_link) {
+			if (com->lc_ops->lfsck_quit != NULL)
+				com->lc_ops->lfsck_quit(env, com);
+		}
+	}
+}
+
+/**
+ * Pack an lfsck request \a lr into a ptlrpc request and queue it on \a set.
+ *
+ * \param[in] request	LFSCK_NOTIFY or LFSCK_QUERY (selects the req format)
+ * \param[in] args	a struct lfsck_async_interpret_args, copied into the
+ *			request's async-args area; its component gets an
+ *			extra reference that \a interpreter must release
+ *
+ * \retval 0		on success
+ * \retval -EOPNOTSUPP	if the peer does not support LFSCK
+ * \retval -EINVAL	for an unknown \a request opcode
+ * \retval -ENOMEM	on allocation failure
+ */
+int lfsck_async_request(const struct lu_env *env, struct obd_export *exp,
+			struct lfsck_request *lr,
+			struct ptlrpc_request_set *set,
+			ptlrpc_interpterer_t interpreter,
+			void *args, int request)
+{
+	struct lfsck_async_interpret_args *laia;
+	struct ptlrpc_request		  *req;
+	struct lfsck_request		  *tmp;
+	struct req_format		  *format;
+	int				   rc;
+
+	if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
+		return -EOPNOTSUPP;
+
+	switch (request) {
+	case LFSCK_NOTIFY:
+		format = &RQF_LFSCK_NOTIFY;
+		break;
+	case LFSCK_QUERY:
+		format = &RQF_LFSCK_QUERY;
+		break;
+	default:
+		CERROR("%s: unknown async request: opc = %d\n",
+		       exp->exp_obd->obd_name, request);
+		return -EINVAL;
+	}
+
+	req = ptlrpc_request_alloc(class_exp2cliimp(exp), format);
+	if (req == NULL)
+		return -ENOMEM;
+
+	rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, request);
+	if (rc != 0) {
+		ptlrpc_request_free(req);
+
+		return rc;
+	}
+
+	/* Copy the caller's request body into the wire buffer. */
+	tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
+	*tmp = *lr;
+	ptlrpc_request_set_replen(req);
+
+	laia = ptlrpc_req_async_args(req);
+	*laia = *(struct lfsck_async_interpret_args *)args;
+	/* Pin the component until the reply is interpreted. */
+	lfsck_component_get(laia->laia_com);
+	req->rq_interpret_reply = interpreter;
+	ptlrpc_set_add_req(set, req);
+
	return 0;
}
}
EXPORT_SYMBOL(lfsck_set_speed);
+/* Format the configured async request windows size of the lfsck instance
+ * bound to \a key into \a buf (at most \a len bytes).  Returns the
+ * snprintf() result, or a negative errno. */
+int lfsck_get_windows(struct dt_device *key, void *buf, int len)
+{
+	struct lu_env env;
+	struct lfsck_instance *lfsck;
+	int rc;
+	ENTRY;
+
+	rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
+	if (rc != 0)
+		RETURN(rc);
+
+	lfsck = lfsck_instance_find(key, true, false);
+	if (unlikely(lfsck == NULL)) {
+		rc = -ENODEV;
+	} else {
+		rc = snprintf(buf, len, "%u\n",
+			      lfsck->li_bookmark_ram.lb_async_windows);
+		lfsck_instance_put(&env, lfsck);
+	}
+
+	lu_env_fini(&env);
+
+	RETURN(rc);
+}
+EXPORT_SYMBOL(lfsck_get_windows);
+
+/**
+ * Set the async request windows size for the lfsck instance bound to
+ * \a key and persist it via the bookmark file.
+ *
+ * \retval 0		on success (or when the value is unchanged)
+ * \retval -EINVAL	if \a val exceeds LFSCK_ASYNC_WIN_MAX
+ * \retval -ENODEV	if no lfsck instance is bound to \a key
+ */
+int lfsck_set_windows(struct dt_device *key, int val)
+{
+	struct lu_env env;
+	struct lfsck_instance *lfsck;
+	int rc;
+	ENTRY;
+
+	rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
+	if (rc != 0)
+		RETURN(rc);
+
+	lfsck = lfsck_instance_find(key, true, false);
+	if (likely(lfsck != NULL)) {
+		if (val > LFSCK_ASYNC_WIN_MAX) {
+			CERROR("%s: Too large async windows size, which "
+			       "may cause memory issues. The valid range "
+			       "is [0 - %u]. If you do not want to restrict "
+			       "the windows size for async requests pipeline, "
+			       "just set it as 0.\n",
+			       lfsck_lfsck2name(lfsck), LFSCK_ASYNC_WIN_MAX);
+			rc = -EINVAL;
+		} else if (lfsck->li_bookmark_ram.lb_async_windows != val) {
+			/* Serialize with other bookmark updates. */
+			mutex_lock(&lfsck->li_mutex);
+			lfsck->li_bookmark_ram.lb_async_windows = val;
+			rc = lfsck_bookmark_store(&env, lfsck);
+			mutex_unlock(&lfsck->li_mutex);
+		}
+		lfsck_instance_put(&env, lfsck);
+	} else {
+		rc = -ENODEV;
+	}
+
+	lu_env_fini(&env);
+
+	RETURN(rc);
+}
+EXPORT_SYMBOL(lfsck_set_windows);
+
int lfsck_dump(struct dt_device *key, void *buf, int len, enum lfsck_type type)
{
struct lu_env env;
if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
rc = -EALREADY;
while (start->ls_active != 0) {
- if (type & start->ls_active) {
+ if (!(type & start->ls_active)) {
+ type <<= 1;
+ continue;
+ }
+
+ com = __lfsck_component_find(lfsck, type,
+ &lfsck->li_list_scan);
+ if (com == NULL)
com = __lfsck_component_find(lfsck, type,
- &lfsck->li_list_scan);
- if (com == NULL)
- com = __lfsck_component_find(lfsck,
- type,
&lfsck->li_list_double_scan);
- if (com == NULL) {
- rc = -EBUSY;
+ if (com == NULL) {
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ if (com->lc_ops->lfsck_join != NULL) {
+ rc = com->lc_ops->lfsck_join( env, com, lsp);
+ if (rc != 0 && rc != -EALREADY)
break;
- } else {
- start->ls_active &= ~type;
- }
}
+ start->ls_active &= ~type;
type <<= 1;
}
spin_unlock(&lfsck->li_lock);
spin_unlock(&lfsck->li_lock);
lfsck->li_namespace = lsp->lsp_namespace;
- lfsck->li_paused = 0;
+ lfsck->li_status = 0;
lfsck->li_oit_over = 0;
lfsck->li_drop_dryrun = 0;
lfsck->li_new_scanned = 0;
dirty = true;
}
+ if (start->ls_valid & LSV_ASYNC_WINDOWS &&
+ bk->lb_async_windows != start->ls_async_windows) {
+ bk->lb_async_windows = start->ls_async_windows;
+ dirty = true;
+ }
+
if (start->ls_valid & LSV_ERROR_HANDLE) {
valid |= DOIV_ERROR_HANDLE;
if (start->ls_flags & LPF_FAILOUT)
}
}
+ if (bk->lb_param & LPF_ALL_MDT &&
+ !(start->ls_flags & LPF_ALL_MDT)) {
+ bk->lb_param &= ~LPF_ALL_MDT;
+ dirty = true;
+ } else if (!(bk->lb_param & LPF_ALL_MDT) &&
+ start->ls_flags & LPF_ALL_MDT) {
+ bk->lb_param |= LPF_ALL_MDT;
+ dirty = true;
+ }
+
if (dirty) {
rc = lfsck_bookmark_store(env, lfsck);
if (rc != 0)
lfsck->li_args_oit = (flags << DT_OTABLE_IT_FLAGS_SHIFT) | valid;
thread_set_flags(thread, 0);
- lta = lfsck_thread_args_init(lfsck, NULL);
+ lta = lfsck_thread_args_init(lfsck, NULL, lsp);
if (IS_ERR(lta))
GOTO(out, rc = PTR_ERR(lta));
}
EXPORT_SYMBOL(lfsck_start);
-int lfsck_stop(const struct lu_env *env, struct dt_device *key, bool pause)
+int lfsck_stop(const struct lu_env *env, struct dt_device *key,
+ struct lfsck_stop *stop)
{
struct lfsck_instance *lfsck;
struct ptlrpc_thread *thread;
struct l_wait_info lwi = { 0 };
+ int rc = 0;
ENTRY;
lfsck = lfsck_instance_find(key, true, false);
spin_lock(&lfsck->li_lock);
if (thread_is_init(thread) || thread_is_stopped(thread)) {
spin_unlock(&lfsck->li_lock);
- mutex_unlock(&lfsck->li_mutex);
- lfsck_instance_put(env, lfsck);
- RETURN(-EALREADY);
+ GOTO(out, rc = -EALREADY);
+ }
+
+ if (stop != NULL) {
+ lfsck->li_status = stop->ls_status;
+ lfsck->li_flags = stop->ls_flags;
+ } else {
+ lfsck->li_status = LS_STOPPED;
+ lfsck->li_flags = 0;
}
- if (pause)
- lfsck->li_paused = 1;
thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&lfsck->li_lock);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread),
&lwi);
+
+ GOTO(out, rc = 0);
+
+out:
mutex_unlock(&lfsck->li_mutex);
lfsck_instance_put(env, lfsck);
- RETURN(0);
+ return rc;
}
EXPORT_SYMBOL(lfsck_stop);
+/**
+ * Dispatch an incoming lfsck event \a lr to the component identified by
+ * lr->lr_active on the instance bound to \a key.
+ *
+ * \retval -EOPNOTSUPP	for an unrecognized event
+ * \retval -ENODEV	if no lfsck instance is bound to \a key
+ * \retval -ENOTSUPP	if no matching component is registered
+ */
+int lfsck_in_notify(const struct lu_env *env, struct dt_device *key,
+		    struct lfsck_request *lr)
+{
+	struct lfsck_instance *lfsck;
+	struct lfsck_component *com;
+	int rc;
+	ENTRY;
+
+	/* Only a fixed set of events is accepted from peers. */
+	switch (lr->lr_event) {
+	case LE_STOP:
+	case LE_PHASE1_DONE:
+	case LE_PHASE2_DONE:
+	case LE_FID_ACCESSED:
+		break;
+	default:
+		RETURN(-EOPNOTSUPP);
+	}
+
+	lfsck = lfsck_instance_find(key, true, false);
+	if (unlikely(lfsck == NULL))
+		RETURN(-ENODEV);
+
+	com = lfsck_component_find(lfsck, lr->lr_active);
+	if (likely(com != NULL)) {
+		rc = com->lc_ops->lfsck_in_notify(env, com, lr);
+		lfsck_component_put(env, com);
+	} else {
+		rc = -ENOTSUPP;
+	}
+
+	lfsck_instance_put(env, lfsck);
+
+	RETURN(rc);
+}
+EXPORT_SYMBOL(lfsck_in_notify);
+
+/* Query the component identified by lr->lr_active on the lfsck instance
+ * bound to \a key.  Returns the component's status, -ENODEV when no
+ * instance exists, or -ENOTSUPP when no such component is registered. */
+int lfsck_query(const struct lu_env *env, struct dt_device *key,
+		struct lfsck_request *lr)
+{
+	struct lfsck_instance *lfsck;
+	struct lfsck_component *com;
+	int rc = -ENOTSUPP;
+	ENTRY;
+
+	lfsck = lfsck_instance_find(key, true, false);
+	if (unlikely(lfsck == NULL))
+		RETURN(-ENODEV);
+
+	com = lfsck_component_find(lfsck, lr->lr_active);
+	if (likely(com != NULL)) {
+		rc = com->lc_ops->lfsck_query(env, com);
+		lfsck_component_put(env, com);
+	}
+
+	lfsck_instance_put(env, lfsck);
+
+	RETURN(rc);
+}
+EXPORT_SYMBOL(lfsck_query);
+
int lfsck_register(const struct lu_env *env, struct dt_device *key,
- struct dt_device *next, lfsck_out_notify notify,
- void *notify_data, bool master)
+ struct dt_device *next, struct obd_device *obd,
+ lfsck_out_notify notify, void *notify_data, bool master)
{
struct lfsck_instance *lfsck;
struct dt_object *root = NULL;
CFS_INIT_LIST_HEAD(&lfsck->li_list_double_scan);
CFS_INIT_LIST_HEAD(&lfsck->li_list_idle);
atomic_set(&lfsck->li_ref, 1);
+ atomic_set(&lfsck->li_double_scan_count, 0);
init_waitqueue_head(&lfsck->li_thread.t_ctl_waitq);
lfsck->li_out_notify = notify;
lfsck->li_out_notify_data = notify_data;
lfsck->li_next = next;
lfsck->li_bottom = key;
+ lfsck->li_obd = obd;
+
+ rc = lfsck_tgt_descs_init(&lfsck->li_ost_descs);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ rc = lfsck_tgt_descs_init(&lfsck->li_mdt_descs);
+ if (rc != 0)
+ GOTO(out, rc);
fid->f_seq = FID_SEQ_LOCAL_NAME;
fid->f_oid = 1;
add:
rc = lfsck_instance_add(lfsck);
+ if (rc == 0)
+ rc = lfsck_add_target_from_orphan(env, lfsck);
out:
if (root != NULL && !IS_ERR(root))
lu_object_put(env, &root->do_lu);
}
EXPORT_SYMBOL(lfsck_degister);
+/**
+ * Announce target \a tgt (index \a index) to the lfsck instance bound to
+ * \a key.  If no instance exists yet, the descriptor is parked on the
+ * global OST/MDT orphan list and adopted later by
+ * lfsck_add_target_from_orphan().
+ *
+ * \retval 0 on success; -ENOMEM or the __lfsck_add_target() error otherwise
+ */
+int lfsck_add_target(const struct lu_env *env, struct dt_device *key,
+		     struct dt_device *tgt, struct obd_export *exp,
+		     __u32 index, bool for_ost)
+{
+	struct lfsck_instance *lfsck;
+	struct lfsck_tgt_desc *ltd;
+	int rc;
+	ENTRY;
+
+	OBD_ALLOC_PTR(ltd);
+	if (ltd == NULL)
+		RETURN(-ENOMEM);
+
+	ltd->ltd_tgt = tgt;
+	ltd->ltd_key = key;
+	ltd->ltd_exp = exp;
+	INIT_LIST_HEAD(&ltd->ltd_orphan_list);
+	INIT_LIST_HEAD(&ltd->ltd_layout_list);
+	INIT_LIST_HEAD(&ltd->ltd_layout_phase_list);
+	atomic_set(&ltd->ltd_ref, 1);
+	ltd->ltd_index = index;
+
+	spin_lock(&lfsck_instance_lock);
+	lfsck = __lfsck_instance_find(key, true, false);
+	if (lfsck == NULL) {
+		/* No instance yet: park on the global orphan list. */
+		if (for_ost)
+			list_add_tail(&ltd->ltd_orphan_list,
+				      &lfsck_ost_orphan_list);
+		else
+			list_add_tail(&ltd->ltd_orphan_list,
+				      &lfsck_mdt_orphan_list);
+		spin_unlock(&lfsck_instance_lock);
+
+		RETURN(0);
+	}
+	spin_unlock(&lfsck_instance_lock);
+
+	rc = __lfsck_add_target(env, lfsck, ltd, for_ost, false);
+	if (rc != 0)
+		lfsck_tgt_put(ltd);
+
+	lfsck_instance_put(env, lfsck);
+
+	RETURN(rc);
+}
+EXPORT_SYMBOL(lfsck_add_target);
+
+/**
+ * Withdraw target \a tgt from the lfsck instance bound to \a key.
+ *
+ * The target may be on the global orphan list (no instance yet), in the
+ * instance's target table, or on the instance's private orphan list; it is
+ * searched in that order and released wherever it is found.  A target that
+ * is still part of a running layout scan is detached and the components
+ * are notified first.
+ */
+void lfsck_del_target(const struct lu_env *env, struct dt_device *key,
+		      struct dt_device *tgt, __u32 index, bool for_ost)
+{
+	struct lfsck_instance *lfsck;
+	struct lfsck_tgt_descs *ltds;
+	struct lfsck_tgt_desc *ltd;
+	struct list_head *head;
+	bool found = false;
+	bool stop = false;
+
+	if (for_ost)
+		head = &lfsck_ost_orphan_list;
+	else
+		head = &lfsck_mdt_orphan_list;
+
+	spin_lock(&lfsck_instance_lock);
+	list_for_each_entry(ltd, head, ltd_orphan_list) {
+		if (ltd->ltd_tgt == tgt) {
+			list_del_init(&ltd->ltd_orphan_list);
+			spin_unlock(&lfsck_instance_lock);
+			lfsck_tgt_put(ltd);
+
+			return;
+		}
+	}
+
+	lfsck = __lfsck_instance_find(key, true, false);
+	spin_unlock(&lfsck_instance_lock);
+	if (unlikely(lfsck == NULL))
+		return;
+
+	if (for_ost)
+		ltds = &lfsck->li_ost_descs;
+	else
+		ltds = &lfsck->li_mdt_descs;
+
+	down_write(&ltds->ltd_rw_sem);
+
+	LASSERT(ltds->ltd_tgts_bitmap != NULL);
+
+	if (unlikely(index >= ltds->ltd_tgts_bitmap->size))
+		goto unlock;
+
+	ltd = LTD_TGT(ltds, index);
+	if (unlikely(ltd == NULL))
+		goto unlock;
+
+	found = true;
+	spin_lock(&ltds->ltd_lock);
+	ltd->ltd_dead = 1;
+	if (!list_empty(&ltd->ltd_layout_list)) {
+		/* Still referenced by a layout scan: detach and notify. */
+		list_del_init(&ltd->ltd_layout_phase_list);
+		list_del_init(&ltd->ltd_layout_list);
+		stop = true;
+	} else {
+		LASSERT(list_empty(&ltd->ltd_layout_phase_list));
+	}
+	spin_unlock(&ltds->ltd_lock);
+
+	if (stop && lfsck->li_master)
+		lfsck_stop_notify(env, lfsck, ltds, ltd);
+
+	LASSERT(ltds->ltd_tgtnr > 0);
+
+	ltds->ltd_tgtnr--;
+	cfs_bitmap_clear(ltds->ltd_tgts_bitmap, index);
+	LTD_TGT(ltds, index) = NULL;
+	lfsck_tgt_put(ltd);
+
+unlock:
+	if (!found) {
+		/* Not in the table: it may still sit on the instance's
+		 * private orphan list.  NB: pick the MDT list for MDT
+		 * targets; the previous code used li_ost_descs in both
+		 * branches. */
+		if (for_ost)
+			head = &lfsck->li_ost_descs.ltd_orphan;
+		else
+			head = &lfsck->li_mdt_descs.ltd_orphan;
+
+		list_for_each_entry(ltd, head, ltd_orphan_list) {
+			if (ltd->ltd_tgt == tgt) {
+				list_del_init(&ltd->ltd_orphan_list);
+				lfsck_tgt_put(ltd);
+				break;
+			}
+		}
+	}
+
+	up_write(&ltds->ltd_rw_sem);
+	lfsck_instance_put(env, lfsck);
+}
+EXPORT_SYMBOL(lfsck_del_target);
+
static int __init lfsck_init(void)
{
	int rc;
+	/* Orphan lists hold targets registered before their lfsck
+	 * instance has been set up. */
+	INIT_LIST_HEAD(&lfsck_ost_orphan_list);
+	INIT_LIST_HEAD(&lfsck_mdt_orphan_list);
	lfsck_key_init_generic(&lfsck_thread_key, NULL);
	rc = lu_context_key_register(&lfsck_thread_key);
+	if (rc == 0) {
+		/* Hook the lfsck entry points into the target layer. */
+		tgt_register_lfsck_start(lfsck_start);
+		tgt_register_lfsck_in_notify(lfsck_in_notify);
+		tgt_register_lfsck_query(lfsck_query);
+	}
+
	return rc;
}
static void __exit lfsck_exit(void)
{
+	struct lfsck_tgt_desc *ltd;
+	struct lfsck_tgt_desc *next;
+
	LASSERT(cfs_list_empty(&lfsck_instance_list));
+	/* Drop targets that were registered but never adopted by any
+	 * lfsck instance. */
+	list_for_each_entry_safe(ltd, next, &lfsck_ost_orphan_list,
+				 ltd_orphan_list) {
+		list_del_init(&ltd->ltd_orphan_list);
+		lfsck_tgt_put(ltd);
+	}
+
+	list_for_each_entry_safe(ltd, next, &lfsck_mdt_orphan_list,
+				 ltd_orphan_list) {
+		list_del_init(&ltd->ltd_orphan_list);
+		lfsck_tgt_put(ltd);
+	}
+
	lu_context_key_degister(&lfsck_thread_key);
}