unsigned int lls_dirty:1;
};
+struct lfsck_layout_slave_target {
+ /* link into lfsck_layout_slave_data::llsd_master_list. */
+ struct list_head llst_list;
+ __u64 llst_gen;
+ atomic_t llst_ref;
+ __u32 llst_index;
+};
+
struct lfsck_layout_slave_data {
/* list for lfsck_layout_seq */
struct list_head llsd_seq_list;
+
+ /* list for the masters involved in the layout verification. */
+ struct list_head llsd_master_list;
+ spinlock_t llsd_lock;
+ __u64 llsd_touch_gen;
};
struct lfsck_layout_object {
};
struct lfsck_layout_master_data {
- struct list_head llmd_req_list;
spinlock_t llmd_lock;
+ struct list_head llmd_req_list;
+
+ /* list for the ost targets involved in the layout verification. */
+ struct list_head llmd_ost_list;
+
+ /* list for the ost targets in phase1 scanning. */
+ struct list_head llmd_ost_phase1_list;
+
+ /* list for the ost targets in phase2 scanning. */
+ struct list_head llmd_ost_phase2_list;
+
+ /* list for the mdt targets involved in the layout verification. */
+ struct list_head llmd_mdt_list;
+
+ /* list for the mdt targets in phase1 scanning. */
+ struct list_head llmd_mdt_phase1_list;
+
+ /* list for the mdt targets in phase2 scanning. */
+ struct list_head llmd_mdt_phase2_list;
+
struct ptlrpc_thread llmd_thread;
atomic_t llmd_rpcs_in_flight;
+ __u32 llmd_touch_gen;
int llmd_prefetched;
int llmd_assistant_status;
int llmd_post_result;
llmd_exit:1;
};
+struct lfsck_layout_slave_async_args {
+ struct obd_export *llsaa_exp;
+ struct lfsck_component *llsaa_com;
+ struct lfsck_layout_slave_target *llsaa_llst;
+};
+
+static inline void
+lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
+{
+ if (atomic_dec_and_test(&llst->llst_ref)) {
+ LASSERT(list_empty(&llst->llst_list));
+
+ OBD_FREE_PTR(llst);
+ }
+}
+
+static inline int
+lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
+{
+ struct lfsck_layout_slave_target *llst;
+ struct lfsck_layout_slave_target *tmp;
+ int rc = 0;
+
+ OBD_ALLOC_PTR(llst);
+ if (llst == NULL)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&llst->llst_list);
+ llst->llst_gen = 0;
+ llst->llst_index = index;
+ atomic_set(&llst->llst_ref, 1);
+
+ spin_lock(&llsd->llsd_lock);
+ list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
+ if (tmp->llst_index == index) {
+ rc = -EALREADY;
+ break;
+ }
+ }
+ if (rc == 0)
+ list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
+ spin_unlock(&llsd->llsd_lock);
+
+ if (rc != 0)
+ OBD_FREE_PTR(llst);
+
+ return rc;
+}
+
+static inline void
+lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
+ struct lfsck_layout_slave_target *llst)
+{
+ bool del = false;
+
+ spin_lock(&llsd->llsd_lock);
+ if (!list_empty(&llst->llst_list)) {
+ list_del_init(&llst->llst_list);
+ del = true;
+ }
+ spin_unlock(&llsd->llsd_lock);
+
+ if (del)
+ lfsck_layout_llst_put(llst);
+}
+
+static inline struct lfsck_layout_slave_target *
+lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
+ __u32 index)
+{
+ struct lfsck_layout_slave_target *llst;
+
+ spin_lock(&llsd->llsd_lock);
+ list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
+ if (llst->llst_index == index) {
+ list_del_init(&llst->llst_list);
+ spin_unlock(&llsd->llsd_lock);
+
+ return llst;
+ }
+ }
+ spin_unlock(&llsd->llsd_lock);
+
+ return NULL;
+}
+
static inline void lfsck_layout_object_put(const struct lu_env *env,
struct lfsck_layout_object *llo)
{
return rc;
}
+static int lfsck_layout_master_async_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *args, int rc)
+{
+ struct lfsck_async_interpret_args *laia = args;
+ struct lfsck_component *com = laia->laia_com;
+ struct lfsck_layout_master_data *llmd = com->lc_data;
+ struct lfsck_tgt_descs *ltds = laia->laia_ltds;
+ struct lfsck_tgt_desc *ltd = laia->laia_ltd;
+ struct lfsck_request *lr = laia->laia_lr;
+
+ switch (lr->lr_event) {
+ case LE_START:
+ if (rc != 0) {
+ struct lfsck_layout *lo = com->lc_file_ram;
+
+ lo->ll_flags |= LF_INCOMPLETE;
+ lfsck_tgt_put(ltd);
+ break;
+ }
+
+ spin_lock(<ds->ltd_lock);
+ if (ltd->ltd_dead || ltd->ltd_layout_done) {
+ spin_unlock(<ds->ltd_lock);
+ lfsck_tgt_put(ltd);
+ break;
+ }
+
+ if (lr->lr_flags & LEF_TO_OST) {
+ if (list_empty(<d->ltd_layout_list))
+ list_add_tail(<d->ltd_layout_list,
+ &llmd->llmd_ost_list);
+ if (list_empty(<d->ltd_layout_phase_list))
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_ost_phase1_list);
+ } else {
+ if (list_empty(<d->ltd_layout_list))
+ list_add_tail(<d->ltd_layout_list,
+ &llmd->llmd_mdt_list);
+ if (list_empty(<d->ltd_layout_phase_list))
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_mdt_phase1_list);
+ }
+ spin_unlock(<ds->ltd_lock);
+ lfsck_tgt_put(ltd);
+ break;
+ case LE_STOP:
+ case LE_PHASE1_DONE:
+ case LE_PHASE2_DONE:
+ if (rc != 0)
+ CERROR("%s: fail to notify %s %x for layout: "
+ "event = %d, rc = %d\n",
+ lfsck_lfsck2name(com->lc_lfsck),
+ (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
+ ltd->ltd_index, lr->lr_event, rc);
+ break;
+ case LE_QUERY: {
+ struct lfsck_reply *reply;
+
+ if (rc != 0) {
+ spin_lock(<ds->ltd_lock);
+ list_del_init(<d->ltd_layout_phase_list);
+ list_del_init(<d->ltd_layout_list);
+ spin_unlock(<ds->ltd_lock);
+ lfsck_tgt_put(ltd);
+ break;
+ }
+
+ reply = req_capsule_server_get(&req->rq_pill,
+ &RMF_LFSCK_REPLY);
+ if (reply == NULL) {
+ rc = -EPROTO;
+ CERROR("%s: invalid return value: rc = %d\n",
+ lfsck_lfsck2name(com->lc_lfsck), rc);
+ spin_lock(<ds->ltd_lock);
+ list_del_init(<d->ltd_layout_phase_list);
+ list_del_init(<d->ltd_layout_list);
+ spin_unlock(<ds->ltd_lock);
+ lfsck_tgt_put(ltd);
+ break;
+ }
+
+ switch (reply->lr_status) {
+ case LS_SCANNING_PHASE1:
+ break;
+ case LS_SCANNING_PHASE2:
+ spin_lock(<ds->ltd_lock);
+ list_del_init(<d->ltd_layout_phase_list);
+ if (ltd->ltd_dead || ltd->ltd_layout_done) {
+ spin_unlock(<ds->ltd_lock);
+ break;
+ }
+
+ if (lr->lr_flags & LEF_TO_OST)
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_ost_phase2_list);
+ else
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_mdt_phase2_list);
+ spin_unlock(<ds->ltd_lock);
+ break;
+ default:
+ spin_lock(<ds->ltd_lock);
+ list_del_init(<d->ltd_layout_phase_list);
+ list_del_init(<d->ltd_layout_list);
+ spin_unlock(<ds->ltd_lock);
+ break;
+ }
+ lfsck_tgt_put(ltd);
+ break;
+ }
+ default:
+ CERROR("%s: unexpected event: rc = %d\n",
+ lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
+ break;
+ }
+
+ lfsck_component_put(env, com);
+
+ return 0;
+}
+
static int lfsck_layout_master_query_others(const struct lu_env *env,
struct lfsck_component *com)
{
- /* XXX: to be implemented. */
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct lfsck_request *lr = &info->lti_lr;
+ struct lfsck_async_interpret_args *laia = &info->lti_laia;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_layout_master_data *llmd = com->lc_data;
+ struct ptlrpc_request_set *set;
+ struct lfsck_tgt_descs *ltds;
+ struct lfsck_tgt_desc *ltd;
+ struct list_head *head;
+ __u32 cnt = 0;
+ int rc = 0;
+ int rc1 = 0;
+ ENTRY;
- return 0;
+ set = ptlrpc_prep_set();
+ if (set == NULL)
+ RETURN(-ENOMEM);
+
+ llmd->llmd_touch_gen++;
+ memset(lr, 0, sizeof(*lr));
+ lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+ lr->lr_event = LE_QUERY;
+ lr->lr_active = LT_LAYOUT;
+ laia->laia_com = com;
+ laia->laia_lr = lr;
+
+ if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
+ ltds = &lfsck->li_mdt_descs;
+ lr->lr_flags = 0;
+ head = &llmd->llmd_mdt_phase1_list;
+ } else {
+
+again:
+ ltds = &lfsck->li_ost_descs;
+ lr->lr_flags = LEF_TO_OST;
+ head = &llmd->llmd_ost_phase1_list;
+ }
+
+ laia->laia_ltds = ltds;
+ spin_lock(<ds->ltd_lock);
+ while (!list_empty(head)) {
+ ltd = list_entry(head->next,
+ struct lfsck_tgt_desc,
+ ltd_layout_phase_list);
+ if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
+ break;
+
+ ltd->ltd_layout_gen = llmd->llmd_touch_gen;
+ list_del(<d->ltd_layout_phase_list);
+ list_add_tail(<d->ltd_layout_phase_list, head);
+ atomic_inc(<d->ltd_ref);
+ laia->laia_ltd = ltd;
+ spin_unlock(<ds->ltd_lock);
+ rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
+ lfsck_layout_master_async_interpret,
+ laia, LFSCK_QUERY);
+ if (rc != 0) {
+ CERROR("%s: fail to query %s %x for layout: rc = %d\n",
+ lfsck_lfsck2name(lfsck),
+ (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
+ ltd->ltd_index, rc);
+ lfsck_tgt_put(ltd);
+ rc1 = rc;
+ } else {
+ cnt++;
+ }
+ spin_lock(<ds->ltd_lock);
+ }
+ spin_unlock(<ds->ltd_lock);
+
+ if (cnt > 0) {
+ rc = ptlrpc_set_wait(set);
+ if (rc < 0) {
+ ptlrpc_set_destroy(set);
+ RETURN(rc);
+ }
+ cnt = 0;
+ }
+
+ if (!(lr->lr_flags & LEF_TO_OST) &&
+ list_empty(&llmd->llmd_mdt_phase1_list))
+ goto again;
+
+ ptlrpc_set_destroy(set);
+
+ RETURN(rc1 != 0 ? rc1 : rc);
}
static inline bool
lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
{
- /* XXX: to be implemented. */
-
- return 1;
+ return list_empty(&llmd->llmd_mdt_phase1_list) &&
+ (!list_empty(&llmd->llmd_ost_phase2_list) ||
+ list_empty(&llmd->llmd_ost_phase1_list));
}
static int lfsck_layout_master_notify_others(const struct lu_env *env,
struct lfsck_component *com,
- struct lfsck_request *lr)
+ struct lfsck_request *lr,
+ __u32 flags)
{
- /* XXX: to be implemented. */
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct lfsck_async_interpret_args *laia = &info->lti_laia;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_layout_master_data *llmd = com->lc_data;
+ struct lfsck_layout *lo = com->lc_file_ram;
+ struct ptlrpc_request_set *set;
+ struct lfsck_tgt_descs *ltds;
+ struct lfsck_tgt_desc *ltd;
+ struct lfsck_tgt_desc *next;
+ struct list_head *head;
+ __u32 idx;
+ __u32 cnt = 0;
+ int rc = 0;
+ ENTRY;
- return 0;
+ set = ptlrpc_prep_set();
+ if (set == NULL)
+ RETURN(-ENOMEM);
+
+ lr->lr_active = LT_LAYOUT;
+ laia->laia_com = com;
+ laia->laia_lr = lr;
+ lr->lr_flags = 0;
+ switch (lr->lr_event) {
+ case LE_START:
+ /* Notify OSTs firstly, then other MDTs if needed. */
+ lr->lr_flags |= LEF_TO_OST;
+ ltds = &lfsck->li_ost_descs;
+
+lable1:
+ laia->laia_ltds = ltds;
+ down_read(<ds->ltd_rw_sem);
+ cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
+ ltd = lfsck_tgt_get(ltds, idx);
+ LASSERT(ltd != NULL);
+
+ laia->laia_ltd = ltd;
+ ltd->ltd_layout_done = 0;
+ rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
+ lfsck_layout_master_async_interpret,
+ laia, LFSCK_NOTIFY);
+ if (rc != 0) {
+ CERROR("%s: fail to notify %s %x for layout "
+ "start: rc = %d\n",
+ lfsck_lfsck2name(lfsck),
+ (lr->lr_flags & LEF_TO_OST) ? "OST" :
+ "MDT", idx, rc);
+ lfsck_tgt_put(ltd);
+ lo->ll_flags |= LF_INCOMPLETE;
+ } else {
+ cnt++;
+ }
+ }
+ up_read(<ds->ltd_rw_sem);
+
+ /* Sync up */
+ if (cnt > 0) {
+ rc = ptlrpc_set_wait(set);
+ if (rc < 0) {
+ ptlrpc_set_destroy(set);
+ RETURN(rc);
+ }
+ cnt = 0;
+ }
+
+ if (!(flags & LPF_ALL_MDT))
+ break;
+
+ ltds = &lfsck->li_mdt_descs;
+ /* The sponsor broadcasts the request to other MDTs. */
+ if (flags & LPF_BROADCAST) {
+ flags &= ~LPF_ALL_MDT;
+ lr->lr_flags &= ~LEF_TO_OST;
+ goto lable1;
+ }
+
+ /* non-sponsors link other MDT targets locally. */
+ spin_lock(<ds->ltd_lock);
+ cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
+ ltd = LTD_TGT(ltds, idx);
+ LASSERT(ltd != NULL);
+
+ if (!list_empty(<d->ltd_layout_list))
+ continue;
+
+ list_add_tail(<d->ltd_layout_list,
+ &llmd->llmd_mdt_list);
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_mdt_phase1_list);
+ }
+ spin_unlock(<ds->ltd_lock);
+
+ break;
+ case LE_STOP:
+ if (flags & LPF_BROADCAST)
+ lr->lr_flags |= LEF_FORCE_STOP;
+ case LE_PHASE2_DONE:
+ /* Notify other MDTs if needed, then the OSTs. */
+ if (flags & LPF_ALL_MDT) {
+ /* The sponsor broadcasts the request to other MDTs. */
+ if (flags & LPF_BROADCAST) {
+ lr->lr_flags &= ~LEF_TO_OST;
+ head = &llmd->llmd_mdt_list;
+ ltds = &lfsck->li_mdt_descs;
+ goto lable3;
+ }
+
+ /* non-sponsors unlink other MDT targets locally. */
+ ltds = &lfsck->li_mdt_descs;
+ spin_lock(<ds->ltd_lock);
+ list_for_each_entry_safe(ltd, next,
+ &llmd->llmd_mdt_list,
+ ltd_layout_list) {
+ list_del_init(<d->ltd_layout_phase_list);
+ list_del_init(<d->ltd_layout_list);
+ }
+ spin_unlock(<ds->ltd_lock);
+ }
+
+lable2:
+ lr->lr_flags |= LEF_TO_OST;
+ head = &llmd->llmd_ost_list;
+ ltds = &lfsck->li_ost_descs;
+
+lable3:
+ laia->laia_ltds = ltds;
+ spin_lock(<ds->ltd_lock);
+ while (!list_empty(head)) {
+ ltd = list_entry(head->next, struct lfsck_tgt_desc,
+ ltd_layout_list);
+ if (!list_empty(<d->ltd_layout_phase_list))
+ list_del_init(<d->ltd_layout_phase_list);
+ list_del_init(<d->ltd_layout_list);
+ laia->laia_ltd = ltd;
+ spin_unlock(<ds->ltd_lock);
+ rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
+ lfsck_layout_master_async_interpret,
+ laia, LFSCK_NOTIFY);
+ if (rc != 0)
+ CERROR("%s: fail to notify %s %x for layout "
+ "stop/phase2: rc = %d\n",
+ lfsck_lfsck2name(lfsck),
+ (lr->lr_flags & LEF_TO_OST) ? "OST" :
+ "MDT", ltd->ltd_index, rc);
+ else
+ cnt++;
+ spin_lock(<ds->ltd_lock);
+ }
+ spin_unlock(<ds->ltd_lock);
+
+ if (!(flags & LPF_BROADCAST))
+ break;
+
+ /* Sync up */
+ if (cnt > 0) {
+ rc = ptlrpc_set_wait(set);
+ if (rc < 0) {
+ ptlrpc_set_destroy(set);
+ RETURN(rc);
+ }
+ cnt = 0;
+ }
+
+ flags &= ~LPF_BROADCAST;
+ goto lable2;
+ case LE_PHASE1_DONE:
+ llmd->llmd_touch_gen++;
+ lr->lr_flags &= ~LEF_TO_OST;
+ ltds = &lfsck->li_mdt_descs;
+ laia->laia_ltds = ltds;
+ spin_lock(<ds->ltd_lock);
+ while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
+ ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
+ struct lfsck_tgt_desc,
+ ltd_layout_phase_list);
+ if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
+ break;
+
+ ltd->ltd_layout_gen = llmd->llmd_touch_gen;
+ list_del_init(<d->ltd_layout_phase_list);
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_mdt_phase1_list);
+ laia->laia_ltd = ltd;
+ spin_unlock(<ds->ltd_lock);
+ rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
+ lfsck_layout_master_async_interpret,
+ laia, LFSCK_NOTIFY);
+ if (rc != 0)
+ CERROR("%s: fail to notify MDT %x for layout "
+ "phase1 done: rc = %d\n",
+ lfsck_lfsck2name(lfsck),
+ ltd->ltd_index, rc);
+ else
+ cnt++;
+ spin_lock(<ds->ltd_lock);
+ }
+ spin_unlock(<ds->ltd_lock);
+ break;
+ default:
+ CERROR("%s: unexpected LFSCK event: rc = %d\n",
+ lfsck_lfsck2name(lfsck), lr->lr_event);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (cnt > 0)
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
+
+ if (rc == 0 && lr->lr_event == LE_START &&
+ list_empty(&llmd->llmd_ost_list))
+ rc = -ENODEV;
+
+ RETURN(rc);
}
static int lfsck_layout_double_scan_result(const struct lu_env *env,
lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
lo->ll_success_count++;
} else if (rc == 0) {
- if (lfsck->li_paused)
- lo->ll_status = LS_PAUSED;
- else
+ lo->ll_status = lfsck->li_status;
+ if (lo->ll_status == 0)
lo->ll_status = LS_STOPPED;
} else {
lo->ll_status = LS_FAILED;
return rc;
}
+static int lfsck_layout_scan_orphan(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct lfsck_tgt_desc *ltd)
+{
+ /* XXX: To be extended in other patch. */
+
+ return 0;
+}
+
static int lfsck_layout_assistant(void *args)
{
struct lfsck_thread_args *lta = args;
struct l_wait_info lwi = { 0 };
int rc = 0;
int rc1 = 0;
+ __u32 flags;
ENTRY;
+ if (lta->lta_lsp->lsp_start != NULL)
+ flags = lta->lta_lsp->lsp_start->ls_flags;
+ else
+ flags = bk->lb_param;
memset(lr, 0, sizeof(*lr));
lr->lr_event = LE_START;
lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
LSV_ASYNC_WINDOWS;
lr->lr_speed = bk->lb_speed_limit;
lr->lr_version = bk->lb_version;
- lr->lr_active = LT_LAYOUT;
lr->lr_param = bk->lb_param;
lr->lr_async_windows = bk->lb_async_windows;
if (pos->lp_oit_cookie <= 1)
lr->lr_param |= LPF_RESET;
- rc = lfsck_layout_master_notify_others(env, com, lr);
+ rc = lfsck_layout_master_notify_others(env, com, lr, flags);
if (rc != 0) {
CERROR("%s: fail to notify others for layout start: rc = %d\n",
lfsck_lfsck2name(lfsck), rc);
memset(lr, 0, sizeof(*lr));
lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
- lr->lr_active = LT_LAYOUT;
lr->lr_event = LE_PHASE1_DONE;
lr->lr_status = llmd->llmd_post_result;
- rc = lfsck_layout_master_notify_others(env, com, lr);
+ rc = lfsck_layout_master_notify_others(env, com, lr, 0);
if (rc != 0)
CERROR("%s: failed to notify others "
"for layout post: rc = %d\n",
wake_up_all(&mthread->t_ctl_waitq);
while (llmd->llmd_in_double_scan) {
+ struct lfsck_tgt_descs *ltds =
+ &lfsck->li_ost_descs;
+ struct lfsck_tgt_desc *ltd;
+
rc = lfsck_layout_master_query_others(env, com);
if (lfsck_layout_master_to_orphan(llmd))
goto orphan;
GOTO(cleanup2, rc);
orphan:
- /* XXX: real double scan for ost orphans. */
-
- GOTO(cleanup2, rc = 1);
+ spin_lock(<ds->ltd_lock);
+ while (!list_empty(
+ &llmd->llmd_ost_phase2_list)) {
+ ltd = list_entry(
+ llmd->llmd_ost_phase2_list.next,
+ struct lfsck_tgt_desc,
+ ltd_layout_phase_list);
+ list_del_init(
+ <d->ltd_layout_phase_list);
+ spin_unlock(<ds->ltd_lock);
+
+ rc = lfsck_layout_scan_orphan(env, com,
+ ltd);
+ if (rc != 0 &&
+ bk->lb_param & LPF_FAILOUT)
+ GOTO(cleanup2, rc);
+
+ if (unlikely(llmd->llmd_exit ||
+ !thread_is_running(mthread)))
+ GOTO(cleanup2, rc = 0);
+
+ spin_lock(<ds->ltd_lock);
+ }
+
+ if (list_empty(&llmd->llmd_ost_phase1_list)) {
+ spin_unlock(<ds->ltd_lock);
+ GOTO(cleanup2, rc = 1);
+ }
+ spin_unlock(<ds->ltd_lock);
}
}
}
cleanup2:
memset(lr, 0, sizeof(*lr));
lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
- lr->lr_active = LT_LAYOUT;
if (rc > 0) {
lr->lr_event = LE_PHASE2_DONE;
+ flags = 0;
lr->lr_status = rc;
} else if (rc == 0) {
lr->lr_event = LE_STOP;
- if (lfsck->li_paused)
+ if (lfsck->li_status == LS_PAUSED ||
+ lfsck->li_status == LS_CO_PAUSED) {
+ flags = 0;
lr->lr_status = LS_CO_PAUSED;
- else
- lr->lr_status = LS_CO_STOPPED;
+ } else if (lfsck->li_status == LS_STOPPED ||
+ lfsck->li_status == LS_CO_STOPPED) {
+ flags = lfsck->li_flags;
+ if (flags & LPF_BROADCAST)
+ lr->lr_status = LS_STOPPED;
+ else
+ lr->lr_status = LS_CO_STOPPED;
+ } else {
+ LBUG();
+ }
} else {
lr->lr_event = LE_STOP;
+ flags = 0;
lr->lr_status = LS_CO_FAILED;
}
- rc1 = lfsck_layout_master_notify_others(env, com, lr);
+ rc1 = lfsck_layout_master_notify_others(env, com, lr, flags);
if (rc1 != 0) {
CERROR("%s: failed to notify others for layout quit: rc = %d\n",
lfsck_lfsck2name(lfsck), rc1);
return rc;
}
+static int
+lfsck_layout_slave_async_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *args, int rc)
+{
+ struct lfsck_layout_slave_async_args *llsaa = args;
+ struct obd_export *exp = llsaa->llsaa_exp;
+ struct lfsck_component *com = llsaa->llsaa_com;
+ struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
+ struct lfsck_layout_slave_data *llsd = com->lc_data;
+ bool done = false;
+
+ if (rc != 0) {
+ /* It is quite probably caused by a target crash;
+ * to let the LFSCK go ahead, assume that the
+ * target finished the LFSCK processing. */
+ done = true;
+ } else {
+ struct lfsck_reply *lr;
+
+ lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
+ if (lr->lr_status != LS_SCANNING_PHASE1 &&
+ lr->lr_status != LS_SCANNING_PHASE2)
+ done = true;
+ }
+ if (done)
+ lfsck_layout_llst_del(llsd, llst);
+ lfsck_layout_llst_put(llst);
+ lfsck_component_put(env, com);
+ class_export_put(exp);
+
+ return 0;
+}
+
+static int lfsck_layout_async_query(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct obd_export *exp,
+ struct lfsck_layout_slave_target *llst,
+ struct lfsck_request *lr,
+ struct ptlrpc_request_set *set)
+{
+ struct lfsck_layout_slave_async_args *llsaa;
+ struct ptlrpc_request *req;
+ struct lfsck_request *tmp;
+ int rc;
+ ENTRY;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
+ if (rc != 0) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
+ *tmp = *lr;
+ ptlrpc_request_set_replen(req);
+
+ llsaa = ptlrpc_req_async_args(req);
+ llsaa->llsaa_exp = exp;
+ llsaa->llsaa_com = lfsck_component_get(com);
+ llsaa->llsaa_llst = llst;
+ req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
+ ptlrpc_set_add_req(set, req);
+
+ RETURN(0);
+}
+
+static int lfsck_layout_async_notify(const struct lu_env *env,
+ struct obd_export *exp,
+ struct lfsck_request *lr,
+ struct ptlrpc_request_set *set)
+{
+ struct ptlrpc_request *req;
+ struct lfsck_request *tmp;
+ int rc;
+ ENTRY;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
+ if (rc != 0) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
+ *tmp = *lr;
+ ptlrpc_request_set_replen(req);
+ ptlrpc_set_add_req(set, req);
+
+ RETURN(0);
+}
+
+static int
+lfsck_layout_slave_query_master(const struct lu_env *env,
+ struct lfsck_component *com)
+{
+ struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_layout_slave_data *llsd = com->lc_data;
+ struct lfsck_layout_slave_target *llst;
+ struct obd_export *exp;
+ struct ptlrpc_request_set *set;
+ int cnt = 0;
+ int rc = 0;
+ int rc1 = 0;
+ ENTRY;
+
+ set = ptlrpc_prep_set();
+ if (set == NULL)
+ RETURN(-ENOMEM);
+
+ memset(lr, 0, sizeof(*lr));
+ lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+ lr->lr_event = LE_QUERY;
+ lr->lr_active = LT_LAYOUT;
+
+ llsd->llsd_touch_gen++;
+ spin_lock(&llsd->llsd_lock);
+ while (!list_empty(&llsd->llsd_master_list)) {
+ llst = list_entry(llsd->llsd_master_list.next,
+ struct lfsck_layout_slave_target,
+ llst_list);
+ if (llst->llst_gen == llsd->llsd_touch_gen)
+ break;
+
+ llst->llst_gen = llsd->llsd_touch_gen;
+ list_del(&llst->llst_list);
+ list_add_tail(&llst->llst_list,
+ &llsd->llsd_master_list);
+ atomic_inc(&llst->llst_ref);
+ spin_unlock(&llsd->llsd_lock);
+
+ exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
+ llst->llst_index);
+ if (exp == NULL) {
+ lfsck_layout_llst_del(llsd, llst);
+ lfsck_layout_llst_put(llst);
+ spin_lock(&llsd->llsd_lock);
+ continue;
+ }
+
+ rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
+ if (rc != 0) {
+ CERROR("%s: slave fail to query %s for layout: "
+ "rc = %d\n", lfsck_lfsck2name(lfsck),
+ exp->exp_obd->obd_name, rc);
+ rc1 = rc;
+ lfsck_layout_llst_put(llst);
+ class_export_put(exp);
+ } else {
+ cnt++;
+ }
+ spin_lock(&llsd->llsd_lock);
+ }
+ spin_unlock(&llsd->llsd_lock);
+
+ if (cnt > 0)
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
+
+ RETURN(rc1 != 0 ? rc1 : rc);
+}
+
+static void
+lfsck_layout_slave_notify_master(const struct lu_env *env,
+ struct lfsck_component *com,
+ enum lfsck_events event, int result)
+{
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_layout_slave_data *llsd = com->lc_data;
+ struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
+ struct lfsck_layout_slave_target *llst;
+ struct obd_export *exp;
+ struct ptlrpc_request_set *set;
+ int cnt = 0;
+ int rc;
+ ENTRY;
+
+ set = ptlrpc_prep_set();
+ if (set == NULL)
+ RETURN_EXIT;
+
+ memset(lr, 0, sizeof(*lr));
+ lr->lr_event = event;
+ lr->lr_flags = LEF_FROM_OST;
+ lr->lr_status = result;
+ lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+ lr->lr_active = LT_LAYOUT;
+ llsd->llsd_touch_gen++;
+ spin_lock(&llsd->llsd_lock);
+ while (!list_empty(&llsd->llsd_master_list)) {
+ llst = list_entry(llsd->llsd_master_list.next,
+ struct lfsck_layout_slave_target,
+ llst_list);
+ if (llst->llst_gen == llsd->llsd_touch_gen)
+ break;
+
+ llst->llst_gen = llsd->llsd_touch_gen;
+ list_del(&llst->llst_list);
+ list_add_tail(&llst->llst_list,
+ &llsd->llsd_master_list);
+ atomic_inc(&llst->llst_ref);
+ spin_unlock(&llsd->llsd_lock);
+
+ exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
+ llst->llst_index);
+ if (exp == NULL) {
+ lfsck_layout_llst_del(llsd, llst);
+ lfsck_layout_llst_put(llst);
+ spin_lock(&llsd->llsd_lock);
+ continue;
+ }
+
+ rc = lfsck_layout_async_notify(env, exp, lr, set);
+ if (rc != 0)
+ CERROR("%s: slave fail to notify %s for layout: "
+ "rc = %d\n", lfsck_lfsck2name(lfsck),
+ exp->exp_obd->obd_name, rc);
+ else
+ cnt++;
+ lfsck_layout_llst_put(llst);
+ class_export_put(exp);
+ spin_lock(&llsd->llsd_lock);
+ }
+ spin_unlock(&llsd->llsd_lock);
+
+ if (cnt > 0)
+ rc = ptlrpc_set_wait(set);
+
+ ptlrpc_set_destroy(set);
+
+ RETURN_EXIT;
+}
+
/* layout APIs */
static int lfsck_layout_reset(const struct lu_env *env,
return rc;
}
-static int lfsck_layout_slave_prep(const struct lu_env *env,
- struct lfsck_component *com)
+static int lfsck_layout_prep(const struct lu_env *env,
+ struct lfsck_component *com)
{
struct lfsck_instance *lfsck = com->lc_lfsck;
struct lfsck_layout *lo = com->lc_file_ram;
struct lfsck_position *pos = &com->lc_pos_start;
- /* XXX: For a new scanning, generate OST-objects
- * bitmap for orphan detection. */
-
fid_zero(&pos->lp_dir_parent);
pos->lp_dir_cookie = 0;
if (lo->ll_status == LS_COMPLETED ||
return 0;
}
+static int lfsck_layout_slave_prep(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct lfsck_start_param *lsp)
+{
+ struct lfsck_layout *lo = com->lc_file_ram;
+ struct lfsck_layout_slave_data *llsd = com->lc_data;
+ int rc;
+
+ /* XXX: For a new scanning, generate OST-objects
+ * bitmap for orphan detection. */
+
+ rc = lfsck_layout_prep(env, com);
+ if (rc != 0 || lo->ll_status != LS_SCANNING_PHASE1 ||
+ !lsp->lsp_index_valid)
+ return rc;
+
+ rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
+
+ return rc;
+}
+
static int lfsck_layout_master_prep(const struct lu_env *env,
- struct lfsck_component *com)
+ struct lfsck_component *com,
+ struct lfsck_start_param *lsp)
{
struct lfsck_instance *lfsck = com->lc_lfsck;
struct lfsck_layout_master_data *llmd = com->lc_data;
long rc;
ENTRY;
- rc = lfsck_layout_slave_prep(env, com);
+ rc = lfsck_layout_prep(env, com);
if (rc != 0)
RETURN(rc);
llmd->llmd_exit = 0;
thread_set_flags(athread, 0);
- lta = lfsck_thread_args_init(lfsck, com);
+ lta = lfsck_thread_args_init(lfsck, com, lsp);
if (IS_ERR(lta))
RETURN(PTR_ERR(lta));
list_del_init(&com->lc_link);
list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
} else if (result == 0) {
- if (lfsck->li_paused) {
- lo->ll_status = LS_PAUSED;
- } else {
+ lo->ll_status = lfsck->li_status;
+ if (lo->ll_status == 0)
lo->ll_status = LS_STOPPED;
+ if (lo->ll_status != LS_PAUSED) {
list_del_init(&com->lc_link);
list_add_tail(&com->lc_link, &lfsck->li_list_idle);
}
list_del_init(&com->lc_link);
list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
} else if (result == 0) {
- if (lfsck->li_paused) {
- lo->ll_status = LS_PAUSED;
- } else {
+ lo->ll_status = lfsck->li_status;
+ if (lo->ll_status == 0)
lo->ll_status = LS_STOPPED;
+ if (lo->ll_status != LS_PAUSED) {
list_del_init(&com->lc_link);
list_add_tail(&com->lc_link, &lfsck->li_list_idle);
}
up_write(&com->lc_sem);
+ lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
+
return rc;
}
static int lfsck_layout_slave_double_scan(const struct lu_env *env,
struct lfsck_component *com)
{
- struct lfsck_instance *lfsck = com->lc_lfsck;
- struct lfsck_layout *lo = com->lc_file_ram;
- int rc = 1;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_layout_slave_data *llsd = com->lc_data;
+ struct lfsck_layout *lo = com->lc_file_ram;
+ struct ptlrpc_thread *thread = &lfsck->li_thread;
+ int rc;
+ ENTRY;
if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
- return 0;
+ RETURN(0);
atomic_inc(&lfsck->li_double_scan_count);
com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
+ while (1) {
+ struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
+ NULL, NULL);
+
+ rc = lfsck_layout_slave_query_master(env, com);
+ if (list_empty(&llsd->llsd_master_list)) {
+ if (unlikely(!thread_is_running(thread)))
+ rc = 0;
+ else
+ rc = 1;
+
+ GOTO(done, rc);
+ }
+
+ if (rc < 0)
+ GOTO(done, rc);
+
+ rc = l_wait_event(thread->t_ctl_waitq,
+ !thread_is_running(thread) ||
+ list_empty(&llsd->llsd_master_list),
+ &lwi);
+ if (unlikely(!thread_is_running(thread)))
+ GOTO(done, rc = 0);
+
+ if (rc == -ETIMEDOUT)
+ continue;
+
+ GOTO(done, rc = (rc < 0 ? rc : 1));
+ }
+
+done:
rc = lfsck_layout_double_scan_result(env, com, rc);
if (atomic_dec_and_test(&lfsck->li_double_scan_count))
static void lfsck_layout_master_data_release(const struct lu_env *env,
struct lfsck_component *com)
{
- struct lfsck_layout_master_data *llmd = com->lc_data;
+ struct lfsck_layout_master_data *llmd = com->lc_data;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_tgt_descs *ltds;
+ struct lfsck_tgt_desc *ltd;
+ struct lfsck_tgt_desc *next;
LASSERT(llmd != NULL);
LASSERT(thread_is_init(&llmd->llmd_thread) ||
LASSERT(atomic_read(&llmd->llmd_rpcs_in_flight) == 0);
com->lc_data = NULL;
+
+ ltds = &lfsck->li_ost_descs;
+ spin_lock(<ds->ltd_lock);
+ list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
+ ltd_layout_phase_list) {
+ list_del_init(<d->ltd_layout_phase_list);
+ }
+ list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
+ ltd_layout_phase_list) {
+ list_del_init(<d->ltd_layout_phase_list);
+ }
+ list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
+ ltd_layout_list) {
+ list_del_init(<d->ltd_layout_list);
+ }
+ list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
+ ltd_layout_phase_list) {
+ list_del_init(<d->ltd_layout_phase_list);
+ }
+ list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
+ ltd_layout_phase_list) {
+ list_del_init(<d->ltd_layout_phase_list);
+ }
+ list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
+ ltd_layout_list) {
+ list_del_init(<d->ltd_layout_list);
+ }
+ spin_unlock(<ds->ltd_lock);
+
OBD_FREE_PTR(llmd);
}
static void lfsck_layout_slave_data_release(const struct lu_env *env,
struct lfsck_component *com)
{
-	struct lfsck_layout_slave_data *llsd = com->lc_data;
-	struct lfsck_layout_seq *lls;
-	struct lfsck_layout_seq *next;
+	struct lfsck_layout_slave_data *llsd = com->lc_data;
+	struct lfsck_layout_seq *lls;
+	struct lfsck_layout_seq *next;
+	struct lfsck_layout_slave_target *llst;
+	struct lfsck_layout_slave_target *tmp;
LASSERT(llsd != NULL);
OBD_FREE_PTR(lls);
}
+	/* Free any master-tracking entries still linked on llsd_master_list;
+	 * list_del_init() first so a stale llst_list pointer cannot survive
+	 * the free (the entries are freed directly, bypassing the refcount
+	 * in lfsck_layout_llst_put(), since nothing can hold a reference
+	 * during component release). */
+	list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
+				 llst_list) {
+		list_del_init(&llst->llst_list);
+		OBD_FREE_PTR(llst);
+	}
+
OBD_FREE_PTR(llsd);
}
&lwi);
}
+/* Handle an incoming event RPC from a slave target (OST or MDT) during
+ * layout LFSCK: track phase transitions of the sender and, for forced
+ * stop, stop the local LFSCK as well.  Returns 0 on success, -EINVAL
+ * for an unexpected event, -ENODEV if the sender is unknown.
+ * (Fixed mis-encoded "&lt" -> "<" corruption throughout: "<ds"/"<d"
+ * restored to "&ltds"/"&ltd".) */
+static int lfsck_layout_master_in_notify(const struct lu_env *env,
+					 struct lfsck_component *com,
+					 struct lfsck_request *lr)
+{
+	struct lfsck_instance *lfsck = com->lc_lfsck;
+	struct lfsck_layout *lo = com->lc_file_ram;
+	struct lfsck_layout_master_data *llmd = com->lc_data;
+	struct lfsck_tgt_descs *ltds;
+	struct lfsck_tgt_desc *ltd;
+	ENTRY;
+
+	/* Only phase-done and stop events are meaningful here. */
+	if (lr->lr_event != LE_PHASE1_DONE &&
+	    lr->lr_event != LE_PHASE2_DONE &&
+	    lr->lr_event != LE_STOP)
+		RETURN(-EINVAL);
+
+	if (lr->lr_flags & LEF_FROM_OST)
+		ltds = &lfsck->li_ost_descs;
+	else
+		ltds = &lfsck->li_mdt_descs;
+	spin_lock(&ltds->ltd_lock);
+	ltd = LTD_TGT(ltds, lr->lr_index);
+	if (ltd == NULL) {
+		spin_unlock(&ltds->ltd_lock);
+
+		RETURN(-ENODEV);
+	}
+
+	/* The sender leaves whatever phase list it was on; where it goes
+	 * next depends on the event. */
+	list_del_init(&ltd->ltd_layout_phase_list);
+	switch (lr->lr_event) {
+	case LE_PHASE1_DONE:
+		if (lr->lr_status <= 0) {
+			/* Phase1 failed on the sender: drop it entirely and
+			 * record that the overall scan is incomplete. */
+			ltd->ltd_layout_done = 1;
+			list_del_init(&ltd->ltd_layout_list);
+			lo->ll_flags |= LF_INCOMPLETE;
+			break;
+		}
+
+		/* Phase1 succeeded: move the sender onto the matching
+		 * phase2 list (and the overall list if not yet there). */
+		if (lr->lr_flags & LEF_FROM_OST) {
+			if (list_empty(&ltd->ltd_layout_list))
+				list_add_tail(&ltd->ltd_layout_list,
+					      &llmd->llmd_ost_list);
+			list_add_tail(&ltd->ltd_layout_phase_list,
+				      &llmd->llmd_ost_phase2_list);
+		} else {
+			if (list_empty(&ltd->ltd_layout_list))
+				list_add_tail(&ltd->ltd_layout_list,
+					      &llmd->llmd_mdt_list);
+			list_add_tail(&ltd->ltd_layout_phase_list,
+				      &llmd->llmd_mdt_phase2_list);
+		}
+		break;
+	case LE_PHASE2_DONE:
+		ltd->ltd_layout_done = 1;
+		list_del_init(&ltd->ltd_layout_list);
+		break;
+	case LE_STOP:
+		ltd->ltd_layout_done = 1;
+		list_del_init(&ltd->ltd_layout_list);
+		/* A non-forced co-stop leaves the scan incomplete. */
+		if (!(lr->lr_flags & LEF_FORCE_STOP))
+			lo->ll_flags |= LF_INCOMPLETE;
+		break;
+	default:
+		break;
+	}
+	spin_unlock(&ltds->ltd_lock);
+
+	if (lr->lr_flags & LEF_FORCE_STOP) {
+		struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
+
+		/* Forced stop: propagate status/param and stop locally. */
+		memset(stop, 0, sizeof(*stop));
+		stop->ls_status = lr->lr_status;
+		stop->ls_flags = lr->lr_param;
+		lfsck_stop(env, lfsck->li_bottom, stop);
+	} else if (lfsck_layout_master_to_orphan(llmd)) {
+		/* All targets may have finished phase1; wake the assistant
+		 * thread to start orphan (phase2) handling. */
+		wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
+	}
+
+	RETURN(0);
+}
+
+/* Handle an event RPC from a master MDT on a layout-LFSCK slave: drop
+ * the sender from llsd_master_list and, once no masters remain, either
+ * wake the local scanning thread (phase2 done) or stop the local LFSCK
+ * (stop event).  Returns 0, -EINVAL for an unexpected event, -ENODEV
+ * if the sender was not tracked. */
+static int lfsck_layout_slave_in_notify(const struct lu_env *env,
+					struct lfsck_component *com,
+					struct lfsck_request *lr)
+{
+	struct lfsck_instance *lfsck = com->lc_lfsck;
+	struct lfsck_layout_slave_data *llsd = com->lc_data;
+	struct lfsck_layout_slave_target *llst;
+	ENTRY;
+
+	if (lr->lr_event != LE_PHASE2_DONE &&
+	    lr->lr_event != LE_STOP)
+		RETURN(-EINVAL);
+
+	/* Remove the sender from the tracked-masters list; NULL means we
+	 * never knew about it (or it was already removed). */
+	llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index);
+	if (llst == NULL)
+		RETURN(-ENODEV);
+
+	lfsck_layout_llst_put(llst);
+	/* Only act when the last involved master has reported in. */
+	if (list_empty(&llsd->llsd_master_list)) {
+		switch (lr->lr_event) {
+		case LE_PHASE2_DONE:
+			wake_up_all(&lfsck->li_thread.t_ctl_waitq);
+			break;
+		case LE_STOP: {
+			struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
+
+			/* Propagate the master's status/param locally. */
+			memset(stop, 0, sizeof(*stop));
+			stop->ls_status = lr->lr_status;
+			stop->ls_flags = lr->lr_param;
+			lfsck_stop(env, lfsck->li_bottom, stop);
+			break;
+		}
+		default:
+			break;
+		}
+	}
+
+	RETURN(0);
+}
+
+/* Report the current layout LFSCK status from the in-RAM copy of the
+ * tracking file; installed as the .lfsck_query operation for both the
+ * master and the slave ops tables. */
+static int lfsck_layout_query(const struct lu_env *env,
+			      struct lfsck_component *com)
+{
+	struct lfsck_layout *lo = com->lc_file_ram;
+
+	return lo->ll_status;
+}
+
+/* Send an async LE_STOP (co-stopped) notification to one target so it
+ * stops its part of the layout LFSCK; skips notifying ourselves.
+ * Returns 0 on success or if skipped, negative errno on RPC setup
+ * failure.  (Fixed mis-encoded "&lt" -> "<" corruption in the two
+ * LASSERTs: "<d" restored to "&ltd".) */
+static int lfsck_layout_master_stop_notify(const struct lu_env *env,
+					   struct lfsck_component *com,
+					   struct lfsck_tgt_descs *ltds,
+					   struct lfsck_tgt_desc *ltd,
+					   struct ptlrpc_request_set *set)
+{
+	struct lfsck_thread_info *info = lfsck_env_info(env);
+	struct lfsck_async_interpret_args *laia = &info->lti_laia;
+	struct lfsck_request *lr = &info->lti_lr;
+	struct lfsck_instance *lfsck = com->lc_lfsck;
+	int rc;
+
+	/* The target must already be off the master tracking lists. */
+	LASSERT(list_empty(&ltd->ltd_layout_list));
+	LASSERT(list_empty(&ltd->ltd_layout_phase_list));
+
+	memset(lr, 0, sizeof(*lr));
+	lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+	lr->lr_event = LE_STOP;
+	lr->lr_active = LT_LAYOUT;
+	if (ltds == &lfsck->li_ost_descs) {
+		lr->lr_flags = LEF_TO_OST;
+	} else {
+		/* Do not notify the local MDT itself. */
+		if (ltd->ltd_index == lfsck_dev_idx(lfsck->li_bottom))
+			return 0;
+
+		lr->lr_flags = 0;
+	}
+	lr->lr_status = LS_CO_STOPPED;
+
+	laia->laia_com = com;
+	laia->laia_ltds = ltds;
+	laia->laia_ltd = ltd;
+	laia->laia_lr = lr;
+
+	rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
+				 lfsck_layout_master_async_interpret,
+				 laia, LFSCK_NOTIFY);
+	if (rc != 0)
+		CERROR("%s: Fail to notify %s %x for co-stop: rc = %d\n",
+		       lfsck_lfsck2name(lfsck),
+		       (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
+		       ltd->ltd_index, rc);
+
+	return rc;
+}
+
+/* Called with lfsck::li_lock held.  Let a late-starting master MDT join
+ * an already-running slave layout LFSCK by registering it on
+ * llsd_master_list.  Returns 0 on success, -EALREADY when joining is
+ * not applicable (no valid index, or not an ALL_MDT scan), -EAGAIN if
+ * the local LFSCK thread stopped while we were registering. */
+static int lfsck_layout_slave_join(const struct lu_env *env,
+				   struct lfsck_component *com,
+				   struct lfsck_start_param *lsp)
+{
+	struct lfsck_instance *lfsck = com->lc_lfsck;
+	struct lfsck_layout_slave_data *llsd = com->lc_data;
+	struct lfsck_layout_slave_target *llst;
+	struct lfsck_start *start = lsp->lsp_start;
+	int rc = 0;
+	ENTRY;
+
+	if (!lsp->lsp_index_valid || start == NULL ||
+	    !(start->ls_flags & LPF_ALL_MDT))
+		RETURN(-EALREADY);
+
+	/* Drop li_lock across llst_add (it allocates); re-check the thread
+	 * state afterwards and undo the registration if it stopped. */
+	spin_unlock(&lfsck->li_lock);
+	rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
+	spin_lock(&lfsck->li_lock);
+	if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
+		spin_unlock(&lfsck->li_lock);
+		llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index);
+		if (llst != NULL)
+			lfsck_layout_llst_put(llst);
+		spin_lock(&lfsck->li_lock);
+		rc = -EAGAIN;
+	}
+
+	RETURN(rc);
+}
+
static struct lfsck_operations lfsck_layout_master_ops = {
.lfsck_reset = lfsck_layout_reset,
.lfsck_fail = lfsck_layout_fail,
.lfsck_double_scan = lfsck_layout_master_double_scan,
.lfsck_data_release = lfsck_layout_master_data_release,
.lfsck_quit = lfsck_layout_master_quit,
+	/* hooks for peer event handling, status query, and co-stop. */
+	.lfsck_in_notify	= lfsck_layout_master_in_notify,
+	.lfsck_query		= lfsck_layout_query,
+	.lfsck_stop_notify	= lfsck_layout_master_stop_notify,
};
static struct lfsck_operations lfsck_layout_slave_ops = {
.lfsck_dump = lfsck_layout_dump,
.lfsck_double_scan = lfsck_layout_slave_double_scan,
.lfsck_data_release = lfsck_layout_slave_data_release,
+	/* hooks for master event handling, status query, and late join. */
+	.lfsck_in_notify	= lfsck_layout_slave_in_notify,
+	.lfsck_query		= lfsck_layout_query,
+	.lfsck_join		= lfsck_layout_slave_join,
};
int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
INIT_LIST_HEAD(&llmd->llmd_req_list);
spin_lock_init(&llmd->llmd_lock);
+ INIT_LIST_HEAD(&llmd->llmd_ost_list);
+ INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
+ INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
+ INIT_LIST_HEAD(&llmd->llmd_mdt_list);
+ INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
+ INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
atomic_set(&llmd->llmd_rpcs_in_flight, 0);
com->lc_data = llmd;
GOTO(out, rc = -ENOMEM);
INIT_LIST_HEAD(&llsd->llsd_seq_list);
+ INIT_LIST_HEAD(&llsd->llsd_master_list);
+ spin_lock_init(&llsd->llsd_lock);
com->lc_data = llsd;
}
com->lc_file_size = sizeof(*lo);