+/* Query the layout LFSCK status on the other involved targets.
+ *
+ * Stub: remote status querying is not implemented yet, so this always
+ * reports success; both parameters are currently unused.
+ *
+ * \retval 0 always (placeholder)
+ */
+static int lfsck_layout_master_query_others(const struct lu_env *env,
+ struct lfsck_component *com)
+{
+ /* XXX: to be implemented. */
+
+ return 0;
+}
+
+/* Whether the layout LFSCK master should switch to scanning OST orphans
+ * for the second phase.
+ *
+ * Stub: the real readiness check is not implemented yet, so the caller
+ * is told to start the orphan scan unconditionally; \a llmd is unused.
+ *
+ * \retval true always (placeholder)
+ */
+static inline bool
+lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
+{
+	/* XXX: to be implemented. */
+
+	/* Return the bool constant rather than the bare integer 1 to
+	 * match the function's bool return type. */
+	return true;
+}
+
+/* Notify the other involved targets about a layout LFSCK event carried
+ * in \a lr (start, phase done, stop, ...).
+ *
+ * Stub: remote notification is not implemented yet; always reports
+ * success, so callers proceed as if every target acknowledged.
+ *
+ * \retval 0 always (placeholder)
+ */
+static int lfsck_layout_master_notify_others(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct lfsck_request *lr)
+{
+ /* XXX: to be implemented. */
+
+ return 0;
+}
+
+/* Record the final result of the layout LFSCK second-phase (double) scan.
+ *
+ * Under com->lc_sem, accumulate the phase-2 run time and checked-object
+ * count into the in-RAM trace file, translate \a rc into the final
+ * status/flags, move the component back to the idle list unless the scan
+ * was merely paused, and flush the trace file to disk.
+ *
+ * \param[in] env	thread context
+ * \param[in] com	layout LFSCK component
+ * \param[in] rc	phase-2 result: > 0 completed, == 0 stopped or
+ *			paused, < 0 failed
+ *
+ * \retval result of storing the trace file (0 on success)
+ */
+static int lfsck_layout_double_scan_result(const struct lu_env *env,
+					   struct lfsck_component *com,
+					   int rc)
+{
+	struct lfsck_instance *lfsck = com->lc_lfsck;
+	struct lfsck_layout *lo = com->lc_file_ram;
+	struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+
+	down_write(&com->lc_sem);
+
+	/* HALF_SEC rounds the elapsed time to the nearest second. */
+	lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
+				HALF_SEC - lfsck->li_time_last_checkpoint);
+	lo->ll_time_last_checkpoint = cfs_time_current_sec();
+	lo->ll_objs_checked_phase2 += com->lc_new_checked;
+
+	if (rc > 0) {
+		com->lc_journal = 0;
+		if (lo->ll_flags & LF_INCOMPLETE)
+			lo->ll_status = LS_PARTIAL;
+		else
+			lo->ll_status = LS_COMPLETED;
+		/* Keep the inconsistency flags under dry-run so that a
+		 * subsequent repairing run still sees them. */
+		if (!(bk->lb_param & LPF_DRYRUN))
+			lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
+		lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
+		lo->ll_success_count++;
+	} else if (rc == 0) {
+		lo->ll_status = lfsck->li_paused ? LS_PAUSED : LS_STOPPED;
+	} else {
+		lo->ll_status = LS_FAILED;
+	}
+
+	/* A paused scan stays on the active list so it can be resumed. */
+	if (lo->ll_status != LS_PAUSED) {
+		spin_lock(&lfsck->li_lock);
+		/* list_move_tail() == list_del() + list_add_tail(). */
+		list_move_tail(&com->lc_link, &lfsck->li_list_idle);
+		spin_unlock(&lfsck->li_lock);
+	}
+
+	rc = lfsck_layout_store(env, com);
+
+	up_write(&com->lc_sem);
+
+	return rc;
+}
+
+/* Main loop of the layout LFSCK assistant thread.
+ *
+ * The assistant runs beside the master engine: it broadcasts the
+ * start/post/quit events to the other involved targets, consumes the
+ * request list (llmd_req_list) that the master engine pre-fetches, and
+ * drives the second-phase (orphan) scan.  The two threads communicate
+ * through the llmd flags (llmd_exit / llmd_to_post / llmd_to_double_scan)
+ * and the master/assistant wait queues.
+ *
+ * \param[in] args	a struct lfsck_thread_args; released via
+ *			lfsck_thread_args_fini() before return
+ *
+ * \retval the assistant status, also published through
+ *	   llmd->llmd_assistant_status
+ */
+static int lfsck_layout_assistant(void *args)
+{
+	struct lfsck_thread_args *lta = args;
+	/* NOTE: was mangled to "<a->lta_env" by an HTML-unescape of
+	 * "&lt"; the env is embedded in the thread args, take its
+	 * address. */
+	struct lu_env *env = &lta->lta_env;
+	struct lfsck_component *com = lta->lta_com;
+	struct lfsck_instance *lfsck = lta->lta_lfsck;
+	struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+	struct lfsck_position *pos = &com->lc_pos_start;
+	struct lfsck_thread_info *info = lfsck_env_info(env);
+	struct lfsck_request *lr = &info->lti_lr;
+	struct lfsck_layout_master_data *llmd = com->lc_data;
+	struct ptlrpc_thread *mthread = &lfsck->li_thread;
+	struct ptlrpc_thread *athread = &llmd->llmd_thread;
+	struct lfsck_layout_req *llr;
+	struct l_wait_info lwi = { 0 };
+	int rc = 0;
+	int rc1 = 0;
+	ENTRY;
+
+	/* Broadcast LE_START together with the current tunables. */
+	memset(lr, 0, sizeof(*lr));
+	lr->lr_event = LE_START;
+	lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+	lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
+		       LSV_ASYNC_WINDOWS;
+	lr->lr_speed = bk->lb_speed_limit;
+	lr->lr_version = bk->lb_version;
+	lr->lr_active = LT_LAYOUT;
+	lr->lr_param = bk->lb_param;
+	lr->lr_async_windows = bk->lb_async_windows;
+	/* Scanning from (or near) the beginning implies a reset. */
+	if (pos->lp_oit_cookie <= 1)
+		lr->lr_param |= LPF_RESET;
+
+	rc = lfsck_layout_master_notify_others(env, com, lr);
+	if (rc != 0) {
+		CERROR("%s: fail to notify others for layout start: rc = %d\n",
+		       lfsck_lfsck2name(lfsck), rc);
+		GOTO(fini, rc);
+	}
+
+	/* Tell the master engine we are up and running. */
+	spin_lock(&llmd->llmd_lock);
+	thread_set_flags(athread, SVC_RUNNING);
+	spin_unlock(&llmd->llmd_lock);
+	wake_up_all(&mthread->t_ctl_waitq);
+
+	while (1) {
+		/* Drain the pre-fetched request list. */
+		while (!list_empty(&llmd->llmd_req_list)) {
+			bool wakeup = false;
+
+			/* Throttle: respect the async RPC window unless
+			 * it is disabled (== 0) or we must exit. */
+			l_wait_event(athread->t_ctl_waitq,
+				     bk->lb_async_windows == 0 ||
+				     atomic_read(&llmd->llmd_rpcs_in_flight) <
+						bk->lb_async_windows ||
+				     llmd->llmd_exit,
+				     &lwi);
+
+			if (unlikely(llmd->llmd_exit))
+				GOTO(cleanup1, rc = llmd->llmd_post_result);
+
+			/* XXX: To be extended in other patch.
+			 *
+			 * Compare the OST side attribute with local attribute,
+			 * and fix it if found inconsistency. */
+
+			spin_lock(&llmd->llmd_lock);
+			llr = list_entry(llmd->llmd_req_list.next,
+					 struct lfsck_layout_req,
+					 llr_list);
+			list_del_init(&llr->llr_list);
+			/* The master engine may be blocked on a full
+			 * window; wake it once we free a slot. */
+			if (bk->lb_async_windows != 0 &&
+			    llmd->llmd_prefetched >= bk->lb_async_windows)
+				wakeup = true;
+
+			llmd->llmd_prefetched--;
+			spin_unlock(&llmd->llmd_lock);
+			if (wakeup)
+				wake_up_all(&mthread->t_ctl_waitq);
+
+			lfsck_layout_req_fini(env, llr);
+		}
+
+		/* Wakeup the master engine if it is waiting in checkpoint. */
+		if (atomic_read(&llmd->llmd_rpcs_in_flight) == 0)
+			wake_up_all(&mthread->t_ctl_waitq);
+
+		l_wait_event(athread->t_ctl_waitq,
+			     !lfsck_layout_req_empty(llmd) ||
+			     llmd->llmd_exit ||
+			     llmd->llmd_to_post ||
+			     llmd->llmd_to_double_scan,
+			     &lwi);
+
+		if (unlikely(llmd->llmd_exit))
+			GOTO(cleanup1, rc = llmd->llmd_post_result);
+
+		/* New requests may have arrived while we slept; handle
+		 * them before post/double-scan processing. */
+		if (!list_empty(&llmd->llmd_req_list))
+			continue;
+
+		if (llmd->llmd_to_post) {
+			llmd->llmd_to_post = 0;
+			LASSERT(llmd->llmd_post_result > 0);
+
+			/* Broadcast LE_PHASE1_DONE with the result. */
+			memset(lr, 0, sizeof(*lr));
+			lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+			lr->lr_active = LT_LAYOUT;
+			lr->lr_event = LE_PHASE1_DONE;
+			lr->lr_status = llmd->llmd_post_result;
+			rc = lfsck_layout_master_notify_others(env, com, lr);
+			if (rc != 0)
+				CERROR("%s: failed to notify others "
+				       "for layout post: rc = %d\n",
+				       lfsck_lfsck2name(lfsck), rc);
+
+			/* Wakeup the master engine to go ahead. */
+			wake_up_all(&mthread->t_ctl_waitq);
+		}
+
+		if (llmd->llmd_to_double_scan) {
+			llmd->llmd_to_double_scan = 0;
+			atomic_inc(&lfsck->li_double_scan_count);
+			llmd->llmd_in_double_scan = 1;
+			wake_up_all(&mthread->t_ctl_waitq);
+
+			while (llmd->llmd_in_double_scan) {
+				rc = lfsck_layout_master_query_others(env,
+								      com);
+				if (lfsck_layout_master_to_orphan(llmd))
+					goto orphan;
+
+				if (rc < 0)
+					GOTO(cleanup2, rc);
+
+				/* Pull LFSCK status on related targets once
+				 * per 30 seconds if we are not notified. */
+				lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
+							   cfs_time_seconds(1),
+							   NULL, NULL);
+				rc = l_wait_event(athread->t_ctl_waitq,
+					lfsck_layout_master_to_orphan(llmd) ||
+					llmd->llmd_exit ||
+					!thread_is_running(mthread),
+					&lwi);
+
+				if (unlikely(llmd->llmd_exit ||
+					     !thread_is_running(mthread)))
+					GOTO(cleanup2, rc = 0);
+
+				if (rc == -ETIMEDOUT)
+					continue;
+
+				if (rc < 0)
+					GOTO(cleanup2, rc);
+
+orphan:
+				/* XXX: real double scan for ost orphans. */
+
+				GOTO(cleanup2, rc = 1);
+			}
+		}
+	}
+
+cleanup1:
+	/* Cleanup the unfinished requests. */
+	spin_lock(&llmd->llmd_lock);
+	while (!list_empty(&llmd->llmd_req_list)) {
+		llr = list_entry(llmd->llmd_req_list.next,
+				 struct lfsck_layout_req,
+				 llr_list);
+		list_del_init(&llr->llr_list);
+		llmd->llmd_prefetched--;
+		/* Drop the lock around the fini, which may block. */
+		spin_unlock(&llmd->llmd_lock);
+		lfsck_layout_req_fini(env, llr);
+		spin_lock(&llmd->llmd_lock);
+	}
+	spin_unlock(&llmd->llmd_lock);
+
+	LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
+		 llmd->llmd_prefetched);
+
+	/* Wait for all in-flight RPCs before tearing down. */
+	l_wait_event(athread->t_ctl_waitq,
+		     atomic_read(&llmd->llmd_rpcs_in_flight) == 0,
+		     &lwi);
+
+cleanup2:
+	/* Broadcast the final event: phase-2 done, stop, or failure. */
+	memset(lr, 0, sizeof(*lr));
+	lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+	lr->lr_active = LT_LAYOUT;
+	if (rc > 0) {
+		lr->lr_event = LE_PHASE2_DONE;
+		lr->lr_status = rc;
+	} else if (rc == 0) {
+		lr->lr_event = LE_STOP;
+		if (lfsck->li_paused)
+			lr->lr_status = LS_CO_PAUSED;
+		else
+			lr->lr_status = LS_CO_STOPPED;
+	} else {
+		lr->lr_event = LE_STOP;
+		lr->lr_status = LS_CO_FAILED;
+	}
+
+	rc1 = lfsck_layout_master_notify_others(env, com, lr);
+	if (rc1 != 0) {
+		CERROR("%s: failed to notify others for layout quit: rc = %d\n",
+		       lfsck_lfsck2name(lfsck), rc1);
+		rc = rc1;
+	}
+
+	/* Under force exit case, some requests may be just freed without
+	 * verification, those objects should be re-handled when next run.
+	 * So not update the on-disk tracing file under such case. */
+	if (!llmd->llmd_exit)
+		rc1 = lfsck_layout_double_scan_result(env, com, rc);
+
+fini:
+	if (llmd->llmd_in_double_scan)
+		atomic_dec(&lfsck->li_double_scan_count);
+
+	/* Publish the final status and wake the master engine. */
+	spin_lock(&llmd->llmd_lock);
+	llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
+	thread_set_flags(athread, SVC_STOPPED);
+	wake_up_all(&mthread->t_ctl_waitq);
+	spin_unlock(&llmd->llmd_lock);
+	lfsck_thread_args_fini(lta);
+
+	return rc;
+}
+