/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License version 2 for more details.  A copy is
 * included in the COPYING file that accompanied this code.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (c) 2017, Intel Corporation.
 */
/*
 * lustre/osd-zfs/osd_scrub.c
 *
 * Top-level entry points into the osd module.
 *
 * The OI scrub is used for rebuilding Object Index files when the MDT is
 * restored from file-level backup.
 *
 * The otable-based iterator scans ZFS objects to feed the upper layer LFSCK.
 *
 * Author: Fan Yong <fan.yong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_LFSCK

#include <linux/kthread.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <lustre_disk.h>
#include <dt_object.h>
#include <linux/xattr.h>
#include <lustre_scrub.h>
#include <obd_class.h>
#include <lustre_nodemap.h>
#include <sys/dsl_dataset.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>

#include "osd_internal.h"
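/*
 * OSD_OTABLE_MAX_HASH bounds the 48-bit iteration cookie exchanged with the
 * upper layer LFSCK through osd_otable_it_load()/osd_otable_it_store().
 * OTABLE_PREFETCH is the number of dnodes kept prefetched ahead of the
 * iterator position.  The DTO_INDEX_* codes select the operation that
 * osd_scrub_refresh_mapping() applies to an OI mapping.
 */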
#define OSD_OTABLE_MAX_HASH	((1ULL << 48) - 1)
#define OTABLE_PREFETCH		256

#define DTO_INDEX_INSERT	1
#define DTO_INDEX_DELETE	2
#define DTO_INDEX_UPDATE	3
static inline bool osd_scrub_has_window(struct osd_otable_it *it)
{
	return it->ooi_prefetched < OTABLE_PREFETCH;
}
/*
 * Update/insert/delete the specified OI mapping (@fid => @oid) according to
 * @ops.
 *
 * \retval 1, changed nothing
 * \retval 0, changed successfully
 * \retval -ve, on error
 */
static int osd_scrub_refresh_mapping(const struct lu_env *env,
				     struct osd_device *dev,
				     const struct lu_fid *fid,
				     uint64_t oid, int ops,
				     bool force, const char *name)
{
	struct osd_thread_info *info = osd_oti_get(env);
	struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
	char *buf = info->oti_str;
	dmu_tx_t *tx = NULL;
	dnode_t *dn = NULL;
	uint64_t zapid;
	int rc;
	ENTRY;

	if (dev->od_scrub.os_file.sf_param & SP_DRYRUN && !force)
		GOTO(log, rc = 0);

	tx = dmu_tx_create(dev->od_os);
	if (!tx)
		GOTO(log, rc = -ENOMEM);

	zapid = osd_get_name_n_idx(env, dev, fid, buf,
				   sizeof(info->oti_str), &dn);
	osd_tx_hold_zap(tx, zapid, dn,
			ops == DTO_INDEX_INSERT ? TRUE : FALSE, NULL);
	rc = -dmu_tx_assign(tx, TXG_WAIT);
	if (rc) {
		dmu_tx_abort(tx);
		GOTO(log, rc);
	}
	switch (ops) {
	case DTO_INDEX_UPDATE:
		zde->zde_pad = 0;
		zde->zde_dnode = oid;
		zde->zde_type = 0; /* The type in the OI mapping is unused. */
		rc = -zap_update(dev->od_os, zapid, buf, 8, sizeof(*zde) / 8,
				 (void *)zde, tx);
		if (unlikely(rc == -ENOENT)) {
			/* Some unlink thread may have removed the OI mapping. */
			rc = 1;
		}
		break;
	case DTO_INDEX_INSERT:
		zde->zde_pad = 0;
		zde->zde_dnode = oid;
		zde->zde_type = 0; /* The type in the OI mapping is unused. */
		rc = osd_zap_add(dev, zapid, dn, buf, 8, sizeof(*zde) / 8,
				 (void *)zde, tx);
		if (unlikely(rc == -EEXIST))
			rc = 1;
		break;
	case DTO_INDEX_DELETE:
		rc = osd_zap_remove(dev, zapid, dn, buf, tx);
		if (rc == -ENOENT) {
			/* It is normal that the unlink thread has already
			 * removed the OI mapping. */
			rc = 1;
		}
		break;
	default:
		LASSERTF(0, "Unexpected ops %d\n", ops);
		break;
	}

	dmu_tx_commit(tx);
	GOTO(log, rc);
log:
	CDEBUG(D_LFSCK, "%s: refresh OI map for scrub, op %d, force %s, "
	       DFID" => %llu (%s): rc = %d\n", osd_name(dev), ops,
	       force ? "yes" : "no", PFID(fid), oid, name ? name : "null", rc);

	RETURN(rc);
}
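/*
 * Check one scanned object: look up the OI table by the object's FID and,
 * when the mapping is missing or points at a different dnode, re-insert or
 * update it via osd_scrub_refresh_mapping().  Items queued on the
 * inconsistent-items list (os_in_prior) are handled ahead of the regular
 * scan position.
 */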
static int
osd_scrub_check_update(const struct lu_env *env, struct osd_device *dev,
		       const struct lu_fid *fid, uint64_t oid, int val)
{
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct scrub_file *sf = &scrub->os_file;
	struct osd_inconsistent_item *oii = NULL;
	nvlist_t *nvbuf = NULL;
	dnode_t *dn = NULL;
	uint64_t oid2 = 0;
	int ops = DTO_INDEX_UPDATE;
	int rc;
	ENTRY;
	down_write(&scrub->os_rwsem);
	scrub->os_new_checked++;
	if (val < 0)
		GOTO(out, rc = val);

	if (scrub->os_in_prior)
		oii = list_entry(scrub->os_inconsistent_items.next,
				 struct osd_inconsistent_item, oii_list);

	if (oid < sf->sf_pos_latest_start && !oii)
		GOTO(out, rc = 0);

	if (oii && oii->oii_insert) {
		ops = DTO_INDEX_INSERT;
		goto zget;
	}

	rc = osd_fid_lookup(env, dev, fid, &oid2);
	if (rc) {
		if (rc != -ENOENT)
			GOTO(out, rc);

		ops = DTO_INDEX_INSERT;
zget:
		rc = __osd_obj2dnode(dev->od_os, oid, &dn);
		if (rc) {
			/* Someone removed the object by race. */
			if (rc == -ENOENT || rc == -EEXIST)
				rc = 0;
			GOTO(out, rc);
		}

		scrub->os_full_speed = 1;
		sf->sf_flags |= SF_INCONSISTENT;
	} else if (oid == oid2) {
		GOTO(out, rc = 0);
	} else {
		struct lustre_mdt_attrs *lma = NULL;
		int size;

		rc = __osd_xattr_load_by_oid(dev, oid2, &nvbuf);
		if (rc == -ENOENT || rc == -EEXIST || rc == -ENODATA)
			goto update;
		if (rc)
			GOTO(out, rc);

		rc = -nvlist_lookup_byte_array(nvbuf, XATTR_NAME_LMA,
					       (uchar_t **)&lma, &size);
		if (rc == -ENOENT || rc == -EEXIST || rc == -ENODATA)
			goto update;
		if (rc)
			GOTO(out, rc);

		lustre_lma_swab(lma);
		if (unlikely(lu_fid_eq(&lma->lma_self_fid, fid))) {
			CDEBUG(D_LFSCK, "%s: the FID "DFID" is used by two objects: %llu and %llu (in OI)\n",
			       osd_name(dev), PFID(fid), oid, oid2);

			GOTO(out, rc = -EEXIST);
		}

update:
		scrub->os_full_speed = 1;
		sf->sf_flags |= SF_INCONSISTENT;
	}
	rc = osd_scrub_refresh_mapping(env, dev, fid, oid, ops, false, NULL);
	if (!rc) {
		if (scrub->os_in_prior)
			sf->sf_items_updated_prior++;
		else
			sf->sf_items_updated++;
	}

	GOTO(out, rc);

out:
	if (rc < 0) {
		sf->sf_items_failed++;
		if (sf->sf_pos_first_inconsistent == 0 ||
		    sf->sf_pos_first_inconsistent > oid)
			sf->sf_pos_first_inconsistent = oid;
	} else {
		rc = 0;
	}
	/* There may be a conflicting unlink during the OI scrub; if that
	 * happened, remove the newly added OI mapping. */
	if (ops == DTO_INDEX_INSERT && dn && dn->dn_free_txg)
		osd_scrub_refresh_mapping(env, dev, fid, oid,
					  DTO_INDEX_DELETE, false, NULL);
	up_write(&scrub->os_rwsem);

	if (dn)
		osd_dnode_rele(dn);
	if (nvbuf)
		nvlist_free(nvbuf);

	if (oii) {
		spin_lock(&scrub->os_lock);
		if (likely(!list_empty(&oii->oii_list)))
			list_del(&oii->oii_list);
		spin_unlock(&scrub->os_lock);
		OBD_FREE_PTR(oii);
	}

	RETURN(sf->sf_param & SP_FAILOUT ? rc : 0);
}
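/*
 * Prepare a scrub run: apply the requested start flags to the on-disk scrub
 * file, pick the scan start position (resume point, first known
 * inconsistency for a dropped dryrun, or position 1), and mark the scrub
 * thread as running.
 */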
static int osd_scrub_prep(const struct lu_env *env, struct osd_device *dev)
{
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct ptlrpc_thread *thread = &scrub->os_thread;
	struct scrub_file *sf = &scrub->os_file;
	__u32 flags = scrub->os_start_flags;
	int rc;
	bool drop_dryrun = false;
	ENTRY;

	CDEBUG(D_LFSCK, "%s: OI scrub prep, flags = 0x%x\n",
	       scrub->os_name, flags);
	down_write(&scrub->os_rwsem);
	if (flags & SS_SET_FAILOUT)
		sf->sf_param |= SP_FAILOUT;
	else if (flags & SS_CLEAR_FAILOUT)
		sf->sf_param &= ~SP_FAILOUT;

	if (flags & SS_SET_DRYRUN) {
		sf->sf_param |= SP_DRYRUN;
	} else if (flags & SS_CLEAR_DRYRUN && sf->sf_param & SP_DRYRUN) {
		sf->sf_param &= ~SP_DRYRUN;
		drop_dryrun = true;
	}

	if (flags & SS_RESET)
		scrub_file_reset(scrub, dev->od_uuid, 0);

	scrub->os_partial_scan = 0;
	if (flags & SS_AUTO_FULL) {
		scrub->os_full_speed = 1;
		sf->sf_flags |= SF_AUTO;
	} else if (sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
				   SF_UPGRADE)) {
		scrub->os_full_speed = 1;
	} else {
		scrub->os_full_speed = 0;
	}
	spin_lock(&scrub->os_lock);
	scrub->os_in_prior = 0;
	scrub->os_waiting = 0;
	scrub->os_paused = 0;
	scrub->os_in_join = 0;
	scrub->os_full_scrub = 0;
	spin_unlock(&scrub->os_lock);
	scrub->os_new_checked = 0;
	if (drop_dryrun && sf->sf_pos_first_inconsistent != 0)
		sf->sf_pos_latest_start = sf->sf_pos_first_inconsistent;
	else if (sf->sf_pos_last_checkpoint != 0)
		sf->sf_pos_latest_start = sf->sf_pos_last_checkpoint + 1;
	else
		sf->sf_pos_latest_start = 1;

	scrub->os_pos_current = sf->sf_pos_latest_start;
	sf->sf_status = SS_SCANNING;
	sf->sf_time_latest_start = ktime_get_real_seconds();
	sf->sf_time_last_checkpoint = sf->sf_time_latest_start;
	sf->sf_pos_last_checkpoint = sf->sf_pos_latest_start - 1;
	rc = scrub_file_store(env, scrub);

	spin_lock(&scrub->os_lock);
	thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&scrub->os_lock);
	wake_up_all(&thread->t_ctl_waitq);
	up_write(&scrub->os_rwsem);

	RETURN(rc);
}
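/*
 * Finish a scrub run: fold the final batch of counters into the scrub file,
 * set the terminal status (completed, stopped, paused or failed, depending
 * on @result), and store the file.
 */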
static int osd_scrub_post(const struct lu_env *env, struct osd_device *dev,
			  int result)
{
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct scrub_file *sf = &scrub->os_file;
	int rc;
	ENTRY;

	CDEBUG(D_LFSCK, "%s: OI scrub post with result = %d\n",
	       scrub->os_name, result);
	down_write(&scrub->os_rwsem);
	spin_lock(&scrub->os_lock);
	thread_set_flags(&scrub->os_thread, SVC_STOPPING);
	spin_unlock(&scrub->os_lock);
	if (scrub->os_new_checked > 0) {
		sf->sf_items_checked += scrub->os_new_checked;
		scrub->os_new_checked = 0;
		sf->sf_pos_last_checkpoint = scrub->os_pos_current;
	}

	sf->sf_time_last_checkpoint = ktime_get_real_seconds();
	if (result > 0) {
		sf->sf_status = SS_COMPLETED;
		if (!(sf->sf_param & SP_DRYRUN)) {
			memset(sf->sf_oi_bitmap, 0, SCRUB_OI_BITMAP_SIZE);
			sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
					  SF_UPGRADE | SF_AUTO);
		}
		sf->sf_time_last_complete = sf->sf_time_last_checkpoint;
		sf->sf_success_count++;
	} else if (result == 0) {
		if (scrub->os_paused)
			sf->sf_status = SS_PAUSED;
		else
			sf->sf_status = SS_STOPPED;
	} else {
		sf->sf_status = SS_FAILED;
	}
	sf->sf_run_time += ktime_get_seconds() -
			   scrub->os_time_last_checkpoint;

	rc = scrub_file_store(env, scrub);
	up_write(&scrub->os_rwsem);

	RETURN(rc < 0 ? rc : result);
}
/* iteration engine */

static int
osd_scrub_wakeup(struct lustre_scrub *scrub, struct osd_otable_it *it)
{
	spin_lock(&scrub->os_lock);
	if (osd_scrub_has_window(it) ||
	    !list_empty(&scrub->os_inconsistent_items) ||
	    it->ooi_waiting || !thread_is_running(&scrub->os_thread))
		scrub->os_waiting = 0;
	else
		scrub->os_waiting = 1;
	spin_unlock(&scrub->os_lock);

	return !scrub->os_waiting;
}
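/*
 * Produce the next (FID, dnode) pair to check.  Entries queued on the
 * inconsistent-items list are served first; otherwise the regular scan
 * advances via dmu_object_next() and the FID is taken from the object's LMA
 * xattr.  The OBD_FAIL_* hooks simulate delays and crashes for testing.
 */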
static int osd_scrub_next(const struct lu_env *env, struct osd_device *dev,
			  struct lu_fid *fid, uint64_t *oid)
{
	struct l_wait_info lwi = { 0 };
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct ptlrpc_thread *thread = &scrub->os_thread;
	struct osd_otable_it *it = dev->od_otable_it;
	struct lustre_mdt_attrs *lma = NULL;
	nvlist_t *nvbuf = NULL;
	int size = 0;
	int rc = 0;
	ENTRY;

	if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0) {
		lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val), NULL, NULL);
		if (likely(lwi.lwi_timeout > 0)) {
			l_wait_event(thread->t_ctl_waitq,
				     !list_empty(&scrub->os_inconsistent_items) ||
				     !thread_is_running(thread),
				     &lwi);
			if (unlikely(!thread_is_running(thread)))
				RETURN(SCRUB_NEXT_EXIT);
		}
	}
	if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
		spin_lock(&scrub->os_lock);
		thread_set_flags(thread, SVC_STOPPING);
		spin_unlock(&scrub->os_lock);
		RETURN(SCRUB_NEXT_CRASH);
	}

	if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_FATAL))
		RETURN(SCRUB_NEXT_FATAL);
	if (!list_empty(&scrub->os_inconsistent_items)) {
		spin_lock(&scrub->os_lock);
		if (likely(!list_empty(&scrub->os_inconsistent_items))) {
			struct osd_inconsistent_item *oii;

			oii = list_entry(scrub->os_inconsistent_items.next,
					 struct osd_inconsistent_item,
					 oii_list);
			*fid = oii->oii_cache.oic_fid;
			*oid = oii->oii_cache.oic_dnode;
			scrub->os_in_prior = 1;
			spin_unlock(&scrub->os_lock);

			RETURN(0);
		}
		spin_unlock(&scrub->os_lock);
	}
	if (!scrub->os_full_speed && !osd_scrub_has_window(it)) {
		memset(&lwi, 0, sizeof(lwi));
		l_wait_event(thread->t_ctl_waitq,
			     osd_scrub_wakeup(scrub, it),
			     &lwi);
	}

	if (unlikely(!thread_is_running(thread)))
		GOTO(out, rc = SCRUB_NEXT_EXIT);

	rc = -dmu_object_next(dev->od_os, &scrub->os_pos_current, B_FALSE, 0);
	if (rc)
		GOTO(out, rc = (rc == -ESRCH ? SCRUB_NEXT_BREAK : rc));
	rc = __osd_xattr_load_by_oid(dev, scrub->os_pos_current, &nvbuf);
	if (rc == -ENOENT || rc == -EEXIST || rc == -ENODATA)
		GOTO(out, rc = 0);
	if (rc)
		GOTO(out, rc);

	LASSERT(nvbuf != NULL);
	rc = -nvlist_lookup_byte_array(nvbuf, XATTR_NAME_LMA,
				       (uchar_t **)&lma, &size);
	if (!rc) {
		lustre_lma_swab(lma);
		if (likely(!(lma->lma_compat & LMAC_NOT_IN_OI) &&
			   !(lma->lma_incompat & LMAI_AGENT))) {
			*fid = lma->lma_self_fid;
			*oid = scrub->os_pos_current;
		}
	}
	if (!scrub->os_full_speed) {
		spin_lock(&scrub->os_lock);
		it->ooi_prefetched++;
		if (it->ooi_waiting) {
			it->ooi_waiting = 0;
			wake_up_all(&thread->t_ctl_waitq);
		}
		spin_unlock(&scrub->os_lock);
	}

	GOTO(out, rc = 0);

out:
	if (nvbuf)
		nvlist_free(nvbuf);

	RETURN(rc);
}
static int osd_scrub_exec(const struct lu_env *env, struct osd_device *dev,
			  const struct lu_fid *fid, uint64_t oid, int rc)
{
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct ptlrpc_thread *thread = &scrub->os_thread;
	struct osd_otable_it *it = dev->od_otable_it;

	rc = osd_scrub_check_update(env, dev, fid, oid, rc);
	if (!scrub->os_in_prior) {
		if (!scrub->os_full_speed) {
			spin_lock(&scrub->os_lock);
			it->ooi_prefetched++;
			if (it->ooi_waiting) {
				it->ooi_waiting = 0;
				wake_up_all(&thread->t_ctl_waitq);
			}
			spin_unlock(&scrub->os_lock);
		}
	} else {
		scrub->os_in_prior = 0;
	}

	if (rc)
		return rc;
	rc = scrub_checkpoint(env, scrub);
	if (rc) {
		CDEBUG(D_LFSCK, "%s: failed to checkpoint, pos = %llu: rc = %d\n",
		       scrub->os_name, scrub->os_pos_current, rc);
		/* Continue, as long as the scrub itself can go ahead. */
		rc = 0;
	}

	return rc;
}
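/*
 * Body of the scrub thread: prepare the scrub file, then loop fetching the
 * next object with osd_scrub_next() and checking/repairing its OI mapping
 * with osd_scrub_exec() until the scan completes, fails, or is stopped.
 */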
static int osd_scrub_main(void *args)
{
	struct lu_env env;
	struct osd_device *dev = (struct osd_device *)args;
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct ptlrpc_thread *thread = &scrub->os_thread;
	struct lu_fid *fid;
	uint64_t oid = 0;
	int rc = 0;
	ENTRY;

	rc = lu_env_init(&env, LCT_LOCAL | LCT_DT_THREAD);
	if (rc) {
		CDEBUG(D_LFSCK, "%s: OI scrub failed to init env: rc = %d\n",
		       scrub->os_name, rc);
		GOTO(noenv, rc);
	}

	rc = osd_scrub_prep(&env, dev);
	if (rc) {
		CDEBUG(D_LFSCK, "%s: OI scrub failed to prep: rc = %d\n",
		       scrub->os_name, rc);
		GOTO(out, rc);
	}
	if (!scrub->os_full_speed) {
		struct l_wait_info lwi = { 0 };
		struct osd_otable_it *it = dev->od_otable_it;

		l_wait_event(thread->t_ctl_waitq,
			     it->ooi_user_ready || !thread_is_running(thread),
			     &lwi);
		if (unlikely(!thread_is_running(thread)))
			GOTO(post, rc = 0);

		scrub->os_pos_current = it->ooi_pos;
	}

	CDEBUG(D_LFSCK, "%s: OI scrub start, flags = 0x%x, pos = %llu\n",
	       scrub->os_name, scrub->os_start_flags,
	       scrub->os_pos_current);
	fid = &osd_oti_get(&env)->oti_fid;
	while (!rc && thread_is_running(thread)) {
		rc = osd_scrub_next(&env, dev, fid, &oid);
		switch (rc) {
		case SCRUB_NEXT_EXIT:
			GOTO(post, rc = 0);
		case SCRUB_NEXT_CRASH:
			spin_lock(&scrub->os_lock);
			thread_set_flags(&scrub->os_thread, SVC_STOPPING);
			spin_unlock(&scrub->os_lock);
			GOTO(out, rc = -EINVAL);
		case SCRUB_NEXT_FATAL:
			GOTO(post, rc = -EINVAL);
		case SCRUB_NEXT_BREAK:
			GOTO(post, rc = 1);
		}

		rc = osd_scrub_exec(&env, dev, fid, oid, rc);
	}
post:
	rc = osd_scrub_post(&env, dev, rc);
	CDEBUG(D_LFSCK, "%s: OI scrub: stop, pos = %llu: rc = %d\n",
	       scrub->os_name, scrub->os_pos_current, rc);

out:
	while (!list_empty(&scrub->os_inconsistent_items)) {
		struct osd_inconsistent_item *oii;

		oii = list_entry(scrub->os_inconsistent_items.next,
				 struct osd_inconsistent_item, oii_list);
		list_del_init(&oii->oii_list);
		OBD_FREE_PTR(oii);
	}

	lu_env_fini(&env);

noenv:
	spin_lock(&scrub->os_lock);
	thread_set_flags(thread, SVC_STOPPED);
	wake_up_all(&thread->t_ctl_waitq);
	spin_unlock(&scrub->os_lock);
	return rc;
}
/* initial OI scrub */

typedef int (*handle_dirent_t)(const struct lu_env *, struct osd_device *,
			       const char *, uint64_t, uint64_t,
			       enum osd_lf_flags, bool);
static int osd_ios_varfid_hd(const struct lu_env *, struct osd_device *,
			     const char *, uint64_t, uint64_t,
			     enum osd_lf_flags, bool);
static int osd_ios_uld_hd(const struct lu_env *, struct osd_device *,
			  const char *, uint64_t, uint64_t,
			  enum osd_lf_flags, bool);

typedef int (*scan_dir_t)(const struct lu_env *, struct osd_device *,
			  uint64_t, handle_dirent_t, enum osd_lf_flags);
static int osd_ios_general_sd(const struct lu_env *, struct osd_device *,
			      uint64_t, handle_dirent_t, enum osd_lf_flags);
static int osd_ios_ROOT_sd(const struct lu_env *, struct osd_device *,
			   uint64_t, handle_dirent_t, enum osd_lf_flags);

struct osd_lf_map {
	char			*olm_name;
	struct lu_fid		 olm_fid;
	enum osd_lf_flags	 olm_flags;
	scan_dir_t		 olm_scan_dir;
	handle_dirent_t		 olm_handle_dirent;
};
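/*
 * Each map entry names a local file created at format/first-mount time, the
 * FID it must map to (a zero FID means no OI mapping is expected), and the
 * callbacks used to descend into it.  osd_initial_OI_scrub() walks the
 * table to verify or rebuild OI mappings before the device goes live.
 */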
/* Add newly introduced local files to this list in the future. */
static const struct osd_lf_map osd_lf_maps[] = {
	/* CONFIGS */
	{
		.olm_name	= MOUNT_CONFIGS_DIR,
		.olm_fid	= {
			.f_seq	= FID_SEQ_LOCAL_FILE,
			.f_oid	= MGS_CONFIGS_OID,
		},
		.olm_flags	= OLF_SCAN_SUBITEMS,
		.olm_scan_dir	= osd_ios_general_sd,
		.olm_handle_dirent = osd_ios_varfid_hd,
	},

	/* NIDTBL_VERSIONS */
	{
		.olm_name	= MGS_NIDTBL_DIR,
		.olm_flags	= OLF_SCAN_SUBITEMS,
		.olm_scan_dir	= osd_ios_general_sd,
		.olm_handle_dirent = osd_ios_varfid_hd,
	},

	/* PENDING */
	{
		.olm_name	= MDT_ORPHAN_DIR,
	},

	/* ROOT */
	{
		.olm_name	= "ROOT",
		.olm_fid	= {
			.f_seq	= FID_SEQ_ROOT,
			.f_oid	= FID_OID_ROOT,
		},
		.olm_flags	= OLF_SCAN_SUBITEMS,
		.olm_scan_dir	= osd_ios_ROOT_sd,
	},

	/* fld */
	{
		.olm_name	= "fld",
		.olm_fid	= {
			.f_seq	= FID_SEQ_LOCAL_FILE,
			.f_oid	= FLD_INDEX_OID,
		},
	},

	/* changelog_catalog */
	{
		.olm_name	= CHANGELOG_CATALOG,
	},

	/* changelog_users */
	{
		.olm_name	= CHANGELOG_USERS,
	},

	/* quota_master */
	{
		.olm_name	= QMT_DIR,
		.olm_flags	= OLF_SCAN_SUBITEMS,
		.olm_scan_dir	= osd_ios_general_sd,
		.olm_handle_dirent = osd_ios_varfid_hd,
	},

	/* quota_slave */
	{
		.olm_name	= QSD_DIR,
		.olm_flags	= OLF_SCAN_SUBITEMS,
		.olm_scan_dir	= osd_ios_general_sd,
		.olm_handle_dirent = osd_ios_varfid_hd,
	},

	/* LFSCK */
	{
		.olm_name	= LFSCK_DIR,
		.olm_flags	= OLF_SCAN_SUBITEMS | OLF_NOT_BACKUP,
		.olm_scan_dir	= osd_ios_general_sd,
		.olm_handle_dirent = osd_ios_varfid_hd,
	},

	/* lfsck_bookmark */
	{
		.olm_name	= LFSCK_BOOKMARK,
	},

	/* lfsck_layout */
	{
		.olm_name	= LFSCK_LAYOUT,
	},

	/* lfsck_namespace */
	{
		.olm_name	= LFSCK_NAMESPACE,
	},

	/* OSP update logs update_log{_dir} use f_seq = FID_SEQ_UPDATE_LOG{_DIR}
	 * and f_oid = index for their log files.  See lu_update_log{_dir}_fid()
	 * for more details. */

	/* update_log */
	{
		.olm_name	= "update_log",
		.olm_fid	= {
			.f_seq	= FID_SEQ_UPDATE_LOG,
		},
		.olm_flags	= OLF_IDX_IN_FID,
	},

	/* update_log_dir */
	{
		.olm_name	= "update_log_dir",
		.olm_fid	= {
			.f_seq	= FID_SEQ_UPDATE_LOG_DIR,
		},
		.olm_flags	= OLF_SCAN_SUBITEMS | OLF_IDX_IN_FID,
		.olm_scan_dir	= osd_ios_general_sd,
		.olm_handle_dirent = osd_ios_uld_hd,
	},

	/* hsm_actions */
	{
		.olm_name	= HSM_ACTIONS,
	},

	/* nodemap */
	{
		.olm_name	= LUSTRE_NODEMAP_NAME,
	},

	/* index_backup */
	{
		.olm_name	= INDEX_BACKUP_DIR,
		.olm_fid	= {
			.f_seq	= FID_SEQ_LOCAL_FILE,
			.f_oid	= INDEX_BACKUP_OID,
		},
		.olm_flags	= OLF_SCAN_SUBITEMS | OLF_NOT_BACKUP,
		.olm_scan_dir	= osd_ios_general_sd,
		.olm_handle_dirent = osd_ios_varfid_hd,
	},

	{
		.olm_name	= NULL
	}
};
/* Add newly introduced files under .lustre/ to this list in the future. */
static const struct osd_lf_map osd_dl_maps[] = {
	/* .lustre/fid */
	{
		.olm_name	= "fid",
		.olm_fid	= {
			.f_seq	= FID_SEQ_DOT_LUSTRE,
			.f_oid	= FID_OID_DOT_LUSTRE_OBF,
		},
	},

	/* .lustre/lost+found */
	{
		.olm_name	= "lost+found",
		.olm_fid	= {
			.f_seq	= FID_SEQ_DOT_LUSTRE,
			.f_oid	= FID_OID_DOT_LUSTRE_LPF,
		},
	},

	{
		.olm_name	= NULL
	}
};
struct osd_ios_item {
	struct list_head  oii_list;
	uint64_t	  oii_parent;
	enum osd_lf_flags oii_flags;
	scan_dir_t	  oii_scan_dir;
	handle_dirent_t	  oii_handle_dirent;
};
static int osd_ios_new_item(struct osd_device *dev, uint64_t parent,
			    enum osd_lf_flags flags, scan_dir_t scan_dir,
			    handle_dirent_t handle_dirent)
{
	struct osd_ios_item *item;

	OBD_ALLOC_PTR(item);
	if (!item) {
		CWARN("%s: initial OI scrub failed to add item for %llu\n",
		      osd_name(dev), parent);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&item->oii_list);
	item->oii_parent = parent;
	item->oii_flags = flags;
	item->oii_scan_dir = scan_dir;
	item->oii_handle_dirent = handle_dirent;
	list_add_tail(&item->oii_list, &dev->od_ios_list);

	return 0;
}
static bool osd_index_need_recreate(const struct lu_env *env,
				    struct osd_device *dev, uint64_t oid)
{
	struct osd_thread_info *info = osd_oti_get(env);
	zap_attribute_t *za = &info->oti_za2;
	zap_cursor_t *zc = &info->oti_zc2;
	int rc;
	ENTRY;

	zap_cursor_init_serialized(zc, dev->od_os, oid, 0);
	rc = -zap_cursor_retrieve(zc, za);
	zap_cursor_fini(zc);
	if (rc && rc != -ENOENT)
		RETURN(true);

	RETURN(false);
}
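/*
 * Register a UINT64-keyed (binary) index so that its key/record sizes are
 * known for index backup and restore.  Non-ZAP objects, non-regular
 * objects, micro ZAPs and empty indices are skipped; the sizes are taken
 * from the first leaf entry of the fat ZAP.
 */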
static void osd_ios_index_register(const struct lu_env *env,
				   struct osd_device *osd,
				   const struct lu_fid *fid, uint64_t oid)
{
	struct osd_thread_info *info = osd_oti_get(env);
	zap_attribute_t *za = &info->oti_za2;
	zap_cursor_t *zc = &info->oti_zc2;
	struct zap_leaf_entry *le;
	dnode_t *dn = NULL;
	sa_handle_t *hdl = NULL;
	__u64 mode = 0;
	__u32 keysize = 0;
	__u32 recsize = 0;
	int rc;
	ENTRY;

	rc = __osd_obj2dnode(osd->od_os, oid, &dn);
	if (rc == -EEXIST || rc == -ENOENT)
		RETURN_EXIT;

	if (rc < 0)
		GOTO(log, rc);

	if (!osd_object_is_zap(dn))
		GOTO(log, rc = 1);

	rc = -sa_handle_get(osd->od_os, oid, NULL, SA_HDL_PRIVATE, &hdl);
	if (rc)
		GOTO(log, rc);

	rc = -sa_lookup(hdl, SA_ZPL_MODE(osd), &mode, sizeof(mode));
	sa_handle_destroy(hdl);
	if (rc)
		GOTO(log, rc);

	if (!S_ISREG(mode))
		GOTO(log, rc = 1);

	zap_cursor_init_serialized(zc, osd->od_os, oid, 0);
	rc = -zap_cursor_retrieve(zc, za);
	if (rc)
		/* Skip empty index object */
		GOTO(fini, rc = (rc == -ENOENT ? 1 : rc));

	if (zc->zc_zap->zap_ismicro ||
	    !(zap_f_phys(zc->zc_zap)->zap_flags & ZAP_FLAG_UINT64_KEY))
		GOTO(fini, rc = 1);

	le = ZAP_LEAF_ENTRY(zc->zc_leaf, 0);
	keysize = le->le_name_numints * 8;
	recsize = za->za_integer_length * za->za_num_integers;
	if (likely(keysize && recsize))
		rc = osd_index_register(osd, fid, keysize, recsize);

	GOTO(fini, rc);

fini:
	zap_cursor_fini(zc);

log:
	if (dn)
		osd_dnode_rele(dn);
	if (rc < 0)
		CWARN("%s: failed to register index "DFID" (%u/%u): rc = %d\n",
		      osd_name(osd), PFID(fid), keysize, recsize, rc);
	else if (!rc)
		CDEBUG(D_LFSCK, "%s: registered index "DFID" (%u/%u)\n",
		       osd_name(osd), PFID(fid), keysize, recsize);
}
static void osd_index_restore(const struct lu_env *env, struct osd_device *dev,
			      struct lustre_index_restore_unit *liru, void *buf,
			      int bufsize)
{
	struct luz_direntry *zde = &osd_oti_get(env)->oti_zde;
	struct lu_fid *tgt_fid = &liru->liru_cfid;
	struct lu_fid bak_fid;
	int rc;
	ENTRY;

	lustre_fid2lbx(buf, tgt_fid, bufsize);
	rc = -zap_lookup(dev->od_os, dev->od_index_backup_id, buf, 8,
			 sizeof(*zde) / 8, (void *)zde);
	if (rc)
		GOTO(log, rc);

	rc = osd_get_fid_by_oid(env, dev, zde->lzd_reg.zde_dnode, &bak_fid);
	if (rc)
		GOTO(log, rc);

	/* The OI mapping for the index may be invalid, since the index will
	 * be re-created; do not update the OI mapping, just cache it in RAM. */
	rc = osd_idc_find_and_init_with_oid(env, dev, tgt_fid,
					    zde->lzd_reg.zde_dnode);
	if (!rc)
		rc = lustre_index_restore(env, &dev->od_dt_dev,
					  &liru->liru_pfid, tgt_fid, &bak_fid,
					  liru->liru_name,
					  &dev->od_index_backup_list,
					  &dev->od_lock, buf, bufsize);
	GOTO(log, rc);

log:
	CDEBUG(D_WARNING, "%s: restore index '%s' with "DFID": rc = %d\n",
	       osd_name(dev), liru->liru_name, PFID(tgt_fid), rc);
}
/*
 * Verify FID-in-LMA and the OI entry for one object.
 *
 * ios: Initial OI Scrub.
 */
static int osd_ios_scan_one(const struct lu_env *env, struct osd_device *dev,
			    const struct lu_fid *fid, uint64_t parent,
			    uint64_t oid, const char *name,
			    enum osd_lf_flags flags)
{
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct scrub_file *sf = &scrub->os_file;
	struct lustre_mdt_attrs *lma = NULL;
	nvlist_t *nvbuf = NULL;
	struct lu_fid tfid;
	uint64_t oid2 = 0;
	__u64 flag = 0;
	int size = 0;
	int op = 0;
	int rc;
	ENTRY;

	rc = __osd_xattr_load_by_oid(dev, oid, &nvbuf);
	if (unlikely(rc == -ENOENT || rc == -EEXIST))
		RETURN(0);

	if (rc && rc != -ENODATA) {
		CWARN("%s: initial OI scrub failed to get lma for %llu: rc = %d\n",
		      osd_name(dev), oid, rc);

		RETURN(rc);
	}

	if (!rc) {
		LASSERT(nvbuf != NULL);
		rc = -nvlist_lookup_byte_array(nvbuf, XATTR_NAME_LMA,
					       (uchar_t **)&lma, &size);
		if (rc || size == 0) {
			LASSERT(lma == NULL);
			rc = -ENODATA;
		} else {
			LASSERTF(lma != NULL, "corrupted LMA, size %d\n", size);
			lustre_lma_swab(lma);
			if (lma->lma_compat & LMAC_NOT_IN_OI) {
				nvlist_free(nvbuf);
				RETURN(0);
			}

			if (lma->lma_compat & LMAC_IDX_BACKUP &&
			    osd_index_need_recreate(env, dev, oid)) {
				if (parent == dev->od_root) {
					lu_local_obj_fid(&tfid,
							 OSD_FS_ROOT_OID);
				} else {
					rc = osd_get_fid_by_oid(env, dev,
								parent, &tfid);
					if (rc) {
						nvlist_free(nvbuf);
						RETURN(rc);
					}
				}

				rc = lustre_liru_new(
						&dev->od_index_restore_list,
						&tfid, &lma->lma_self_fid, oid,
						name, strlen(name));
				nvlist_free(nvbuf);
				RETURN(rc);
			}

			tfid = lma->lma_self_fid;
			if (!(flags & OLF_NOT_BACKUP))
				osd_ios_index_register(env, dev, &tfid, oid);
		}
	}
	if (rc == -ENODATA) {
		if (!fid) {
			/* Skip the object without FID-in-LMA */
			CDEBUG(D_LFSCK, "%s: %llu has no FID-in-LMA, skip it\n",
			       osd_name(dev), oid);

			if (nvbuf)
				nvlist_free(nvbuf);
			RETURN(0);
		}

		LASSERT(!fid_is_zero(fid));

		tfid = *fid;
		if (flags & OLF_IDX_IN_FID) {
			LASSERT(dev->od_index >= 0);

			tfid.f_oid = dev->od_index;
		}
	}

	if (nvbuf)
		nvlist_free(nvbuf);
	rc = osd_fid_lookup(env, dev, &tfid, &oid2);
	if (rc) {
		if (rc != -ENOENT) {
			CWARN("%s: initial OI scrub failed to lookup fid for "
			      DFID"=>%llu: rc = %d\n",
			      osd_name(dev), PFID(&tfid), oid, rc);

			RETURN(rc);
		}

		flag = SF_RECREATED;
		op = DTO_INDEX_INSERT;
	} else {
		if (oid == oid2)
			RETURN(0);

		flag = SF_INCONSISTENT;
		op = DTO_INDEX_UPDATE;
	}

	if (!(sf->sf_flags & flag)) {
		scrub_file_reset(scrub, dev->od_uuid, flag);
		rc = scrub_file_store(env, scrub);
		if (rc)
			RETURN(rc);
	}

	rc = osd_scrub_refresh_mapping(env, dev, &tfid, oid, op, true, name);

	RETURN(rc > 0 ? 0 : rc);
}
static int osd_ios_varfid_hd(const struct lu_env *env, struct osd_device *dev,
			     const char *name, uint64_t parent, uint64_t oid,
			     enum osd_lf_flags flags, bool is_dir)
{
	int rc;
	ENTRY;

	rc = osd_ios_scan_one(env, dev, NULL, parent, oid, name, 0);
	if (!rc && is_dir)
		rc = osd_ios_new_item(dev, oid, flags, osd_ios_general_sd,
				      osd_ios_varfid_hd);

	RETURN(rc);
}
static int osd_ios_uld_hd(const struct lu_env *env, struct osd_device *dev,
			  const char *name, uint64_t parent, uint64_t oid,
			  enum osd_lf_flags flags, bool is_dir)
{
	struct lu_fid tfid;
	int rc = 0;
	ENTRY;

	/* skip any non-DFID format name */
	if (name[0] != '[')
		RETURN(0);

	/* skip the leading '[' */
	sscanf(&name[1], SFID, RFID(&tfid));
	if (fid_is_sane(&tfid))
		rc = osd_ios_scan_one(env, dev, &tfid, parent, oid, name, 0);
	else
		rc = -EIO;

	RETURN(rc);
}
/*
 * General scanner for all directories except /ROOT during the initial OI
 * scrub.  It scans the name entries under the given directory one by one.
 * For each entry, it verifies the OI mapping via the given @handle_dirent.
 */
static int osd_ios_general_sd(const struct lu_env *env, struct osd_device *dev,
			      uint64_t parent, handle_dirent_t handle_dirent,
			      enum osd_lf_flags flags)
{
	struct osd_thread_info *info = osd_oti_get(env);
	struct luz_direntry *zde = &info->oti_zde;
	zap_attribute_t *za = &info->oti_za;
	zap_cursor_t *zc = &info->oti_zc;
	int rc;
	ENTRY;

	zap_cursor_init_serialized(zc, dev->od_os, parent, 0);
	rc = -zap_cursor_retrieve(zc, za);
	if (rc == -ENOENT)
		zap_cursor_advance(zc);
	else if (rc)
		GOTO(log, rc);

	while (1) {
		rc = -zap_cursor_retrieve(zc, za);
		if (rc)
			GOTO(log, rc = (rc == -ENOENT ? 0 : rc));

		/* skip any entry that starts with '.' */
		if (likely(za->za_name[0] != '.')) {
			rc = osd_zap_lookup(dev, parent, NULL, za->za_name,
					    za->za_integer_length,
					    sizeof(*zde) / za->za_integer_length,
					    (void *)zde);
			if (rc) {
				CWARN("%s: initial OI scrub failed to lookup %s under %llu: rc = %d\n",
				      osd_name(dev), za->za_name, parent, rc);
				goto next;
			}

			rc = handle_dirent(env, dev, za->za_name, parent,
					   zde->lzd_reg.zde_dnode, flags,
					   S_ISDIR(DTTOIF(zde->lzd_reg.zde_type)) ?
					   true : false);
			CDEBUG(D_LFSCK, "%s: initial OI scrub handled %s under %llu: rc = %d\n",
			       osd_name(dev), za->za_name, parent, rc);
		}

next:
		zap_cursor_advance(zc);
	}

log:
	if (rc)
		CWARN("%s: initial OI scrub failed to scan the directory %llu: rc = %d\n",
		      osd_name(dev), parent, rc);
	zap_cursor_fini(zc);

	RETURN(rc);
}
/*
 * The scanner for the /ROOT directory.  Not all items under /ROOT are
 * scanned during the initial OI scrub; only .lustre and the sub-items
 * under .lustre are handled.
 */
static int osd_ios_ROOT_sd(const struct lu_env *env, struct osd_device *dev,
			   uint64_t parent, handle_dirent_t handle_dirent,
			   enum osd_lf_flags flags)
{
	struct luz_direntry *zde = &osd_oti_get(env)->oti_zde;
	const struct osd_lf_map *map;
	uint64_t oid;
	int rc;
	ENTRY;

	rc = osd_zap_lookup(dev, parent, NULL, dot_lustre_name, 8,
			    sizeof(*zde) / 8, (void *)zde);
	if (rc == -ENOENT) {
		/* The .lustre directory is lost.  That is not fatal.  It can
		 * be re-created in the subsequent MDT start processing. */
		RETURN(0);
	}

	if (rc) {
		CWARN("%s: initial OI scrub failed to find .lustre: rc = %d\n",
		      osd_name(dev), rc);

		RETURN(rc);
	}

	oid = zde->lzd_reg.zde_dnode;
	rc = osd_ios_scan_one(env, dev, &LU_DOT_LUSTRE_FID, parent, oid,
			      dot_lustre_name, 0);
	if (rc)
		RETURN(rc);
	for (map = osd_dl_maps; map->olm_name; map++) {
		rc = osd_zap_lookup(dev, oid, NULL, map->olm_name, 8,
				    sizeof(*zde) / 8, (void *)zde);
		if (rc) {
			if (rc != -ENOENT)
				CWARN("%s: initial OI scrub failed to find the entry %s under .lustre: rc = %d\n",
				      osd_name(dev), map->olm_name, rc);
			else if (!fid_is_zero(&map->olm_fid))
				/* Try to remove the stale OI mapping. */
				osd_scrub_refresh_mapping(env, dev,
							  &map->olm_fid, 0,
							  DTO_INDEX_DELETE,
							  true, map->olm_name);
			continue;
		}

		rc = osd_ios_scan_one(env, dev, &map->olm_fid, oid,
				      zde->lzd_reg.zde_dnode, map->olm_name,
				      map->olm_flags);
		if (rc)
			break;
	}

	RETURN(rc);
}
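/*
 * Drive the whole initial OI scrub: walk osd_lf_maps from the root to
 * verify each known local file, queue sub-directories on od_ios_list for a
 * breadth-first scan, then replay any index restores collected on
 * od_index_restore_list.
 */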
static void osd_initial_OI_scrub(const struct lu_env *env,
				 struct osd_device *dev)
{
	struct luz_direntry *zde = &osd_oti_get(env)->oti_zde;
	const struct osd_lf_map *map;
	int rc;
	ENTRY;

	for (map = osd_lf_maps; map->olm_name; map++) {
		rc = osd_zap_lookup(dev, dev->od_root, NULL, map->olm_name, 8,
				    sizeof(*zde) / 8, (void *)zde);
		if (rc) {
			if (rc != -ENOENT)
				CWARN("%s: initial OI scrub failed to find the entry %s: rc = %d\n",
				      osd_name(dev), map->olm_name, rc);
			else if (!fid_is_zero(&map->olm_fid))
				/* Try to remove the stale OI mapping. */
				osd_scrub_refresh_mapping(env, dev,
							  &map->olm_fid, 0,
							  DTO_INDEX_DELETE,
							  true, map->olm_name);
			continue;
		}

		rc = osd_ios_scan_one(env, dev, &map->olm_fid, dev->od_root,
				      zde->lzd_reg.zde_dnode, map->olm_name,
				      map->olm_flags);
		if (!rc && map->olm_flags & OLF_SCAN_SUBITEMS)
			osd_ios_new_item(dev, zde->lzd_reg.zde_dnode,
					 map->olm_flags, map->olm_scan_dir,
					 map->olm_handle_dirent);
	}
	while (!list_empty(&dev->od_ios_list)) {
		struct osd_ios_item *item;

		item = list_entry(dev->od_ios_list.next,
				  struct osd_ios_item, oii_list);
		list_del_init(&item->oii_list);
		item->oii_scan_dir(env, dev, item->oii_parent,
				   item->oii_handle_dirent, item->oii_flags);
		OBD_FREE_PTR(item);
	}

	if (!list_empty(&dev->od_index_restore_list)) {
		char *buf = NULL;

		OBD_ALLOC_LARGE(buf, INDEX_BACKUP_BUFSIZE);
		if (!buf)
			CERROR("%s: not enough RAM to rebuild indices\n",
			       osd_name(dev));

		while (!list_empty(&dev->od_index_restore_list)) {
			struct lustre_index_restore_unit *liru;

			liru = list_entry(dev->od_index_restore_list.next,
					  struct lustre_index_restore_unit,
					  liru_link);
			list_del(&liru->liru_link);
			if (buf)
				osd_index_restore(env, dev, liru, buf,
						  INDEX_BACKUP_BUFSIZE);
			OBD_FREE(liru, liru->liru_len);
		}

		if (buf)
			OBD_FREE_LARGE(buf, INDEX_BACKUP_BUFSIZE);
	}

	EXIT;
}
/* OI scrub start/stop */

int osd_scrub_start(const struct lu_env *env, struct osd_device *dev,
		    __u32 flags)
{
	int rc;
	ENTRY;

	if (dev->od_dt_dev.dd_rdonly)
		RETURN(-EROFS);

	/* od_otable_sem: prevent concurrent start/stop */
	down(&dev->od_otable_sem);
	rc = scrub_start(osd_scrub_main, &dev->od_scrub, dev, flags);
	up(&dev->od_otable_sem);

	RETURN(rc == -EALREADY ? 0 : rc);
}
void osd_scrub_stop(struct osd_device *dev)
{
	struct lustre_scrub *scrub = &dev->od_scrub;

	/* od_otable_sem: prevent concurrent start/stop */
	down(&dev->od_otable_sem);
	scrub->os_paused = 1;
	scrub_stop(scrub);
	up(&dev->od_otable_sem);
}
/* OI scrub setup/cleanup */

static const char osd_scrub_name[] = "OI_scrub";

int osd_scrub_setup(const struct lu_env *env, struct osd_device *dev)
{
	struct osd_thread_info *info = osd_oti_get(env);
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct scrub_file *sf = &scrub->os_file;
	struct lu_fid *fid = &info->oti_fid;
	struct dt_object *obj;
	uint64_t oid;
	int rc = 0;
	bool dirty = false;
	ENTRY;

	memcpy(dev->od_uuid.b,
	       &dsl_dataset_phys(dev->od_os->os_dsl_dataset)->ds_guid,
	       sizeof(dsl_dataset_phys(dev->od_os->os_dsl_dataset)->ds_guid));
	memset(&dev->od_scrub, 0, sizeof(struct lustre_scrub));
	init_waitqueue_head(&scrub->os_thread.t_ctl_waitq);
	init_rwsem(&scrub->os_rwsem);
	spin_lock_init(&scrub->os_lock);
	INIT_LIST_HEAD(&scrub->os_inconsistent_items);
	scrub->os_name = osd_name(dev);

	/* 'What the @fid is' is not important, because the object
	 * has no OI mapping and is only visible inside the OSD. */
	fid->f_seq = FID_SEQ_IGIF_MAX;
	if (dev->od_is_ost)
		fid->f_oid = ((1 << 31) | dev->od_index) + 1;
	else
		fid->f_oid = dev->od_index + 1;

	rc = osd_obj_find_or_create(env, dev, dev->od_root,
				    osd_scrub_name, &oid, fid, false);
	if (rc)
		RETURN(rc);

	rc = osd_idc_find_and_init_with_oid(env, dev, fid, oid);
	if (rc)
		RETURN(rc);

	obj = lu2dt(lu_object_find_slice(env, osd2lu_dev(dev), fid, NULL));
	if (IS_ERR_OR_NULL(obj))
		RETURN(obj ? PTR_ERR(obj) : -ENOENT);

	obj->do_body_ops = &osd_body_scrub_ops;
	scrub->os_obj = obj;
	rc = scrub_file_load(env, scrub);
	if (rc == -ENOENT || rc == -EFAULT) {
		scrub_file_init(scrub, dev->od_uuid);
		dirty = true;
	} else if (rc < 0) {
		GOTO(cleanup_obj, rc);
	} else {
		if (!uuid_equal(&sf->sf_uuid, &dev->od_uuid)) {
			CDEBUG(D_LFSCK,
			       "%s: UUID has been changed from %pU to %pU\n",
			       osd_name(dev), &sf->sf_uuid, &dev->od_uuid);
			scrub_file_reset(scrub, dev->od_uuid, SF_INCONSISTENT);
			dirty = true;
		} else if (sf->sf_status == SS_SCANNING) {
			sf->sf_status = SS_CRASHED;
			dirty = true;
		}

		if ((sf->sf_oi_count & (sf->sf_oi_count - 1)) != 0) {
			LCONSOLE_WARN("%s: invalid oi count %d, set it to %d\n",
				      osd_name(dev), sf->sf_oi_count,
				      osd_oi_count);
			sf->sf_oi_count = osd_oi_count;
			dirty = true;
		}
	}
	if (sf->sf_pos_last_checkpoint != 0)
		scrub->os_pos_current = sf->sf_pos_last_checkpoint + 1;
	else
		scrub->os_pos_current = 1;

	if (dirty) {
		rc = scrub_file_store(env, scrub);
		if (rc)
			GOTO(cleanup_obj, rc);
	}

	/* Initialize OI files. */
	rc = osd_oi_init(env, dev);
	if (rc < 0)
		GOTO(cleanup_obj, rc);
	if (!dev->od_dt_dev.dd_rdonly)
		osd_initial_OI_scrub(env, dev);

	if (!dev->od_dt_dev.dd_rdonly &&
	    dev->od_auto_scrub_interval != AS_NEVER &&
	    ((sf->sf_status == SS_PAUSED) ||
	     (sf->sf_status == SS_CRASHED &&
	      sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
			      SF_UPGRADE | SF_AUTO)) ||
	     (sf->sf_status == SS_INIT &&
	      sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
			      SF_UPGRADE)))) {
		rc = osd_scrub_start(env, dev, SS_AUTO_FULL);
		if (rc)
			GOTO(cleanup_oi, rc);
	}

	RETURN(0);

cleanup_oi:
	osd_oi_fini(env, dev);
cleanup_obj:
	dt_object_put_nocache(env, scrub->os_obj);
	scrub->os_obj = NULL;

	return rc;
}
void osd_scrub_cleanup(const struct lu_env *env, struct osd_device *dev)
{
	struct lustre_scrub *scrub = &dev->od_scrub;

	LASSERT(!dev->od_otable_it);

	if (scrub->os_obj) {
		osd_scrub_stop(dev);
		dt_object_put_nocache(env, scrub->os_obj);
		scrub->os_obj = NULL;
	}

	if (dev->od_oi_table)
		osd_oi_fini(env, dev);
}
/* object table based iteration APIs */
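/*
 * The otable iterator exposes the scrub scan to the upper layer LFSCK as a
 * dt_index iterator: the scrub thread runs ahead prefetching dnodes, while
 * next()/load() below consume them and hand back the FIDs found in each
 * object's LMA xattr.
 */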
static struct dt_it *osd_otable_it_init(const struct lu_env *env,
					struct dt_object *dt, __u32 attr)
{
	enum dt_otable_it_flags flags = attr >> DT_OTABLE_IT_FLAGS_SHIFT;
	enum dt_otable_it_valid valid = attr & ~DT_OTABLE_IT_FLAGS_MASK;
	struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct osd_otable_it *it;
	__u32 start = 0;
	int rc;
	ENTRY;

	if (dev->od_dt_dev.dd_rdonly)
		RETURN(ERR_PTR(-EROFS));

	/* od_otable_sem: prevent concurrent init/fini */
	down(&dev->od_otable_sem);
	if (dev->od_otable_it)
		GOTO(out, it = ERR_PTR(-EALREADY));

	OBD_ALLOC_PTR(it);
	if (!it)
		GOTO(out, it = ERR_PTR(-ENOMEM));

	if (flags & DOIF_OUTUSED)
		it->ooi_used_outside = 1;

	if (flags & DOIF_RESET)
		start |= SS_RESET;

	if (valid & DOIV_ERROR_HANDLE) {
		if (flags & DOIF_FAILOUT)
			start |= SS_SET_FAILOUT;
		else
			start |= SS_CLEAR_FAILOUT;
	}

	if (valid & DOIV_DRYRUN) {
		if (flags & DOIF_DRYRUN)
			start |= SS_SET_DRYRUN;
		else
			start |= SS_CLEAR_DRYRUN;
	}

	/* XXX: dmu_object_next() does NOT find dnodes allocated
	 *	in the current non-committed txg, so we force txg
	 *	commit to find all existing dnodes ... */
	txg_wait_synced(dmu_objset_pool(dev->od_os), 0ULL);

	dev->od_otable_it = it;
	it->ooi_dev = dev;
	rc = scrub_start(osd_scrub_main, scrub, dev, start & ~SS_AUTO_PARTIAL);
	if (rc == -EALREADY) {
		it->ooi_pos = 1;
	} else if (rc < 0) {
		dev->od_otable_it = NULL;
		OBD_FREE_PTR(it);
		it = ERR_PTR(rc);
	} else {
		it->ooi_pos = scrub->os_pos_current;
	}

	GOTO(out, it);

out:
	up(&dev->od_otable_sem);
	return (struct dt_it *)it;
}
static void osd_otable_it_fini(const struct lu_env *env, struct dt_it *di)
{
	struct osd_otable_it *it = (struct osd_otable_it *)di;
	struct osd_device *dev = it->ooi_dev;

	/* od_otable_sem: prevent concurrent init/fini */
	down(&dev->od_otable_sem);
	scrub_stop(&dev->od_scrub);
	LASSERT(dev->od_otable_it == it);

	dev->od_otable_it = NULL;
	up(&dev->od_otable_sem);

	OBD_FREE_PTR(it);
}
static int osd_otable_it_get(const struct lu_env *env,
			     struct dt_it *di, const struct dt_key *key)
{
	return 0;
}

static void osd_otable_it_put(const struct lu_env *env, struct dt_it *di)
{
}
static void osd_otable_it_preload(const struct lu_env *env,
				  struct osd_otable_it *it)
{
	struct osd_device *dev = it->ooi_dev;
	int rc;

	/* can go negative on the very first access to the iterator
	 * or if some non-Lustre objects were found */
	if (unlikely(it->ooi_prefetched < 0))
		it->ooi_prefetched = 0;

	if (it->ooi_prefetched >= (OTABLE_PREFETCH >> 1))
		return;

	if (it->ooi_prefetched_dnode == 0)
		it->ooi_prefetched_dnode = it->ooi_pos;

	while (it->ooi_prefetched < OTABLE_PREFETCH) {
		rc = -dmu_object_next(dev->od_os, &it->ooi_prefetched_dnode,
				      B_FALSE, 0);
		if (rc)
			break;

		osd_dmu_prefetch(dev->od_os, it->ooi_prefetched_dnode,
				 0, 0, 0, ZIO_PRIORITY_ASYNC_READ);
		it->ooi_prefetched++;
	}
}
static int
osd_otable_it_wakeup(struct lustre_scrub *scrub, struct osd_otable_it *it)
{
	spin_lock(&scrub->os_lock);
	if (it->ooi_pos < scrub->os_pos_current || scrub->os_waiting ||
	    !thread_is_running(&scrub->os_thread))
		it->ooi_waiting = 0;
	else
		it->ooi_waiting = 1;
	spin_unlock(&scrub->os_lock);

	return !it->ooi_waiting;
}
static int osd_otable_it_next(const struct lu_env *env, struct dt_it *di)
{
	struct osd_otable_it *it = (struct osd_otable_it *)di;
	struct osd_device *dev = it->ooi_dev;
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct ptlrpc_thread *thread = &scrub->os_thread;
	struct l_wait_info lwi = { 0 };
	struct lustre_mdt_attrs *lma = NULL;
	nvlist_t *nvbuf = NULL;
	int rc, size = 0;
	ENTRY;

	LASSERT(it->ooi_user_ready);
	fid_zero(&it->ooi_fid);

	if (unlikely(it->ooi_all_cached))
		RETURN(1);

again:
	if (nvbuf) {
		nvlist_free(nvbuf);
		nvbuf = NULL;
		lma = NULL;
		size = 0;
	}

	if (it->ooi_pos >= scrub->os_pos_current)
		l_wait_event(thread->t_ctl_waitq,
			     osd_otable_it_wakeup(scrub, it),
			     &lwi);

	if (!thread_is_running(thread) && !it->ooi_used_outside)
		RETURN(1);

	rc = -dmu_object_next(dev->od_os, &it->ooi_pos, B_FALSE, 0);
	if (rc) {
		if (unlikely(rc == -ESRCH)) {
			it->ooi_all_cached = 1;
			rc = 1;
		}

		RETURN(rc);
	}
	rc = __osd_xattr_load_by_oid(dev, it->ooi_pos, &nvbuf);

	if (!scrub->os_full_speed)
		spin_lock(&scrub->os_lock);
	it->ooi_prefetched--;
	if (!scrub->os_full_speed) {
		if (scrub->os_waiting) {
			scrub->os_waiting = 0;
			wake_up_all(&thread->t_ctl_waitq);
		}
		spin_unlock(&scrub->os_lock);
	}

	if (rc == -ENOENT || rc == -EEXIST || rc == -ENODATA)
		goto again;

	if (rc)
		RETURN(rc);

	LASSERT(nvbuf != NULL);
	rc = -nvlist_lookup_byte_array(nvbuf, XATTR_NAME_LMA,
				       (uchar_t **)&lma, &size);
	if (rc || size == 0)
		/* It is either a non-Lustre object or an OSD internal object;
		 * ignore it and go ahead. */
		goto again;

	LASSERTF(lma != NULL, "corrupted LMA, size %d\n", size);
	lustre_lma_swab(lma);
	if (unlikely(lma->lma_compat & LMAC_NOT_IN_OI ||
		     lma->lma_incompat & LMAI_AGENT))
		goto again;

	it->ooi_fid = lma->lma_self_fid;

	GOTO(out, rc = 0);

out:
	if (nvbuf)
		nvlist_free(nvbuf);

	if (!rc && scrub->os_full_speed)
		osd_otable_it_preload(env, it);

	RETURN(rc);
}
static struct dt_key *osd_otable_it_key(const struct lu_env *env,
					const struct dt_it *di)
{
	return NULL;
}

static int osd_otable_it_key_size(const struct lu_env *env,
				  const struct dt_it *di)
{
	return sizeof(__u64);
}

static int osd_otable_it_rec(const struct lu_env *env, const struct dt_it *di,
			     struct dt_rec *rec, __u32 attr)
{
	struct osd_otable_it *it = (struct osd_otable_it *)di;
	struct lu_fid *fid = (struct lu_fid *)rec;

	*fid = it->ooi_fid;
	return 0;
}
static __u64 osd_otable_it_store(const struct lu_env *env,
				 const struct dt_it *di)
{
	struct osd_otable_it *it = (struct osd_otable_it *)di;

	return it->ooi_pos;
}
/*
 * Set the OSD layer iteration start position as the specified hash.
 */
static int osd_otable_it_load(const struct lu_env *env,
			      const struct dt_it *di, __u64 hash)
{
	struct osd_otable_it *it = (struct osd_otable_it *)di;
	struct osd_device *dev = it->ooi_dev;
	struct lustre_scrub *scrub = &dev->od_scrub;
	int rc;
	ENTRY;

	/* Forbid setting the iteration position after iteration has started. */
	if (it->ooi_user_ready)
		RETURN(-EPERM);

	if (hash > OSD_OTABLE_MAX_HASH)
		hash = OSD_OTABLE_MAX_HASH;

	/* The hash is the last checkpoint position;
	 * we will start from the next one. */
	it->ooi_pos = hash + 1;
	it->ooi_prefetched = 0;
	it->ooi_prefetched_dnode = 0;
	it->ooi_user_ready = 1;
	if (!scrub->os_full_speed)
		wake_up_all(&scrub->os_thread.t_ctl_waitq);

	/* Unplug the OSD layer iteration with the first next() call. */
	rc = osd_otable_it_next(env, (struct dt_it *)it);

	RETURN(rc);
}
static int osd_otable_it_key_rec(const struct lu_env *env,
				 const struct dt_it *di, void *key_rec)
{
	return 0;
}

const struct dt_index_operations osd_otable_ops = {
	.dio_it = {
		.init		= osd_otable_it_init,
		.fini		= osd_otable_it_fini,
		.get		= osd_otable_it_get,
		.put		= osd_otable_it_put,
		.next		= osd_otable_it_next,
		.key		= osd_otable_it_key,
		.key_size	= osd_otable_it_key_size,
		.rec		= osd_otable_it_rec,
		.store		= osd_otable_it_store,
		.load		= osd_otable_it_load,
		.key_rec	= osd_otable_it_key_rec,
	}
};
/* high priority inconsistent items list APIs */
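/*
 * osd_oii_insert() queues an OI inconsistency found during normal operation
 * so that the running scrub repairs it ahead of the regular scan position;
 * osd_oii_lookup() lets OI users resolve a FID from the pending queue
 * before the mapping has been fixed on disk.
 */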
int osd_oii_insert(const struct lu_env *env, struct osd_device *dev,
		   const struct lu_fid *fid, uint64_t oid, bool insert)
{
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct ptlrpc_thread *thread = &scrub->os_thread;
	struct osd_inconsistent_item *oii;
	bool wakeup = false;
	ENTRY;

	osd_idc_find_and_init_with_oid(env, dev, fid, oid);

	OBD_ALLOC_PTR(oii);
	if (unlikely(!oii))
		RETURN(-ENOMEM);

	INIT_LIST_HEAD(&oii->oii_list);
	oii->oii_cache.oic_dev = dev;
	oii->oii_cache.oic_fid = *fid;
	oii->oii_cache.oic_dnode = oid;
	oii->oii_insert = insert;

	spin_lock(&scrub->os_lock);
	if (unlikely(!thread_is_running(thread))) {
		spin_unlock(&scrub->os_lock);
		OBD_FREE_PTR(oii);
		RETURN(-EAGAIN);
	}

	if (list_empty(&scrub->os_inconsistent_items))
		wakeup = true;
	list_add_tail(&oii->oii_list, &scrub->os_inconsistent_items);
	spin_unlock(&scrub->os_lock);

	if (wakeup)
		wake_up_all(&thread->t_ctl_waitq);

	RETURN(0);
}
int osd_oii_lookup(struct osd_device *dev, const struct lu_fid *fid,
		   uint64_t *oid)
{
	struct lustre_scrub *scrub = &dev->od_scrub;
	struct osd_inconsistent_item *oii;
	int ret = -ENOENT;
	ENTRY;

	spin_lock(&scrub->os_lock);
	list_for_each_entry(oii, &scrub->os_inconsistent_items, oii_list) {
		if (lu_fid_eq(fid, &oii->oii_cache.oic_fid)) {
			*oid = oii->oii_cache.oic_dnode;
			ret = 0;
			break;
		}
	}
	spin_unlock(&scrub->os_lock);

	RETURN(ret);
}