4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2012 Whamcloud, Inc.
26 * lustre/osd-ldiskfs/osd_scrub.c
28 * Top-level entry points into osd module
30 * The OI scrub is used for rebuilding Object Index files when the MDT is restored from a file-level backup.
33 * The otable-based iterator scans the ldiskfs inode table to feed the upper-layer LFSCK.
35 * Author: Fan Yong <yong.fan@whamcloud.com>
39 # define EXPORT_SYMTAB
41 #define DEBUG_SUBSYSTEM S_MDS
43 #include <lustre/lustre_idl.h>
44 #include <lustre_disk.h>
45 #include <dt_object.h>
47 #include "osd_internal.h"
49 #include "osd_scrub.h"
51 #define HALF_SEC (CFS_HZ >> 1)
53 static inline struct osd_device *osd_scrub2dev(struct osd_scrub *scrub)
55 return container_of0(scrub, struct osd_device, od_scrub);
58 static inline struct super_block *osd_scrub2sb(struct osd_scrub *scrub)
60 return osd_sb(osd_scrub2dev(scrub));
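/* Convert the on-disk (little-endian) scrub file image into CPU byte order. */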
63 static void osd_scrub_file_to_cpu(struct scrub_file *des,
64 struct scrub_file *src)
66 memcpy(des->sf_uuid, src->sf_uuid, 16);
67 des->sf_flags = le64_to_cpu(src->sf_flags);
68 des->sf_magic = le32_to_cpu(src->sf_magic);
69 des->sf_status = le16_to_cpu(src->sf_status);
70 des->sf_param = le16_to_cpu(src->sf_param);
71 des->sf_time_last_complete =
72 le64_to_cpu(src->sf_time_last_complete);
73 des->sf_time_latest_start =
74 le64_to_cpu(src->sf_time_latest_start);
75 des->sf_time_last_checkpoint =
76 le64_to_cpu(src->sf_time_last_checkpoint);
77 des->sf_pos_latest_start =
78 le64_to_cpu(src->sf_pos_latest_start);
79 des->sf_pos_last_checkpoint =
80 le64_to_cpu(src->sf_pos_last_checkpoint);
81 des->sf_pos_first_inconsistent =
82 le64_to_cpu(src->sf_pos_first_inconsistent);
83 des->sf_items_checked =
84 le64_to_cpu(src->sf_items_checked);
85 des->sf_items_updated =
86 le64_to_cpu(src->sf_items_updated);
87 des->sf_items_failed =
88 le64_to_cpu(src->sf_items_failed);
89 des->sf_items_updated_prior =
90 le64_to_cpu(src->sf_items_updated_prior);
91 des->sf_run_time = le32_to_cpu(src->sf_run_time);
92 des->sf_success_count = le32_to_cpu(src->sf_success_count);
93 des->sf_oi_count = le16_to_cpu(src->sf_oi_count);
94 memcpy(des->sf_oi_bitmap, src->sf_oi_bitmap, SCRUB_OI_BITMAP_SIZE);
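/* Convert the in-memory scrub file into little-endian order for writing to disk. */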
97 static void osd_scrub_file_to_le(struct scrub_file *des,
98 struct scrub_file *src)
100 memcpy(des->sf_uuid, src->sf_uuid, 16);
101 des->sf_flags = cpu_to_le64(src->sf_flags);
102 des->sf_magic = cpu_to_le32(src->sf_magic);
103 des->sf_status = cpu_to_le16(src->sf_status);
104 des->sf_param = cpu_to_le16(src->sf_param);
105 des->sf_time_last_complete =
106 cpu_to_le64(src->sf_time_last_complete);
107 des->sf_time_latest_start =
108 cpu_to_le64(src->sf_time_latest_start);
109 des->sf_time_last_checkpoint =
110 cpu_to_le64(src->sf_time_last_checkpoint);
111 des->sf_pos_latest_start =
112 cpu_to_le64(src->sf_pos_latest_start);
113 des->sf_pos_last_checkpoint =
114 cpu_to_le64(src->sf_pos_last_checkpoint);
115 des->sf_pos_first_inconsistent =
116 cpu_to_le64(src->sf_pos_first_inconsistent);
117 des->sf_items_checked =
118 cpu_to_le64(src->sf_items_checked);
119 des->sf_items_updated =
120 cpu_to_le64(src->sf_items_updated);
121 des->sf_items_failed =
122 cpu_to_le64(src->sf_items_failed);
123 des->sf_items_updated_prior =
124 cpu_to_le64(src->sf_items_updated_prior);
125 des->sf_run_time = cpu_to_le32(src->sf_run_time);
126 des->sf_success_count = cpu_to_le32(src->sf_success_count);
127 des->sf_oi_count = cpu_to_le16(src->sf_oi_count);
128 memcpy(des->sf_oi_bitmap, src->sf_oi_bitmap, SCRUB_OI_BITMAP_SIZE);
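/* Initialize a brand-new scrub file with the given filesystem UUID. */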
131 static void osd_scrub_file_init(struct osd_scrub *scrub, __u8 *uuid)
133 struct scrub_file *sf = &scrub->os_file;
135 memset(sf, 0, sizeof(*sf));
136 memcpy(sf->sf_uuid, uuid, 16);
137 sf->sf_magic = SCRUB_MAGIC_V1;
138 sf->sf_status = SS_INIT;
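/* Reset the scrub file back to SS_INIT and clear the per-run statistics;
 * the given flags are OR-ed into sf_flags. */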
141 void osd_scrub_file_reset(struct osd_scrub *scrub, __u8 *uuid, __u64 flags)
143 struct scrub_file *sf = &scrub->os_file;
145 CDEBUG(D_LFSCK, "Reset OI scrub file, flags = "LPX64"\n", flags);
146 memcpy(sf->sf_uuid, uuid, 16);
147 sf->sf_status = SS_INIT;
148 sf->sf_flags |= flags;
151 sf->sf_time_latest_start = 0;
152 sf->sf_time_last_checkpoint = 0;
153 sf->sf_pos_latest_start = 0;
154 sf->sf_pos_last_checkpoint = 0;
155 sf->sf_pos_first_inconsistent = 0;
156 sf->sf_items_checked = 0;
157 sf->sf_items_updated = 0;
158 sf->sf_items_failed = 0;
159 sf->sf_items_updated_prior = 0;
160 sf->sf_items_noscrub = 0;
161 sf->sf_items_igif = 0;
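/* Read the scrub file from the OI_scrub inode and convert it to CPU byte
 * order; a bad magic is treated as a new scrub file, and an empty file
 * yields -ENOENT. */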
164 static int osd_scrub_file_load(struct osd_scrub *scrub)
167 char *name = LDISKFS_SB(osd_scrub2sb(scrub))->s_es->s_volume_name;
168 int len = sizeof(scrub->os_file_disk);
171 rc = osd_ldiskfs_read(scrub->os_inode, &scrub->os_file_disk, len, &pos);
173 struct scrub_file *sf = &scrub->os_file;
175 osd_scrub_file_to_cpu(sf, &scrub->os_file_disk);
176 if (sf->sf_magic != SCRUB_MAGIC_V1) {
177 CWARN("%.16s: invalid scrub magic 0x%x != 0x%x\n",
178 name, sf->sf_magic, SCRUB_MAGIC_V1);
179 /* Process it as new scrub file. */
184 } else if (rc != 0) {
185 CERROR("%.16s: fail to load scrub file, expected = %d, "
186 "rc = %d\n", name, len, rc);
190 /* Return -ENOENT for the empty scrub file case. */
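/* Write the in-memory scrub file back to disk (little-endian) inside a small
 * journal transaction, then schedule the next checkpoint time. */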
197 int osd_scrub_file_store(struct osd_scrub *scrub)
199 struct osd_device *dev;
202 int len = sizeof(scrub->os_file_disk);
206 dev = container_of0(scrub, struct osd_device, od_scrub);
207 credits = osd_dto_credits_noquota[DTO_WRITE_BASE] +
208 osd_dto_credits_noquota[DTO_WRITE_BLOCK];
209 jh = ldiskfs_journal_start_sb(osd_sb(dev), credits);
212 CERROR("%.16s: fail to start trans for scrub store, rc = %d\n",
213 LDISKFS_SB(osd_scrub2sb(scrub))->s_es->s_volume_name, rc);
217 osd_scrub_file_to_le(&scrub->os_file_disk, &scrub->os_file);
218 rc = osd_ldiskfs_write_record(scrub->os_inode, &scrub->os_file_disk,
220 ldiskfs_journal_stop(jh);
222 CERROR("%.16s: fail to store scrub file, expected = %d, "
224 LDISKFS_SB(osd_scrub2sb(scrub))->s_es->s_volume_name,
226 scrub->os_time_last_checkpoint = cfs_time_current();
227 scrub->os_time_next_checkpoint = scrub->os_time_last_checkpoint +
228 cfs_time_seconds(SCRUB_CHECKPOINT_INTERVAL);
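/* Prepare a scrub run: apply the start flags (failout/reset/auto), pick the
 * start position from the last checkpoint, mark the file SS_SCANNING, store
 * it, and mark the scrub thread as running. */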
232 static int osd_scrub_prep(struct osd_device *dev)
234 struct osd_scrub *scrub = &dev->od_scrub;
235 struct ptlrpc_thread *thread = &scrub->os_thread;
236 struct scrub_file *sf = &scrub->os_file;
237 __u32 flags = scrub->os_start_flags;
241 down_write(&scrub->os_rwsem);
242 if (flags & SS_SET_FAILOUT)
243 sf->sf_param |= SP_FAILOUT;
245 if (flags & SS_CLEAR_FAILOUT)
246 sf->sf_param &= ~SP_FAILOUT;
248 if (flags & SS_RESET)
249 osd_scrub_file_reset(scrub,
250 LDISKFS_SB(osd_sb(dev))->s_es->s_uuid, sf->sf_flags);
252 if (flags & SS_AUTO) {
253 scrub->os_full_speed = 1;
254 sf->sf_flags |= SF_AUTO;
256 scrub->os_full_speed = 0;
259 if (sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT))
260 scrub->os_full_speed = 1;
262 scrub->os_in_prior = 0;
263 scrub->os_waiting = 0;
264 scrub->os_new_checked = 0;
265 if (sf->sf_pos_last_checkpoint != 0)
266 sf->sf_pos_latest_start = sf->sf_pos_last_checkpoint + 1;
268 sf->sf_pos_latest_start = LDISKFS_FIRST_INO(osd_sb(dev));
270 scrub->os_pos_current = sf->sf_pos_latest_start;
271 sf->sf_status = SS_SCANNING;
272 sf->sf_time_latest_start = cfs_time_current_sec();
273 sf->sf_time_last_checkpoint = sf->sf_time_latest_start;
274 rc = osd_scrub_file_store(scrub);
276 spin_lock(&scrub->os_lock);
277 thread_set_flags(thread, SVC_RUNNING);
278 spin_unlock(&scrub->os_lock);
279 cfs_waitq_broadcast(&thread->t_ctl_waitq);
281 up_write(&scrub->os_rwsem);
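/* Account a failed object and remember the lowest inconsistent inode number;
 * the error is returned to the caller only when SP_FAILOUT is set. */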
287 osd_scrub_error(struct osd_device *dev, struct osd_inode_id *lid, int rc)
289 struct osd_scrub *scrub = &dev->od_scrub;
290 struct scrub_file *sf = &scrub->os_file;
292 down_write(&scrub->os_rwsem);
293 scrub->os_new_checked++;
294 sf->sf_items_failed++;
295 if (sf->sf_pos_first_inconsistent == 0 ||
296 sf->sf_pos_first_inconsistent > lid->oii_ino)
297 sf->sf_pos_first_inconsistent = lid->oii_ino;
298 up_write(&scrub->os_rwsem);
299 return sf->sf_param & SP_FAILOUT ? rc : 0;
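/* Verify one object against the OI: look up its FID, compare the cached
 * inode id with the on-disk inode, and insert or update the OI entry inside
 * a journal transaction when they disagree. */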
303 osd_scrub_check_update(struct osd_thread_info *info, struct osd_device *dev,
304 struct osd_idmap_cache *oic)
306 struct osd_scrub *scrub = &dev->od_scrub;
307 struct scrub_file *sf = &scrub->os_file;
308 struct osd_inode_id *lid2 = &info->oti_id;
309 struct lu_fid *oi_fid = &info->oti_fid;
310 struct osd_inode_id *oi_id = &info->oti_id2;
312 struct osd_inconsistent_item *oii = NULL;
313 struct inode *inode = NULL;
314 struct lu_fid *fid = &oic->oic_fid;
315 struct osd_inode_id *lid = &oic->oic_lid;
316 struct iam_container *bag;
317 struct iam_path_descr *ipd;
318 int ops = DTO_INDEX_UPDATE;
323 if (scrub->os_in_prior)
324 oii = cfs_list_entry(oic, struct osd_inconsistent_item,
327 down_write(&scrub->os_rwsem);
328 scrub->os_new_checked++;
329 if (lid->oii_ino < sf->sf_pos_latest_start && oii == NULL)
332 if (oii != NULL && oii->oii_insert)
335 rc = osd_oi_lookup(info, dev, fid, lid2);
341 inode = osd_iget(info, dev, lid);
344 /* Someone removed the inode. */
345 if (rc == -ENOENT || rc == -ESTALE)
350 /* Prevent the inode from being unlinked during OI scrub. */
351 mutex_lock(&inode->i_mutex);
352 if (unlikely(inode->i_nlink == 0)) {
353 mutex_unlock(&inode->i_mutex);
358 ops = DTO_INDEX_INSERT;
359 idx = osd_oi_fid2idx(dev, fid);
360 if (unlikely(!ldiskfs_test_bit(idx, sf->sf_oi_bitmap)))
361 ldiskfs_set_bit(idx, sf->sf_oi_bitmap);
362 sf->sf_flags |= SF_RECREATED;
363 } else if (osd_id_eq(lid, lid2)) {
367 sf->sf_flags |= SF_INCONSISTENT;
368 fid_cpu_to_be(oi_fid, fid);
369 osd_id_pack(oi_id, &oic->oic_lid);
370 jh = ldiskfs_journal_start_sb(osd_sb(dev),
371 osd_dto_credits_noquota[ops]);
374 CERROR("%.16s: fail to start trans for scrub store, rc = %d\n",
375 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, rc);
379 bag = &osd_fid2oi(dev, fid)->oi_dir.od_container;
380 ipd = osd_idx_ipd_get(info->oti_env, bag);
381 if (unlikely(ipd == NULL)) {
382 ldiskfs_journal_stop(jh);
383 CERROR("%.16s: fail to get ipd for scrub store\n",
384 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name);
385 GOTO(out, rc = -ENOMEM);
388 if (ops == DTO_INDEX_UPDATE) {
389 rc = iam_update(jh, bag, (const struct iam_key *)oi_fid,
390 (struct iam_rec *)oi_id, ipd);
392 rc = iam_insert(jh, bag, (const struct iam_key *)oi_fid,
393 (struct iam_rec *)oi_id, ipd);
397 osd_ipd_put(info->oti_env, bag, ipd);
398 ldiskfs_journal_stop(jh);
400 if (scrub->os_in_prior)
401 sf->sf_items_updated_prior++;
403 sf->sf_items_updated++;
410 sf->sf_items_failed++;
411 if (sf->sf_pos_first_inconsistent == 0 ||
412 sf->sf_pos_first_inconsistent > lid->oii_ino)
413 sf->sf_pos_first_inconsistent = lid->oii_ino;
418 if (ops == DTO_INDEX_INSERT) {
419 mutex_unlock(&inode->i_mutex);
422 up_write(&scrub->os_rwsem);
425 LASSERT(!cfs_list_empty(&oii->oii_list));
427 spin_lock(&scrub->os_lock);
428 cfs_list_del_init(&oii->oii_list);
429 spin_unlock(&scrub->os_lock);
432 RETURN(sf->sf_param & SP_FAILOUT ? rc : 0);
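/* Flush the accumulated counters into the scrub file and store it as a
 * checkpoint of the current scan position. */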
435 static int do_osd_scrub_checkpoint(struct osd_scrub *scrub)
437 struct scrub_file *sf = &scrub->os_file;
441 down_write(&scrub->os_rwsem);
442 sf->sf_items_checked += scrub->os_new_checked;
443 scrub->os_new_checked = 0;
444 sf->sf_pos_last_checkpoint = scrub->os_pos_current;
445 sf->sf_time_last_checkpoint = cfs_time_current_sec();
446 sf->sf_run_time += cfs_duration_sec(cfs_time_current() + HALF_SEC -
447 scrub->os_time_last_checkpoint);
448 rc = osd_scrub_file_store(scrub);
449 up_write(&scrub->os_rwsem);
454 static inline int osd_scrub_checkpoint(struct osd_scrub *scrub)
456 if (unlikely(cfs_time_beforeq(scrub->os_time_next_checkpoint,
457 cfs_time_current()) &&
458 scrub->os_new_checked > 0))
459 return do_osd_scrub_checkpoint(scrub);
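/* Finish the current scrub run: take a final checkpoint and record whether
 * the scan completed, was paused/stopped, or failed. */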
463 static void osd_scrub_post(struct osd_scrub *scrub, int result)
465 struct scrub_file *sf = &scrub->os_file;
468 down_write(&scrub->os_rwsem);
469 spin_lock(&scrub->os_lock);
470 thread_set_flags(&scrub->os_thread, SVC_STOPPING);
471 spin_unlock(&scrub->os_lock);
472 if (scrub->os_new_checked > 0) {
473 sf->sf_items_checked += scrub->os_new_checked;
474 scrub->os_new_checked = 0;
475 sf->sf_pos_last_checkpoint = scrub->os_pos_current;
477 sf->sf_time_last_checkpoint = cfs_time_current_sec();
479 sf->sf_status = SS_COMPLETED;
480 memset(sf->sf_oi_bitmap, 0, SCRUB_OI_BITMAP_SIZE);
481 sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT | SF_AUTO);
482 sf->sf_time_last_complete = sf->sf_time_last_checkpoint;
483 sf->sf_success_count++;
484 } else if (result == 0) {
485 if (scrub->os_paused)
486 sf->sf_status = SS_PAUSED;
488 sf->sf_status = SS_STOPPED;
490 sf->sf_status = SS_FAILED;
492 sf->sf_run_time += cfs_duration_sec(cfs_time_current() + HALF_SEC -
493 scrub->os_time_last_checkpoint);
494 result = osd_scrub_file_store(scrub);
496 CERROR("%.16s: fail to osd_scrub_post, rc = %d\n",
497 LDISKFS_SB(osd_scrub2sb(scrub))->s_es->s_volume_name,
499 up_write(&scrub->os_rwsem);
504 #define SCRUB_NEXT_BREAK 1 /* exit current loop and process next group */
505 #define SCRUB_NEXT_CONTINUE 2 /* skip current object and process next bit */
506 #define SCRUB_NEXT_EXIT 3 /* exit all the loops */
507 #define SCRUB_NEXT_WAIT 4 /* wait for free cache slot */
508 #define SCRUB_NEXT_CRASH 5 /* simulate system crash during OI scrub */
509 #define SCRUB_NEXT_FATAL 6 /* simulate failure during OI scrub */
510 #define SCRUB_NEXT_NOSCRUB 7 /* newly created object, no scrub on it */
511 #define SCRUB_NEXT_IGIF 8 /* IGIF object */
513 struct osd_iit_param {
514 struct super_block *sb;
515 struct buffer_head *bitmap;
521 typedef int (*osd_iit_next_policy)(struct osd_thread_info *info,
522 struct osd_device *dev,
523 struct osd_iit_param *param,
524 struct osd_idmap_cache **oic,
527 typedef int (*osd_iit_exec_policy)(struct osd_thread_info *info,
528 struct osd_device *dev,
529 struct osd_iit_param *param,
530 struct osd_idmap_cache *oic,
531 int *noslot, int rc);
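/* The scrub may run ahead of the otable preload position by at most
 * SCRUB_WINDOW_SIZE inodes. */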
533 static inline int osd_scrub_has_window(struct osd_scrub *scrub,
534 struct osd_otable_cache *ooc)
536 return scrub->os_pos_current < ooc->ooc_pos_preload + SCRUB_WINDOW_SIZE;
539 static int osd_iit_next(struct osd_iit_param *param, __u32 *pos)
541 param->offset = ldiskfs_find_next_bit(param->bitmap->b_data,
542 LDISKFS_INODES_PER_GROUP(param->sb), param->offset);
543 if (param->offset >= LDISKFS_INODES_PER_GROUP(param->sb)) {
544 *pos = 1 + (param->bg+1) * LDISKFS_INODES_PER_GROUP(param->sb);
545 return SCRUB_NEXT_BREAK;
547 *pos = param->gbase + param->offset;
552 static int osd_iit_iget(struct osd_thread_info *info, struct osd_device *dev,
553 struct lu_fid *fid, struct osd_inode_id *lid, __u32 pos,
554 struct super_block *sb, struct inode **pinode)
559 osd_id_gen(lid, pos, OSD_OII_NOGEN);
560 inode = osd_iget_fid(info, dev, lid, fid);
563 /* The inode may have been removed after the bitmap search, or the
564 * file was newly created and its inode is not initialized yet. */
565 if (rc == -ENOENT || rc == -ESTALE)
566 return SCRUB_NEXT_CONTINUE;
568 CERROR("%.16s: fail to read inode, ino# = %u, rc = %d\n",
569 LDISKFS_SB(sb)->s_es->s_volume_name, pos, rc);
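/* "next" policy for the OI scrub: handle fault-injection hooks, serve urgent
 * inconsistent items first, then pick the next allocated inode from the
 * group bitmap and read it. */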
577 static int osd_scrub_next(struct osd_thread_info *info, struct osd_device *dev,
578 struct osd_iit_param *param,
579 struct osd_idmap_cache **oic, int noslot)
581 struct osd_scrub *scrub = &dev->od_scrub;
582 struct ptlrpc_thread *thread = &scrub->os_thread;
584 struct osd_inode_id *lid;
588 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0) {
589 struct l_wait_info lwi;
591 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val), NULL, NULL);
592 l_wait_event(thread->t_ctl_waitq,
593 !cfs_list_empty(&scrub->os_inconsistent_items) ||
594 !thread_is_running(thread),
598 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
599 spin_lock(&scrub->os_lock);
600 thread_set_flags(thread, SVC_STOPPING);
601 spin_unlock(&scrub->os_lock);
602 return SCRUB_NEXT_CRASH;
605 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_FATAL))
606 return SCRUB_NEXT_FATAL;
608 if (unlikely(!thread_is_running(thread)))
609 return SCRUB_NEXT_EXIT;
611 if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
612 struct osd_inconsistent_item *oii;
614 oii = cfs_list_entry(scrub->os_inconsistent_items.next,
615 struct osd_inconsistent_item, oii_list);
616 *oic = &oii->oii_cache;
617 scrub->os_in_prior = 1;
622 return SCRUB_NEXT_WAIT;
624 rc = osd_iit_next(param, &scrub->os_pos_current);
628 *oic = &scrub->os_oic;
629 fid = &(*oic)->oic_fid;
630 lid = &(*oic)->oic_lid;
631 rc = osd_iit_iget(info, dev, fid, lid,
632 scrub->os_pos_current, param->sb, &inode);
636 if (inode->i_state & I_LUSTRE_NOSCRUB) {
637 /* Only skip it on the first OI scrub access. */
638 inode->i_state &= ~I_LUSTRE_NOSCRUB;
639 rc = SCRUB_NEXT_NOSCRUB;
640 } else if (!fid_is_norm(fid)) {
641 rc = SCRUB_NEXT_IGIF;
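/* "next" policy for the otable preload: stop once the preload position
 * catches up with the running scrub, otherwise read the next inode into the
 * preload cache. */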
648 static int osd_preload_next(struct osd_thread_info *info,
649 struct osd_device *dev, struct osd_iit_param *param,
650 struct osd_idmap_cache **oic, int noslot)
652 struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
653 struct osd_scrub *scrub;
654 struct ptlrpc_thread *thread;
658 rc = osd_iit_next(param, &ooc->ooc_pos_preload);
662 scrub = &dev->od_scrub;
663 thread = &scrub->os_thread;
664 if (thread_is_running(thread) &&
665 ooc->ooc_pos_preload >= scrub->os_pos_current)
666 return SCRUB_NEXT_EXIT;
668 rc = osd_iit_iget(info, dev,
669 &ooc->ooc_cache[ooc->ooc_producer_idx].oic_fid,
670 &ooc->ooc_cache[ooc->ooc_producer_idx].oic_lid,
671 ooc->ooc_pos_preload, param->sb, &inode);
672 /* On success the position must move forward; on failure the upper-layer LFSCK
673 * may ignore the error, so the inode still needs to be skipped next time. */
674 ooc->ooc_pos_preload = param->gbase + ++(param->offset);
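/* "exec" policy for the OI scrub: account noscrub/IGIF objects, update the
 * OI for checked objects, take periodic checkpoints, and throttle against
 * the otable iterator window when not running at full speed. */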
680 static int osd_scrub_exec(struct osd_thread_info *info, struct osd_device *dev,
681 struct osd_iit_param *param,
682 struct osd_idmap_cache *oic, int *noslot, int rc)
684 struct l_wait_info lwi = { 0 };
685 struct osd_scrub *scrub = &dev->od_scrub;
686 struct scrub_file *sf = &scrub->os_file;
688 struct ptlrpc_thread *thread = &scrub->os_thread;
689 struct osd_otable_it *it = dev->od_otable_it;
690 struct osd_otable_cache *ooc = it ? &it->ooi_cache : NULL;
693 case SCRUB_NEXT_CONTINUE:
695 case SCRUB_NEXT_WAIT:
697 case SCRUB_NEXT_NOSCRUB:
698 items = &sf->sf_items_noscrub;
700 case SCRUB_NEXT_IGIF:
701 items = &sf->sf_items_igif;
706 down_write(&scrub->os_rwsem);
707 scrub->os_new_checked++;
709 up_write(&scrub->os_rwsem);
713 LASSERTF(rc <= 0, "unexpected rc = %d\n", rc);
716 rc = osd_scrub_error(dev, &oic->oic_lid, rc);
718 rc = osd_scrub_check_update(info, dev, oic);
722 rc = osd_scrub_checkpoint(scrub);
724 CERROR("%.16s: fail to checkpoint, pos = %u, rc = %d\n",
725 LDISKFS_SB(param->sb)->s_es->s_volume_name,
726 scrub->os_pos_current, rc);
727 /* Continue, as long as the scrub itself can go ahead. */
730 if (scrub->os_in_prior) {
731 scrub->os_in_prior = 0;
736 scrub->os_pos_current = param->gbase + ++(param->offset);
737 if (it != NULL && it->ooi_waiting &&
738 ooc->ooc_pos_preload < scrub->os_pos_current) {
740 cfs_waitq_broadcast(&thread->t_ctl_waitq);
743 if (scrub->os_full_speed || rc == SCRUB_NEXT_CONTINUE)
747 if (osd_scrub_has_window(scrub, ooc)) {
752 scrub->os_waiting = 1;
753 l_wait_event(thread->t_ctl_waitq,
754 osd_scrub_has_window(scrub, ooc) ||
755 !cfs_list_empty(&scrub->os_inconsistent_items) ||
756 !thread_is_running(thread),
758 scrub->os_waiting = 0;
760 if (osd_scrub_has_window(scrub, ooc))
767 static int osd_preload_exec(struct osd_thread_info *info,
768 struct osd_device *dev, struct osd_iit_param *param,
769 struct osd_idmap_cache *oic, int *noslot, int rc)
771 struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
774 ooc->ooc_cached_items++;
775 ooc->ooc_producer_idx = (ooc->ooc_producer_idx + 1) &
776 ~OSD_OTABLE_IT_CACHE_MASK;
778 return rc > 0 ? 0 : rc;
781 #define SCRUB_IT_ALL 1
782 #define SCRUB_IT_CRASH 2
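/* Walk the ldiskfs inode table group by group with the given next/exec
 * policies (either the scrub itself or the otable preload) until "max"
 * objects have been handled or the whole inode table has been scanned. */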
784 static int osd_inode_iteration(struct osd_thread_info *info,
785 struct osd_device *dev, __u32 max, int preload)
787 osd_iit_next_policy next;
788 osd_iit_exec_policy exec;
791 struct osd_iit_param param;
798 struct osd_scrub *scrub = &dev->od_scrub;
800 next = osd_scrub_next;
801 exec = osd_scrub_exec;
802 pos = &scrub->os_pos_current;
803 count = &scrub->os_new_checked;
805 struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
807 next = osd_preload_next;
808 exec = osd_preload_exec;
809 pos = &ooc->ooc_pos_preload;
810 count = &ooc->ooc_cached_items;
812 param.sb = osd_sb(dev);
813 limit = le32_to_cpu(LDISKFS_SB(param.sb)->s_es->s_inodes_count);
815 while (*pos <= limit && *count < max) {
816 struct osd_idmap_cache *oic = NULL;
818 param.bg = (*pos - 1) / LDISKFS_INODES_PER_GROUP(param.sb);
819 param.offset = (*pos - 1) % LDISKFS_INODES_PER_GROUP(param.sb);
820 param.gbase = 1 + param.bg * LDISKFS_INODES_PER_GROUP(param.sb);
821 param.bitmap = ldiskfs_read_inode_bitmap(param.sb, param.bg);
822 if (param.bitmap == NULL) {
823 CERROR("%.16s: fail to read bitmap for %u, "
824 "scrub will stop, urgent mode\n",
825 LDISKFS_SB(param.sb)->s_es->s_volume_name,
830 while (param.offset < LDISKFS_INODES_PER_GROUP(param.sb) &&
832 rc = next(info, dev, ¶m, &oic, noslot);
834 case SCRUB_NEXT_BREAK:
836 case SCRUB_NEXT_EXIT:
837 brelse(param.bitmap);
839 case SCRUB_NEXT_CRASH:
840 brelse(param.bitmap);
841 RETURN(SCRUB_IT_CRASH);
842 case SCRUB_NEXT_FATAL:
843 brelse(param.bitmap);
847 rc = exec(info, dev, ¶m, oic, &noslot, rc);
849 brelse(param.bitmap);
855 brelse(param.bitmap);
859 RETURN(SCRUB_IT_ALL);
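/* Main body of the OI scrub thread: prepare the scrub, optionally wait for
 * the otable user when not running at full speed, iterate the inode table,
 * post the result and drain any pending inconsistent items. */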
863 static int osd_scrub_main(void *args)
866 struct osd_device *dev = (struct osd_device *)args;
867 struct osd_scrub *scrub = &dev->od_scrub;
868 struct ptlrpc_thread *thread = &scrub->os_thread;
869 struct super_block *sb = osd_sb(dev);
873 cfs_daemonize("OI_scrub");
874 rc = lu_env_init(&env, LCT_DT_THREAD);
876 CERROR("%.16s: OI scrub, fail to init env, rc = %d\n",
877 LDISKFS_SB(sb)->s_es->s_volume_name, rc);
881 rc = osd_scrub_prep(dev);
883 CERROR("%.16s: OI scrub, fail to scrub prep, rc = %d\n",
884 LDISKFS_SB(sb)->s_es->s_volume_name, rc);
888 if (!scrub->os_full_speed) {
889 struct l_wait_info lwi = { 0 };
890 struct osd_otable_it *it = dev->od_otable_it;
891 struct osd_otable_cache *ooc = &it->ooi_cache;
893 l_wait_event(thread->t_ctl_waitq,
894 it->ooi_user_ready || !thread_is_running(thread),
896 if (unlikely(!thread_is_running(thread)))
899 LASSERT(scrub->os_pos_current >= ooc->ooc_pos_preload);
900 scrub->os_pos_current = ooc->ooc_pos_preload;
903 CDEBUG(D_LFSCK, "OI scrub: flags = 0x%x, pos = %u\n",
904 scrub->os_start_flags, scrub->os_pos_current);
906 rc = osd_inode_iteration(osd_oti_get(&env), dev, ~0U, 0);
907 if (unlikely(rc == SCRUB_IT_CRASH))
908 GOTO(out, rc = -EINVAL);
912 osd_scrub_post(scrub, rc);
913 CDEBUG(D_LFSCK, "OI scrub: stop, rc = %d, pos = %u\n",
914 rc, scrub->os_pos_current);
917 while (!cfs_list_empty(&scrub->os_inconsistent_items)) {
918 struct osd_inconsistent_item *oii;
920 oii = cfs_list_entry(scrub->os_inconsistent_items.next,
921 struct osd_inconsistent_item, oii_list);
922 cfs_list_del_init(&oii->oii_list);
928 spin_lock(&scrub->os_lock);
929 thread_set_flags(thread, SVC_STOPPED);
930 cfs_waitq_broadcast(&thread->t_ctl_waitq);
931 spin_unlock(&scrub->os_lock);
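/* Start the OI scrub thread; -EALREADY is reported when a scrub is already
 * running. */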
935 static int do_osd_scrub_start(struct osd_device *dev, __u32 flags)
937 struct osd_scrub *scrub = &dev->od_scrub;
938 struct ptlrpc_thread *thread = &scrub->os_thread;
939 struct l_wait_info lwi = { 0 };
944 /* os_lock: sync status between stop and scrub thread */
945 spin_lock(&scrub->os_lock);
946 if (thread_is_running(thread)) {
947 spin_unlock(&scrub->os_lock);
949 } else if (unlikely(thread_is_stopping(thread))) {
950 spin_unlock(&scrub->os_lock);
951 l_wait_event(thread->t_ctl_waitq,
952 thread_is_stopped(thread),
956 spin_unlock(&scrub->os_lock);
958 if (scrub->os_file.sf_status == SS_COMPLETED)
961 scrub->os_start_flags = flags;
962 thread_set_flags(thread, 0);
963 rc = cfs_create_thread(osd_scrub_main, dev, 0);
965 CERROR("%.16s: cannot start iteration thread, rc = %d\n",
966 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, rc);
970 l_wait_event(thread->t_ctl_waitq,
971 thread_is_running(thread) || thread_is_stopped(thread),
977 int osd_scrub_start(struct osd_device *dev)
982 /* od_otable_mutex: prevent concurrent start/stop */
983 mutex_lock(&dev->od_otable_mutex);
984 rc = do_osd_scrub_start(dev, SS_AUTO);
985 mutex_unlock(&dev->od_otable_mutex);
987 RETURN(rc == -EALREADY ? 0 : rc);
990 static void do_osd_scrub_stop(struct osd_scrub *scrub)
992 struct ptlrpc_thread *thread = &scrub->os_thread;
993 struct l_wait_info lwi = { 0 };
995 /* os_lock: sync status between stop and scrub thread */
996 spin_lock(&scrub->os_lock);
997 if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
998 thread_set_flags(thread, SVC_STOPPING);
999 spin_unlock(&scrub->os_lock);
1000 cfs_waitq_broadcast(&thread->t_ctl_waitq);
1001 l_wait_event(thread->t_ctl_waitq,
1002 thread_is_stopped(thread),
1004 /* Do not skip the last lock/unlock cycle, which guarantees that the
1005 * caller cannot return until the OI scrub thread has exited. */
1006 spin_lock(&scrub->os_lock);
1008 spin_unlock(&scrub->os_lock);
1011 static void osd_scrub_stop(struct osd_device *dev)
1013 /* od_otable_mutex: prevent concurrent start/stop */
1014 mutex_lock(&dev->od_otable_mutex);
1015 dev->od_scrub.os_paused = 1;
1016 do_osd_scrub_stop(&dev->od_scrub);
1017 mutex_unlock(&dev->od_otable_mutex);
1020 static const char osd_scrub_name[] = "OI_scrub";
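/* Set up the OI scrub at mount time: open or create the OI_scrub file, load
 * and check it against the filesystem UUID, initialize the OI tables, and
 * trigger an automatic scrub when a previous run was interrupted or an
 * inconsistency was detected. */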
1022 int osd_scrub_setup(const struct lu_env *env, struct osd_device *dev)
1024 struct osd_thread_info *info = osd_oti_get(env);
1025 struct osd_scrub *scrub = &dev->od_scrub;
1026 struct lvfs_run_ctxt *ctxt = &scrub->os_ctxt;
1027 struct scrub_file *sf = &scrub->os_file;
1028 struct osd_inode_id *id = &scrub->os_oic.oic_lid;
1029 struct super_block *sb = osd_sb(dev);
1030 struct ldiskfs_super_block *es = LDISKFS_SB(sb)->s_es;
1031 struct inode *inode;
1032 struct lvfs_run_ctxt saved;
1039 memset(scrub, 0, sizeof(*scrub));
1040 OBD_SET_CTXT_MAGIC(ctxt);
1041 ctxt->pwdmnt = dev->od_mnt;
1042 ctxt->pwd = dev->od_mnt->mnt_root;
1043 ctxt->fs = get_ds();
1045 cfs_waitq_init(&scrub->os_thread.t_ctl_waitq);
1046 init_rwsem(&scrub->os_rwsem);
1047 spin_lock_init(&scrub->os_lock);
1048 CFS_INIT_LIST_HEAD(&scrub->os_inconsistent_items);
1050 push_ctxt(&saved, ctxt, NULL);
1051 filp = filp_open(osd_scrub_name, O_RDWR | O_CREAT, 0644);
1053 RETURN(PTR_ERR(filp));
1055 scrub->os_inode = igrab(filp->f_dentry->d_inode);
1056 filp_close(filp, 0);
1057 pop_ctxt(&saved, ctxt, NULL);
1059 rc = osd_scrub_file_load(scrub);
1060 if (rc == -ENOENT) {
1061 osd_scrub_file_init(scrub, es->s_uuid);
1064 } else if (rc != 0) {
1067 if (memcmp(sf->sf_uuid, es->s_uuid, 16) != 0) {
1068 osd_scrub_file_reset(scrub, es->s_uuid, SF_INCONSISTENT);
1070 } else if (sf->sf_status == SS_SCANNING) {
1071 sf->sf_status = SS_CRASHED;
1076 if (sf->sf_pos_last_checkpoint != 0)
1077 scrub->os_pos_current = sf->sf_pos_last_checkpoint + 1;
1079 scrub->os_pos_current = LDISKFS_FIRST_INO(sb);
1082 rc = osd_scrub_file_store(scrub);
1087 /* Initialize OI files. */
1088 rc = osd_oi_init(info, dev);
1093 rc = __osd_oi_lookup(info, dev, &LU_DOT_LUSTRE_FID, id);
1095 inode = osd_iget(info, dev, id);
1096 if (IS_ERR(inode)) {
1097 rc = PTR_ERR(inode);
1098 /* The device was restored from an old 2.x backup. */
1099 if (rc == -ENOENT || rc == -ESTALE) {
1100 osd_scrub_file_reset(scrub, es->s_uuid,
1102 rc = osd_scrub_file_store(scrub);
1107 } else if (rc == -ENOENT) {
1112 if (rc == 0 && !dev->od_noscrub &&
1113 ((sf->sf_status == SS_PAUSED) ||
1114 (sf->sf_status == SS_CRASHED &&
1115 sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT | SF_AUTO)) ||
1116 (sf->sf_status == SS_INIT &&
1117 sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT))))
1118 rc = osd_scrub_start(dev);
1123 void osd_scrub_cleanup(const struct lu_env *env, struct osd_device *dev)
1125 struct osd_scrub *scrub = &dev->od_scrub;
1127 LASSERT(dev->od_otable_it == NULL);
1129 if (scrub->os_inode != NULL) {
1130 osd_scrub_stop(dev);
1131 iput(scrub->os_inode);
1132 scrub->os_inode = NULL;
1134 if (dev->od_oi_table != NULL)
1135 osd_oi_fini(osd_oti_get(env), dev);
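/* Create the otable iterator used by the upper-layer LFSCK; it piggy-backs
 * on the OI scrub thread, starting the scrub if it is not already running. */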
1138 static struct dt_it *osd_otable_it_init(const struct lu_env *env,
1139 struct dt_object *dt, __u32 attr,
1140 struct lustre_capa *capa)
1142 enum dt_otable_it_flags flags = attr >> DT_OTABLE_IT_FLAGS_SHIFT;
1143 enum dt_otable_it_valid valid = attr & ~DT_OTABLE_IT_FLAGS_MASK;
1144 struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
1145 struct osd_scrub *scrub = &dev->od_scrub;
1146 struct osd_otable_it *it;
1151 /* od_otable_mutex: prevent concurrent init/fini */
1152 mutex_lock(&dev->od_otable_mutex);
1153 if (dev->od_otable_it != NULL)
1154 GOTO(out, it = ERR_PTR(-EALREADY));
1158 GOTO(out, it = ERR_PTR(-ENOMEM));
1160 dev->od_otable_it = it;
1162 it->ooi_cache.ooc_consumer_idx = -1;
1163 if (flags & DOIF_OUTUSED)
1164 it->ooi_used_outside = 1;
1166 if (flags & DOIF_RESET)
1169 if (valid & DOIV_ERROR_HANDLE) {
1170 if (flags & DOIF_FAILOUT)
1171 start |= SS_SET_FAILOUT;
1173 start |= SS_CLEAR_FAILOUT;
1176 rc = do_osd_scrub_start(dev, start);
1177 if (rc == -EALREADY) {
1178 it->ooi_cache.ooc_pos_preload = scrub->os_pos_current - 1;
1179 } else if (rc < 0) {
1180 dev->od_otable_it = NULL;
1182 GOTO(out, it = ERR_PTR(-EALREADY));
1184 it->ooi_cache.ooc_pos_preload = scrub->os_pos_current;
1190 mutex_unlock(&dev->od_otable_mutex);
1191 return (struct dt_it *)it;
1194 static void osd_otable_it_fini(const struct lu_env *env, struct dt_it *di)
1196 struct osd_otable_it *it = (struct osd_otable_it *)di;
1197 struct osd_device *dev = it->ooi_dev;
1199 /* od_otable_mutex: prevent concurrent init/fini */
1200 mutex_lock(&dev->od_otable_mutex);
1201 do_osd_scrub_stop(&dev->od_scrub);
1202 LASSERT(dev->od_otable_it == it);
1204 dev->od_otable_it = NULL;
1205 mutex_unlock(&dev->od_otable_mutex);
1210 * XXX: Temporarily used to notify the otable iteration to pause.
1212 static void osd_otable_it_put(const struct lu_env *env, struct dt_it *di)
1214 struct osd_device *dev = ((struct osd_otable_it *)di)->ooi_dev;
1216 /* od_otable_mutex: prevent concurrent init/fini */
1217 mutex_lock(&dev->od_otable_mutex);
1218 dev->od_scrub.os_paused = 1;
1219 mutex_unlock(&dev->od_otable_mutex);
1223 * Set the OSD-layer iteration start position to the specified key.
1225 * The LFSCK above the OSD layer does not know the key format, so when there
1226 * are several keys they cannot be compared outside the OSD; instead "::get()"
1227 * is called for each key and the OSD selects the smallest one by itself.
1229 static int osd_otable_it_get(const struct lu_env *env,
1230 struct dt_it *di, const struct dt_key *key)
1232 struct osd_otable_it *it = (struct osd_otable_it *)di;
1233 struct osd_otable_cache *ooc = &it->ooi_cache;
1234 const char *str = (const char *)key;
1238 /* Setting the iteration position is forbidden once the iteration has started. */
1239 if (it->ooi_user_ready)
1245 if (sscanf(str, "%u", &ino) <= 0)
1248 /* Skip the one that has been processed last time. */
1249 if (ooc->ooc_pos_preload > ++ino)
1250 ooc->ooc_pos_preload = ino;
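/* Pre-load up to OSD_OTABLE_IT_CACHE_SIZE inodes into the iterator cache and
 * wake up the scrub thread if it was waiting for the window to open. */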
1255 static int osd_otable_it_preload(const struct lu_env *env,
1256 struct osd_otable_it *it)
1258 struct osd_device *dev = it->ooi_dev;
1259 struct osd_scrub *scrub = &dev->od_scrub;
1260 struct osd_otable_cache *ooc = &it->ooi_cache;
1264 rc = osd_inode_iteration(osd_oti_get(env), dev,
1265 OSD_OTABLE_IT_CACHE_SIZE, 1);
1266 if (rc == SCRUB_IT_ALL)
1267 it->ooi_all_cached = 1;
1269 CDEBUG(D_LFSCK, "OSD pre-loaded: max = %u, preload = %u, rc = %d\n",
1270 le32_to_cpu(LDISKFS_SB(osd_sb(dev))->s_es->s_inodes_count),
1271 ooc->ooc_pos_preload, rc);
1273 if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
1274 scrub->os_waiting = 0;
1275 cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
1278 RETURN(rc < 0 ? rc : ooc->ooc_cached_items);
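/* Return the next pre-loaded object, waiting for the scrub thread to move
 * ahead (or to stop) when the cache is empty. */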
1281 static int osd_otable_it_next(const struct lu_env *env, struct dt_it *di)
1283 struct osd_otable_it *it = (struct osd_otable_it *)di;
1284 struct osd_device *dev = it->ooi_dev;
1285 struct osd_scrub *scrub = &dev->od_scrub;
1286 struct osd_otable_cache *ooc = &it->ooi_cache;
1287 struct ptlrpc_thread *thread = &scrub->os_thread;
1288 struct l_wait_info lwi = { 0 };
1292 LASSERT(it->ooi_user_ready);
1295 if (!thread_is_running(thread) && !it->ooi_used_outside)
1298 if (ooc->ooc_cached_items > 0) {
1299 ooc->ooc_cached_items--;
1300 ooc->ooc_consumer_idx = (ooc->ooc_consumer_idx + 1) &
1301 ~OSD_OTABLE_IT_CACHE_MASK;
1305 if (it->ooi_all_cached) {
1306 l_wait_event(thread->t_ctl_waitq,
1307 !thread_is_running(thread),
1312 it->ooi_waiting = 1;
1313 l_wait_event(thread->t_ctl_waitq,
1314 ooc->ooc_pos_preload < scrub->os_pos_current ||
1315 !thread_is_running(thread),
1317 it->ooi_waiting = 0;
1319 if (!thread_is_running(thread) && !it->ooi_used_outside)
1322 rc = osd_otable_it_preload(env, it);
1329 static struct dt_key *osd_otable_it_key(const struct lu_env *env,
1330 const struct dt_it *di)
1332 struct osd_otable_it *it = (struct osd_otable_it *)di;
1333 struct osd_otable_cache *ooc = &it->ooi_cache;
1335 sprintf(it->ooi_key, "%u",
1336 ooc->ooc_cache[ooc->ooc_consumer_idx].oic_lid.oii_ino);
1337 return (struct dt_key *)it->ooi_key;
1340 static int osd_otable_it_key_size(const struct lu_env *env,
1341 const struct dt_it *di)
1343 return sizeof(((struct osd_otable_it *)di)->ooi_key);
1346 static int osd_otable_it_rec(const struct lu_env *env, const struct dt_it *di,
1347 struct dt_rec *rec, __u32 attr)
1349 struct osd_otable_it *it = (struct osd_otable_it *)di;
1350 struct osd_otable_cache *ooc = &it->ooi_cache;
1352 *(struct lu_fid *)rec = ooc->ooc_cache[ooc->ooc_consumer_idx].oic_fid;
1356 static int osd_otable_it_load(const struct lu_env *env,
1357 const struct dt_it *di, __u64 hash)
1359 struct osd_otable_it *it = (struct osd_otable_it *)di;
1360 struct osd_device *dev = it->ooi_dev;
1361 struct osd_otable_cache *ooc = &it->ooi_cache;
1362 struct osd_scrub *scrub = &dev->od_scrub;
1364 if (it->ooi_user_ready)
1367 if (ooc->ooc_pos_preload < LDISKFS_FIRST_INO(osd_sb(dev)))
1368 ooc->ooc_pos_preload = LDISKFS_FIRST_INO(osd_sb(dev));
1369 it->ooi_user_ready = 1;
1370 if (!scrub->os_full_speed)
1371 cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
1373 /* Unplug the OSD-layer iteration with the first next() call. */
1374 return osd_otable_it_next(env, (struct dt_it *)it);
1377 const struct dt_index_operations osd_otable_ops = {
1379 .init = osd_otable_it_init,
1380 .fini = osd_otable_it_fini,
1381 .put = osd_otable_it_put,
1382 .get = osd_otable_it_get,
1383 .next = osd_otable_it_next,
1384 .key = osd_otable_it_key,
1385 .key_size = osd_otable_it_key_size,
1386 .rec = osd_otable_it_rec,
1387 .load = osd_otable_it_load,
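/* Queue an inconsistent OI entry found outside the scrub so that the running
 * scrub thread can repair it with priority. */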
1391 int osd_oii_insert(struct osd_device *dev, struct osd_idmap_cache *oic,
1394 struct osd_inconsistent_item *oii;
1395 struct osd_scrub *scrub = &dev->od_scrub;
1396 struct ptlrpc_thread *thread = &scrub->os_thread;
1401 if (unlikely(oii == NULL))
1404 CFS_INIT_LIST_HEAD(&oii->oii_list);
1405 oii->oii_cache = *oic;
1406 oii->oii_insert = insert;
1408 spin_lock(&scrub->os_lock);
1409 if (unlikely(!thread_is_running(thread))) {
1410 spin_unlock(&scrub->os_lock);
1415 if (cfs_list_empty(&scrub->os_inconsistent_items))
1417 cfs_list_add_tail(&oii->oii_list, &scrub->os_inconsistent_items);
1418 spin_unlock(&scrub->os_lock);
1421 cfs_waitq_broadcast(&thread->t_ctl_waitq);
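/* Look up a FID in the list of pending inconsistent items and return its
 * cached inode id if found. */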
1426 int osd_oii_lookup(struct osd_device *dev, const struct lu_fid *fid,
1427 struct osd_inode_id *id)
1429 struct osd_scrub *scrub = &dev->od_scrub;
1430 struct osd_inconsistent_item *oii;
1433 spin_lock(&scrub->os_lock);
1434 cfs_list_for_each_entry(oii, &scrub->os_inconsistent_items, oii_list) {
1435 if (lu_fid_eq(fid, &oii->oii_cache.oic_fid)) {
1436 *id = oii->oii_cache.oic_lid;
1437 spin_unlock(&scrub->os_lock);
1441 spin_unlock(&scrub->os_lock);
1446 static const char *scrub_status_names[] = {
1457 static const char *scrub_flags_names[] = {
1464 static const char *scrub_param_names[] = {
1469 static int scrub_bits_dump(char **buf, int *len, int bits, const char *names[],
1477 rc = snprintf(*buf, *len, "%s:%c", prefix, bits != 0 ? ' ' : '\n');
1483 for (i = 0, flag = 1; bits != 0; i++, flag = 1 << i) {
1486 rc = snprintf(*buf, *len, "%s%c", names[i],
1487 bits != 0 ? ',' : '\n');
1498 static int scrub_time_dump(char **buf, int *len, __u64 time, const char *prefix)
1503 rc = snprintf(*buf, *len, "%s: "LPU64" seconds\n", prefix,
1504 cfs_time_current_sec() - time);
1506 rc = snprintf(*buf, *len, "%s: N/A\n", prefix);
1515 static int scrub_pos_dump(char **buf, int *len, __u64 pos, const char *prefix)
1520 rc = snprintf(*buf, *len, "%s: "LPU64"\n", prefix, pos);
1522 rc = snprintf(*buf, *len, "%s: N/A\n", prefix);
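/* Dump the scrub status into a procfs buffer: flags, parameters, timestamps,
 * positions, per-item counters and the current scan speed. */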
1531 int osd_scrub_dump(struct osd_device *dev, char *buf, int len)
1533 struct osd_scrub *scrub = &dev->od_scrub;
1534 struct scrub_file *sf = &scrub->os_file;
1541 down_read(&scrub->os_rwsem);
1542 rc = snprintf(buf, len,
1547 sf->sf_magic, (int)sf->sf_oi_count,
1548 scrub_status_names[sf->sf_status]);
1554 rc = scrub_bits_dump(&buf, &len, sf->sf_flags, scrub_flags_names,
1559 rc = scrub_bits_dump(&buf, &len, sf->sf_param, scrub_param_names,
1564 rc = scrub_time_dump(&buf, &len, sf->sf_time_last_complete,
1565 "time_since_last_completed");
1569 rc = scrub_time_dump(&buf, &len, sf->sf_time_latest_start,
1570 "time_since_latest_start");
1574 rc = scrub_time_dump(&buf, &len, sf->sf_time_last_checkpoint,
1575 "time_since_last_checkpoint");
1579 rc = scrub_pos_dump(&buf, &len, sf->sf_pos_latest_start,
1580 "latest_start_position");
1584 rc = scrub_pos_dump(&buf, &len, sf->sf_pos_last_checkpoint,
1585 "last_checkpoint_position");
1589 rc = scrub_pos_dump(&buf, &len, sf->sf_pos_first_inconsistent,
1590 "first_failure_position");
1594 checked = sf->sf_items_checked + scrub->os_new_checked;
1595 rc = snprintf(buf, len,
1596 "checked: "LPU64"\n"
1597 "updated: "LPU64"\n"
1599 "prior_updated: "LPU64"\n"
1600 "noscrub: "LPU64"\n"
1602 "success_count: %u\n",
1603 checked, sf->sf_items_updated, sf->sf_items_failed,
1604 sf->sf_items_updated_prior, sf->sf_items_noscrub,
1605 sf->sf_items_igif, sf->sf_success_count);
1612 if (thread_is_running(&scrub->os_thread)) {
1613 cfs_duration_t duration = cfs_time_current() -
1614 scrub->os_time_last_checkpoint;
1615 __u64 new_checked = scrub->os_new_checked * CFS_HZ;
1616 __u32 rtime = sf->sf_run_time +
1617 cfs_duration_sec(duration + HALF_SEC);
1620 do_div(new_checked, duration);
1622 do_div(speed, rtime);
1623 rc = snprintf(buf, len,
1624 "run_time: %u seconds\n"
1625 "average_speed: "LPU64" objects/sec\n"
1626 "real-time_speed: "LPU64" objects/sec\n"
1627 "current_position: %u\n",
1628 rtime, speed, new_checked, scrub->os_pos_current);
1630 if (sf->sf_run_time != 0)
1631 do_div(speed, sf->sf_run_time);
1632 rc = snprintf(buf, len,
1633 "run_time: %u seconds\n"
1634 "average_speed: "LPU64" objects/sec\n"
1635 "real-time_speed: N/A\n"
1636 "current_position: N/A\n",
1637 sf->sf_run_time, speed);
1647 up_read(&scrub->os_rwsem);