1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License version 2 for more details.  A copy is
14  * included in the COPYING file that accompanied this code.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2012 Whamcloud, Inc.
24  */
25 /*
26  * lustre/osd-ldiskfs/osd_scrub.c
27  *
28  * Top-level entry points into osd module
29  *
30  * The OI scrub is used for rebuilding Object Index files when restoring the
31  * MDT from a file-level backup.
32  *
33  * The otable-based iterator scans the ldiskfs inode table to feed the up-layer LFSCK.
34  *
35  * Author: Fan Yong <yong.fan@whamcloud.com>
36  */
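/*
 * Editor's note -- illustrative sketch, not part of the original file.  The
 * otable-based iterator is exported through osd_otable_ops.dio_it (defined
 * near the end of this file), so an up-layer user such as LFSCK could drive
 * it roughly as follows.  The object handle "obj", the "attr" flags and the
 * error handling are assumptions for illustration; only the ->init(),
 * ->load(), ->rec(), ->next() and ->fini() calls correspond to the methods
 * implemented below.
 *
 *	const struct dt_index_operations *ops = obj->do_index_ops;
 *	struct dt_it *di;
 *	struct lu_fid fid;
 *	int rc;
 *
 *	di = ops->dio_it.init(env, obj, attr, BYPASS_CAPA);
 *	if (IS_ERR(di))
 *		return PTR_ERR(di);
 *
 *	rc = ops->dio_it.load(env, di, 0);
 *	while (rc == 0) {
 *		ops->dio_it.rec(env, di, (struct dt_rec *)&fid, 0);
 *		... process one FID found by the scan ...
 *		rc = ops->dio_it.next(env, di);
 *	}
 *	ops->dio_it.fini(env, di);
 */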
37
38 #ifndef EXPORT_SYMTAB
39 # define EXPORT_SYMTAB
40 #endif
41 #define DEBUG_SUBSYSTEM S_MDS
42
43 #include <lustre/lustre_idl.h>
44 #include <lustre_disk.h>
45 #include <dt_object.h>
46
47 #include "osd_internal.h"
48 #include "osd_oi.h"
49 #include "osd_scrub.h"
50
51 #define HALF_SEC        (CFS_HZ >> 1)  /* rounds sf_run_time to the nearest second */
52
53 static inline struct osd_device *osd_scrub2dev(struct osd_scrub *scrub)
54 {
55         return container_of0(scrub, struct osd_device, od_scrub);
56 }
57
58 static inline struct super_block *osd_scrub2sb(struct osd_scrub *scrub)
59 {
60         return osd_sb(osd_scrub2dev(scrub));
61 }
62
63 static void osd_scrub_file_to_cpu(struct scrub_file *des,
64                                   struct scrub_file *src)
65 {
66         memcpy(des->sf_uuid, src->sf_uuid, 16);
67         des->sf_flags   = le64_to_cpu(src->sf_flags);
68         des->sf_magic   = le32_to_cpu(src->sf_magic);
69         des->sf_status  = le16_to_cpu(src->sf_status);
70         des->sf_param   = le16_to_cpu(src->sf_param);
71         des->sf_time_last_complete      =
72                                 le64_to_cpu(src->sf_time_last_complete);
73         des->sf_time_latest_start       =
74                                 le64_to_cpu(src->sf_time_latest_start);
75         des->sf_time_last_checkpoint    =
76                                 le64_to_cpu(src->sf_time_last_checkpoint);
77         des->sf_pos_latest_start        =
78                                 le64_to_cpu(src->sf_pos_latest_start);
79         des->sf_pos_last_checkpoint     =
80                                 le64_to_cpu(src->sf_pos_last_checkpoint);
81         des->sf_pos_first_inconsistent  =
82                                 le64_to_cpu(src->sf_pos_first_inconsistent);
83         des->sf_items_checked           =
84                                 le64_to_cpu(src->sf_items_checked);
85         des->sf_items_updated           =
86                                 le64_to_cpu(src->sf_items_updated);
87         des->sf_items_failed            =
88                                 le64_to_cpu(src->sf_items_failed);
89         des->sf_items_updated_prior     =
90                                 le64_to_cpu(src->sf_items_updated_prior);
91         des->sf_run_time        = le32_to_cpu(src->sf_run_time);
92         des->sf_success_count   = le32_to_cpu(src->sf_success_count);
93         des->sf_oi_count        = le16_to_cpu(src->sf_oi_count);
94         memcpy(des->sf_oi_bitmap, src->sf_oi_bitmap, SCRUB_OI_BITMAP_SIZE);
95 }
96
97 static void osd_scrub_file_to_le(struct scrub_file *des,
98                                  struct scrub_file *src)
99 {
100         memcpy(des->sf_uuid, src->sf_uuid, 16);
101         des->sf_flags   = cpu_to_le64(src->sf_flags);
102         des->sf_magic   = cpu_to_le32(src->sf_magic);
103         des->sf_status  = cpu_to_le16(src->sf_status);
104         des->sf_param   = cpu_to_le16(src->sf_param);
105         des->sf_time_last_complete      =
106                                 cpu_to_le64(src->sf_time_last_complete);
107         des->sf_time_latest_start       =
108                                 cpu_to_le64(src->sf_time_latest_start);
109         des->sf_time_last_checkpoint    =
110                                 cpu_to_le64(src->sf_time_last_checkpoint);
111         des->sf_pos_latest_start        =
112                                 cpu_to_le64(src->sf_pos_latest_start);
113         des->sf_pos_last_checkpoint     =
114                                 cpu_to_le64(src->sf_pos_last_checkpoint);
115         des->sf_pos_first_inconsistent  =
116                                 cpu_to_le64(src->sf_pos_first_inconsistent);
117         des->sf_items_checked           =
118                                 cpu_to_le64(src->sf_items_checked);
119         des->sf_items_updated           =
120                                 cpu_to_le64(src->sf_items_updated);
121         des->sf_items_failed            =
122                                 cpu_to_le64(src->sf_items_failed);
123         des->sf_items_updated_prior     =
124                                 cpu_to_le64(src->sf_items_updated_prior);
125         des->sf_run_time        = cpu_to_le32(src->sf_run_time);
126         des->sf_success_count   = cpu_to_le32(src->sf_success_count);
127         des->sf_oi_count        = cpu_to_le16(src->sf_oi_count);
128         memcpy(des->sf_oi_bitmap, src->sf_oi_bitmap, SCRUB_OI_BITMAP_SIZE);
129 }
130
131 static void osd_scrub_file_init(struct osd_scrub *scrub, __u8 *uuid)
132 {
133         struct scrub_file *sf = &scrub->os_file;
134
135         memset(sf, 0, sizeof(*sf));
136         memcpy(sf->sf_uuid, uuid, 16);
137         sf->sf_magic = SCRUB_MAGIC_V1;
138         sf->sf_status = SS_INIT;
139 }
140
141 void osd_scrub_file_reset(struct osd_scrub *scrub, __u8 *uuid, __u64 flags)
142 {
143         struct scrub_file *sf = &scrub->os_file;
144
145         CDEBUG(D_LFSCK, "Reset OI scrub file, flags = "LPX64"\n", flags);
146         memcpy(sf->sf_uuid, uuid, 16);
147         sf->sf_status = SS_INIT;
148         sf->sf_flags |= flags;
149         sf->sf_param = 0;
150         sf->sf_run_time = 0;
151         sf->sf_time_latest_start = 0;
152         sf->sf_time_last_checkpoint = 0;
153         sf->sf_pos_latest_start = 0;
154         sf->sf_pos_last_checkpoint = 0;
155         sf->sf_pos_first_inconsistent = 0;
156         sf->sf_items_checked = 0;
157         sf->sf_items_updated = 0;
158         sf->sf_items_failed = 0;
159         sf->sf_items_updated_prior = 0;
160 }
161
162 static int osd_scrub_file_load(struct osd_scrub *scrub)
163 {
164         loff_t  pos  = 0;
165         char   *name = LDISKFS_SB(osd_scrub2sb(scrub))->s_es->s_volume_name;
166         int     len  = sizeof(scrub->os_file_disk);
167         int     rc;
168
169         rc = osd_ldiskfs_read(scrub->os_inode, &scrub->os_file_disk, len, &pos);
170         if (rc == len) {
171                 struct scrub_file *sf = &scrub->os_file;
172
173                 osd_scrub_file_to_cpu(sf, &scrub->os_file_disk);
174                 if (sf->sf_magic != SCRUB_MAGIC_V1) {
175                         CWARN("%.16s: invalid scrub magic 0x%x != 0x%x\n",
176                               name, sf->sf_magic, SCRUB_MAGIC_V1);
177                         /* Treat it as a new scrub file. */
178                         rc = -ENOENT;
179                 } else {
180                         rc = 0;
181                 }
182         } else if (rc != 0) {
183                 CERROR("%.16s: fail to load scrub file, expected = %d, "
184                        "rc = %d\n", name, len, rc);
185                 if (rc > 0)
186                         rc = -EFAULT;
187         } else {
188                 /* return -ENOENT for empty scrub file case. */
189                 rc = -ENOENT;
190         }
191
192         return rc;
193 }
194
195 int osd_scrub_file_store(struct osd_scrub *scrub)
196 {
197         struct osd_device *dev;
198         handle_t          *jh;
199         loff_t             pos     = 0;
200         int                len     = sizeof(scrub->os_file_disk);
201         int                credits;
202         int                rc;
203
204         dev = container_of0(scrub, struct osd_device, od_scrub);
205         credits = osd_dto_credits_noquota[DTO_WRITE_BASE] +
206                   osd_dto_credits_noquota[DTO_WRITE_BLOCK];
207         jh = ldiskfs_journal_start_sb(osd_sb(dev), credits);
208         if (IS_ERR(jh)) {
209                 rc = PTR_ERR(jh);
210                 CERROR("%.16s: fail to start trans for scrub store, rc = %d\n",
211                        LDISKFS_SB(osd_scrub2sb(scrub))->s_es->s_volume_name,rc);
212                 return rc;
213         }
214
215         osd_scrub_file_to_le(&scrub->os_file_disk, &scrub->os_file);
216         rc = osd_ldiskfs_write_record(scrub->os_inode, &scrub->os_file_disk,
217                                       len, &pos, jh);
218         ldiskfs_journal_stop(jh);
219         if (rc != 0)
220                 CERROR("%.16s: fail to store scrub file, expected = %d, "
221                        "rc = %d\n",
222                        LDISKFS_SB(osd_scrub2sb(scrub))->s_es->s_volume_name,
223                        len, rc);
224         scrub->os_time_last_checkpoint = cfs_time_current();
225         scrub->os_time_next_checkpoint = scrub->os_time_last_checkpoint +
226                                 cfs_time_seconds(SCRUB_CHECKPOINT_INTERVAL);
227         return rc;
228 }
229
230 static int osd_scrub_prep(struct osd_device *dev)
231 {
232         struct osd_scrub     *scrub  = &dev->od_scrub;
233         struct ptlrpc_thread *thread = &scrub->os_thread;
234         struct scrub_file    *sf     = &scrub->os_file;
235         __u32                 flags  = scrub->os_start_flags;
236         int                   rc;
237         ENTRY;
238
239         cfs_down_write(&scrub->os_rwsem);
240         if (flags & SS_SET_FAILOUT)
241                 sf->sf_param |= SP_FAILOUT;
242
243         if (flags & SS_CLEAR_FAILOUT)
244                 sf->sf_param &= ~SP_FAILOUT;
245
246         if (flags & SS_RESET)
247                 osd_scrub_file_reset(scrub,
248                         LDISKFS_SB(osd_sb(dev))->s_es->s_uuid, sf->sf_flags);
249
250         if (flags & SS_AUTO) {
251                 scrub->os_full_speed = 1;
252                 sf->sf_flags |= SF_AUTO;
253         } else {
254                 scrub->os_full_speed = 0;
255         }
256
257         if (sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT))
258                 scrub->os_full_speed = 1;
259
260         scrub->os_in_prior = 0;
261         scrub->os_waiting = 0;
262         scrub->os_new_checked = 0;
263         if (sf->sf_pos_last_checkpoint != 0)
264                 sf->sf_pos_latest_start = sf->sf_pos_last_checkpoint + 1;
265         else
266                 sf->sf_pos_latest_start = LDISKFS_FIRST_INO(osd_sb(dev));
267
268         scrub->os_pos_current = sf->sf_pos_latest_start;
269         sf->sf_status = SS_SCANNING;
270         sf->sf_time_latest_start = cfs_time_current_sec();
271         sf->sf_time_last_checkpoint = sf->sf_time_latest_start;
272         rc = osd_scrub_file_store(scrub);
273         if (rc == 0) {
274                 cfs_spin_lock(&scrub->os_lock);
275                 thread_set_flags(thread, SVC_RUNNING);
276                 cfs_spin_unlock(&scrub->os_lock);
277                 cfs_waitq_broadcast(&thread->t_ctl_waitq);
278         }
279         cfs_up_write(&scrub->os_rwsem);
280
281         RETURN(rc);
282 }
283
284 static int
285 osd_scrub_error(struct osd_device *dev, struct osd_inode_id *lid, int rc)
286 {
287         struct osd_scrub  *scrub = &dev->od_scrub;
288         struct scrub_file *sf    = &scrub->os_file;
289
290         cfs_down_write(&scrub->os_rwsem);
291         scrub->os_new_checked++;
292         sf->sf_items_failed++;
293         if (sf->sf_pos_first_inconsistent == 0 ||
294             sf->sf_pos_first_inconsistent > lid->oii_ino)
295                 sf->sf_pos_first_inconsistent = lid->oii_ino;
296         cfs_up_write(&scrub->os_rwsem);
297         return sf->sf_param & SP_FAILOUT ? rc : 0;
298 }
299
300 static int
301 osd_scrub_check_update(struct osd_thread_info *info, struct osd_device *dev,
302                        struct osd_idmap_cache *oic)
303 {
304         struct osd_scrub             *scrub  = &dev->od_scrub;
305         struct scrub_file            *sf     = &scrub->os_file;
306         struct osd_inode_id          *lid2   = &info->oti_id;
307         struct lu_fid                *oi_fid = &info->oti_fid;
308         struct osd_inode_id          *oi_id  = &info->oti_id;
309         handle_t                     *jh     = NULL;
310         struct osd_inconsistent_item *oii    = NULL;
311         struct inode                 *inode  = NULL;
312         struct lu_fid                *fid    = &oic->oic_fid;
313         struct osd_inode_id          *lid    = &oic->oic_lid;
314         struct iam_container         *bag;
315         struct iam_path_descr        *ipd;
316         int                           ops    = DTO_INDEX_UPDATE;
317         int                           idx;
318         int                           rc;
319         ENTRY;
320
321         if (scrub->os_in_prior)
322                 oii = cfs_list_entry(oic, struct osd_inconsistent_item,
323                                      oii_cache);
324
325         cfs_down_write(&scrub->os_rwsem);
326         scrub->os_new_checked++;
327         if (lid->oii_ino < sf->sf_pos_latest_start && oii == NULL)
328                 GOTO(out, rc = 0);
329
330         if (oii != NULL && oii->oii_insert)
331                 goto iget;
332
333         rc = osd_oi_lookup(info, dev, fid, lid2);
334         if (rc != 0) {
335                 if (rc != -ENOENT)
336                         GOTO(out, rc);
337
338 iget:
339                 inode = osd_iget(info, dev, lid);
340                 if (IS_ERR(inode)) {
341                         rc = PTR_ERR(inode);
342                         /* Someone removed the inode. */
343                         if (rc == -ENOENT || rc == -ESTALE)
344                                 rc = 0;
345                         GOTO(out, rc);
346                 }
347
348                 /* Prevent the inode from being unlinked during OI scrub. */
349                 cfs_mutex_lock(&inode->i_mutex);
350                 if (unlikely(inode->i_nlink == 0)) {
351                         cfs_mutex_unlock(&inode->i_mutex);
352                         iput(inode);
353                         GOTO(out, rc = 0);
354                 }
355
356                 ops = DTO_INDEX_INSERT;
357                 idx = osd_oi_fid2idx(dev, fid);
358                 if (unlikely(!ldiskfs_test_bit(idx, sf->sf_oi_bitmap)))
359                         ldiskfs_set_bit(idx, sf->sf_oi_bitmap);
360                 sf->sf_flags |= SF_RECREATED;
361         } else if (osd_id_eq(lid, lid2)) {
362                 GOTO(out, rc = 0);
363         }
364
365         sf->sf_flags |= SF_INCONSISTENT;
366         fid_cpu_to_be(oi_fid, fid);
367         osd_id_pack(oi_id, &oic->oic_lid);
368         jh = ldiskfs_journal_start_sb(osd_sb(dev),
369                                 osd_dto_credits_noquota[ops]);
370         if (IS_ERR(jh)) {
371                 rc = PTR_ERR(jh);
372                 CERROR("%.16s: fail to start trans for scrub store, rc = %d\n",
373                        LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, rc);
374                 GOTO(out, rc);
375         }
376
377         bag = &osd_fid2oi(dev, fid)->oi_dir.od_container;
378         ipd = osd_idx_ipd_get(info->oti_env, bag);
379         if (unlikely(ipd == NULL)) {
380                 ldiskfs_journal_stop(jh);
381                 CERROR("%.16s: fail to get ipd for scrub store\n",
382                         LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name);
383                 GOTO(out, rc = -ENOMEM);
384         }
385
386         if (ops == DTO_INDEX_UPDATE)
387                 rc = iam_update(jh, bag, (const struct iam_key *)oi_fid,
388                                 (struct iam_rec *)oi_id, ipd);
389         else
390                 rc = iam_insert(jh, bag, (const struct iam_key *)oi_fid,
391                                 (struct iam_rec *)oi_id, ipd);
392         osd_ipd_put(info->oti_env, bag, ipd);
393         ldiskfs_journal_stop(jh);
394         if (rc == 0) {
395                 if (scrub->os_in_prior)
396                         sf->sf_items_updated_prior++;
397                 else
398                         sf->sf_items_updated++;
399         }
400
401         GOTO(out, rc);
402
403 out:
404         if (rc != 0) {
405                 sf->sf_items_failed++;
406                 if (sf->sf_pos_first_inconsistent == 0 ||
407                     sf->sf_pos_first_inconsistent > lid->oii_ino)
408                         sf->sf_pos_first_inconsistent = lid->oii_ino;
409         }
410
411         if (ops == DTO_INDEX_INSERT) {
412                 cfs_mutex_unlock(&inode->i_mutex);
413                 iput(inode);
414         }
415         cfs_up_write(&scrub->os_rwsem);
416
417         if (oii != NULL) {
418                 LASSERT(!cfs_list_empty(&oii->oii_list));
419
420                 cfs_spin_lock(&scrub->os_lock);
421                 cfs_list_del_init(&oii->oii_list);
422                 cfs_spin_unlock(&scrub->os_lock);
423                 OBD_FREE_PTR(oii);
424         }
425         RETURN(sf->sf_param & SP_FAILOUT ? rc : 0);
426 }
427
428 static int do_osd_scrub_checkpoint(struct osd_scrub *scrub)
429 {
430         struct scrub_file *sf = &scrub->os_file;
431         int                rc;
432         ENTRY;
433
434         cfs_down_write(&scrub->os_rwsem);
435         sf->sf_items_checked += scrub->os_new_checked;
436         scrub->os_new_checked = 0;
437         sf->sf_pos_last_checkpoint = scrub->os_pos_current;
438         sf->sf_time_last_checkpoint = cfs_time_current_sec();
439         sf->sf_run_time += cfs_duration_sec(cfs_time_current() + HALF_SEC -
440                                             scrub->os_time_last_checkpoint);
441         rc = osd_scrub_file_store(scrub);
442         cfs_up_write(&scrub->os_rwsem);
443
444         RETURN(rc);
445 }
446
447 static inline int osd_scrub_checkpoint(struct osd_scrub *scrub)
448 {
449         if (unlikely(cfs_time_beforeq(scrub->os_time_next_checkpoint,
450                                       cfs_time_current()) &&
451                      scrub->os_new_checked > 0))
452                 return do_osd_scrub_checkpoint(scrub);
453         return 0;
454 }
455
456 static void osd_scrub_post(struct osd_scrub *scrub, int result)
457 {
458         struct scrub_file *sf = &scrub->os_file;
459         ENTRY;
460
461         cfs_down_write(&scrub->os_rwsem);
462         cfs_spin_lock(&scrub->os_lock);
463         thread_set_flags(&scrub->os_thread, SVC_STOPPING);
464         cfs_spin_unlock(&scrub->os_lock);
465         if (scrub->os_new_checked > 0) {
466                 sf->sf_items_checked += scrub->os_new_checked;
467                 scrub->os_new_checked = 0;
468                 sf->sf_pos_last_checkpoint = scrub->os_pos_current;
469         }
470         sf->sf_time_last_checkpoint = cfs_time_current_sec();
471         if (result > 0) {
472                 sf->sf_status = SS_COMPLETED;
473                 memset(sf->sf_oi_bitmap, 0, SCRUB_OI_BITMAP_SIZE);
474                 sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT | SF_AUTO);
475                 sf->sf_time_last_complete = sf->sf_time_last_checkpoint;
476                 sf->sf_success_count++;
477         } else if (result == 0) {
478                 sf->sf_status = SS_PAUSED;
479         } else {
480                 sf->sf_status = SS_FAILED;
481         }
482         sf->sf_run_time += cfs_duration_sec(cfs_time_current() + HALF_SEC -
483                                             scrub->os_time_last_checkpoint);
484         result = osd_scrub_file_store(scrub);
485         if (result < 0)
486                 CERROR("%.16s: fail to osd_scrub_post, rc = %d\n",
487                        LDISKFS_SB(osd_scrub2sb(scrub))->s_es->s_volume_name,
488                        result);
489         cfs_up_write(&scrub->os_rwsem);
490
491         EXIT;
492 }
493
494 #define SCRUB_NEXT_BREAK        1 /* exit current loop and process next group */
495 #define SCRUB_NEXT_CONTINUE     2 /* skip current object and process next bit */
496 #define SCRUB_NEXT_EXIT         3 /* exit all the loops */
497 #define SCRUB_NEXT_WAIT         4 /* wait for free cache slot */
498
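/*
 * Editor's note: the structure and callbacks below form the common inode
 * table iteration framework shared by the OI scrub thread and the otable
 * iterator preload (see osd_inode_iteration()).  The field meanings are
 * inferred from their use in that function:
 *
 *	sb     - the ldiskfs super block being scanned
 *	bitmap - the inode bitmap buffer of the current block group
 *	bg     - the current block group number
 *	gbase  - the inode number of the first inode in the group
 *	offset - the current offset within the group's inode bitmap
 *
 * A "next" policy locates the next candidate inode and returns 0, one of the
 * SCRUB_NEXT_* codes above, or a negative error; an "exec" policy consumes
 * the located inode and decides whether the scan must wait for a free cache
 * slot before continuing.
 */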
499 struct osd_iit_param {
500         struct super_block *sb;
501         struct buffer_head *bitmap;
502         ldiskfs_group_t bg;
503         __u32 gbase;
504         __u32 offset;
505 };
506
507 typedef int (*osd_iit_next_policy)(struct osd_thread_info *info,
508                                    struct osd_device *dev,
509                                    struct osd_iit_param *param,
510                                    struct osd_idmap_cache **oic,
511                                    int noslot);
512
513 typedef int (*osd_iit_exec_policy)(struct osd_thread_info *info,
514                                    struct osd_device *dev,
515                                    struct osd_iit_param *param,
516                                    struct osd_idmap_cache *oic,
517                                    int *noslot, int rc);
518
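/*
 * Editor's note: when the scrub is not running in full-speed mode it
 * throttles itself so that os_pos_current stays within SCRUB_WINDOW_SIZE
 * inodes of the otable iterator's preload position; osd_scrub_exec() waits
 * on this window, and osd_otable_it_preload() wakes the scrub thread again
 * once the consumer has caught up.
 */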
519 static inline int osd_scrub_has_window(struct osd_scrub *scrub,
520                                        struct osd_otable_cache *ooc)
521 {
522         return scrub->os_pos_current < ooc->ooc_pos_preload + SCRUB_WINDOW_SIZE;
523 }
524
525 static int osd_iit_next(struct osd_iit_param *param, __u32 *pos)
526 {
527         param->offset = ldiskfs_find_next_bit(param->bitmap->b_data,
528                         LDISKFS_INODES_PER_GROUP(param->sb), param->offset);
529         if (param->offset >= LDISKFS_INODES_PER_GROUP(param->sb)) {
530                 *pos = 1 + (param->bg+1) * LDISKFS_INODES_PER_GROUP(param->sb);
531                 return SCRUB_NEXT_BREAK;
532         } else {
533                 *pos = param->gbase + param->offset;
534                 return 0;
535         }
536 }
537
538 static int osd_iit_iget(struct osd_thread_info *info, struct osd_device *dev,
539                         struct lu_fid *fid, struct osd_inode_id *lid, __u32 pos,
540                         struct super_block *sb, struct inode **pinode)
541 {
542         struct inode *inode;
543         int           rc;
544
545         osd_id_gen(lid, pos, OSD_OII_NOGEN);
546         inode = osd_iget_fid(info, dev, lid, fid);
547         if (IS_ERR(inode)) {
548                 rc = PTR_ERR(inode);
549                 /* The inode may have been removed after the bitmap search, or
550                  * the file is newly created and its inode is not initialized yet. */
551                 if (rc == -ENOENT || rc == -ESTALE)
552                         return SCRUB_NEXT_CONTINUE;
553
554                 CERROR("%.16s: fail to read inode, ino# = %u, rc = %d\n",
555                        LDISKFS_SB(sb)->s_es->s_volume_name, pos, rc);
556                 return rc;
557         }
558
559         *pinode = inode;
560         return 0;
561 }
562
563 static int osd_scrub_next(struct osd_thread_info *info, struct osd_device *dev,
564                           struct osd_iit_param *param,
565                           struct osd_idmap_cache **oic, int noslot)
566 {
567         struct osd_scrub     *scrub  = &dev->od_scrub;
568         struct ptlrpc_thread *thread = &scrub->os_thread;
569         struct lu_fid        *fid;
570         struct osd_inode_id  *lid;
571         struct inode         *inode;
572         int                   rc;
573
574         if (unlikely(!thread_is_running(thread)))
575                 return SCRUB_NEXT_EXIT;
576
577         if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
578                 struct osd_inconsistent_item *oii;
579
580                 oii = cfs_list_entry(scrub->os_inconsistent_items.next,
581                                      struct osd_inconsistent_item, oii_list);
582                 *oic = &oii->oii_cache;
583                 scrub->os_in_prior = 1;
584                 return 0;
585         }
586
587         if (noslot != 0)
588                 return SCRUB_NEXT_WAIT;
589
590         rc = osd_iit_next(param, &scrub->os_pos_current);
591         if (rc != 0)
592                 return rc;
593
594         *oic = &scrub->os_oic;
595         fid = &(*oic)->oic_fid;
596         lid = &(*oic)->oic_lid;
597         rc = osd_iit_iget(info, dev, fid, lid,
598                           scrub->os_pos_current, param->sb, &inode);
599         if (rc != 0)
600                 return rc;
601
602         if (!fid_is_norm(fid) || inode->i_state & I_LUSTRE_NOSCRUB)
603                 rc = SCRUB_NEXT_CONTINUE;
604         iput(inode);
605         return rc;
606 }
607
608 static int osd_preload_next(struct osd_thread_info *info,
609                             struct osd_device *dev, struct osd_iit_param *param,
610                             struct osd_idmap_cache **oic, int noslot)
611 {
612         struct osd_otable_cache *ooc    = &dev->od_otable_it->ooi_cache;
613         struct osd_scrub        *scrub;
614         struct ptlrpc_thread    *thread;
615         struct inode            *inode;
616         int                      rc;
617
618         rc = osd_iit_next(param, &ooc->ooc_pos_preload);
619         if (rc != 0)
620                 return rc;
621
622         scrub = &dev->od_scrub;
623         thread = &scrub->os_thread;
624         if (thread_is_running(thread) &&
625             ooc->ooc_pos_preload >= scrub->os_pos_current)
626                 return SCRUB_NEXT_EXIT;
627
628         rc = osd_iit_iget(info, dev,
629                           &ooc->ooc_cache[ooc->ooc_producer_idx].oic_fid,
630                           &ooc->ooc_cache[ooc->ooc_producer_idx].oic_lid,
631                           ooc->ooc_pos_preload, param->sb, &inode);
632         /* Move forward whether the iget succeeded or not: the up-layer LFSCK
633          * may ignore the failure, so this inode must be skipped next time. */
634         ooc->ooc_pos_preload = param->gbase + ++(param->offset);
635         if (rc == 0)
636                 iput(inode);
637         return rc;
638 }
639
640 static int osd_scrub_exec(struct osd_thread_info *info, struct osd_device *dev,
641                           struct osd_iit_param *param,
642                           struct osd_idmap_cache *oic, int *noslot, int rc)
643 {
644         struct l_wait_info       lwi    = { 0 };
645         struct osd_scrub        *scrub  = &dev->od_scrub;
646         struct ptlrpc_thread    *thread = &scrub->os_thread;
647         struct osd_otable_it    *it     = dev->od_otable_it;
648         struct osd_otable_cache *ooc    = it ? &it->ooi_cache : NULL;
649
650         switch (rc) {
651         case SCRUB_NEXT_CONTINUE:
652                 goto next;
653         case SCRUB_NEXT_WAIT:
654                 goto wait;
655         }
656
657         LASSERTF(rc <= 0, "unexpected rc = %d\n", rc);
658
659         if (rc != 0)
660                 rc = osd_scrub_error(dev, &oic->oic_lid, rc);
661         else
662                 rc = osd_scrub_check_update(info, dev, oic);
663         if (rc != 0)
664                 return rc;
665
666         rc = osd_scrub_checkpoint(scrub);
667         if (rc != 0) {
668                 CERROR("%.16s: fail to checkpoint, pos = %u, rc = %d\n",
669                        LDISKFS_SB(param->sb)->s_es->s_volume_name,
670                        scrub->os_pos_current, rc);
671                 /* Continue, as long as the scrub itself can go ahead. */
672         }
673
674         if (scrub->os_in_prior) {
675                 scrub->os_in_prior = 0;
676                 return 0;
677         }
678
679 next:
680         scrub->os_pos_current = param->gbase + ++(param->offset);
681         if (it != NULL && it->ooi_waiting &&
682             ooc->ooc_pos_preload < scrub->os_pos_current) {
683                 it->ooi_waiting = 0;
684                 cfs_waitq_broadcast(&thread->t_ctl_waitq);
685         }
686
687         if (scrub->os_full_speed || rc == SCRUB_NEXT_CONTINUE)
688                 return 0;
689
690 wait:
691         if (osd_scrub_has_window(scrub, ooc)) {
692                 *noslot = 0;
693                 return 0;
694         }
695
696         scrub->os_waiting = 1;
697         l_wait_event(thread->t_ctl_waitq,
698                      osd_scrub_has_window(scrub, ooc) ||
699                      !cfs_list_empty(&scrub->os_inconsistent_items) ||
700                      !thread_is_running(thread),
701                      &lwi);
702         scrub->os_waiting = 0;
703
704         if (osd_scrub_has_window(scrub, ooc))
705                 *noslot = 0;
706         else
707                 *noslot = 1;
708         return 0;
709 }
710
711 static int osd_preload_exec(struct osd_thread_info *info,
712                             struct osd_device *dev, struct osd_iit_param *param,
713                             struct osd_idmap_cache *oic, int *noslot, int rc)
714 {
715         struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
716
717         if (rc == 0) {
718                 ooc->ooc_cached_items++;
719                 ooc->ooc_producer_idx = (ooc->ooc_producer_idx + 1) &
720                                         ~OSD_OTABLE_IT_CACHE_MASK;
721         }
722         return rc > 0 ? 0 : rc;
723 }
724
725 #define SCRUB_IT_ALL    1
726
727 static int osd_inode_iteration(struct osd_thread_info *info,
728                                struct osd_device *dev, __u32 max, int preload)
729 {
730         osd_iit_next_policy   next;
731         osd_iit_exec_policy   exec;
732         __u32                *pos;
733         __u32                *count;
734         struct osd_iit_param  param;
735         __u32                 limit;
736         int                   noslot = 0;
737         int                   rc;
738         ENTRY;
739
740         if (preload == 0) {
741                 struct osd_scrub *scrub = &dev->od_scrub;
742
743                 next = osd_scrub_next;
744                 exec = osd_scrub_exec;
745                 pos = &scrub->os_pos_current;
746                 count = &scrub->os_new_checked;
747         } else {
748                 struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
749
750                 next = osd_preload_next;
751                 exec = osd_preload_exec;
752                 pos = &ooc->ooc_pos_preload;
753                 count = &ooc->ooc_cached_items;
754         }
755         param.sb = osd_sb(dev);
756         limit = le32_to_cpu(LDISKFS_SB(param.sb)->s_es->s_inodes_count);
757
758         while (*pos <= limit && *count < max) {
759                 struct osd_idmap_cache *oic = NULL;
760
761                 param.bg = (*pos - 1) / LDISKFS_INODES_PER_GROUP(param.sb);
762                 param.offset = (*pos - 1) % LDISKFS_INODES_PER_GROUP(param.sb);
763                 param.gbase = 1 + param.bg * LDISKFS_INODES_PER_GROUP(param.sb);
764                 param.bitmap = ldiskfs_read_inode_bitmap(param.sb, param.bg);
765                 if (param.bitmap == NULL) {
766                         CERROR("%.16s: fail to read bitmap for %u, "
767                                "scrub will stop, urgent mode\n",
768                                LDISKFS_SB(param.sb)->s_es->s_volume_name,
769                                (__u32)param.bg);
770                         RETURN(-EIO);
771                 }
772
773                 while (param.offset < LDISKFS_INODES_PER_GROUP(param.sb) &&
774                        *count < max) {
775                         rc = next(info, dev, &param, &oic, noslot);
776                         switch (rc) {
777                         case SCRUB_NEXT_BREAK:
778                                 goto next_group;
779                         case SCRUB_NEXT_EXIT:
780                                 brelse(param.bitmap);
781                                 RETURN(0);
782                         }
783
784                         rc = exec(info, dev, &param, oic, &noslot, rc);
785                         if (rc != 0) {
786                                 brelse(param.bitmap);
787                                 RETURN(rc);
788                         }
789                 }
790
791 next_group:
792                 brelse(param.bitmap);
793         }
794
795         if (*pos > limit)
796                 RETURN(SCRUB_IT_ALL);
797         RETURN(0);
798 }
799
800 static int osd_scrub_main(void *args)
801 {
802         struct lu_env         env;
803         struct osd_device    *dev    = (struct osd_device *)args;
804         struct osd_scrub     *scrub  = &dev->od_scrub;
805         struct ptlrpc_thread *thread = &scrub->os_thread;
806         struct super_block   *sb     = osd_sb(dev);
807         int                   rc;
808         ENTRY;
809
810         cfs_daemonize("OI_scrub");
811         rc = lu_env_init(&env, LCT_DT_THREAD);
812         if (rc != 0) {
813                 CERROR("%.16s: OI scrub, fail to init env, rc = %d\n",
814                        LDISKFS_SB(sb)->s_es->s_volume_name, rc);
815                 GOTO(noenv, rc);
816         }
817
818         rc = osd_scrub_prep(dev);
819         if (rc != 0) {
820                 CERROR("%.16s: OI scrub, fail to scrub prep, rc = %d\n",
821                        LDISKFS_SB(sb)->s_es->s_volume_name, rc);
822                 GOTO(out, rc);
823         }
824
825         if (!scrub->os_full_speed) {
826                 struct l_wait_info lwi = { 0 };
827                 struct osd_otable_it *it = dev->od_otable_it;
828                 struct osd_otable_cache *ooc = &it->ooi_cache;
829
830                 l_wait_event(thread->t_ctl_waitq,
831                              it->ooi_user_ready || !thread_is_running(thread),
832                              &lwi);
833                 if (unlikely(!thread_is_running(thread)))
834                         GOTO(post, rc = 0);
835
836                 LASSERT(scrub->os_pos_current >= ooc->ooc_pos_preload);
837                 scrub->os_pos_current = ooc->ooc_pos_preload;
838         }
839
840         CDEBUG(D_LFSCK, "OI scrub: flags = 0x%x, pos = %u\n",
841                scrub->os_start_flags, scrub->os_pos_current);
842
843         rc = osd_inode_iteration(osd_oti_get(&env), dev, ~0U, 0);
844         GOTO(post, rc);
845
846 post:
847         osd_scrub_post(scrub, rc);
848         CDEBUG(D_LFSCK, "OI scrub: stop, rc = %d, pos = %u\n",
849                rc, scrub->os_pos_current);
850
851 out:
852         while (!cfs_list_empty(&scrub->os_inconsistent_items)) {
853                 struct osd_inconsistent_item *oii;
854
855                 oii = cfs_list_entry(scrub->os_inconsistent_items.next,
856                                      struct osd_inconsistent_item, oii_list);
857                 cfs_list_del_init(&oii->oii_list);
858                 OBD_FREE_PTR(oii);
859         }
860         lu_env_fini(&env);
861
862 noenv:
863         cfs_spin_lock(&scrub->os_lock);
864         thread_set_flags(thread, SVC_STOPPED);
865         cfs_waitq_broadcast(&thread->t_ctl_waitq);
866         cfs_spin_unlock(&scrub->os_lock);
867         return rc;
868 }
869
870 static int do_osd_scrub_start(struct osd_device *dev, __u32 flags)
871 {
872         struct osd_scrub     *scrub  = &dev->od_scrub;
873         struct ptlrpc_thread *thread = &scrub->os_thread;
874         struct l_wait_info    lwi    = { 0 };
875         int                   rc;
876         ENTRY;
877
878 again:
879         /* os_lock: sync status between stop and scrub thread */
880         cfs_spin_lock(&scrub->os_lock);
881         if (thread_is_running(thread)) {
882                 cfs_spin_unlock(&scrub->os_lock);
883                 RETURN(-EALREADY);
884         } else if (unlikely(thread_is_stopping(thread))) {
885                 cfs_spin_unlock(&scrub->os_lock);
886                 l_wait_event(thread->t_ctl_waitq,
887                              thread_is_stopped(thread),
888                              &lwi);
889                 goto again;
890         }
891         cfs_spin_unlock(&scrub->os_lock);
892
893         scrub->os_start_flags = flags;
894         thread_set_flags(thread, 0);
895         rc = cfs_create_thread(osd_scrub_main, dev, 0);
896         if (rc < 0) {
897                 CERROR("%.16s: cannot start iteration thread, rc = %d\n",
898                        LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, rc);
899                 RETURN(rc);
900         }
901
902         l_wait_event(thread->t_ctl_waitq,
903                      thread_is_running(thread) || thread_is_stopped(thread),
904                      &lwi);
905
906         RETURN(0);
907 }
908
909 int osd_scrub_start(struct osd_device *dev)
910 {
911         __u32 flags = SS_AUTO;
912         int   rc;
913         ENTRY;
914
915         if (dev->od_scrub.os_file.sf_status == SS_COMPLETED)
916                 flags |= SS_RESET;
917
918         /* od_otable_mutex: prevent concurrent start/stop */
919         cfs_mutex_lock(&dev->od_otable_mutex);
920         rc = do_osd_scrub_start(dev, flags);
921         cfs_mutex_unlock(&dev->od_otable_mutex);
922
923         RETURN(rc == -EALREADY ? 0 : rc);
924 }
925
926 static void do_osd_scrub_stop(struct osd_scrub *scrub)
927 {
928         struct ptlrpc_thread *thread = &scrub->os_thread;
929         struct l_wait_info    lwi    = { 0 };
930
931         /* os_lock: sync status between stop and scrub thread */
932         cfs_spin_lock(&scrub->os_lock);
933         if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
934                 thread_set_flags(thread, SVC_STOPPING);
935                 cfs_spin_unlock(&scrub->os_lock);
936                 cfs_waitq_broadcast(&thread->t_ctl_waitq);
937                 l_wait_event(thread->t_ctl_waitq,
938                              thread_is_stopped(thread),
939                              &lwi);
940                 /* Do not skip the last lock/unlock cycle; it guarantees that
941                  * the caller cannot return until the OI scrub thread exits. */
942                 cfs_spin_lock(&scrub->os_lock);
943         }
944         cfs_spin_unlock(&scrub->os_lock);
945 }
946
947 static void osd_scrub_stop(struct osd_device *dev)
948 {
949         /* od_otable_mutex: prevent concurrent start/stop */
950         cfs_mutex_lock(&dev->od_otable_mutex);
951         do_osd_scrub_stop(&dev->od_scrub);
952         cfs_mutex_unlock(&dev->od_otable_mutex);
953 }
954
955 static const char osd_scrub_name[] = "OI_scrub";
956
957 int osd_scrub_setup(const struct lu_env *env, struct osd_device *dev)
958 {
959         struct osd_thread_info     *info   = osd_oti_get(env);
960         struct osd_scrub           *scrub  = &dev->od_scrub;
961         struct lvfs_run_ctxt       *ctxt   = &scrub->os_ctxt;
962         struct scrub_file          *sf     = &scrub->os_file;
963         struct osd_inode_id        *id     = &scrub->os_oic.oic_lid;
964         struct super_block         *sb     = osd_sb(dev);
965         struct ldiskfs_super_block *es     = LDISKFS_SB(sb)->s_es;
966         struct inode               *inode;
967         struct lvfs_run_ctxt        saved;
968         struct file                *filp;
969         int                         dirty  = 0;
970         int                         init   = 0;
971         int                         rc     = 0;
972         ENTRY;
973
974         OBD_SET_CTXT_MAGIC(ctxt);
975         ctxt->pwdmnt = dev->od_mnt;
976         ctxt->pwd = dev->od_mnt->mnt_root;
977         ctxt->fs = get_ds();
978
979         cfs_waitq_init(&scrub->os_thread.t_ctl_waitq);
980         cfs_init_rwsem(&scrub->os_rwsem);
981         cfs_spin_lock_init(&scrub->os_lock);
982         CFS_INIT_LIST_HEAD(&scrub->os_inconsistent_items);
983         if (get_mount_flags(dev->od_mount->lmi_sb) & LMD_FLG_NOSCRUB)
984                 scrub->os_no_scrub = 1;
985
986         push_ctxt(&saved, ctxt, NULL);
987         filp = filp_open(osd_scrub_name, O_RDWR | O_CREAT, 0644);
988         if (IS_ERR(filp))
989                 RETURN(PTR_ERR(filp));
990
991         scrub->os_inode = igrab(filp->f_dentry->d_inode);
992         filp_close(filp, 0);
993         pop_ctxt(&saved, ctxt, NULL);
994
995         rc = osd_scrub_file_load(scrub);
996         if (rc == -ENOENT) {
997                 osd_scrub_file_init(scrub, es->s_uuid);
998                 dirty = 1;
999                 init = 1;
1000         } else if (rc != 0) {
1001                 RETURN(rc);
1002         } else {
1003                 if (memcmp(sf->sf_uuid, es->s_uuid, 16) != 0) {
1004                         osd_scrub_file_reset(scrub, es->s_uuid,SF_INCONSISTENT);
1005                         dirty = 1;
1006                 } else if (sf->sf_status == SS_SCANNING) {
1007                         sf->sf_status = SS_CRASHED;
1008                         dirty = 1;
1009                 }
1010         }
1011
1012         if (sf->sf_pos_last_checkpoint != 0)
1013                 scrub->os_pos_current = sf->sf_pos_last_checkpoint + 1;
1014         else
1015                 scrub->os_pos_current = LDISKFS_FIRST_INO(sb);
1016
1017         if (dirty != 0) {
1018                 rc = osd_scrub_file_store(scrub);
1019                 if (rc != 0)
1020                         RETURN(rc);
1021         }
1022
1023         /* Initialize OI files. */
1024         rc = osd_oi_init(info, dev);
1025         if (rc < 0)
1026                 RETURN(rc);
1027
1028         if (init != 0) {
1029                 rc = __osd_oi_lookup(info, dev, &LU_DOT_LUSTRE_FID, id);
1030                 if (rc == 0) {
1031                         inode = osd_iget(info, dev, id);
1032                         if (IS_ERR(inode)) {
1033                                 rc = PTR_ERR(inode);
1034                                 /* It was restored from an old 2.x backup. */
1035                                 if (rc == -ENOENT || rc == -ESTALE) {
1036                                         osd_scrub_file_reset(scrub, es->s_uuid,
1037                                                              SF_INCONSISTENT);
1038                                         rc = osd_scrub_file_store(scrub);
1039                                 }
1040                         } else {
1041                                 iput(inode);
1042                         }
1043                 } else if (rc == -ENOENT) {
1044                         rc = 0;
1045                 }
1046         }
1047
1048         if (rc == 0 && !scrub->os_no_scrub &&
1049             ((sf->sf_status == SS_CRASHED &&
1050               sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT | SF_AUTO)) ||
1051              (sf->sf_status == SS_INIT &&
1052               sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT))))
1053                 rc = osd_scrub_start(dev);
1054
1055         RETURN(rc);
1056 }
1057
1058 void osd_scrub_cleanup(const struct lu_env *env, struct osd_device *dev)
1059 {
1060         struct osd_scrub *scrub = &dev->od_scrub;
1061
1062         LASSERT(dev->od_otable_it == NULL);
1063
1064         if (scrub->os_inode != NULL) {
1065                 osd_scrub_stop(dev);
1066                 iput(scrub->os_inode);
1067                 scrub->os_inode = NULL;
1068         }
1069         if (dev->od_oi_table != NULL)
1070                 osd_oi_fini(osd_oti_get(env), dev);
1071 }
1072
1073 static struct dt_it *osd_otable_it_init(const struct lu_env *env,
1074                                        struct dt_object *dt, __u32 attr,
1075                                        struct lustre_capa *capa)
1076 {
1077         enum dt_otable_it_flags flags = attr >> DT_OTABLE_IT_FLAGS_SHIFT;
1078         enum dt_otable_it_valid valid = attr & ~DT_OTABLE_IT_FLAGS_MASK;
1079         struct osd_device      *dev   = osd_dev(dt->do_lu.lo_dev);
1080         struct osd_scrub       *scrub = &dev->od_scrub;
1081         struct osd_otable_it   *it;
1082         __u32                   start = 0;
1083         int                     rc;
1084         ENTRY;
1085
1086         /* od_otable_mutex: prevent concurrent init/fini */
1087         cfs_mutex_lock(&dev->od_otable_mutex);
1088         if (dev->od_otable_it != NULL)
1089                 GOTO(out, it = ERR_PTR(-EALREADY));
1090
1091         OBD_ALLOC_PTR(it);
1092         if (it == NULL)
1093                 GOTO(out, it = ERR_PTR(-ENOMEM));
1094
1095         dev->od_otable_it = it;
1096         it->ooi_dev = dev;
1097         it->ooi_cache.ooc_consumer_idx = -1;
1098         if (flags & DOIF_OUTUSED)
1099                 it->ooi_used_outside = 1;
1100
1101         if (flags & DOIF_RESET)
1102                 start |= SS_RESET;
1103
1104         if (valid & DOIV_ERROR_HANDLE) {
1105                 if (flags & DOIF_FAILOUT)
1106                         start |= SS_SET_FAILOUT;
1107                 else
1108                         start |= SS_CLEAR_FAILOUT;
1109         }
1110
1111         rc = do_osd_scrub_start(dev, start);
1112         if (rc == -EALREADY) {
1113                 it->ooi_cache.ooc_pos_preload = scrub->os_pos_current - 1;
1114         } else if (rc < 0) {
1115                 dev->od_otable_it = NULL;
1116                 OBD_FREE_PTR(it);
1117                 GOTO(out, it = ERR_PTR(rc));
1118         } else {
1119                 it->ooi_cache.ooc_pos_preload = scrub->os_pos_current;
1120         }
1121
1122         GOTO(out, it);
1123
1124 out:
1125         cfs_mutex_unlock(&dev->od_otable_mutex);
1126         return (struct dt_it *)it;
1127 }
1128
1129 static void osd_otable_it_fini(const struct lu_env *env, struct dt_it *di)
1130 {
1131         struct osd_otable_it *it  = (struct osd_otable_it *)di;
1132         struct osd_device    *dev = it->ooi_dev;
1133
1134         /* od_otable_mutex: prevent concurrent init/fini */
1135         cfs_mutex_lock(&dev->od_otable_mutex);
1136         do_osd_scrub_stop(&dev->od_scrub);
1137         LASSERT(dev->od_otable_it == it);
1138
1139         dev->od_otable_it = NULL;
1140         cfs_mutex_unlock(&dev->od_otable_mutex);
1141         OBD_FREE_PTR(it);
1142 }
1143
1144 /**
1145  * Set the OSD layer iteration start position as the specified key.
1146  *
1147  * The LFSCK layer above the OSD does not know the format of the key, so when
1148  * several keys are available it cannot compare them itself; instead it calls
1149  * "::get()" for each key and the OSD selects the smallest one by itself.
1150  */
1151 static int osd_otable_it_get(const struct lu_env *env,
1152                              struct dt_it *di, const struct dt_key *key)
1153 {
1154         struct osd_otable_it    *it  = (struct osd_otable_it *)di;
1155         struct osd_otable_cache *ooc = &it->ooi_cache;
1156         const char              *str = (const char *)key;
1157         __u32                    ino;
1158         ENTRY;
1159
1160         /* Forbid setting the iteration position once the iteration has started. */
1161         if (it->ooi_user_ready)
1162                 RETURN(-EPERM);
1163
1164         if (str[0] == '\0')
1165                 RETURN(-EINVAL);
1166
1167         if (sscanf(str, "%u", &ino) <= 0)
1168                 RETURN(-EINVAL);
1169
1170         /* Skip the one that has been processed last time. */
1171         if (ooc->ooc_pos_preload > ++ino)
1172                 ooc->ooc_pos_preload = ino;
1173
1174         RETURN(0);
1175 }
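/*
 * Editor's note -- illustrative example.  The key is simply the decimal
 * string of an inode number (the same form osd_otable_it_key() produces), so
 * an assumed caller that wants to resume after inode 12345 might do:
 *
 *	rc = ops->dio_it.get(env, di, (const struct dt_key *)"12345");
 *
 * get() parses the number, increments it to skip the already-processed
 * inode, and lowers ooc_pos_preload to 12346 only if the current preload
 * position is beyond it, i.e. the smallest key passed in wins.
 */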
1176
1177 static int osd_otable_it_preload(const struct lu_env *env,
1178                                  struct osd_otable_it *it)
1179 {
1180         struct osd_device       *dev   = it->ooi_dev;
1181         struct osd_scrub        *scrub = &dev->od_scrub;
1182         struct osd_otable_cache *ooc   = &it->ooi_cache;
1183         int                      rc;
1184         ENTRY;
1185
1186         rc = osd_inode_iteration(osd_oti_get(env), dev,
1187                                  OSD_OTABLE_IT_CACHE_SIZE, 1);
1188         if (rc == SCRUB_IT_ALL)
1189                 it->ooi_all_cached = 1;
1190
1191         CDEBUG(D_LFSCK, "OSD pre-loaded: max = %u, preload = %u, rc = %d\n",
1192                le32_to_cpu(LDISKFS_SB(osd_sb(dev))->s_es->s_inodes_count),
1193                ooc->ooc_pos_preload, rc);
1194
1195         if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
1196                 scrub->os_waiting = 0;
1197                 cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
1198         }
1199
1200         RETURN(rc < 0 ? rc : ooc->ooc_cached_items);
1201 }
1202
1203 static int osd_otable_it_next(const struct lu_env *env, struct dt_it *di)
1204 {
1205         struct osd_otable_it    *it     = (struct osd_otable_it *)di;
1206         struct osd_device       *dev    = it->ooi_dev;
1207         struct osd_scrub        *scrub  = &dev->od_scrub;
1208         struct osd_otable_cache *ooc    = &it->ooi_cache;
1209         struct ptlrpc_thread    *thread = &scrub->os_thread;
1210         struct l_wait_info       lwi    = { 0 };
1211         int                      rc;
1212         ENTRY;
1213
1214         LASSERT(it->ooi_user_ready);
1215
1216 again:
1217         if (!thread_is_running(thread) && !it->ooi_used_outside)
1218                 RETURN(1);
1219
1220         if (ooc->ooc_cached_items > 0) {
1221                 ooc->ooc_cached_items--;
1222                 ooc->ooc_consumer_idx = (ooc->ooc_consumer_idx + 1) &
1223                                         ~OSD_OTABLE_IT_CACHE_MASK;
1224                 RETURN(0);
1225         }
1226
1227         if (it->ooi_all_cached) {
1228                 l_wait_event(thread->t_ctl_waitq,
1229                              !thread_is_running(thread),
1230                              &lwi);
1231                 RETURN(1);
1232         }
1233
1234         it->ooi_waiting = 1;
1235         l_wait_event(thread->t_ctl_waitq,
1236                      ooc->ooc_pos_preload < scrub->os_pos_current ||
1237                      !thread_is_running(thread),
1238                      &lwi);
1239         it->ooi_waiting = 0;
1240
1241         if (!thread_is_running(thread) && !it->ooi_used_outside)
1242                 RETURN(1);
1243
1244         rc = osd_otable_it_preload(env, it);
1245         if (rc >= 0)
1246                 goto again;
1247
1248         RETURN(rc);
1249 }
1250
1251 static struct dt_key *osd_otable_it_key(const struct lu_env *env,
1252                                         const struct dt_it *di)
1253 {
1254         struct osd_otable_it    *it  = (struct osd_otable_it *)di;
1255         struct osd_otable_cache *ooc = &it->ooi_cache;
1256
1257         sprintf(it->ooi_key, "%u",
1258                 ooc->ooc_cache[ooc->ooc_consumer_idx].oic_lid.oii_ino);
1259         return (struct dt_key *)it->ooi_key;
1260 }
1261
1262 static int osd_otable_it_key_size(const struct lu_env *env,
1263                                   const struct dt_it *di)
1264 {
1265         return sizeof(((struct osd_otable_it *)di)->ooi_key);
1266 }
1267
1268 static int osd_otable_it_rec(const struct lu_env *env, const struct dt_it *di,
1269                              struct dt_rec *rec, __u32 attr)
1270 {
1271         struct osd_otable_it    *it  = (struct osd_otable_it *)di;
1272         struct osd_otable_cache *ooc = &it->ooi_cache;
1273
1274         *(struct lu_fid *)rec = ooc->ooc_cache[ooc->ooc_consumer_idx].oic_fid;
1275         return 0;
1276 }
1277
1278 static int osd_otable_it_load(const struct lu_env *env,
1279                               const struct dt_it *di, __u64 hash)
1280 {
1281         struct osd_otable_it    *it    = (struct osd_otable_it *)di;
1282         struct osd_device       *dev   = it->ooi_dev;
1283         struct osd_otable_cache *ooc   = &it->ooi_cache;
1284         struct osd_scrub        *scrub = &dev->od_scrub;
1285
1286         if (it->ooi_user_ready)
1287                 return 0;
1288
1289         if (ooc->ooc_pos_preload < LDISKFS_FIRST_INO(osd_sb(dev)))
1290                 ooc->ooc_pos_preload = LDISKFS_FIRST_INO(osd_sb(dev));
1291         it->ooi_user_ready = 1;
1292         if (!scrub->os_full_speed)
1293                 cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
1294
1295         /* Unplug OSD layer iteration by the first next() call. */
1296         return osd_otable_it_next(env, (struct dt_it *)it);
1297 }
1298
1299 const struct dt_index_operations osd_otable_ops = {
1300         .dio_it = {
1301                 .init     = osd_otable_it_init,
1302                 .fini     = osd_otable_it_fini,
1303                 .get      = osd_otable_it_get,
1304                 .next     = osd_otable_it_next,
1305                 .key      = osd_otable_it_key,
1306                 .key_size = osd_otable_it_key_size,
1307                 .rec      = osd_otable_it_rec,
1308                 .load     = osd_otable_it_load,
1309         }
1310 };
1311
1312 int osd_oii_insert(struct osd_device *dev, struct osd_idmap_cache *oic,
1313                    int insert)
1314 {
1315         struct osd_inconsistent_item *oii;
1316         struct osd_scrub             *scrub  = &dev->od_scrub;
1317         struct ptlrpc_thread         *thread = &scrub->os_thread;
1318         int                           wakeup = 0;
1319         ENTRY;
1320
1321         OBD_ALLOC_PTR(oii);
1322         if (unlikely(oii == NULL))
1323                 RETURN(-ENOMEM);
1324
1325         CFS_INIT_LIST_HEAD(&oii->oii_list);
1326         oii->oii_cache = *oic;
1327         oii->oii_insert = insert;
1328
1329         cfs_spin_lock(&scrub->os_lock);
1330         if (unlikely(!thread_is_running(thread))) {
1331                 cfs_spin_unlock(&scrub->os_lock);
1332                 OBD_FREE_PTR(oii);
1333                 RETURN(-EAGAIN);
1334         }
1335
1336         if (cfs_list_empty(&scrub->os_inconsistent_items))
1337                 wakeup = 1;
1338         cfs_list_add_tail(&oii->oii_list, &scrub->os_inconsistent_items);
1339         cfs_spin_unlock(&scrub->os_lock);
1340
1341         if (wakeup != 0)
1342                 cfs_waitq_broadcast(&thread->t_ctl_waitq);
1343
1344         RETURN(0);
1345 }
1346
1347 int osd_oii_lookup(struct osd_device *dev, const struct lu_fid *fid,
1348                    struct osd_inode_id *id)
1349 {
1350         struct osd_scrub             *scrub = &dev->od_scrub;
1351         struct osd_inconsistent_item *oii;
1352         ENTRY;
1353
1354         cfs_spin_lock(&scrub->os_lock);
1355         cfs_list_for_each_entry(oii, &scrub->os_inconsistent_items, oii_list) {
1356                 if (lu_fid_eq(fid, &oii->oii_cache.oic_fid)) {
1357                         *id = oii->oii_cache.oic_lid;
1358                         cfs_spin_unlock(&scrub->os_lock);
1359                         RETURN(0);
1360                 }
1361         }
1362         cfs_spin_unlock(&scrub->os_lock);
1363
1364         RETURN(-ENOENT);
1365 }