1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License version 2 for more details.  A copy is
14  * included in the COPYING file that accompanied this code.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2017, Intel Corporation.
24  */
25 /*
26  * lustre/osd-zfs/osd_scrub.c
27  *
28  * Top-level entry points into osd module
29  *
30  * The OI scrub is used to rebuild the Object Index files when the MDT is
31  * restored from a file-level backup.
32  *
33  * The otable-based iterator scans ZFS objects to feed the upper-layer LFSCK.
34  *
35  * Author: Fan Yong <fan.yong@intel.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LFSCK
39
40 #include <linux/kthread.h>
41 #include <uapi/linux/lustre/lustre_idl.h>
42 #include <lustre_disk.h>
43 #include <dt_object.h>
44 #include <linux/xattr.h>
45 #include <lustre_scrub.h>
46 #include <obd_class.h>
47 #include <lustre_nodemap.h>
48 #include <sys/dsl_dataset.h>
49 #include <sys/zap_impl.h>
50 #include <sys/zap.h>
51 #include <sys/zap_leaf.h>
52
53 #include "osd_internal.h"
54
55 #define OSD_OTABLE_MAX_HASH             ((1ULL << 48) - 1)
56 #define OTABLE_PREFETCH                 256
57
58 static inline bool osd_scrub_has_window(struct osd_otable_it *it)
59 {
60         return it->ooi_prefetched < OTABLE_PREFETCH;
61 }
62
63 /**
64  * Update/insert/delete the specified OI mapping (@fid => @oid) according to @ops.
65  *
66  * \retval   1, changed nothing
67  * \retval   0, changed successfully
68  * \retval -ve, on error
69  */
70 int osd_scrub_refresh_mapping(const struct lu_env *env,
71                               struct osd_device *dev,
72                               const struct lu_fid *fid,
73                               uint64_t oid, enum dt_txn_op ops,
74                               bool force, const char *name)
75 {
76         struct osd_thread_info *info = osd_oti_get(env);
77         struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
78         char *buf = info->oti_str;
79         dmu_tx_t *tx = NULL;
80         dnode_t *dn = NULL;
81         uint64_t zapid;
82         int rc;
83         ENTRY;
84
85         if (dev->od_scrub.os_file.sf_param & SP_DRYRUN && !force)
86                 GOTO(log, rc = 0);
87
88         tx = dmu_tx_create(dev->od_os);
89         if (!tx)
90                 GOTO(log, rc = -ENOMEM);
91
92         zapid = osd_get_name_n_idx(env, dev, fid, buf,
93                                    sizeof(info->oti_str), &dn);
94         osd_tx_hold_zap(tx, zapid, dn,
95                         ops == DTO_INDEX_INSERT ? TRUE : FALSE, NULL);
96         rc = -dmu_tx_assign(tx, TXG_WAIT);
97         if (rc) {
98                 dmu_tx_abort(tx);
99                 GOTO(log, rc);
100         }
101
102         switch (ops) {
103         case DTO_INDEX_UPDATE:
104                 zde->zde_pad = 0;
105                 zde->zde_dnode = oid;
106                 zde->zde_type = 0; /* The type in OI mapping is useless. */
107                 rc = -zap_update(dev->od_os, zapid, buf, 8, sizeof(*zde) / 8,
108                                  zde, tx);
109                 if (unlikely(rc == -ENOENT)) {
110                         /* Some unlink thread may have removed the OI mapping. */
111                         rc = 1;
112                 }
113                 break;
114         case DTO_INDEX_INSERT:
115                 zde->zde_pad = 0;
116                 zde->zde_dnode = oid;
117                 zde->zde_type = 0; /* The type in OI mapping is useless. */
118                 rc = osd_zap_add(dev, zapid, dn, buf, 8, sizeof(*zde) / 8,
119                                  zde, tx);
120                 if (unlikely(rc == -EEXIST))
121                         rc = 1;
122                 break;
123         case DTO_INDEX_DELETE:
124                 rc = osd_zap_remove(dev, zapid, dn, buf, tx);
125                 if (rc == -ENOENT) {
126                         /* It is normal that the unlink thread has removed the
127                          * OI mapping already. */
128                         rc = 1;
129                 }
130                 break;
131         default:
132                 LASSERTF(0, "Unexpected ops %d\n", ops);
133                 rc = -EINVAL;
134                 break;
135         }
136
137         dmu_tx_commit(tx);
138         GOTO(log, rc);
139
140 log:
141         CDEBUG(D_LFSCK, "%s: refresh OI map for scrub, op %d, force %s, "
142                DFID" => %llu (%s): rc = %d\n", osd_name(dev), ops,
143                force ? "yes" : "no", PFID(fid), oid, name ? name : "null", rc);
144
145         return rc;
146 }
147
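/*
 * Check one (@fid, @oid) pair reported by the iterator (or taken from the
 * prior list) against the OI, and insert/update the OI mapping if it is
 * missing or stale. Scrub statistics and the first-inconsistent position are
 * updated as a side effect; a newly inserted mapping is removed again if the
 * object was unlinked concurrently.
 */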
148 static int
149 osd_scrub_check_update(const struct lu_env *env, struct osd_device *dev,
150                        const struct lu_fid *fid, uint64_t oid, int val)
151 {
152         struct lustre_scrub *scrub = &dev->od_scrub;
153         struct scrub_file *sf = &scrub->os_file;
154         struct osd_inconsistent_item *oii = NULL;
155         nvlist_t *nvbuf = NULL;
156         dnode_t *dn = NULL;
157         uint64_t oid2;
158         int ops = DTO_INDEX_UPDATE;
159         int rc;
160         ENTRY;
161
162         down_write(&scrub->os_rwsem);
163         scrub->os_new_checked++;
164         if (val < 0)
165                 GOTO(out, rc = val);
166
167         if (scrub->os_in_prior)
168                 oii = list_entry(scrub->os_inconsistent_items.next,
169                                  struct osd_inconsistent_item, oii_list);
170
171         if (oid < sf->sf_pos_latest_start && !oii)
172                 GOTO(out, rc = 0);
173
174         if (oii && oii->oii_insert) {
175                 ops = DTO_INDEX_INSERT;
176                 goto zget;
177         }
178
179         rc = osd_fid_lookup(env, dev, fid, &oid2);
180         if (rc) {
181                 if (rc != -ENOENT)
182                         GOTO(out, rc);
183
184                 ops = DTO_INDEX_INSERT;
185
186 zget:
187                 rc = __osd_obj2dnode(dev->od_os, oid, &dn);
188                 if (rc) {
189                         /* Someone else removed the object concurrently. */
190                         if (rc == -ENOENT || rc == -EEXIST)
191                                 rc = 0;
192                         GOTO(out, rc);
193                 }
194
195                 spin_lock(&scrub->os_lock);
196                 scrub->os_full_speed = 1;
197                 spin_unlock(&scrub->os_lock);
198
199                 sf->sf_flags |= SF_INCONSISTENT;
200         } else if (oid == oid2) {
201                 GOTO(out, rc = 0);
202         } else {
203                 struct lustre_mdt_attrs *lma = NULL;
204                 int size;
205
206                 rc = __osd_xattr_load_by_oid(dev, oid2, &nvbuf);
207                 if (rc == -ENOENT || rc == -EEXIST || rc == -ENODATA)
208                         goto update;
209                 if (rc)
210                         GOTO(out, rc);
211
212                 rc = -nvlist_lookup_byte_array(nvbuf, XATTR_NAME_LMA,
213                                                (uchar_t **)&lma, &size);
214                 if (rc == -ENOENT || rc == -EEXIST || rc == -ENODATA)
215                         goto update;
216                 if (rc)
217                         GOTO(out, rc);
218
219                 lustre_lma_swab(lma);
220                 if (unlikely(lu_fid_eq(&lma->lma_self_fid, fid))) {
221                         CDEBUG(D_LFSCK, "%s: the FID "DFID" is used by "
222                                "two objects: %llu and %llu (in OI)\n",
223                                osd_name(dev), PFID(fid), oid, oid2);
224
225                         GOTO(out, rc = -EEXIST);
226                 }
227
228 update:
229                 spin_lock(&scrub->os_lock);
230                 scrub->os_full_speed = 1;
231                 spin_unlock(&scrub->os_lock);
232                 sf->sf_flags |= SF_INCONSISTENT;
233         }
234
235         rc = osd_scrub_refresh_mapping(env, dev, fid, oid, ops, false, NULL);
236         if (!rc) {
237                 if (scrub->os_in_prior)
238                         sf->sf_items_updated_prior++;
239                 else
240                         sf->sf_items_updated++;
241         }
242
243         GOTO(out, rc);
244
245 out:
246         if (nvbuf)
247                 nvlist_free(nvbuf);
248
249         if (rc < 0) {
250                 sf->sf_items_failed++;
251                 if (sf->sf_pos_first_inconsistent == 0 ||
252                     sf->sf_pos_first_inconsistent > oid)
253                         sf->sf_pos_first_inconsistent = oid;
254         } else {
255                 rc = 0;
256         }
257
258         /* A conflicting unlink may happen during the OI scrub;
259          * if it does, remove the newly added OI mapping. */
260         if (ops == DTO_INDEX_INSERT && dn && dn->dn_free_txg)
261                 osd_scrub_refresh_mapping(env, dev, fid, oid,
262                                           DTO_INDEX_DELETE, false, NULL);
263         up_write(&scrub->os_rwsem);
264
265         if (dn)
266                 osd_dnode_rele(dn);
267
268         if (oii) {
269                 spin_lock(&scrub->os_lock);
270                 if (likely(!list_empty(&oii->oii_list)))
271                         list_del(&oii->oii_list);
272                 spin_unlock(&scrub->os_lock);
273                 OBD_FREE_PTR(oii);
274         }
275
276         RETURN(sf->sf_param & SP_FAILOUT ? rc : 0);
277 }
278
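/*
 * Prepare a new scrub run: apply the start flags to the on-disk parameters
 * (failout/dryrun/reset), decide between full and partial speed, pick the
 * starting position, mark the scrub file as SS_SCANNING and store it.
 */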
279 static int osd_scrub_prep(const struct lu_env *env, struct osd_device *dev)
280 {
281         struct lustre_scrub *scrub = &dev->od_scrub;
282         struct scrub_file *sf = &scrub->os_file;
283         __u32 flags = scrub->os_start_flags;
284         int rc;
285         bool drop_dryrun = false;
286         ENTRY;
287
288         CDEBUG(D_LFSCK, "%s: OI scrub prep, flags = 0x%x\n",
289                scrub->os_name, flags);
290
291         down_write(&scrub->os_rwsem);
292         if (flags & SS_SET_FAILOUT)
293                 sf->sf_param |= SP_FAILOUT;
294         else if (flags & SS_CLEAR_FAILOUT)
295                 sf->sf_param &= ~SP_FAILOUT;
296
297         if (flags & SS_SET_DRYRUN) {
298                 sf->sf_param |= SP_DRYRUN;
299         } else if (flags & SS_CLEAR_DRYRUN && sf->sf_param & SP_DRYRUN) {
300                 sf->sf_param &= ~SP_DRYRUN;
301                 drop_dryrun = true;
302         }
303
304         if (flags & SS_RESET)
305                 scrub_file_reset(scrub, dev->od_uuid, 0);
306
307         spin_lock(&scrub->os_lock);
308         scrub->os_partial_scan = 0;
309         if (flags & SS_AUTO_FULL) {
310                 scrub->os_full_speed = 1;
311                 sf->sf_flags |= SF_AUTO;
312         } else if (sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
313                                    SF_UPGRADE)) {
314                 scrub->os_full_speed = 1;
315         } else {
316                 scrub->os_full_speed = 0;
317         }
318
319         scrub->os_in_prior = 0;
320         scrub->os_waiting = 0;
321         scrub->os_paused = 0;
322         scrub->os_in_join = 0;
323         scrub->os_full_scrub = 0;
324         spin_unlock(&scrub->os_lock);
325         scrub->os_new_checked = 0;
326         if (drop_dryrun && sf->sf_pos_first_inconsistent != 0)
327                 sf->sf_pos_latest_start = sf->sf_pos_first_inconsistent;
328         else if (sf->sf_pos_last_checkpoint != 0)
329                 sf->sf_pos_latest_start = sf->sf_pos_last_checkpoint + 1;
330         else
331                 sf->sf_pos_latest_start = 1;
332
333         scrub->os_pos_current = sf->sf_pos_latest_start;
334         sf->sf_status = SS_SCANNING;
335         sf->sf_time_latest_start = ktime_get_real_seconds();
336         sf->sf_time_last_checkpoint = sf->sf_time_latest_start;
337         sf->sf_pos_last_checkpoint = sf->sf_pos_latest_start - 1;
338         rc = scrub_file_store(env, scrub);
339         if (!rc) {
340                 spin_lock(&scrub->os_lock);
341                 scrub->os_running = 1;
342                 spin_unlock(&scrub->os_lock);
343                 wake_up_var(scrub);
344         }
345         up_write(&scrub->os_rwsem);
346
347         RETURN(rc);
348 }
349
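/*
 * Finish a scrub run: fold the in-memory counters into the scrub file, record
 * the final status (completed, paused, stopped or failed), account the run
 * time and store the scrub file.
 */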
350 static int osd_scrub_post(const struct lu_env *env, struct osd_device *dev,
351                           int result)
352 {
353         struct lustre_scrub *scrub = &dev->od_scrub;
354         struct scrub_file *sf = &scrub->os_file;
355         int rc;
356         ENTRY;
357
358         CDEBUG(D_LFSCK, "%s: OI scrub post with result = %d\n",
359                scrub->os_name, result);
360
361         down_write(&scrub->os_rwsem);
362         spin_lock(&scrub->os_lock);
363         scrub->os_running = 0;
364         spin_unlock(&scrub->os_lock);
365         if (scrub->os_new_checked > 0) {
366                 sf->sf_items_checked += scrub->os_new_checked;
367                 scrub->os_new_checked = 0;
368                 sf->sf_pos_last_checkpoint = scrub->os_pos_current;
369         }
370         sf->sf_time_last_checkpoint = ktime_get_real_seconds();
371         if (result > 0) {
372                 sf->sf_status = SS_COMPLETED;
373                 if (!(sf->sf_param & SP_DRYRUN)) {
374                         memset(sf->sf_oi_bitmap, 0, SCRUB_OI_BITMAP_SIZE);
375                         sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
376                                           SF_UPGRADE | SF_AUTO);
377                 }
378                 sf->sf_time_last_complete = sf->sf_time_last_checkpoint;
379                 sf->sf_success_count++;
380         } else if (result == 0) {
381                 if (scrub->os_paused)
382                         sf->sf_status = SS_PAUSED;
383                 else
384                         sf->sf_status = SS_STOPPED;
385         } else {
386                 sf->sf_status = SS_FAILED;
387         }
388         sf->sf_run_time += ktime_get_seconds() -
389                            scrub->os_time_last_checkpoint;
390
391         rc = scrub_file_store(env, scrub);
392         up_write(&scrub->os_rwsem);
393
394         RETURN(rc < 0 ? rc : result);
395 }
396
397 /* iteration engine */
398
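/*
 * Decide whether the scrub thread may proceed: it keeps running while there
 * is prefetch window left, inconsistent items are queued, the otable user is
 * waiting, or the thread has been asked to stop; otherwise it marks itself as
 * waiting.
 */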
399 static inline int
400 osd_scrub_wakeup(struct lustre_scrub *scrub, struct osd_otable_it *it)
401 {
402         spin_lock(&scrub->os_lock);
403         if (osd_scrub_has_window(it) ||
404             !list_empty(&scrub->os_inconsistent_items) ||
405             it->ooi_waiting || kthread_should_stop())
406                 scrub->os_waiting = 0;
407         else
408                 scrub->os_waiting = 1;
409         spin_unlock(&scrub->os_lock);
410
411         return !scrub->os_waiting;
412 }
413
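/*
 * Produce the next (@fid, @oid) pair to check. Queued inconsistent items are
 * handled first (in "prior" mode); otherwise the dnode space is walked with
 * dmu_object_next() and the FID is taken from the object's LMA xattr. Objects
 * without a usable LMA are skipped. Fail-injection hooks may force a delay, a
 * simulated crash or a fatal error.
 */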
414 static int osd_scrub_next(const struct lu_env *env, struct osd_device *dev,
415                           struct lu_fid *fid, uint64_t *oid)
416 {
417         struct lustre_scrub *scrub = &dev->od_scrub;
418         struct osd_otable_it *it = dev->od_otable_it;
419         struct lustre_mdt_attrs *lma = NULL;
420         nvlist_t *nvbuf = NULL;
421         int size = 0;
422         int rc = 0;
423         ENTRY;
424
425         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0) {
426                 wait_var_event_timeout(
427                         scrub,
428                         !list_empty(&scrub->os_inconsistent_items) ||
429                         kthread_should_stop(),
430                         cfs_time_seconds(cfs_fail_val));
431
432                 if (kthread_should_stop())
433                         RETURN(SCRUB_NEXT_EXIT);
434         }
435
436         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
437                 spin_lock(&scrub->os_lock);
438                 scrub->os_running = 0;
439                 spin_unlock(&scrub->os_lock);
440                 RETURN(SCRUB_NEXT_CRASH);
441         }
442
443         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_FATAL))
444                 RETURN(SCRUB_NEXT_FATAL);
445
446 again:
447         if (nvbuf) {
448                 nvlist_free(nvbuf);
449                 nvbuf = NULL;
450                 lma = NULL;
451         }
452
453         if (!list_empty(&scrub->os_inconsistent_items)) {
454                 spin_lock(&scrub->os_lock);
455                 if (likely(!list_empty(&scrub->os_inconsistent_items))) {
456                         struct osd_inconsistent_item *oii;
457
458                         oii = list_entry(scrub->os_inconsistent_items.next,
459                                 struct osd_inconsistent_item, oii_list);
460                         *fid = oii->oii_cache.oic_fid;
461                         *oid = oii->oii_cache.oic_dnode;
462                         scrub->os_in_prior = 1;
463                         spin_unlock(&scrub->os_lock);
464
465                         GOTO(out, rc = 0);
466                 }
467                 spin_unlock(&scrub->os_lock);
468         }
469
470         if (!scrub->os_full_speed && !osd_scrub_has_window(it))
471                 wait_var_event(scrub, osd_scrub_wakeup(scrub, it));
472
473         if (kthread_should_stop())
474                 GOTO(out, rc = SCRUB_NEXT_EXIT);
475
476         rc = -dmu_object_next(dev->od_os, &scrub->os_pos_current, B_FALSE, 0);
477         if (rc)
478                 GOTO(out, rc = (rc == -ESRCH ? SCRUB_NEXT_BREAK : rc));
479
480         rc = __osd_xattr_load_by_oid(dev, scrub->os_pos_current, &nvbuf);
481         if (rc == -ENOENT || rc == -EEXIST || rc == -ENODATA)
482                 goto again;
483
484         if (rc)
485                 GOTO(out, rc);
486
487         LASSERT(nvbuf != NULL);
488         rc = -nvlist_lookup_byte_array(nvbuf, XATTR_NAME_LMA,
489                                        (uchar_t **)&lma, &size);
490         if (!rc) {
491                 lustre_lma_swab(lma);
492                 if (likely(!(lma->lma_compat & LMAC_NOT_IN_OI) &&
493                            !(lma->lma_incompat & LMAI_AGENT))) {
494                         *fid = lma->lma_self_fid;
495                         *oid = scrub->os_pos_current;
496
497                         GOTO(out, rc = 0);
498                 }
499         }
500
501         if (!scrub->os_full_speed) {
502                 spin_lock(&scrub->os_lock);
503                 it->ooi_prefetched++;
504                 if (it->ooi_waiting) {
505                         it->ooi_waiting = 0;
506                         wake_up_var(scrub);
507                 }
508                 spin_unlock(&scrub->os_lock);
509         }
510
511         goto again;
512
513 out:
514         if (nvbuf)
515                 nvlist_free(nvbuf);
516
517         return rc;
518 }
519
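/*
 * Process one iteration step: verify/refresh the OI mapping for (@fid, @oid),
 * update the prefetch window or leave "prior" mode, and take a periodic
 * checkpoint of the scrub file.
 */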
520 static int osd_scrub_exec(const struct lu_env *env, struct osd_device *dev,
521                           const struct lu_fid *fid, uint64_t oid, int rc)
522 {
523         struct lustre_scrub *scrub = &dev->od_scrub;
524         struct osd_otable_it *it = dev->od_otable_it;
525
526         rc = osd_scrub_check_update(env, dev, fid, oid, rc);
527         if (!scrub->os_in_prior) {
528                 if (!scrub->os_full_speed) {
529                         spin_lock(&scrub->os_lock);
530                         it->ooi_prefetched++;
531                         if (it->ooi_waiting) {
532                                 it->ooi_waiting = 0;
533                                 wake_up_var(scrub);
534                         }
535                         spin_unlock(&scrub->os_lock);
536                 }
537         } else {
538                 spin_lock(&scrub->os_lock);
539                 scrub->os_in_prior = 0;
540                 spin_unlock(&scrub->os_lock);
541         }
542
543         if (rc)
544                 return rc;
545
546         rc = scrub_checkpoint(env, scrub);
547         if (rc) {
548                 CDEBUG(D_LFSCK, "%s: fail to checkpoint, pos = %llu: "
549                        "rc = %d\n", scrub->os_name, scrub->os_pos_current, rc);
550                 /* Continue, as long as the scrub itself can go ahead. */
551         }
552
553         return 0;
554 }
555
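/*
 * Main body of the OI scrub thread: set up the environment, prepare the
 * scrub, optionally wait for the otable iterator user, then loop over
 * osd_scrub_next()/osd_scrub_exec() until done, and finally write back the
 * result and drain any remaining inconsistent items.
 */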
556 static int osd_scrub_main(void *args)
557 {
558         struct lu_env env;
559         struct osd_device *dev = (struct osd_device *)args;
560         struct lustre_scrub *scrub = &dev->od_scrub;
561         struct lu_fid *fid;
562         uint64_t oid;
563         int rc = 0;
564         ENTRY;
565
566         rc = lu_env_init(&env, LCT_LOCAL | LCT_DT_THREAD);
567         if (rc) {
568                 CDEBUG(D_LFSCK, "%s: OI scrub fail to init env: rc = %d\n",
569                        scrub->os_name, rc);
570                 GOTO(noenv, rc);
571         }
572
573         rc = osd_scrub_prep(&env, dev);
574         if (rc) {
575                 CDEBUG(D_LFSCK, "%s: OI scrub fail to scrub prep: rc = %d\n",
576                        scrub->os_name, rc);
577                 GOTO(out, rc);
578         }
579
580         if (!scrub->os_full_speed) {
581                 struct osd_otable_it *it = dev->od_otable_it;
582
583                 wait_var_event(scrub,
584                                it->ooi_user_ready ||
585                                kthread_should_stop());
586
587                 if (kthread_should_stop())
588                         GOTO(post, rc = 0);
589
590                 scrub->os_pos_current = it->ooi_pos;
591         }
592
593         CDEBUG(D_LFSCK, "%s: OI scrub start, flags = 0x%x, pos = %llu\n",
594                scrub->os_name, scrub->os_start_flags,
595                scrub->os_pos_current);
596
597         fid = &osd_oti_get(&env)->oti_fid;
598         while (!rc && !kthread_should_stop()) {
599                 rc = osd_scrub_next(&env, dev, fid, &oid);
600                 switch (rc) {
601                 case SCRUB_NEXT_EXIT:
602                         GOTO(post, rc = 0);
603                 case SCRUB_NEXT_CRASH:
604                         spin_lock(&scrub->os_lock);
605                         scrub->os_running = 0;
606                         spin_unlock(&scrub->os_lock);
607                         GOTO(out, rc = -EINVAL);
608                 case SCRUB_NEXT_FATAL:
609                         GOTO(post, rc = -EINVAL);
610                 case SCRUB_NEXT_BREAK:
611                         GOTO(post, rc = 1);
612                 }
613
614                 rc = osd_scrub_exec(&env, dev, fid, oid, rc);
615         }
616
617         GOTO(post, rc);
618
619 post:
620         rc = osd_scrub_post(&env, dev, rc);
621         CDEBUG(D_LFSCK, "%s: OI scrub: stop, pos = %llu: rc = %d\n",
622                scrub->os_name, scrub->os_pos_current, rc);
623
624 out:
625         while (!list_empty(&scrub->os_inconsistent_items)) {
626                 struct osd_inconsistent_item *oii;
627
628                 oii = list_entry(scrub->os_inconsistent_items.next,
629                                  struct osd_inconsistent_item, oii_list);
630                 list_del_init(&oii->oii_list);
631                 OBD_FREE_PTR(oii);
632         }
633
634         lu_env_fini(&env);
635
636 noenv:
637         spin_lock(&scrub->os_lock);
638         scrub->os_running = 0;
639         spin_unlock(&scrub->os_lock);
640         if (xchg(&scrub->os_task, NULL) == NULL)
641                 /* scrub_stop is waiting, we need to synchronize */
642                 wait_var_event(scrub, kthread_should_stop());
643         wake_up_var(scrub);
644         return rc;
645 }
646
647 /* initial OI scrub */
648
649 struct osd_lf_map;
650
651 typedef int (*handle_dirent_t)(const struct lu_env *, struct osd_device *,
652                                const char *, uint64_t, uint64_t,
653                                enum osd_lf_flags, bool);
654 static int osd_ios_varfid_hd(const struct lu_env *, struct osd_device *,
655                              const char *, uint64_t, uint64_t,
656                              enum osd_lf_flags, bool);
657 static int osd_ios_uld_hd(const struct lu_env *, struct osd_device *,
658                           const char *, uint64_t, uint64_t,
659                           enum osd_lf_flags, bool);
660
661 typedef int (*scan_dir_t)(const struct lu_env *, struct osd_device *,
662                           uint64_t, handle_dirent_t, enum osd_lf_flags);
663 static int osd_ios_general_sd(const struct lu_env *, struct osd_device *,
664                               uint64_t, handle_dirent_t, enum osd_lf_flags);
665 static int osd_ios_ROOT_sd(const struct lu_env *, struct osd_device *,
666                            uint64_t, handle_dirent_t, enum osd_lf_flags);
667
668 struct osd_lf_map {
669         char                    *olm_name;
670         struct lu_fid            olm_fid;
671         enum osd_lf_flags        olm_flags;
672         scan_dir_t               olm_scan_dir;
673         handle_dirent_t          olm_handle_dirent;
674 };
675
676 /* Add newly introduced local files to this list in the future. */
677 static const struct osd_lf_map osd_lf_maps[] = {
678         /* CONFIGS */
679         {
680                 .olm_name               = MOUNT_CONFIGS_DIR,
681                 .olm_fid                = {
682                         .f_seq  = FID_SEQ_LOCAL_FILE,
683                         .f_oid  = MGS_CONFIGS_OID,
684                 },
685                 .olm_flags              = OLF_SCAN_SUBITEMS,
686                 .olm_scan_dir           = osd_ios_general_sd,
687                 .olm_handle_dirent      = osd_ios_varfid_hd,
688         },
689
690         /* NIDTBL_VERSIONS */
691         {
692                 .olm_name               = MGS_NIDTBL_DIR,
693                 .olm_flags              = OLF_SCAN_SUBITEMS,
694                 .olm_scan_dir           = osd_ios_general_sd,
695                 .olm_handle_dirent      = osd_ios_varfid_hd,
696         },
697
698         /* PENDING */
699         {
700                 .olm_name               = MDT_ORPHAN_DIR,
701         },
702
703         /* ROOT */
704         {
705                 .olm_name               = "ROOT",
706                 .olm_fid                = {
707                         .f_seq  = FID_SEQ_ROOT,
708                         .f_oid  = FID_OID_ROOT,
709                 },
710                 .olm_flags              = OLF_SCAN_SUBITEMS,
711                 .olm_scan_dir           = osd_ios_ROOT_sd,
712         },
713
714         /* fld */
715         {
716                 .olm_name               = "fld",
717                 .olm_fid                = {
718                         .f_seq  = FID_SEQ_LOCAL_FILE,
719                         .f_oid  = FLD_INDEX_OID,
720                 },
721         },
722
723         /* changelog_catalog */
724         {
725                 .olm_name               = CHANGELOG_CATALOG,
726         },
727
728         /* changelog_users */
729         {
730                 .olm_name               = CHANGELOG_USERS,
731         },
732
733         /* quota_master */
734         {
735                 .olm_name               = QMT_DIR,
736                 .olm_flags              = OLF_SCAN_SUBITEMS,
737                 .olm_scan_dir           = osd_ios_general_sd,
738                 .olm_handle_dirent      = osd_ios_varfid_hd,
739         },
740
741         /* quota_slave */
742         {
743                 .olm_name               = QSD_DIR,
744                 .olm_flags              = OLF_SCAN_SUBITEMS,
745                 .olm_scan_dir           = osd_ios_general_sd,
746                 .olm_handle_dirent      = osd_ios_varfid_hd,
747         },
748
749         /* LFSCK */
750         {
751                 .olm_name               = LFSCK_DIR,
752                 .olm_flags              = OLF_SCAN_SUBITEMS | OLF_NOT_BACKUP,
753                 .olm_scan_dir           = osd_ios_general_sd,
754                 .olm_handle_dirent      = osd_ios_varfid_hd,
755         },
756
757         /* lfsck_bookmark */
758         {
759                 .olm_name               = LFSCK_BOOKMARK,
760         },
761
762         /* lfsck_layout */
763         {
764                 .olm_name               = LFSCK_LAYOUT,
765         },
766
767         /* lfsck_namespace */
768         {
769                 .olm_name               = LFSCK_NAMESPACE,
770         },
771
772         /* OSP update logs update_log{_dir} use f_seq = FID_SEQ_UPDATE_LOG{_DIR}
773          * and f_oid = index for their log files.  See lu_update_log{_dir}_fid()
774          * for more details. */
775
776         /* update_log */
777         {
778                 .olm_name               = "update_log",
779                 .olm_fid                = {
780                         .f_seq  = FID_SEQ_UPDATE_LOG,
781                 },
782                 .olm_flags              = OLF_IDX_IN_FID,
783         },
784
785         /* update_log_dir */
786         {
787                 .olm_name               = "update_log_dir",
788                 .olm_fid        = {
789                         .f_seq  = FID_SEQ_UPDATE_LOG_DIR,
790                 },
791                 .olm_flags              = OLF_SCAN_SUBITEMS | OLF_IDX_IN_FID,
792                 .olm_scan_dir           = osd_ios_general_sd,
793                 .olm_handle_dirent      = osd_ios_uld_hd,
794         },
795
796         /* hsm_actions */
797         {
798                 .olm_name               = HSM_ACTIONS,
799         },
800
801         /* nodemap */
802         {
803                 .olm_name               = LUSTRE_NODEMAP_NAME,
804         },
805
806         /* index_backup */
807         {
808                 .olm_name               = INDEX_BACKUP_DIR,
809                 .olm_fid                = {
810                         .f_seq  = FID_SEQ_LOCAL_FILE,
811                         .f_oid  = INDEX_BACKUP_OID,
812                 },
813                 .olm_flags              = OLF_SCAN_SUBITEMS | OLF_NOT_BACKUP,
814                 .olm_scan_dir           = osd_ios_general_sd,
815                 .olm_handle_dirent      = osd_ios_varfid_hd,
816         },
817
818         {
819                 .olm_name               = NULL
820         }
821 };
822
823 /* Add newly introduced files under .lustre/ to this list in the future. */
824 static const struct osd_lf_map osd_dl_maps[] = {
825         /* .lustre/fid */
826         {
827                 .olm_name               = "fid",
828                 .olm_fid                = {
829                         .f_seq  = FID_SEQ_DOT_LUSTRE,
830                         .f_oid  = FID_OID_DOT_LUSTRE_OBF,
831                 },
832         },
833
834         /* .lustre/lost+found */
835         {
836                 .olm_name               = "lost+found",
837                 .olm_fid                = {
838                         .f_seq  = FID_SEQ_DOT_LUSTRE,
839                         .f_oid  = FID_OID_DOT_LUSTRE_LPF,
840                 },
841         },
842
843         {
844                 .olm_name               = NULL
845         }
846 };
847
848 struct osd_ios_item {
849         struct list_head        oii_list;
850         uint64_t                oii_parent;
851         enum osd_lf_flags       oii_flags;
852         scan_dir_t              oii_scan_dir;
853         handle_dirent_t         oii_handle_dirent;
854 };
855
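/* Queue a sub-directory for a later pass of the initial OI scrub. */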
856 static int osd_ios_new_item(struct osd_device *dev, uint64_t parent,
857                             enum osd_lf_flags flags, scan_dir_t scan_dir,
858                             handle_dirent_t handle_dirent)
859 {
860         struct osd_ios_item *item;
861
862         OBD_ALLOC_PTR(item);
863         if (!item) {
864                 CWARN("%s: initial OI scrub failed to add item for %llu\n",
865                       osd_name(dev), parent);
866                 return -ENOMEM;
867         }
868
869         INIT_LIST_HEAD(&item->oii_list);
870         item->oii_parent = parent;
871         item->oii_flags = flags;
872         item->oii_scan_dir = scan_dir;
873         item->oii_handle_dirent = handle_dirent;
874         list_add_tail(&item->oii_list, &dev->od_ios_list);
875
876         return 0;
877 }
878
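/*
 * An index object whose ZAP cannot be read (other than simply being empty)
 * is considered corrupted and must be re-created from its backup.
 */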
879 static bool osd_index_need_recreate(const struct lu_env *env,
880                                     struct osd_device *dev, uint64_t oid)
881 {
882         struct osd_thread_info *info = osd_oti_get(env);
883         zap_attribute_t *za = &info->oti_za2;
884         zap_cursor_t *zc = &info->oti_zc2;
885         int rc;
886         ENTRY;
887
888         zap_cursor_init_serialized(zc, dev->od_os, oid, 0);
889         rc = -zap_cursor_retrieve(zc, za);
890         zap_cursor_fini(zc);
891         if (rc && rc != -ENOENT)
892                 RETURN(true);
893
894         RETURN(false);
895 }
896
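/*
 * If @oid is a regular-file FAT ZAP with 64-bit keys, derive the key and
 * record sizes from its first entry and register the index with
 * osd_index_register().
 */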
897 static void osd_ios_index_register(const struct lu_env *env,
898                                    struct osd_device *osd,
899                                    const struct lu_fid *fid, uint64_t oid)
900 {
901         struct osd_thread_info *info = osd_oti_get(env);
902         zap_attribute_t *za = &info->oti_za2;
903         zap_cursor_t *zc = &info->oti_zc2;
904         struct zap_leaf_entry *le;
905         dnode_t *dn = NULL;
906         sa_handle_t *hdl;
907         __u64 mode = 0;
908         __u32 keysize = 0;
909         __u32 recsize = 0;
910         int rc;
911         ENTRY;
912
913         rc = __osd_obj2dnode(osd->od_os, oid, &dn);
914         if (rc == -EEXIST || rc == -ENOENT)
915                 RETURN_EXIT;
916
917         if (rc < 0)
918                 GOTO(log, rc);
919
920         if (!osd_object_is_zap(dn))
921                 GOTO(log, rc = 1);
922
923         rc = -sa_handle_get(osd->od_os, oid, NULL, SA_HDL_PRIVATE, &hdl);
924         if (rc)
925                 GOTO(log, rc);
926
927         rc = -sa_lookup(hdl, SA_ZPL_MODE(osd), &mode, sizeof(mode));
928         sa_handle_destroy(hdl);
929         if (rc)
930                 GOTO(log, rc);
931
932         if (!S_ISREG(mode))
933                 GOTO(log, rc = 1);
934
935         zap_cursor_init_serialized(zc, osd->od_os, oid, 0);
936         rc = -zap_cursor_retrieve(zc, za);
937         if (rc)
938                 /* Skip empty index object */
939                 GOTO(fini, rc = (rc == -ENOENT ? 1 : rc));
940
941         if (zc->zc_zap->zap_ismicro ||
942             !(zap_f_phys(zc->zc_zap)->zap_flags & ZAP_FLAG_UINT64_KEY))
943                 GOTO(fini, rc = 1);
944
945         le = ZAP_LEAF_ENTRY(zc->zc_leaf, 0);
946         keysize = le->le_name_numints * 8;
947         recsize = za->za_integer_length * za->za_num_integers;
948         if (likely(keysize && recsize))
949                 rc = osd_index_register(osd, fid, keysize, recsize);
950
951         GOTO(fini, rc);
952
953 fini:
954         zap_cursor_fini(zc);
955
956 log:
957         if (dn)
958                 osd_dnode_rele(dn);
959         if (rc < 0)
960                 CWARN("%s: failed to register index "DFID" (%u/%u): rc = %d\n",
961                       osd_name(osd), PFID(fid), keysize, recsize, rc);
962         else if (!rc)
963                 CDEBUG(D_LFSCK, "%s: registered index "DFID" (%u/%u)\n",
964                        osd_name(osd), PFID(fid), keysize, recsize);
965 }
966
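/*
 * Restore one index object from its backup copy: look up the backup dnode by
 * the target FID in the index-backup ZAP, resolve its FID, and let the
 * generic lustre_index_restore() rebuild the index.
 */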
967 static void osd_index_restore(const struct lu_env *env, struct osd_device *dev,
968                               struct lustre_index_restore_unit *liru, void *buf,
969                               int bufsize)
970 {
971         struct luz_direntry *zde = &osd_oti_get(env)->oti_zde;
972         struct lu_fid *tgt_fid = &liru->liru_cfid;
973         struct lu_fid bak_fid;
974         int rc;
975         ENTRY;
976
977         lustre_fid2lbx(buf, tgt_fid, bufsize);
978         rc = -zap_lookup(dev->od_os, dev->od_index_backup_id, buf, 8,
979                          sizeof(*zde) / 8, (void *)zde);
980         if (rc)
981                 GOTO(log, rc);
982
983         rc = osd_get_fid_by_oid(env, dev, zde->lzd_reg.zde_dnode, &bak_fid);
984         if (rc)
985                 GOTO(log, rc);
986
987         /* The OI mapping for the index may be invalid. Since the index will
988          * be re-created, do not update the OI mapping; just cache it in RAM. */
989         rc = osd_idc_find_and_init_with_oid(env, dev, tgt_fid,
990                                             liru->liru_clid);
991         if (!rc)
992                 rc = lustre_index_restore(env, &dev->od_dt_dev,
993                                 &liru->liru_pfid, tgt_fid, &bak_fid,
994                                 liru->liru_name, &dev->od_index_backup_list,
995                                 &dev->od_lock, buf, bufsize);
996         GOTO(log, rc);
997
998 log:
999         CDEBUG(D_WARNING, "%s: restore index '%s' with "DFID": rc = %d\n",
1000                osd_name(dev), liru->liru_name, PFID(tgt_fid), rc);
1001 }
1002
1003 /**
1004  * verify FID-in-LMA and OI entry for one object
1005  *
1006  * ios: Initial OI Scrub.
1007  */
1008 static int osd_ios_scan_one(const struct lu_env *env, struct osd_device *dev,
1009                             const struct lu_fid *fid, uint64_t parent,
1010                             uint64_t oid, const char *name,
1011                             enum osd_lf_flags flags)
1012 {
1013         struct lustre_scrub *scrub = &dev->od_scrub;
1014         struct scrub_file *sf = &scrub->os_file;
1015         struct lustre_mdt_attrs *lma = NULL;
1016         nvlist_t *nvbuf = NULL;
1017         struct lu_fid tfid;
1018         uint64_t oid2 = 0;
1019         __u64 flag = 0;
1020         int size = 0;
1021         int op = 0;
1022         int rc;
1023         ENTRY;
1024
1025         rc = __osd_xattr_load_by_oid(dev, oid, &nvbuf);
1026         if (unlikely(rc == -ENOENT || rc == -EEXIST))
1027                 RETURN(0);
1028
1029         if (rc && rc != -ENODATA) {
1030                 CWARN("%s: initial OI scrub failed to get lma for %llu: "
1031                       "rc = %d\n", osd_name(dev), oid, rc);
1032
1033                 RETURN(rc);
1034         }
1035
1036         if (!rc) {
1037                 LASSERT(nvbuf != NULL);
1038                 rc = -nvlist_lookup_byte_array(nvbuf, XATTR_NAME_LMA,
1039                                                (uchar_t **)&lma, &size);
1040                 if (rc || size == 0) {
1041                         LASSERT(lma == NULL);
1042                         rc = -ENODATA;
1043                 } else {
1044                         LASSERTF(lma != NULL, "corrupted LMA, size %d\n", size);
1045                         lustre_lma_swab(lma);
1046                         if (lma->lma_compat & LMAC_NOT_IN_OI) {
1047                                 nvlist_free(nvbuf);
1048                                 RETURN(0);
1049                         }
1050
1051                         if (lma->lma_compat & LMAC_IDX_BACKUP &&
1052                             osd_index_need_recreate(env, dev, oid)) {
1053                                 if (parent == dev->od_root) {
1054                                         lu_local_obj_fid(&tfid,
1055                                                          OSD_FS_ROOT_OID);
1056                                 } else {
1057                                         rc = osd_get_fid_by_oid(env, dev,
1058                                                                 parent, &tfid);
1059                                         if (rc) {
1060                                                 nvlist_free(nvbuf);
1061                                                 RETURN(rc);
1062                                         }
1063                                 }
1064
1065                                 rc = lustre_liru_new(
1066                                                 &dev->od_index_restore_list,
1067                                                 &tfid, &lma->lma_self_fid, oid,
1068                                                 name, strlen(name));
1069                                 nvlist_free(nvbuf);
1070                                 RETURN(rc);
1071                         }
1072
1073                         tfid = lma->lma_self_fid;
1074                         if (!(flags & OLF_NOT_BACKUP))
1075                                 osd_ios_index_register(env, dev, &tfid, oid);
1076                 }
1077                 nvlist_free(nvbuf);
1078         }
1079
1080         if (rc == -ENODATA) {
1081                 if (!fid) {
1082                         /* Skip the object without FID-in-LMA */
1083                         CDEBUG(D_LFSCK, "%s: %llu has no FID-in-LMA, skip it\n",
1084                                osd_name(dev), oid);
1085
1086                         RETURN(0);
1087                 }
1088
1089                 LASSERT(!fid_is_zero(fid));
1090
1091                 tfid = *fid;
1092                 if (flags & OLF_IDX_IN_FID) {
1093                         LASSERT(dev->od_index >= 0);
1094
1095                         tfid.f_oid = dev->od_index;
1096                 }
1097         }
1098
1099         rc = osd_fid_lookup(env, dev, &tfid, &oid2);
1100         if (rc) {
1101                 if (rc != -ENOENT) {
1102                         CWARN("%s: initial OI scrub failed to lookup fid for "
1103                               DFID"=>%llu: rc = %d\n",
1104                               osd_name(dev), PFID(&tfid), oid, rc);
1105
1106                         RETURN(rc);
1107                 }
1108
1109                 flag = SF_RECREATED;
1110                 op = DTO_INDEX_INSERT;
1111         } else {
1112                 if (oid == oid2)
1113                         RETURN(0);
1114
1115                 flag = SF_INCONSISTENT;
1116                 op = DTO_INDEX_UPDATE;
1117         }
1118
1119         if (!(sf->sf_flags & flag)) {
1120                 scrub_file_reset(scrub, dev->od_uuid, flag);
1121                 rc = scrub_file_store(env, scrub);
1122                 if (rc)
1123                         RETURN(rc);
1124         }
1125
1126         rc = osd_scrub_refresh_mapping(env, dev, &tfid, oid, op, true, name);
1127
1128         RETURN(rc > 0 ? 0 : rc);
1129 }
1130
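/*
 * handle_dirent callback for entries whose FID can only be taken from the
 * LMA: verify the entry and queue sub-directories for further scanning.
 */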
1131 static int osd_ios_varfid_hd(const struct lu_env *env, struct osd_device *dev,
1132                              const char *name, uint64_t parent, uint64_t oid,
1133                              enum osd_lf_flags flags, bool is_dir)
1134 {
1135         int rc;
1136         ENTRY;
1137
1138         rc = osd_ios_scan_one(env, dev, NULL, parent, oid, name, 0);
1139         if (!rc && is_dir)
1140                 rc = osd_ios_new_item(dev, oid, flags, osd_ios_general_sd,
1141                                       osd_ios_varfid_hd);
1142
1143         RETURN(rc);
1144 }
1145
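/*
 * handle_dirent callback for update_log_dir: the entry name encodes the FID,
 * so parse it and verify the OI mapping; non-DFID names are skipped.
 */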
1146 static int osd_ios_uld_hd(const struct lu_env *env, struct osd_device *dev,
1147                           const char *name, uint64_t parent, uint64_t oid,
1148                           enum osd_lf_flags flags, bool is_dir)
1149 {
1150         struct lu_fid tfid;
1151         int rc;
1152         ENTRY;
1153
1154         /* skip any non-DFID format name */
1155         if (name[0] != '[')
1156                 RETURN(0);
1157
1158         /* skip the start '[' */
1159         sscanf(&name[1], SFID, RFID(&tfid));
1160         if (fid_is_sane(&tfid))
1161                 rc = osd_ios_scan_one(env, dev, &tfid, parent, oid, name, 0);
1162         else
1163                 rc = -EIO;
1164
1165         RETURN(rc);
1166 }
1167
1168 /*
1169  * General scanner for directories other than /ROOT during the initial OI
1170  * scrub. It scans the name entries under the given directory one by one and,
1171  * for each entry, verifies its OI mapping via the given @handle_dirent.
1172  */
1173 static int osd_ios_general_sd(const struct lu_env *env, struct osd_device *dev,
1174                               uint64_t parent, handle_dirent_t handle_dirent,
1175                               enum osd_lf_flags flags)
1176 {
1177         struct osd_thread_info *info = osd_oti_get(env);
1178         struct luz_direntry *zde = &info->oti_zde;
1179         zap_attribute_t *za = &info->oti_za;
1180         zap_cursor_t *zc = &info->oti_zc;
1181         int rc;
1182         ENTRY;
1183
1184         zap_cursor_init_serialized(zc, dev->od_os, parent, 0);
1185         rc = -zap_cursor_retrieve(zc, za);
1186         if (rc == -ENOENT)
1187                 zap_cursor_advance(zc);
1188         else if (rc)
1189                 GOTO(log, rc);
1190
1191         while (1) {
1192                 rc = -zap_cursor_retrieve(zc, za);
1193                 if (rc)
1194                         GOTO(log, rc = (rc == -ENOENT ? 0 : rc));
1195
1196                 /* skip entries whose names start with '.' */
1197                 if (likely(za->za_name[0] != '.')) {
1198                         rc = osd_zap_lookup(dev, parent, NULL, za->za_name,
1199                                         za->za_integer_length,
1200                                         sizeof(*zde) / za->za_integer_length,
1201                                         (void *)zde);
1202                         if (rc) {
1203                                 CWARN("%s: initial OI scrub failed to lookup "
1204                                       "%s under %llu: rc = %d\n",
1205                                       osd_name(dev), za->za_name, parent, rc);
1206                                 continue;
1207                         }
1208
1209                         rc = handle_dirent(env, dev, za->za_name, parent,
1210                                         zde->lzd_reg.zde_dnode, flags,
1211                                         S_ISDIR(DTTOIF(zde->lzd_reg.zde_type)) ?
1212                                         true : false);
1213                         CDEBUG(D_LFSCK, "%s: initial OI scrub handled %s under "
1214                                "%llu: rc = %d\n",
1215                                osd_name(dev), za->za_name, parent, rc);
1216                 }
1217
1218                 zap_cursor_advance(zc);
1219         }
1220
1221 log:
1222         if (rc)
1223                 CWARN("%s: initial OI scrub failed to scan the directory %llu: "
1224                       "rc = %d\n", osd_name(dev), parent, rc);
1225         zap_cursor_fini(zc);
1226
1227         return rc;
1228 }
1229
1230 /*
1231  * The scanner for the /ROOT directory. Not all items under /ROOT are scanned
1232  * during the initial OI scrub; only .lustre and the sub-items under .lustre
1233  * are handled.
1234  */
1235 static int osd_ios_ROOT_sd(const struct lu_env *env, struct osd_device *dev,
1236                            uint64_t parent, handle_dirent_t handle_dirent,
1237                            enum osd_lf_flags flags)
1238 {
1239         struct luz_direntry *zde = &osd_oti_get(env)->oti_zde;
1240         const struct osd_lf_map *map;
1241         uint64_t oid;
1242         int rc;
1243         int rc1 = 0;
1244         ENTRY;
1245
1246         rc = osd_zap_lookup(dev, parent, NULL, dot_lustre_name, 8,
1247                             sizeof(*zde) / 8, (void *)zde);
1248         if (rc == -ENOENT) {
1249                 /* The .lustre directory is lost. That is not fatal; it can
1250                  * be re-created during subsequent MDT start processing. */
1251                 RETURN(0);
1252         }
1253
1254         if (rc) {
1255                 CWARN("%s: initial OI scrub failed to find .lustre: "
1256                       "rc = %d\n", osd_name(dev), rc);
1257
1258                 RETURN(rc);
1259         }
1260
1261         oid = zde->lzd_reg.zde_dnode;
1262         rc = osd_ios_scan_one(env, dev, &LU_DOT_LUSTRE_FID, parent, oid,
1263                               dot_lustre_name, 0);
1264         if (rc)
1265                 RETURN(rc);
1266
1267         for (map = osd_dl_maps; map->olm_name; map++) {
1268                 rc = osd_zap_lookup(dev, oid, NULL, map->olm_name, 8,
1269                                     sizeof(*zde) / 8, (void *)zde);
1270                 if (rc) {
1271                         if (rc != -ENOENT)
1272                                 CWARN("%s: initial OI scrub failed to find the entry %s under .lustre: rc = %d\n",
1273                                       osd_name(dev), map->olm_name, rc);
1274                         else if (!fid_is_zero(&map->olm_fid))
1275                                 /* Try to remove the stale OI mapping. */
1276                                 osd_scrub_refresh_mapping(env, dev,
1277                                                 &map->olm_fid, 0,
1278                                                 DTO_INDEX_DELETE, true,
1279                                                 map->olm_name);
1280                         continue;
1281                 }
1282
1283                 rc = osd_ios_scan_one(env, dev, &map->olm_fid, oid,
1284                                       zde->lzd_reg.zde_dnode, map->olm_name,
1285                                       map->olm_flags);
1286                 if (rc)
1287                         rc1 = rc;
1288         }
1289
1290         RETURN(rc1);
1291 }
1292
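/*
 * Initial OI scrub at mount time: verify/repair the OI mappings of the known
 * local files (osd_lf_maps), scan the queued sub-directories, and restore any
 * index objects that were found to need re-creation.
 */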
1293 static void osd_initial_OI_scrub(const struct lu_env *env,
1294                                  struct osd_device *dev)
1295 {
1296         struct luz_direntry *zde = &osd_oti_get(env)->oti_zde;
1297         const struct osd_lf_map *map;
1298         int rc;
1299         ENTRY;
1300
1301         for (map = osd_lf_maps; map->olm_name; map++) {
1302                 rc = osd_zap_lookup(dev, dev->od_root, NULL, map->olm_name, 8,
1303                                     sizeof(*zde) / 8, (void *)zde);
1304                 if (rc) {
1305                         if (rc != -ENOENT)
1306                                 CWARN("%s: initial OI scrub failed "
1307                                       "to find the entry %s: rc = %d\n",
1308                                       osd_name(dev), map->olm_name, rc);
1309                         else if (!fid_is_zero(&map->olm_fid))
1310                                 /* Try to remove the stale OI mapping. */
1311                                 osd_scrub_refresh_mapping(env, dev,
1312                                                 &map->olm_fid, 0,
1313                                                 DTO_INDEX_DELETE, true,
1314                                                 map->olm_name);
1315                         continue;
1316                 }
1317
1318                 rc = osd_ios_scan_one(env, dev, &map->olm_fid, dev->od_root,
1319                                       zde->lzd_reg.zde_dnode, map->olm_name,
1320                                       map->olm_flags);
1321                 if (!rc && map->olm_flags & OLF_SCAN_SUBITEMS)
1322                         osd_ios_new_item(dev, zde->lzd_reg.zde_dnode,
1323                                          map->olm_flags, map->olm_scan_dir,
1324                                          map->olm_handle_dirent);
1325         }
1326
1327         while (!list_empty(&dev->od_ios_list)) {
1328                 struct osd_ios_item *item;
1329
1330                 item = list_entry(dev->od_ios_list.next,
1331                                   struct osd_ios_item, oii_list);
1332                 list_del_init(&item->oii_list);
1333                 item->oii_scan_dir(env, dev, item->oii_parent,
1334                                    item->oii_handle_dirent, item->oii_flags);
1335                 OBD_FREE_PTR(item);
1336         }
1337
1338         if (!list_empty(&dev->od_index_restore_list)) {
1339                 char *buf;
1340
1341                 OBD_ALLOC_LARGE(buf, INDEX_BACKUP_BUFSIZE);
1342                 if (!buf)
1343                         CERROR("%s: not enough RAM for rebuild index\n",
1344                                osd_name(dev));
1345
1346                 while (!list_empty(&dev->od_index_restore_list)) {
1347                         struct lustre_index_restore_unit *liru;
1348
1349                         liru = list_entry(dev->od_index_restore_list.next,
1350                                           struct lustre_index_restore_unit,
1351                                           liru_link);
1352                         list_del(&liru->liru_link);
1353                         if (buf)
1354                                 osd_index_restore(env, dev, liru, buf,
1355                                                   INDEX_BACKUP_BUFSIZE);
1356                         OBD_FREE(liru, liru->liru_len);
1357                 }
1358
1359                 if (buf)
1360                         OBD_FREE_LARGE(buf, INDEX_BACKUP_BUFSIZE);
1361         }
1362
1363         EXIT;
1364 }
1365
1366 /* OI scrub start/stop */
1367
1368 int osd_scrub_start(const struct lu_env *env, struct osd_device *dev,
1369                     __u32 flags)
1370 {
1371         int rc;
1372         ENTRY;
1373
1374         if (dev->od_dt_dev.dd_rdonly)
1375                 RETURN(-EROFS);
1376
1377         /* od_otable_sem: prevent concurrent start/stop */
1378         down(&dev->od_otable_sem);
1379         rc = scrub_start(osd_scrub_main, &dev->od_scrub, dev, flags);
1380         up(&dev->od_otable_sem);
1381
1382         RETURN(rc == -EALREADY ? 0 : rc);
1383 }
1384
1385 void osd_scrub_stop(struct osd_device *dev)
1386 {
1387         struct lustre_scrub *scrub = &dev->od_scrub;
1388         ENTRY;
1389
1390         /* od_otable_sem: prevent concurrent start/stop */
1391         down(&dev->od_otable_sem);
1392         spin_lock(&scrub->os_lock);
1393         scrub->os_paused = 1;
1394         spin_unlock(&scrub->os_lock);
1395         scrub_stop(scrub);
1396         up(&dev->od_otable_sem);
1397
1398         EXIT;
1399 }
1400
1401 /* OI scrub setup/cleanup */
1402
1403 static const char osd_scrub_name[] = "OI_scrub";
1404
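/*
 * Set up the OI scrub infrastructure at mount: create or look up the OI_scrub
 * object, load (or initialize/reset) the scrub file, initialize the OI files,
 * run the initial OI scrub, and auto-start a full scrub if the previous state
 * requires it.
 */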
1405 int osd_scrub_setup(const struct lu_env *env, struct osd_device *dev)
1406 {
1407         struct osd_thread_info *info = osd_oti_get(env);
1408         struct lustre_scrub *scrub = &dev->od_scrub;
1409         struct scrub_file *sf = &scrub->os_file;
1410         struct lu_fid *fid = &info->oti_fid;
1411         struct dt_object *obj;
1412         uint64_t oid;
1413         int rc = 0;
1414         bool dirty = false;
1415         ENTRY;
1416
1417         memcpy(dev->od_uuid.b,
1418                &dsl_dataset_phys(dev->od_os->os_dsl_dataset)->ds_guid,
1419                sizeof(dsl_dataset_phys(dev->od_os->os_dsl_dataset)->ds_guid));
1420         memset(&dev->od_scrub, 0, sizeof(struct lustre_scrub));
1421         init_rwsem(&scrub->os_rwsem);
1422         spin_lock_init(&scrub->os_lock);
1423         INIT_LIST_HEAD(&scrub->os_inconsistent_items);
1424         scrub->os_name = osd_name(dev);
1425
1426         /* The exact value of @fid is not important, because the object
1427          * has no OI mapping and is only visible inside the OSD. */
1428         fid->f_seq = FID_SEQ_IGIF_MAX;
1429         if (dev->od_is_ost)
1430                 fid->f_oid = ((1 << 31) | dev->od_index) + 1;
1431         else
1432                 fid->f_oid = dev->od_index + 1;
1433         fid->f_ver = 0;
1434         rc = osd_obj_find_or_create(env, dev, dev->od_root,
1435                                     osd_scrub_name, &oid, fid, false);
1436         if (rc)
1437                 RETURN(rc);
1438
1439         rc = osd_idc_find_and_init_with_oid(env, dev, fid, oid);
1440         if (rc)
1441                 RETURN(rc);
1442
1443         obj = lu2dt(lu_object_find_slice(env, osd2lu_dev(dev), fid, NULL));
1444         if (IS_ERR_OR_NULL(obj))
1445                 RETURN(obj ? PTR_ERR(obj) : -ENOENT);
1446
1447         obj->do_body_ops = &osd_body_scrub_ops;
1448         scrub->os_obj = obj;
1449         rc = scrub_file_load(env, scrub);
1450         if (rc == -ENOENT || rc == -EFAULT) {
1451                 scrub_file_init(scrub, dev->od_uuid);
1452                 dirty = true;
1453         } else if (rc < 0) {
1454                 GOTO(cleanup_obj, rc);
1455         } else {
1456                 if (!uuid_equal(&sf->sf_uuid, &dev->od_uuid)) {
1457                         CDEBUG(D_LFSCK,
1458                                "%s: UUID has been changed from %pU to %pU\n",
1459                                osd_name(dev), &sf->sf_uuid, &dev->od_uuid);
1460                         scrub_file_reset(scrub, dev->od_uuid, SF_INCONSISTENT);
1461                         dirty = true;
1462                 } else if (sf->sf_status == SS_SCANNING) {
1463                         sf->sf_status = SS_CRASHED;
1464                         dirty = true;
1465                 }
1466
1467                 if ((sf->sf_oi_count & (sf->sf_oi_count - 1)) != 0) {
1468                         LCONSOLE_WARN("%s: invalid oi count %d, set it to %d\n",
1469                                       osd_name(dev), sf->sf_oi_count,
1470                                       osd_oi_count);
1471                         sf->sf_oi_count = osd_oi_count;
1472                         dirty = true;
1473                 }
1474         }
1475
1476         if (sf->sf_pos_last_checkpoint != 0)
1477                 scrub->os_pos_current = sf->sf_pos_last_checkpoint + 1;
1478         else
1479                 scrub->os_pos_current = 1;
1480
1481         if (dirty) {
1482                 rc = scrub_file_store(env, scrub);
1483                 if (rc)
1484                         GOTO(cleanup_obj, rc);
1485         }
1486
1487         /* Initialize OI files. */
1488         rc = osd_oi_init(env, dev);
1489         if (rc < 0)
1490                 GOTO(cleanup_obj, rc);
1491
1492         if (!dev->od_dt_dev.dd_rdonly)
1493                 osd_initial_OI_scrub(env, dev);
1494
1495         if (!dev->od_dt_dev.dd_rdonly &&
1496             dev->od_auto_scrub_interval != AS_NEVER &&
1497             ((sf->sf_status == SS_PAUSED) ||
1498              (sf->sf_status == SS_CRASHED &&
1499               sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
1500                               SF_UPGRADE | SF_AUTO)) ||
1501              (sf->sf_status == SS_INIT &&
1502               sf->sf_flags & (SF_RECREATED | SF_INCONSISTENT |
1503                               SF_UPGRADE))))
1504                 rc = osd_scrub_start(env, dev, SS_AUTO_FULL);
1505
1506         if (rc)
1507                 GOTO(cleanup_oi, rc);
1508
1509         RETURN(0);
1510
1511 cleanup_oi:
1512         osd_oi_fini(env, dev);
1513 cleanup_obj:
1514         dt_object_put_nocache(env, scrub->os_obj);
1515         scrub->os_obj = NULL;
1516
1517         return rc;
1518 }
1519
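/*
 * Release the OI scrub resources of @dev at umount: stop the scrub thread,
 * drop the reference on the OI_scrub object and finalize the OI tables.
 * The otable iterator must have been finished by the caller already.
 */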
1520 void osd_scrub_cleanup(const struct lu_env *env, struct osd_device *dev)
1521 {
1522         struct lustre_scrub *scrub = &dev->od_scrub;
1523
1524         LASSERT(!dev->od_otable_it);
1525
1526         if (scrub->os_obj) {
1527                 osd_scrub_stop(dev);
1528                 dt_object_put_nocache(env, scrub->os_obj);
1529                 scrub->os_obj = NULL;
1530         }
1531
1532         if (dev->od_oi_table)
1533                 osd_oi_fini(env, dev);
1534 }
1535
1536 /* object table based iteration APIs */
1537
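/*
 * Initialize the otable-based iterator used by the upper-layer LFSCK.
 *
 * The iterator piggybacks on the OI scrub thread: the flags/valid bits in
 * @attr are translated into scrub start flags, a txg sync is forced so that
 * dmu_object_next() can see all committed dnodes, and the scrub is started
 * (or, if it is already running, the iterator simply starts from position 1).
 * Only one otable iterator may be registered per device.
 */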
1538 static struct dt_it *osd_otable_it_init(const struct lu_env *env,
1539                                        struct dt_object *dt, __u32 attr)
1540 {
1541         enum dt_otable_it_flags flags = attr >> DT_OTABLE_IT_FLAGS_SHIFT;
1542         enum dt_otable_it_valid valid = attr & ~DT_OTABLE_IT_FLAGS_MASK;
1543         struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
1544         struct lustre_scrub *scrub = &dev->od_scrub;
1545         struct osd_otable_it *it;
1546         __u32 start = 0;
1547         int rc;
1548         ENTRY;
1549
1550         if (dev->od_dt_dev.dd_rdonly)
1551                 RETURN(ERR_PTR(-EROFS));
1552
1553         /* od_otable_sem: prevent concurrent init/fini */
1554         down(&dev->od_otable_sem);
1555         if (dev->od_otable_it)
1556                 GOTO(out, it = ERR_PTR(-EALREADY));
1557
1558         OBD_ALLOC_PTR(it);
1559         if (!it)
1560                 GOTO(out, it = ERR_PTR(-ENOMEM));
1561
1562         if (flags & DOIF_OUTUSED)
1563                 it->ooi_used_outside = 1;
1564
1565         if (flags & DOIF_RESET)
1566                 start |= SS_RESET;
1567
1568         if (valid & DOIV_ERROR_HANDLE) {
1569                 if (flags & DOIF_FAILOUT)
1570                         start |= SS_SET_FAILOUT;
1571                 else
1572                         start |= SS_CLEAR_FAILOUT;
1573         }
1574
1575         if (valid & DOIV_DRYRUN) {
1576                 if (flags & DOIF_DRYRUN)
1577                         start |= SS_SET_DRYRUN;
1578                 else
1579                         start |= SS_CLEAR_DRYRUN;
1580         }
1581
1582         /* XXX: dmu_object_next() does NOT find dnodes allocated
1583          *      in the current, not yet committed txg, so wait for
1584          *      the txg to sync to see all existing dnodes ... */
1585         txg_wait_synced(dmu_objset_pool(dev->od_os), 0ULL);
1586
1587         dev->od_otable_it = it;
1588         it->ooi_dev = dev;
1589         rc = scrub_start(osd_scrub_main, scrub, dev, start & ~SS_AUTO_PARTIAL);
1590         if (rc == -EALREADY) {
1591                 it->ooi_pos = 1;
1592         } else if (rc < 0) {
1593                 dev->od_otable_it = NULL;
1594                 OBD_FREE_PTR(it);
1595                 it = ERR_PTR(rc);
1596         } else {
1597                 it->ooi_pos = scrub->os_pos_current;
1598         }
1599
1600         GOTO(out, it);
1601
1602 out:
1603         up(&dev->od_otable_sem);
1604         return (struct dt_it *)it;
1605 }
1606
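/* Finalize the otable iterator: stop the scrub thread, then detach the
 * iterator from the device and free it. */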
1607 static void osd_otable_it_fini(const struct lu_env *env, struct dt_it *di)
1608 {
1609         struct osd_otable_it *it = (struct osd_otable_it *)di;
1610         struct osd_device *dev = it->ooi_dev;
1611
1612         /* od_otable_sem: prevent concurrent init/fini */
1613         down(&dev->od_otable_sem);
1614         scrub_stop(&dev->od_scrub);
1615         LASSERT(dev->od_otable_it == it);
1616
1617         dev->od_otable_it = NULL;
1618         up(&dev->od_otable_sem);
1619         OBD_FREE_PTR(it);
1620 }
1621
1622 static int osd_otable_it_get(const struct lu_env *env,
1623                              struct dt_it *di, const struct dt_key *key)
1624 {
1625         return 0;
1626 }
1627
1628 static void osd_otable_it_put(const struct lu_env *env, struct dt_it *di)
1629 {
1630 }
1631
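/*
 * Keep a window of dnodes prefetched ahead of the iterator: once the
 * outstanding prefetch count drops below half of OTABLE_PREFETCH, walk
 * forward with dmu_object_next() and issue asynchronous prefetches until
 * the window is full again.
 */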
1632 static void osd_otable_it_preload(const struct lu_env *env,
1633                                   struct osd_otable_it *it)
1634 {
1635         struct osd_device *dev = it->ooi_dev;
1636         int rc;
1637
1638         /* The prefetch count can go negative on the very first access
1639          * to the iterator or if some non-Lustre objects were found. */
1640         if (unlikely(it->ooi_prefetched < 0))
1641                 it->ooi_prefetched = 0;
1642
1643         if (it->ooi_prefetched >= (OTABLE_PREFETCH >> 1))
1644                 return;
1645
1646         if (it->ooi_prefetched_dnode == 0)
1647                 it->ooi_prefetched_dnode = it->ooi_pos;
1648
1649         while (it->ooi_prefetched < OTABLE_PREFETCH) {
1650                 rc = -dmu_object_next(dev->od_os, &it->ooi_prefetched_dnode,
1651                                       B_FALSE, 0);
1652                 if (rc)
1653                         break;
1654
1655                 osd_dmu_prefetch(dev->od_os, it->ooi_prefetched_dnode,
1656                                  0, 0, 0, ZIO_PRIORITY_ASYNC_READ);
1657                 it->ooi_prefetched++;
1658         }
1659 }
1660
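/* Wakeup condition for the wait in osd_otable_it_next(): stop waiting once
 * the scrub has passed the iterator position, is itself waiting, or has
 * stopped running. */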
1661 static inline int
1662 osd_otable_it_wakeup(struct lustre_scrub *scrub, struct osd_otable_it *it)
1663 {
1664         spin_lock(&scrub->os_lock);
1665         if (it->ooi_pos < scrub->os_pos_current || scrub->os_waiting ||
1666             !scrub->os_running)
1667                 it->ooi_waiting = 0;
1668         else
1669                 it->ooi_waiting = 1;
1670         spin_unlock(&scrub->os_lock);
1671
1672         return !it->ooi_waiting;
1673 }
1674
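/**
 * Advance the iterator to the next Lustre-visible object.
 *
 * If the iterator has caught up with the scrub position, wait until the
 * scrub moves ahead (or stops).  Then step to the next allocated dnode,
 * load its LMA xattr and skip dnodes without a valid LMA as well as objects
 * marked LMAC_NOT_IN_OI or LMAI_AGENT.  On success the object's self FID is
 * cached in ooi_fid for ::rec().
 *
 * \retval   1, iteration is complete (no more objects)
 * \retval   0, ooi_fid holds the FID of the next object
 * \retval -ve, on error
 */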
1675 static int osd_otable_it_next(const struct lu_env *env, struct dt_it *di)
1676 {
1677         struct osd_otable_it *it = (struct osd_otable_it *)di;
1678         struct osd_device *dev = it->ooi_dev;
1679         struct lustre_scrub *scrub = &dev->od_scrub;
1680         struct lustre_mdt_attrs *lma = NULL;
1681         nvlist_t *nvbuf = NULL;
1682         int rc, size = 0;
1683         bool locked;
1684         ENTRY;
1685
1686         LASSERT(it->ooi_user_ready);
1687         fid_zero(&it->ooi_fid);
1688
1689         if (unlikely(it->ooi_all_cached))
1690                 RETURN(1);
1691
1692 again:
1693         if (nvbuf) {
1694                 nvlist_free(nvbuf);
1695                 nvbuf = NULL;
1696                 lma = NULL;
1697                 size = 0;
1698         }
1699
1700         if (it->ooi_pos >= scrub->os_pos_current)
1701                 wait_var_event(scrub,
1702                                osd_otable_it_wakeup(scrub, it));
1703
1704         if (!scrub->os_running && !it->ooi_used_outside)
1705                 GOTO(out, rc = 1);
1706
1707         rc = -dmu_object_next(dev->od_os, &it->ooi_pos, B_FALSE, 0);
1708         if (rc) {
1709                 if (unlikely(rc == -ESRCH)) {
1710                         it->ooi_all_cached = 1;
1711                         rc = 1;
1712                 }
1713
1714                 GOTO(out, rc);
1715         }
1716
1717         rc = __osd_xattr_load_by_oid(dev, it->ooi_pos, &nvbuf);
1718
1719         locked = false;
1720         if (!scrub->os_full_speed) {
1721                 spin_lock(&scrub->os_lock);
1722                 locked = true;
1723         }
1724         it->ooi_prefetched--;
1725         if (!scrub->os_full_speed) {
1726                 if (scrub->os_waiting) {
1727                         scrub->os_waiting = 0;
1728                         wake_up_var(scrub);
1729                 }
1730         }
1731         if (locked)
1732                 spin_unlock(&scrub->os_lock);
1733
1734         if (rc == -ENOENT || rc == -EEXIST || rc == -ENODATA)
1735                 goto again;
1736
1737         if (rc)
1738                 GOTO(out, rc);
1739
1740         LASSERT(nvbuf != NULL);
1741         rc = -nvlist_lookup_byte_array(nvbuf, XATTR_NAME_LMA,
1742                                        (uchar_t **)&lma, &size);
1743         if (rc || size == 0)
1744                 /* It is either a non-Lustre object or an OSD-internal
1745                  * object; ignore it and continue. */
1746                 goto again;
1747
1748         LASSERTF(lma != NULL, "corrupted LMA, size %d\n", size);
1749         lustre_lma_swab(lma);
1750         if (unlikely(lma->lma_compat & LMAC_NOT_IN_OI ||
1751                      lma->lma_incompat & LMAI_AGENT))
1752                 goto again;
1753
1754         it->ooi_fid = lma->lma_self_fid;
1755
1756         GOTO(out, rc = 0);
1757
1758 out:
1759         if (nvbuf)
1760                 nvlist_free(nvbuf);
1761
1762         if (!rc && scrub->os_full_speed)
1763                 osd_otable_it_preload(env, it);
1764
1765         return rc;
1766 }
1767
1768 static struct dt_key *osd_otable_it_key(const struct lu_env *env,
1769                                         const struct dt_it *di)
1770 {
1771         return NULL;
1772 }
1773
1774 static int osd_otable_it_key_size(const struct lu_env *env,
1775                                   const struct dt_it *di)
1776 {
1777         return sizeof(__u64);
1778 }
1779
1780 static int osd_otable_it_rec(const struct lu_env *env, const struct dt_it *di,
1781                              struct dt_rec *rec, __u32 attr)
1782 {
1783         struct osd_otable_it *it  = (struct osd_otable_it *)di;
1784         struct lu_fid *fid = (struct lu_fid *)rec;
1785
1786         *fid = it->ooi_fid;
1787         return 0;
1788 }
1789
1790 static __u64 osd_otable_it_store(const struct lu_env *env,
1791                                  const struct dt_it *di)
1792 {
1793         struct osd_otable_it *it = (struct osd_otable_it *)di;
1794
1795         return it->ooi_pos;
1796 }
1797
1798 /**
1799  * Set the OSD-layer iteration start position to the specified hash.
1800  */
1801 static int osd_otable_it_load(const struct lu_env *env,
1802                               const struct dt_it *di, __u64 hash)
1803 {
1804         struct osd_otable_it *it = (struct osd_otable_it *)di;
1805         struct osd_device *dev = it->ooi_dev;
1806         struct lustre_scrub *scrub = &dev->od_scrub;
1807         int rc;
1808         ENTRY;
1809
1810         /* Forbid setting the iteration position after iteration has started. */
1811         if (it->ooi_user_ready)
1812                 RETURN(-EPERM);
1813
1814         if (hash > OSD_OTABLE_MAX_HASH)
1815                 hash = OSD_OTABLE_MAX_HASH;
1816
1817         /* The hash is the last checkpoint position;
1818          * we will start from the next one. */
1819         it->ooi_pos = hash + 1;
1820         it->ooi_prefetched = 0;
1821         it->ooi_prefetched_dnode = 0;
1822         it->ooi_user_ready = 1;
1823         if (!scrub->os_full_speed)
1824                 wake_up_var(scrub);
1825
1826         /* Unplug the OSD layer iteration by issuing the first next() call. */
1827         rc = osd_otable_it_next(env, (struct dt_it *)it);
1828
1829         RETURN(rc);
1830 }
1831
1832 static int osd_otable_it_key_rec(const struct lu_env *env,
1833                                  const struct dt_it *di, void *key_rec)
1834 {
1835         return 0;
1836 }
1837
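/*
 * Iterator operations exported to the dt layer for otable-based iteration.
 * A rough usage sketch (not taken from this file; the exact calling
 * convention is defined by the generic dt iterator users such as LFSCK):
 *
 *      di = iops->init(env, dt, attr);
 *      rc = iops->load(env, di, hash);    // also performs the first next()
 *      while (rc == 0) {
 *              iops->rec(env, di, (struct dt_rec *)&fid, 0);
 *              rc = iops->next(env, di);
 *      }
 *      iops->fini(env, di);
 */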
1838 const struct dt_index_operations osd_otable_ops = {
1839         .dio_it = {
1840                 .init     = osd_otable_it_init,
1841                 .fini     = osd_otable_it_fini,
1842                 .get      = osd_otable_it_get,
1843                 .put      = osd_otable_it_put,
1844                 .next     = osd_otable_it_next,
1845                 .key      = osd_otable_it_key,
1846                 .key_size = osd_otable_it_key_size,
1847                 .rec      = osd_otable_it_rec,
1848                 .store    = osd_otable_it_store,
1849                 .load     = osd_otable_it_load,
1850                 .key_rec  = osd_otable_it_key_rec,
1851         }
1852 };
1853
1854 /* high priority inconsistent items list APIs */
1855
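/**
 * Queue an inconsistent OI mapping (@fid/@oid) on the scrub's high-priority
 * list so that the running scrub can repair it out of the regular scan
 * order; the scrub thread is woken up if the list was empty.
 *
 * \retval       0, the item was queued
 * \retval -EAGAIN, the scrub is not running
 * \retval -ENOMEM, on allocation failure
 */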
1856 int osd_oii_insert(const struct lu_env *env, struct osd_device *dev,
1857                    const struct lu_fid *fid, uint64_t oid, bool insert)
1858 {
1859         struct lustre_scrub *scrub = &dev->od_scrub;
1860         struct osd_inconsistent_item *oii;
1861         bool wakeup = false;
1862         ENTRY;
1863
1864         osd_idc_find_and_init_with_oid(env, dev, fid, oid);
1865         OBD_ALLOC_PTR(oii);
1866         if (unlikely(!oii))
1867                 RETURN(-ENOMEM);
1868
1869         INIT_LIST_HEAD(&oii->oii_list);
1870         oii->oii_cache.oic_dev = dev;
1871         oii->oii_cache.oic_fid = *fid;
1872         oii->oii_cache.oic_dnode = oid;
1873         oii->oii_insert = insert;
1874
1875         spin_lock(&scrub->os_lock);
1876         if (!scrub->os_running) {
1877                 spin_unlock(&scrub->os_lock);
1878                 OBD_FREE_PTR(oii);
1879                 RETURN(-EAGAIN);
1880         }
1881
1882         if (list_empty(&scrub->os_inconsistent_items))
1883                 wakeup = true;
1884         list_add_tail(&oii->oii_list, &scrub->os_inconsistent_items);
1885         spin_unlock(&scrub->os_lock);
1886
1887         if (wakeup)
1888                 wake_up_var(scrub);
1889
1890         RETURN(0);
1891 }
1892
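/**
 * Look up @fid in the list of queued inconsistent items and return the
 * cached dnode number via @oid.
 *
 * \retval       0, found, *oid is set
 * \retval -ENOENT, @fid is not queued
 */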
1893 int osd_oii_lookup(struct osd_device *dev, const struct lu_fid *fid,
1894                    uint64_t *oid)
1895 {
1896         struct lustre_scrub *scrub = &dev->od_scrub;
1897         struct osd_inconsistent_item *oii;
1898         int ret = -ENOENT;
1899         ENTRY;
1900
1901         spin_lock(&scrub->os_lock);
1902         list_for_each_entry(oii, &scrub->os_inconsistent_items, oii_list) {
1903                 if (lu_fid_eq(fid, &oii->oii_cache.oic_fid)) {
1904                         *oid = oii->oii_cache.oic_dnode;
1905                         ret = 0;
1906                         break;
1907                 }
1908         }
1909         spin_unlock(&scrub->os_lock);
1910
1911         RETURN(ret);
1912 }