+# test_28: if an MDT ever failed to answer a name-entry verification
+# request during the first-stage scan, the LFSCK must skip handling
+# orphan MDT-objects on that MDT, while orphans on other MDTs are
+# still repaired normally.
+test_28() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+	echo "#####"
+	echo "The target name entry is lost. The LFSCK should insert the"
+	echo "orphan MDT-object under .lustre/lost+found/MDTxxxx. But if"
+	echo "the MDT (on which the orphan MDT-object resides) has ever"
+	echo "failed to respond some name entry verification during the"
+	echo "first stage-scanning, then the LFSCK should skip to handle"
+	echo "orphan MDT-object on this MDT. But other MDTs should not"
+	echo "be affected."
+	echo "#####"
+
+	check_mount_and_prep
+	$LFS mkdir -i 0 $DIR/$tdir/d1
+	$LFS mkdir -i 1 $DIR/$tdir/d1/a1
+	$LFS mkdir -i 1 $DIR/$tdir/d1/a2
+
+	$LFS mkdir -i 1 $DIR/$tdir/d2
+	$LFS mkdir -i 0 $DIR/$tdir/d2/a1
+	$LFS mkdir -i 0 $DIR/$tdir/d2/a2
+
+	echo "Inject failure stub on MDT0 to simulate the case that"
+	echo "d1/a1's name entry will be removed, but the d1/a1's object"
+	echo "and its linkEA are kept in the system. And the case that"
+	echo "d2/a2's name entry will be removed, but the d2/a2's object"
+	echo "and its linkEA are kept in the system."
+
+	# Remove the name entries only; the objects and their linkEA
+	# survive, producing one orphan per MDT.
+	#define OBD_FAIL_LFSCK_NO_NAMEENTRY	0x1624
+	do_facet mds1 $LCTL set_param fail_loc=0x1624
+	do_facet mds2 $LCTL set_param fail_loc=0x1624
+	rmdir $DIR/$tdir/d1/a1 || error "(1) Fail to rmdir $DIR/$tdir/d1/a1"
+	rmdir $DIR/$tdir/d2/a2 || error "(2) Fail to rmdir $DIR/$tdir/d2/a2"
+	do_facet mds1 $LCTL set_param fail_loc=0
+	do_facet mds2 $LCTL set_param fail_loc=0
+
+	cancel_lru_locks mdc
+	cancel_lru_locks osc
+
+	echo "Inject failure, to simulate the MDT0 fail to handle"
+	echo "MDT1 LFSCK request during the first-stage scanning."
+	#define OBD_FAIL_LFSCK_BAD_NETWORK	0x161c
+	do_facet mds2 $LCTL set_param fail_loc=0x161c fail_val=0
+
+	echo "Trigger namespace LFSCK on all devices to find out orphan object"
+	$START_NAMESPACE -r -A ||
+		error "(3) Fail to start LFSCK for namespace"
+
+	# The failed verification forces mds1 to finish as 'partial';
+	# mds2 is unaffected and must complete normally.
+	wait_update_facet mds1 "$LCTL get_param -n \
+		mdd.$(facet_svc mds1).lfsck_namespace |
+		awk '/^status/ { print \\\$2 }'" "partial" 32 || {
+		error "(4) mds1 is not the expected 'partial'"
+	}
+
+	wait_update_facet mds2 "$LCTL get_param -n \
+		mdd.$(facet_svc mds2).lfsck_namespace |
+		awk '/^status/ { print \\\$2 }'" "completed" 32 || {
+		error "(5) mds2 is not the expected 'completed'"
+	}
+
+	do_facet mds2 $LCTL set_param fail_loc=0 fail_val=0
+
+	# First pass: mds1 skipped its orphan, mds2 repaired its one.
+	local repaired=$(do_facet mds1 $LCTL get_param -n \
+			 mdd.$(facet_svc mds1).lfsck_namespace |
+			 awk '/^lost_dirent_repaired/ { print $2 }')
+	[ $repaired -eq 0 ] ||
+		error "(6) Expect 0 fixed on mds1, but got: $repaired"
+
+	repaired=$(do_facet mds2 $LCTL get_param -n \
+		   mdd.$(facet_svc mds2).lfsck_namespace |
+		   awk '/^lost_dirent_repaired/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(7) Expect 1 fixed on mds2, but got: $repaired"
+
+	echo "Trigger namespace LFSCK on all devices again to cleanup"
+	$START_NAMESPACE -r -A ||
+		error "(8) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 9
+
+	# Second pass (no injected failure): mds1 now repairs the orphan
+	# it skipped before; mds2 has nothing left to fix.
+	local repaired=$(do_facet mds1 $LCTL get_param -n \
+			 mdd.$(facet_svc mds1).lfsck_namespace |
+			 awk '/^lost_dirent_repaired/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(10) Expect 1 fixed on mds1, but got: $repaired"
+
+	repaired=$(do_facet mds2 $LCTL get_param -n \
+		   mdd.$(facet_svc mds2).lfsck_namespace |
+		   awk '/^lost_dirent_repaired/ { print $2 }')
+	[ $repaired -eq 0 ] ||
+		error "(11) Expect 0 fixed on mds2, but got: $repaired"
+}
+run_test 28 "Skip the failed MDT(s) when handle orphan MDT-objects"
+
+# test_29a (DISABLED below): nlink attribute larger than the known name
+# entry count; would verify the LFSCK shrinks nlink to match.
+test_29a() {
+	echo "#####"
+	echo "The object's nlink attribute is larger than the object's known"
+	echo "name entries count. The LFSCK will repair the object's nlink"
+	echo "attribute to match the known name entries count"
+	echo "#####"
+
+	check_mount_and_prep
+
+	$LFS mkdir -i 0 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0"
+	touch $DIR/$tdir/d0/foo || error "(2) Fail to create foo"
+
+	echo "Inject failure stub on MDT0 to simulate the case that foo's"
+	echo "nlink attribute is larger than its name entries count."
+
+	# The hard link bumps nlink twice, leaving nlink=3 with only
+	# two name entries.
+	#define OBD_FAIL_LFSCK_MORE_NLINK	0x1625
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1625
+	ln $DIR/$tdir/d0/foo $DIR/$tdir/d0/h1 ||
+		error "(3) Fail to hard link to $DIR/$tdir/d0/foo"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+	cancel_lru_locks mdc
+	local count=$(stat --format=%h $DIR/$tdir/d0/foo)
+	[ $count -eq 3 ] || error "(4) Cannot inject error: $count"
+
+	echo "Trigger namespace LFSCK to repair the nlink count"
+	$START_NAMESPACE -r -A ||
+		error "(5) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 6
+
+	local repaired=$($SHOW_NAMESPACE |
+			 awk '/^nlinks_repaired/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(7) Fail to repair nlink count: $repaired"
+
+	cancel_lru_locks mdc
+	count=$(stat --format=%h $DIR/$tdir/d0/foo)
+	[ $count -eq 2 ] || error "(8) Fail to repair nlink count: $count"
+}
+# Disable 29a, we only allow nlink to be updated if the known linkEA
+# entries is larger than nlink count.
+#
+#run_test 29a "LFSCK can repair bad nlink count (1)"
+
+# test_29b: nlink attribute smaller than the known name entry count;
+# the LFSCK must grow nlink to match (the allowed repair direction,
+# see the note disabling 29a above).
+test_29b() {
+	echo "#####"
+	echo "The object's nlink attribute is smaller than the object's known"
+	echo "name entries count. The LFSCK will repair the object's nlink"
+	echo "attribute to match the known name entries count"
+	echo "#####"
+
+	check_mount_and_prep
+
+	$LFS mkdir -i 0 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0"
+	touch $DIR/$tdir/d0/foo || error "(2) Fail to create foo"
+
+	echo "Inject failure stub on MDT0 to simulate the case that foo's"
+	echo "nlink attribute is smaller than its name entries count."
+
+	# The hard link adds a name entry without bumping nlink,
+	# leaving nlink=1 with two name entries.
+	#define OBD_FAIL_LFSCK_LESS_NLINK	0x1626
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1626
+	ln $DIR/$tdir/d0/foo $DIR/$tdir/d0/h1 ||
+		error "(3) Fail to hard link to $DIR/$tdir/d0/foo"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+	cancel_lru_locks mdc
+	local count=$(stat --format=%h $DIR/$tdir/d0/foo)
+	[ $count -eq 1 ] || error "(4) Cannot inject error: $count"
+
+	echo "Trigger namespace LFSCK to repair the nlink count"
+	$START_NAMESPACE -r -A ||
+		error "(5) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 6
+
+	local repaired=$($SHOW_NAMESPACE |
+			 awk '/^nlinks_repaired/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(7) Fail to repair nlink count: $repaired"
+
+	cancel_lru_locks mdc
+	count=$(stat --format=%h $DIR/$tdir/d0/foo)
+	[ $count -eq 2 ] || error "(8) Fail to repair nlink count: $count"
+}
+run_test 29b "LFSCK can repair bad nlink count (2)"
+
+# test_29c: overflow the linkEA with many hard links (which blocks
+# migration), remove enough links to fit again, and verify the
+# namespace LFSCK clears the overflow timestamp so migration works.
+test_29c()
+{
+	echo "#####"
+	echo "The namespace LFSCK will create many hard links to the target"
+	echo "file as to exceed the linkEA size limitation. Under such case"
+	echo "the linkEA will be marked as overflow that will prevent the"
+	echo "target file to be migrated. Then remove some hard links to"
+	echo "make the left hard links to be held within the linkEA size"
+	echo "limitation. But before the namespace LFSCK adding all the"
+	echo "missed linkEA entries back, the overflow mark (timestamp)"
+	echo "will not be cleared."
+	echo "#####"
+
+	check_mount_and_prep
+
+	mkdir -p $DIR/$tdir/guard || error "(0.1) Fail to mkdir"
+	$LFS mkdir -i $((MDSCOUNT - 1)) $DIR/$tdir/foo ||
+		error "(0.2) Fail to mkdir"
+	touch $DIR/$tdir/guard/f0 || error "(1) Fail to create"
+	local oldfid=$($LFS path2fid $DIR/$tdir/guard/f0)
+
+	# define MAX_LINKEA_SIZE        4096
+	# sizeof(link_ea_header) = 24
+	# sizeof(link_ea_entry) = 18
+	# nlink_min=$(((MAX_LINKEA_SIZE - sizeof(link_ea_header)) /
+	#	       (sizeof(link_ea_entry) + name_length))
+	# If the average name length is 12 bytes, then 150 hard links
+	# is totally enough to overflow the linkEA
+	echo "Create 150 hard links should succeed although the linkEA overflow"
+	createmany -l $DIR/$tdir/guard/f0 $DIR/$tdir/foo/ttttttttttt 150 ||
+		error "(2) Fail to hard link"
+
+	cancel_lru_locks mdc
+	if [ $MDSCOUNT -ge 2 ]; then
+		$LFS migrate -m 1 $DIR/$tdir/guard 2>/dev/null &&
+			error "(3.1) Migrate should fail"
+
+		echo "The object with linkEA overflow should NOT be migrated"
+		local newfid=$($LFS path2fid $DIR/$tdir/guard/f0)
+		[ "$newfid" == "$oldfid" ] ||
+			error "(3.2) Migrate should fail: $newfid != $oldfid"
+	fi
+
+	# Remove 100 hard links, then the linkEA should have space
+	# to hold the missed linkEA entries.
+	echo "Remove 100 hard links to save space for the missed linkEA entries"
+	unlinkmany $DIR/$tdir/foo/ttttttttttt 100 || error "(4) Fail to unlink"
+
+	if [ $MDSCOUNT -ge 2 ]; then
+		$LFS migrate -m 1 $DIR/$tdir/guard 2>/dev/null &&
+			error "(5.1) Migrate should fail"
+
+		# The overflow timestamp is still there, so migration will fail.
+		local newfid=$($LFS path2fid $DIR/$tdir/guard/f0)
+		[ "$newfid" == "$oldfid" ] ||
+			error "(5.2) Migrate should fail: $newfid != $oldfid"
+	fi
+
+	# sleep 3 seconds to guarantee that the overflow is recognized
+	sleep 3
+
+	echo "Trigger namespace LFSCK to clear the overflow timestamp"
+	$START_NAMESPACE -r -A ||
+		error "(6) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 7
+
+	local repaired=$($SHOW_NAMESPACE |
+			 awk '/^linkea_overflow_cleared/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(8) Fail to clear linkea overflow: $repaired"
+
+	repaired=$($SHOW_NAMESPACE |
+		   awk '/^nlinks_repaired/ { print $2 }')
+	[ $repaired -eq 0 ] ||
+		error "(9) Unexpected nlink repaired: $repaired"
+
+	if [ $MDSCOUNT -ge 2 ]; then
+		$LFS migrate -m 1 $DIR/$tdir/guard 2>/dev/null ||
+			error "(10.1) Migrate failure"
+
+		# Migration should succeed after clear the overflow timestamp.
+		local newfid=$($LFS path2fid $DIR/$tdir/guard/f0)
+		[ "$newfid" != "$oldfid" ] ||
+			error "(10.2) Migrate should succeed"
+
+		ls -l $DIR/$tdir/foo > /dev/null ||
+			error "(11) 'ls' failed after migration"
+	fi
+
+	rm -f $DIR/$tdir/guard/f0 || error "(12) Fail to unlink f0"
+	rm -rf $DIR/$tdir/foo || error "(13) Fail to rmdir foo"
+}
+run_test 29c "verify linkEA size limitation"
+
+test_30() {
+ [ $(facet_fstype $SINGLEMDS) != ldiskfs ] &&
+ skip "ldiskfs only test" && return
+
+ echo "#####"
+ echo "The namespace LFSCK will move the orphans from backend"
+ echo "/lost+found directory to normal client visible namespace"
+ echo "or to global visible ./lustre/lost+found/MDTxxxx/ directory"
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $DIR/$tdir/foo || error "(1) Fail to mkdir foo"
+ touch $DIR/$tdir/foo/f0 || error "(2) Fail to touch f1"
+
+ echo "Inject failure stub on MDT0 to simulate the case that"
+ echo "directory d0 has no linkEA entry, then the LFSCK will"
+ echo "move it into .lustre/lost+found/MDTxxxx/ later."
+
+ #define OBD_FAIL_LFSCK_NO_LINKEA 0x161d
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x161d
+ mkdir $DIR/$tdir/foo/d0 || error "(3) Fail to mkdir d0"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ local pfid=$($LFS path2fid $DIR/$tdir/foo)
+ local cfid=$($LFS path2fid $DIR/$tdir/foo/d0)
+
+ touch $DIR/$tdir/foo/d0/f1 || error "(4) Fail to touch f1"
+ mkdir $DIR/$tdir/foo/d0/d1 || error "(5) Fail to mkdir d1"
+
+ echo "Inject failure stub on MDT0 to simulate the case that the"
+ echo "object's name entry will be removed, but not destroy the"
+ echo "object. Then backend e2fsck will handle it as orphan and"
+ echo "add them into the backend /lost+found directory."
+
+ #define OBD_FAIL_LFSCK_NO_NAMEENTRY 0x1624
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1624
+ rmdir $DIR/$tdir/foo/d0/d1 || error "(6) Fail to rmdir d1"
+ rm -f $DIR/$tdir/foo/d0/f1 || error "(7) Fail to unlink f1"
+ rmdir $DIR/$tdir/foo/d0 || error "(8) Fail to rmdir d0"
+ rm -f $DIR/$tdir/foo/f0 || error "(9) Fail to unlink f0"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ umount_client $MOUNT || error "(10) Fail to stop client!"
+
+ stop $SINGLEMDS || error "(11) Fail to stop MDT0"
+
+ echo "run e2fsck"
+ run_e2fsck $(facet_host $SINGLEMDS) $MDT_DEVNAME "-y" ||
+ error "(12) Fail to run e2fsck"
+
+ start $SINGLEMDS $MDT_DEVNAME $MOUNT_OPTS_NOSCRUB > /dev/null ||
+ error "(13) Fail to start MDT0"
+
+ echo "Trigger namespace LFSCK to recover backend orphans"
+ $START_NAMESPACE -r -A ||
+ error "(14) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 15
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^local_lost_found_moved/ { print $2 }')
+ [ $repaired -ge 4 ] ||
+ error "(16) Fail to recover backend orphans: $repaired"
+
+ mount_client $MOUNT || error "(17) Fail to start client!"
+
+ stat $DIR/$tdir/foo/f0 || error "(18) f0 is not recovered"
+
+ ls -ail $MOUNT/.lustre/lost+found/
+
+ echo "d0 should become orphan under .lustre/lost+found/MDT0000/"
+ [ -d $MOUNT/.lustre/lost+found/MDT0000 ] ||
+ error "(19) $MOUNT/.lustre/lost+found/MDT0000/ should be there"
+
+ ls -ail $MOUNT/.lustre/lost+found/MDT0000/
+
+ local cname=$MOUNT/.lustre/lost+found/MDT0000/${cfid}-${pfid}-D-0
+ [ ! -z "$cname" ] || error "(20) d0 is not recovered"
+
+ stat ${cname}/d1 || error "(21) d1 is not recovered"
+ stat ${cname}/f1 || error "(22) f1 is not recovered"
+}
+run_test 30 "LFSCK can recover the orphans from backend /lost+found"
+
+# test_31a: name entries inserted into the wrong (first) shard of a
+# striped directory; verify the LFSCK repairs the bad name hash and
+# the entries remain usable afterwards.
+test_31a() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+	echo "#####"
+	echo "For the name entry under a striped directory, if the name"
+	echo "hash does not match the shard, then the LFSCK will repair"
+	echo "the bad name entry"
+	echo "#####"
+
+	check_mount_and_prep
+
+	$LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir/striped_dir ||
+		error "(1) Fail to create striped directory"
+
+	echo "Inject failure stub on client to simulate the case that"
+	echo "some name entry should be inserted into other non-first"
+	echo "shard, but inserted into the first shard by wrong"
+
+	# fail_val=0 selects the first shard as the wrong insert target.
+	#define OBD_FAIL_LFSCK_BAD_NAME_HASH	0x1628
+	$LCTL set_param fail_loc=0x1628 fail_val=0
+	createmany -d $DIR/$tdir/striped_dir/d $MDSCOUNT ||
+		error "(2) Fail to create file under striped directory"
+	$LCTL set_param fail_loc=0 fail_val=0
+
+	echo "Trigger namespace LFSCK to repair bad name hash"
+	$START_NAMESPACE -r -A ||
+		error "(3) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 4
+
+	local repaired=$($SHOW_NAMESPACE |
+			 awk '/^name_hash_repaired/ { print $2 }')
+	[ $repaired -ge 1 ] ||
+		error "(5) Fail to repair bad name hash: $repaired"
+
+	# Remount to drop client-side caches before re-checking entries.
+	umount_client $MOUNT || error "(6) umount failed"
+	mount_client $MOUNT || error "(7) mount failed"
+
+	for ((i = 0; i < $MDSCOUNT; i++)); do
+		stat $DIR/$tdir/striped_dir/d$i ||
+			error "(8) Fail to stat d$i after LFSCK"
+		rmdir $DIR/$tdir/striped_dir/d$i ||
+			error "(9) Fail to unlink d$i after LFSCK"
+	done
+
+	rmdir $DIR/$tdir/striped_dir ||
+		error "(10) Fail to remove the striped directory after LFSCK"
+}
+run_test 31a "The LFSCK can find/repair the name entry with bad name hash (1)"
+
+# test_31b: same as 31a but the bad entries land in the second shard
+# (fail_val=1), so the repair counter is read from mds2 instead of
+# the aggregate $SHOW_NAMESPACE.
+test_31b() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+	echo "#####"
+	echo "For the name entry under a striped directory, if the name"
+	echo "hash does not match the shard, then the LFSCK will repair"
+	echo "the bad name entry"
+	echo "#####"
+
+	check_mount_and_prep
+
+	$LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir/striped_dir ||
+		error "(1) Fail to create striped directory"
+
+	# NOTE(review): "secod" below is a typo for "second" in the
+	# runtime echo text; left as-is here to keep output unchanged.
+	echo "Inject failure stub on client to simulate the case that"
+	echo "some name entry should be inserted into other non-second"
+	echo "shard, but inserted into the secod shard by wrong"
+
+	#define OBD_FAIL_LFSCK_BAD_NAME_HASH	0x1628
+	$LCTL set_param fail_loc=0x1628 fail_val=1
+	createmany -d $DIR/$tdir/striped_dir/d $MDSCOUNT ||
+		error "(2) Fail to create file under striped directory"
+	$LCTL set_param fail_loc=0 fail_val=0
+
+	echo "Trigger namespace LFSCK to repair bad name hash"
+	$START_NAMESPACE -r -A ||
+		error "(3) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 4
+
+	local repaired=$(do_facet mds2 $LCTL get_param -n \
+			 mdd.$(facet_svc mds2).lfsck_namespace |
+			 awk '/^name_hash_repaired/ { print $2 }')
+	[ $repaired -ge 1 ] ||
+		error "(5) Fail to repair bad name hash: $repaired"
+
+	umount_client $MOUNT || error "(6) umount failed"
+	mount_client $MOUNT || error "(7) mount failed"
+
+	for ((i = 0; i < $MDSCOUNT; i++)); do
+		stat $DIR/$tdir/striped_dir/d$i ||
+			error "(8) Fail to stat d$i after LFSCK"
+		rmdir $DIR/$tdir/striped_dir/d$i ||
+			error "(9) Fail to unlink d$i after LFSCK"
+	done
+
+	rmdir $DIR/$tdir/striped_dir ||
+		error "(10) Fail to remove the striped directory after LFSCK"
+}
+run_test 31b "The LFSCK can find/repair the name entry with bad name hash (2)"
+
+# test_31c: master LMV EA lost but nothing was created through the
+# master afterwards, so the LFSCK can safely re-generate the EA and
+# the striped directory stays usable (and empty).
+test_31c() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+	echo "#####"
+	echo "For some reason, the master MDT-object of the striped directory"
+	echo "may lost its master LMV EA. If nobody created files under the"
+	echo "master directly after the master LMV EA lost, then the LFSCK"
+	echo "should re-generate the master LMV EA."
+	echo "#####"
+
+	check_mount_and_prep
+
+	echo "Inject failure stub on MDT0 to simulate the case that the"
+	echo "master MDT-object of the striped directory lost the LMV EA."
+
+	#define OBD_FAIL_LFSCK_LOST_MASTER_LMV	0x1629
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1629
+	$LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir/striped_dir ||
+		error "(1) Fail to create striped directory"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+	echo "Trigger namespace LFSCK to re-generate master LMV EA"
+	$START_NAMESPACE -r -A ||
+		error "(2) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 3
+
+	local repaired=$($SHOW_NAMESPACE |
+			 awk '/^striped_dirs_repaired/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(4) Fail to re-generate master LMV EA: $repaired"
+
+	# Remount to drop cached (stale) directory layout.
+	umount_client $MOUNT || error "(5) umount failed"
+	mount_client $MOUNT || error "(6) mount failed"
+
+	local empty=$(ls $DIR/$tdir/striped_dir/)
+	[ -z "$empty" ] || error "(7) The master LMV EA is not repaired: $empty"
+
+	rmdir $DIR/$tdir/striped_dir ||
+		error "(8) Fail to remove the striped directory after LFSCK"
+}
+run_test 31c "Re-generate the lost master LMV EA for striped directory"
+
+# test_31d: master LMV EA lost AND a file was created through the
+# master afterwards; the LFSCK must NOT re-generate the EA, instead
+# marking the broken striped directory read-only (immutable).
+test_31d() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+	echo "#####"
+	echo "For some reason, the master MDT-object of the striped directory"
+	echo "may lost its master LMV EA. If somebody created files under the"
+	echo "master directly after the master LMV EA lost, then the LFSCK"
+	echo "should NOT re-generate the master LMV EA, instead, it should"
+	echo "change the broken striped dirctory as read-only to prevent"
+	echo "further damage"
+	echo "#####"
+
+	check_mount_and_prep
+
+	echo "Inject failure stub on MDT0 to simulate the case that the"
+	echo "master MDT-object of the striped directory lost the LMV EA."
+
+	#define OBD_FAIL_LFSCK_LOST_MASTER_LMV	0x1629
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1629
+	$LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir/striped_dir ||
+		error "(1) Fail to create striped directory"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x0
+
+	# Remount so the client forgets the striped layout, then create
+	# directly under the (now plain-looking) master object.
+	umount_client $MOUNT || error "(2) umount failed"
+	mount_client $MOUNT || error "(3) mount failed"
+
+	touch $DIR/$tdir/striped_dir/dummy ||
+		error "(4) Fail to touch under broken striped directory"
+
+	echo "Trigger namespace LFSCK to find out the inconsistency"
+	$START_NAMESPACE -r -A ||
+		error "(5) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 6
+
+	local repaired=$($SHOW_NAMESPACE |
+			 awk '/^striped_dirs_repaired/ { print $2 }')
+	[ $repaired -eq 0 ] ||
+		error "(7) Re-generate master LMV EA unexpected: $repaired"
+
+	stat $DIR/$tdir/striped_dir/dummy ||
+		error "(8) Fail to stat $DIR/$tdir/striped_dir/dummy"
+
+	touch $DIR/$tdir/striped_dir/foo &&
+		error "(9) The broken striped directory should be read-only"
+
+	# Lift the immutable flag so cleanup can proceed.
+	chattr -i $DIR/$tdir/striped_dir ||
+		error "(10) Fail to chattr on the broken striped directory"
+
+	rmdir $DIR/$tdir/striped_dir ||
+		error "(11) Fail to remove the striped directory after LFSCK"
+}
+run_test 31d "Set broken striped directory (modified after broken) as read-only"
+
+# test_31e: slave LMV EA lost on the shard co-located with the master
+# (fail_val=0); verify the LFSCK re-generates it (counted on MDT0 via
+# $SHOW_NAMESPACE).
+test_31e() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+	echo "#####"
+	echo "For some reason, the slave MDT-object of the striped directory"
+	echo "may lost its slave LMV EA. The LFSCK should re-generate the"
+	echo "slave LMV EA."
+	echo "#####"
+
+	check_mount_and_prep
+
+	echo "Inject failure stub on MDT0 to simulate the case that the"
+	echo "slave MDT-object (that resides on the same MDT as the master"
+	echo "MDT-object resides on) lost the LMV EA."
+
+	#define OBD_FAIL_LFSCK_LOST_SLAVE_LMV	0x162a
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x162a fail_val=0
+	$LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir/striped_dir ||
+		error "(1) Fail to create striped directory"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x0 fail_val=0
+
+	echo "Trigger namespace LFSCK to re-generate slave LMV EA"
+	$START_NAMESPACE -r -A ||
+		error "(2) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 3
+
+	local repaired=$($SHOW_NAMESPACE |
+			 awk '/^striped_shards_repaired/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(4) Fail to re-generate slave LMV EA: $repaired"
+
+	rmdir $DIR/$tdir/striped_dir ||
+		error "(5) Fail to remove the striped directory after LFSCK"
+}
+run_test 31e "Re-generate the lost slave LMV EA for striped directory (1)"
+
+# test_31f: like 31e, but the slave LMV EA is lost on a shard hosted
+# by a different MDT (fail_val=1), so the repair counter is read from
+# mds2 rather than the aggregate $SHOW_NAMESPACE.
+test_31f() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+	echo "#####"
+	echo "For some reason, the slave MDT-object of the striped directory"
+	echo "may lost its slave LMV EA. The LFSCK should re-generate the"
+	echo "slave LMV EA."
+	echo "#####"
+
+	check_mount_and_prep
+
+	echo "Inject failure stub on MDT0 to simulate the case that the"
+	echo "slave MDT-object (that resides on different MDT as the master"
+	echo "MDT-object resides on) lost the LMV EA."
+
+	#define OBD_FAIL_LFSCK_LOST_SLAVE_LMV	0x162a
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x162a fail_val=1
+	$LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir/striped_dir ||
+		error "(1) Fail to create striped directory"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x0 fail_val=0
+
+	echo "Trigger namespace LFSCK to re-generate slave LMV EA"
+	$START_NAMESPACE -r -A ||
+		error "(2) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 3
+
+	local repaired=$(do_facet mds2 $LCTL get_param -n \
+			 mdd.$(facet_svc mds2).lfsck_namespace |
+			 awk '/^striped_shards_repaired/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(4) Fail to re-generate slave LMV EA: $repaired"
+
+	rmdir $DIR/$tdir/striped_dir ||
+		error "(5) Fail to remove the striped directory after LFSCK"
+}
+run_test 31f "Re-generate the lost slave LMV EA for striped directory (2)"
+
+# test_31g: the stripe index stored in a slave LMV EA is corrupted
+# (first shard claims the second shard's index); verify the LFSCK
+# repairs the EA and the directory is fully usable afterwards.
+test_31g() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+	echo "#####"
+	echo "For some reason, the stripe index in the slave LMV EA is"
+	echo "corrupted. The LFSCK should repair the slave LMV EA."
+	echo "#####"
+
+	check_mount_and_prep
+
+	echo "Inject failure stub on MDT0 to simulate the case that the"
+	echo "slave LMV EA on the first shard of the striped directory"
+	echo "claims the same index as the second shard claims"
+
+	#define OBD_FAIL_LFSCK_BAD_SLAVE_LMV	0x162b
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x162b fail_val=0
+	$LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir/striped_dir ||
+		error "(1) Fail to create striped directory"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x0 fail_val=0
+
+	echo "Trigger namespace LFSCK to repair the slave LMV EA"
+	$START_NAMESPACE -r -A ||
+		error "(2) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 3
+
+	local repaired=$($SHOW_NAMESPACE |
+			 awk '/^striped_shards_repaired/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(4) Fail to repair slave LMV EA: $repaired"
+
+	# Remount to drop cached layout before exercising the directory.
+	umount_client $MOUNT || error "(5) umount failed"
+	mount_client $MOUNT || error "(6) mount failed"
+
+	touch $DIR/$tdir/striped_dir/foo ||
+		error "(7) Fail to touch file after the LFSCK"
+
+	rm -f $DIR/$tdir/striped_dir/foo ||
+		error "(8) Fail to unlink file after the LFSCK"
+
+	rmdir $DIR/$tdir/striped_dir ||
+		error "(9) Fail to remove the striped directory after LFSCK"
+}
+run_test 31g "Repair the corrupted slave LMV EA"
+
+# test_31h: a shard's NAME ENTRY (not its LMV EA) in the striped
+# directory carries the wrong index; verify the LFSCK repairs the
+# dirent and the directory is fully usable afterwards.
+test_31h() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+	echo "#####"
+	echo "For some reason, the shard's name entry in the striped"
+	echo "directory may be corrupted. The LFSCK should repair the"
+	echo "bad shard's name entry."
+	echo "#####"
+
+	check_mount_and_prep
+
+	echo "Inject failure stub on MDT0 to simulate the case that the"
+	echo "first shard's name entry in the striped directory claims"
+	echo "the same index as the second shard's name entry claims."
+
+	#define OBD_FAIL_LFSCK_BAD_SLAVE_NAME	0x162c
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x162c fail_val=0
+	$LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir/striped_dir ||
+		error "(1) Fail to create striped directory"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x0 fail_val=0
+
+	echo "Trigger namespace LFSCK to repair the shard's name entry"
+	$START_NAMESPACE -r -A ||
+		error "(2) Fail to start LFSCK for namespace"
+
+	wait_all_targets_blocked namespace completed 3
+
+	local repaired=$($SHOW_NAMESPACE |
+			 awk '/^dirent_repaired/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(4) Fail to repair shard's name entry: $repaired"
+
+	# Remount to drop cached layout before exercising the directory.
+	umount_client $MOUNT || error "(5) umount failed"
+	mount_client $MOUNT || error "(6) mount failed"
+
+	touch $DIR/$tdir/striped_dir/foo ||
+		error "(7) Fail to touch file after the LFSCK"
+
+	rm -f $DIR/$tdir/striped_dir/foo ||
+		error "(8) Fail to unlink file after the LFSCK"
+
+	rmdir $DIR/$tdir/striped_dir ||
+		error "(9) Fail to remove the striped directory after LFSCK"
+}
+run_test 31h "Repair the corrupted shard's name entry"
+
+# test_32a: while a layout LFSCK is slowed down in scanning-phase1,
+# stop an OST; the LFSCK must still be stoppable afterwards.
+test_32a()
+{
+	lfsck_prep 5 5
+	umount_client $MOUNT
+
+	# Slow each engine iteration by fail_val seconds so the scan
+	# is still in phase1 when we check/stop below.
+	#define OBD_FAIL_LFSCK_ENGINE_DELAY	0x162d
+	do_facet $SINGLEMDS $LCTL set_param fail_val=3 fail_loc=0x162d
+	$START_LAYOUT -r || error "(1) Fail to start LFSCK for layout!"
+
+	local STATUS=$($SHOW_LAYOUT | awk '/^status/ { print $2 }')
+	[ "$STATUS" == "scanning-phase1" ] ||
+		error "(2) Expect 'scanning-phase1', but got '$STATUS'"
+
+	echo "stop ost1"
+	stop ost1 > /dev/null || error "(3) Fail to stop OST1!"
+
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0 fail_val=0
+	sleep 4
+
+	echo "stop LFSCK"
+	$STOP_LFSCK || error "(4) Fail to stop LFSCK!"
+
+	start ost1 $(ostdevname 1) $MOUNT_OPTS_NOSCRUB > /dev/null ||
+		error "(5) Fail to start ost1"
+}
+run_test 32a "stop LFSCK when some OST failed"
+
+# test_32b: while a namespace LFSCK is slowed down in scanning-phase1,
+# stop a remote MDT; the LFSCK must still be stoppable afterwards.
+test_32b()
+{
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+	lfsck_prep 5 5
+	$LFS mkdir -i 1 $DIR/$tdir/dp ||
+		error "(1) Fail to create $DIR/$tdir/dp"
+	$LFS mkdir -i 0 -c $MDSCOUNT $DIR/$tdir/dp/dc1 ||
+		error "(2) Fail to create $DIR/$tdir/dp/dc1"
+	$LFS mkdir -i 0 -c $MDSCOUNT $DIR/$tdir/dp/dc2 ||
+		error "(3) Fail to create $DIR/$tdir/dp/dc2"
+	umount_client $MOUNT
+
+	# Slow each engine iteration by fail_val seconds so the scan
+	# is still in phase1 when we stop mds2 below.
+	#define OBD_FAIL_LFSCK_ENGINE_DELAY	0x162d
+	do_facet $SINGLEMDS $LCTL set_param fail_val=3 fail_loc=0x162d
+	$START_NAMESPACE -r -A || error "(4) Fail to start LFSCK for namespace!"
+
+	wait_update_facet $SINGLEMDS "$LCTL get_param -n \
+		mdd.${MDT_DEV}.lfsck_namespace |
+		awk '/^status/ { print \\\$2 }'" "scanning-phase1" 32 || {
+		$SHOW_NAMESPACE
+		error "(5) unexpected status"
+	}
+
+	echo "stop mds2"
+	stop mds2 > /dev/null || error "(6) Fail to stop MDT2!"
+
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0 fail_val=0
+	sleep 4
+
+	echo "stop LFSCK"
+	$STOP_LFSCK || error "(7) Fail to stop LFSCK!"
+
+	start mds2 $(mdsdevname 2) $MOUNT_OPTS_NOSCRUB > /dev/null ||
+		error "(8) Fail to start MDT2"
+}
+run_test 32b "stop LFSCK when some MDT failed"
+
+# test_33: verify the requested LFSCK options are reflected in the
+# 'param' line of the status output for both layout and namespace
+# components.
+# NOTE(review): "paramters" in the run_test summary below is a typo
+# for "parameters" in a runtime string; left unchanged here.
+test_33()
+{
+	lfsck_prep 5 5
+
+	$START_LAYOUT --dryrun -o -r ||
+		error "(1) Fail to start layout LFSCK"
+	wait_all_targets_blocked layout completed 2
+
+	local PARAMS=$($SHOW_LAYOUT | awk '/^param/ { print $2 }')
+	[ "$PARAMS" == "dryrun,all_targets,orphan" ] ||
+		error "(3) Expect 'dryrun,all_targets,orphan', got '$PARAMS'"
+
+	$START_NAMESPACE -e abort -A -r ||
+		error "(4) Fail to start namespace LFSCK"
+	wait_all_targets_blocked namespace completed 5
+
+	PARAMS=$($SHOW_NAMESPACE | awk '/^param/ { print $2 }')
+	[ "$PARAMS" == "failout,all_targets" ] ||
+		error "(6) Expect 'failout,all_targets', got '$PARAMS'"
+}
+run_test 33 "check LFSCK paramters"
+
+# test_34 (ZFS only): create a remote directory with its agent object
+# suppressed; verify the namespace LFSCK rebuilds the lost agent
+# object exactly once (a second run repairs nothing).
+test_34()
+{
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+	[ $(facet_fstype $SINGLEMDS) != zfs ] &&
+		skip "Only valid for ZFS backend" && return
+
+	lfsck_prep 1 1
+
+	#define OBD_FAIL_LFSCK_NO_AGENTOBJ	0x1630
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1630
+	$LFS mkdir -i 1 $DIR/$tdir/dummy ||
+		error "(1) Fail to create $DIR/$tdir/dummy"
+
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+	$START_NAMESPACE -r || error "(2) Fail to start LFSCK for namespace!"
+	wait_update_facet $SINGLEMDS "$LCTL get_param -n \
+		mdd.${MDT_DEV}.lfsck_namespace |
+		awk '/^status/ { print \\\$2 }'" "completed" 32 || {
+		$SHOW_NAMESPACE
+		error "(3) unexpected status"
+	}
+
+	local repaired=$($SHOW_NAMESPACE |
+			 awk '/^dirent_repaired/ { print $2 }')
+	[ $repaired -eq 1 ] ||
+		error "(4) Fail to repair the lost agent object: $repaired"
+
+	# Re-run: the previous repair must stick, so nothing to fix now.
+	$START_NAMESPACE -r || error "(5) Fail to start LFSCK for namespace!"
+	wait_update_facet $SINGLEMDS "$LCTL get_param -n \
+		mdd.${MDT_DEV}.lfsck_namespace |
+		awk '/^status/ { print \\\$2 }'" "completed" 32 || {
+		$SHOW_NAMESPACE
+		error "(6) unexpected status"
+	}
+
+	repaired=$($SHOW_NAMESPACE | awk '/^dirent_repaired/ { print $2 }')
+	[ $repaired -eq 0 ] ||
+		error "(7) Unexpected repairing: $repaired"
+}
+run_test 34 "LFSCK can rebuild the lost agent object"
+
+test_35()
+{
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ lfsck_prep 1 1
+
+ #define OBD_FAIL_LFSCK_NO_AGENTENT 0x1631
+ do_facet mds2 $LCTL set_param fail_loc=0x1631
+ $LFS mkdir -i 1 $DIR/$tdir/dummy ||
+ error "(1) Fail to create $DIR/$tdir/dummy"
+
+ sync; sleep 3
+ do_facet mds2 $LCTL set_param fail_loc=0
+ $START_NAMESPACE -A -r || error "(2) Fail to start LFSCK for namespace!"
+ wait_update_facet mds2 "$LCTL get_param -n \
+ mdd.$(facet_svc mds2).lfsck_namespace |
+ awk '/^status/ { print \\\$2 }'" "completed" $LTIME ||
+ error "(3) MDS${k} is not the expected 'completed'"
+
+ local repaired=$(do_facet mds2 $LCTL get_param -n \
+ mdd.$(facet_svc mds2).lfsck_namespace |
+ awk '/^agent_entries_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(4) Fail to repair the lost agent entry: $repaired"
+
+ echo "stopall to cleanup object cache"
+ stopall > /dev/null
+ echo "setupall"
+ setupall > /dev/null
+
+ $START_NAMESPACE -A -r || error "(5) Fail to start LFSCK for namespace!"
+ wait_update_facet mds2 "$LCTL get_param -n \
+ mdd.$(facet_svc mds2).lfsck_namespace |
+ awk '/^status/ { print \\\$2 }'" "completed" $LTIME ||
+ error "(6) MDS${k} is not the expected 'completed'"
+
+ repaired=$(do_facet mds2 $LCTL get_param -n \
+ mdd.$(facet_svc mds2).lfsck_namespace |
+ awk '/^agent_entries_repaired/ { print $2 }')
+ [ $repaired -eq 0 ] ||
+ error "(7) Unexpected repairing: $repaired"
+}
+run_test 35 "LFSCK can rebuild the lost agent entry"
+
+test_36a() {
+ [ $OSTCOUNT -lt 3 ] && skip "needs >= 3 OSTs" && return
+
+ echo "#####"
+ echo "The target MDT-object's LOV EA corrupted as to lose one of the "
+ echo "mirrors information. The layout LFSCK should rebuild the LOV EA "
+ echo "with the PFID EA of related OST-object(s) belong to the mirror."
+ echo "#####"
+
+ check_mount_and_prep
+
+ # Create three FLR files, each with 3 mirrors; every mirror has two
+ # components (a 2-stripe head and a 1-stripe tail), i.e. 3 OST-objects
+ # per mirror, spread over OSTs 0/1/2 in rotated order.
+ $LFS setstripe -N -E 1M -o 0,1 -E -1 -o 2 -N -E 2M -o 1,2 -E -1 -o 0 \
+ -N -E 3M -o 2,0 -E -1 -o 1 $DIR/$tdir/f0 ||
+ error "(0) Fail to create mirror file $DIR/$tdir/f0"
+ $LFS setstripe -N -E 1M -o 0,1 -E -1 -o 2 -N -E 2M -o 1,2 -E -1 -o 0 \
+ -N -E 3M -o 2,0 -E -1 -o 1 $DIR/$tdir/f1 ||
+ error "(1) Fail to create mirror file $DIR/$tdir/f1"
+ $LFS setstripe -N -E 1M -o 0,1 -E -1 -o 2 -N -E 2M -o 1,2 -E -1 -o 0 \
+ -N -E 3M -o 2,0 -E -1 -o 1 $DIR/$tdir/f2 ||
+ error "(2) Fail to create mirror file $DIR/$tdir/f2"
+
+ # Write data and resync so that every OST-object of every mirror has
+ # been instantiated and carries a valid PFID EA for LFSCK to use.
+ dd if=/dev/zero of=$DIR/$tdir/f0 bs=1M count=4 ||
+ error "(3) Fail to write $DIR/$tdir/f0"
+ dd if=/dev/zero of=$DIR/$tdir/f1 bs=1M count=4 ||
+ error "(4) Fail to write $DIR/$tdir/f1"
+ dd if=/dev/zero of=$DIR/$tdir/f2 bs=1M count=4 ||
+ error "(5) Fail to write $DIR/$tdir/f2"
+
+ $LFS mirror resync $DIR/$tdir/f0 ||
+ error "(6) Fail to resync $DIR/$tdir/f0"
+ $LFS mirror resync $DIR/$tdir/f1 ||
+ error "(7) Fail to resync $DIR/$tdir/f1"
+ $LFS mirror resync $DIR/$tdir/f2 ||
+ error "(8) Fail to resync $DIR/$tdir/f2"
+
+ # Drop cached locks so later getstripe/LFSCK see on-disk state.
+ cancel_lru_locks mdc
+ cancel_lru_locks osc
+
+ $LFS getstripe $DIR/$tdir/f0 ||
+ error "(9) Fail to getstripe for $DIR/$tdir/f0"
+ $LFS getstripe $DIR/$tdir/f1 ||
+ error "(10) Fail to getstripe for $DIR/$tdir/f1"
+ $LFS getstripe $DIR/$tdir/f2 ||
+ error "(11) Fail to getstripe for $DIR/$tdir/f2"
+
+ echo "Inject failure, to simulate the case of missing one mirror in LOV"
+ #define OBD_FAIL_LFSCK_LOST_MDTOBJ 0x1616
+ do_facet mds1 $LCTL set_param fail_loc=0x1616
+
+ # With the fail_loc set, splitting a mirror drops it from the LOV EA
+ # but (presumably) leaves its OST-objects behind as orphans — TODO
+ # confirm against OBD_FAIL_LFSCK_LOST_MDTOBJ semantics. A different
+ # mirror id is split from each file.
+ $LFS mirror split --mirror-id 1 -d $DIR/$tdir/f0 ||
+ error "(12) Fail to split 1st mirror from $DIR/$tdir/f0"
+ $LFS mirror split --mirror-id 2 -d $DIR/$tdir/f1 ||
+ error "(13) Fail to split 2nd mirror from $DIR/$tdir/f1"
+ $LFS mirror split --mirror-id 3 -d $DIR/$tdir/f2 ||
+ error "(14) Fail to split 3rd mirror from $DIR/$tdir/f2"
+
+ sync
+ sleep 2
+ do_facet mds1 $LCTL set_param fail_loc=0
+
+ # Sanity check: the split mirrors must be gone from the LOV EA before
+ # LFSCK runs, otherwise the recovery below would be vacuous.
+ $LFS getstripe $DIR/$tdir/f0 | grep "lcme_mirror_id:.*1" &&
+ error "(15) The 1st of mirror is not destroyed"
+ $LFS getstripe $DIR/$tdir/f1 | grep "lcme_mirror_id:.*2" &&
+ error "(16) The 2nd of mirror is not destroyed"
+ $LFS getstripe $DIR/$tdir/f2 | grep "lcme_mirror_id:.*3" &&
+ error "(17) The 3rd of mirror is not destroyed"
+
+ local mirrors
+
+ mirrors=$($LFS getstripe -N $DIR/$tdir/f0)
+ [ $mirrors -eq 2 ] || error "(18) $DIR/$tdir/f0 has $mirrors mirrors"
+ mirrors=$($LFS getstripe -N $DIR/$tdir/f1)
+ [ $mirrors -eq 2 ] || error "(19) $DIR/$tdir/f1 has $mirrors mirrors"
+ mirrors=$($LFS getstripe -N $DIR/$tdir/f2)
+ [ $mirrors -eq 2 ] || error "(20) $DIR/$tdir/f2 has $mirrors mirrors"
+
+ echo "Trigger layout LFSCK on all devices to find out orphan OST-object"
+ $START_LAYOUT -r -o || error "(21) Fail to start LFSCK for layout!"
+
+ for k in $(seq $MDSCOUNT); do
+ # The LFSCK status query internal is 30 seconds. For the case
+ # of some LFSCK_NOTIFY RPCs failure/lost, we will wait enough
+ # time to guarantee the status sync up.
+ wait_update_facet mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_layout |
+ awk '/^status/ { print \\\$2 }'" "completed" 32 ||
+ error "(22) MDS${k} is not the expected 'completed'"
+ done
+
+ for k in $(seq $OSTCOUNT); do
+ local cur_status=$(do_facet ost${k} $LCTL get_param -n \
+ obdfilter.$(facet_svc ost${k}).lfsck_layout |
+ awk '/^status/ { print $2 }')
+ [ "$cur_status" == "completed" ] ||
+ error "(23) OST${k} Expect 'completed', but got '$cur_status'"
+ done
+
+ # 3 files x 3 OST-objects per destroyed mirror (2-stripe component +
+ # 1-stripe component) = 9 orphans expected to be re-attached.
+ local repaired=$(do_facet mds1 $LCTL get_param -n \
+ mdd.$(facet_svc mds1).lfsck_layout |
+ awk '/^repaired_orphan/ { print $2 }')
+ [ $repaired -eq 9 ] ||
+ error "(24) Expect 9 fixed on mds1, but got: $repaired"
+
+ # LFSCK should have restored the third mirror on every file.
+ mirrors=$($LFS getstripe -N $DIR/$tdir/f0)
+ [ $mirrors -eq 3 ] || error "(25) $DIR/$tdir/f0 has $mirrors mirrors"
+ mirrors=$($LFS getstripe -N $DIR/$tdir/f1)
+ [ $mirrors -eq 3 ] || error "(26) $DIR/$tdir/f1 has $mirrors mirrors"
+ mirrors=$($LFS getstripe -N $DIR/$tdir/f2)
+ [ $mirrors -eq 3 ] || error "(27) $DIR/$tdir/f2 has $mirrors mirrors"
+
+ $LFS getstripe $DIR/$tdir/f0 | grep "lcme_mirror_id:.*1" || {
+ $LFS getstripe $DIR/$tdir/f0
+ error "(28) The 1st of mirror is not recovered"
+ }
+
+ $LFS getstripe $DIR/$tdir/f1 | grep "lcme_mirror_id:.*2" || {
+ $LFS getstripe $DIR/$tdir/f1
+ error "(29) The 2nd of mirror is not recovered"
+ }
+
+ $LFS getstripe $DIR/$tdir/f2 | grep "lcme_mirror_id:.*3" || {
+ $LFS getstripe $DIR/$tdir/f2
+ error "(30) The 3rd of mirror is not recovered"
+ }
+}
+run_test 36a "rebuild LOV EA for mirrored file (1)"
+
+test_36b() {
+ [ $OSTCOUNT -lt 3 ] && skip "needs >= 3 OSTs" && return
+
+ echo "#####"
+ echo "The mirrored file lost its MDT-object, but relatd OST-objects "
+ echo "are still there. The layout LFSCK should rebuild the LOV EA "
+ echo "with the PFID EA of related OST-object(s) belong to the file. "
+ echo "#####"
+
+ check_mount_and_prep
+
+ # One FLR file with 3 mirrors, each mirror made of two components
+ # (2-stripe head + 1-stripe tail) => 6 components / 9 OST-objects.
+ $LFS setstripe -N -E 1M -o 0,1 -E -1 -o 2 -N -E 2M -o 1,2 -E -1 -o 0 \
+ -N -E 3M -o 2,0 -E -1 -o 1 $DIR/$tdir/f0 ||
+ error "(0) Fail to create mirror file $DIR/$tdir/f0"
+
+ # Remember the FID now; it names the recovered copy in lost+found later.
+ local fid=$($LFS path2fid $DIR/$tdir/f0)
+
+ # Write + resync so all OST-objects exist and carry PFID EAs.
+ dd if=/dev/zero of=$DIR/$tdir/f0 bs=1M count=4 ||
+ error "(1) Fail to write $DIR/$tdir/f0"
+ $LFS mirror resync $DIR/$tdir/f0 ||
+ error "(2) Fail to resync $DIR/$tdir/f0"
+
+ cancel_lru_locks mdc
+ cancel_lru_locks osc
+
+ $LFS getstripe $DIR/$tdir/f0 ||
+ error "(3) Fail to getstripe for $DIR/$tdir/f0"
+
+ echo "Inject failure, to simulate the case of missing the MDT-object"
+ #define OBD_FAIL_LFSCK_LOST_MDTOBJ 0x1616
+ do_facet mds1 $LCTL set_param fail_loc=0x1616
+ # With the fail_loc set, the unlink removes the MDT-object but
+ # (presumably) leaves the OST-objects behind as orphans — TODO confirm
+ # against OBD_FAIL_LFSCK_LOST_MDTOBJ semantics.
+ rm -f $DIR/$tdir/f0 || error "(4) Fail to remove $DIR/$tdir/f0"
+
+ sync
+ sleep 2
+ do_facet mds1 $LCTL set_param fail_loc=0
+
+ echo "Trigger layout LFSCK on all devices to find out orphan OST-object"
+ $START_LAYOUT -r -o || error "(5) Fail to start LFSCK for layout!"
+
+ for k in $(seq $MDSCOUNT); do
+ # The LFSCK status query internal is 30 seconds. For the case
+ # of some LFSCK_NOTIFY RPCs failure/lost, we will wait enough
+ # time to guarantee the status sync up.
+ wait_update_facet mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_layout |
+ awk '/^status/ { print \\\$2 }'" "completed" 32 ||
+ error "(6) MDS${k} is not the expected 'completed'"
+ done
+
+ for k in $(seq $OSTCOUNT); do
+ local cur_status=$(do_facet ost${k} $LCTL get_param -n \
+ obdfilter.$(facet_svc ost${k}).lfsck_layout |
+ awk '/^status/ { print $2 }')
+ [ "$cur_status" == "completed" ] ||
+ error "(7) OST${k} Expect 'completed', but got '$cur_status'"
+ done
+
+ # 3 mirrors x 3 OST-objects each = 9 orphans re-attached.
+ local count=$(do_facet mds1 $LCTL get_param -n \
+ mdd.$(facet_svc mds1).lfsck_layout |
+ awk '/^repaired_orphan/ { print $2 }')
+ [ $count -eq 9 ] || error "(8) Expect 9 fixed on mds1, but got: $count"
+
+ # The rebuilt file shows up under .lustre/lost+found named by its FID.
+ local name=$MOUNT/.lustre/lost+found/MDT0000/${fid}-R-0
+ count=$($LFS getstripe --mirror-count $name)
+ [ $count -eq 3 ] || error "(9) $DIR/$tdir/f0 has $count mirrors"
+
+ count=$($LFS getstripe --component-count $name)
+ [ $count -eq 6 ] || error "(10) $DIR/$tdir/f0 has $count components"
+
+ $LFS getstripe $name | grep "lcme_mirror_id:.*1" || {
+ $LFS getstripe $name
+ error "(11) The 1st of mirror is not recovered"
+ }
+
+ $LFS getstripe $name | grep "lcme_mirror_id:.*2" || {
+ $LFS getstripe $name
+ error "(12) The 2nd of mirror is not recovered"
+ }
+
+ $LFS getstripe $name | grep "lcme_mirror_id:.*3" || {
+ $LFS getstripe $name
+ error "(13) The 3rd of mirror is not recovered"
+ }
+}
+run_test 36b "rebuild LOV EA for mirrored file (2)"
+
+test_36c() {
+ [ $OSTCOUNT -lt 3 ] && skip "needs >= 3 OSTs" && return
+
+ echo "#####"
+ echo "The mirrored file has been modified, not resynced yet, then "
+ echo "lost its MDT-object, but relatd OST-objects are still there. "
+ echo "The layout LFSCK should rebuild the LOV EA and relatd status "
+ echo "with the PFID EA of related OST-object(s) belong to the file. "
+ echo "#####"
+
+ check_mount_and_prep
+
+ # One FLR file with 2 mirrors, each mirror made of two components
+ # (2-stripe head + 1-stripe tail) => 4 components / 6 OST-objects.
+ $LFS setstripe -N -E 1M -o 0,1 -E -1 -o 2 -N -E 2M -o 1,2 -E -1 -o 0 \
+ $DIR/$tdir/f0 ||
+ error "(0) Fail to create mirror file $DIR/$tdir/f0"
+
+ # Remember the FID now; it names the recovered copy in lost+found later.
+ local fid=$($LFS path2fid $DIR/$tdir/f0)
+
+ # The 1st dd && resync makes all related OST-objects have been written
+ dd if=/dev/zero of=$DIR/$tdir/f0 bs=1M count=4 ||
+ error "(1.1) Fail to write $DIR/$tdir/f0"
+ $LFS mirror resync $DIR/$tdir/f0 ||
+ error "(1.2) Fail to resync $DIR/$tdir/f0"
+ # The 2nd dd makes one mirror to be stale
+ dd if=/dev/zero of=$DIR/$tdir/f0 bs=1M count=4 ||
+ error "(1.3) Fail to write $DIR/$tdir/f0"
+
+ cancel_lru_locks mdc
+ cancel_lru_locks osc
+
+ $LFS getstripe $DIR/$tdir/f0 ||
+ error "(2) Fail to getstripe for $DIR/$tdir/f0"
+
+ # Capture the lcme_flags of the first and second mirror (head/tail of
+ # the getstripe output) so we can verify LFSCK also rebuilds the
+ # stale/init state, not just the layout geometry.
+ local saved_flags1=$($LFS getstripe $DIR/$tdir/f0 | head -n 10 |
+ awk '/lcme_flags/ { print $2 }')
+ local saved_flags2=$($LFS getstripe $DIR/$tdir/f0 | tail -n 10 |
+ awk '/lcme_flags/ { print $2 }')
+
+ echo "Inject failure, to simulate the case of missing the MDT-object"
+ #define OBD_FAIL_LFSCK_LOST_MDTOBJ 0x1616
+ do_facet mds1 $LCTL set_param fail_loc=0x1616
+ # With the fail_loc set, the unlink removes the MDT-object but leaves
+ # the OST-objects behind as orphans for LFSCK to find.
+ rm -f $DIR/$tdir/f0 || error "(3) Fail to remove $DIR/$tdir/f0"
+
+ sync
+ sleep 2
+ do_facet mds1 $LCTL set_param fail_loc=0
+
+ echo "Trigger layout LFSCK on all devices to find out orphan OST-object"
+ $START_LAYOUT -r -o || error "(4) Fail to start LFSCK for layout!"
+
+ for k in $(seq $MDSCOUNT); do
+ # The LFSCK status query internal is 30 seconds. For the case
+ # of some LFSCK_NOTIFY RPCs failure/lost, we will wait enough
+ # time to guarantee the status sync up.
+ wait_update_facet mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_layout |
+ awk '/^status/ { print \\\$2 }'" "completed" 32 ||
+ error "(5) MDS${k} is not the expected 'completed'"
+ done
+
+ for k in $(seq $OSTCOUNT); do
+ local cur_status=$(do_facet ost${k} $LCTL get_param -n \
+ obdfilter.$(facet_svc ost${k}).lfsck_layout |
+ awk '/^status/ { print $2 }')
+ [ "$cur_status" == "completed" ] ||
+ error "(6) OST${k} Expect 'completed', but got '$cur_status'"
+ done
+
+ # 2 mirrors x 3 OST-objects each = 6 orphans re-attached.
+ local count=$(do_facet mds1 $LCTL get_param -n \
+ mdd.$(facet_svc mds1).lfsck_layout |
+ awk '/^repaired_orphan/ { print $2 }')
+ [ $count -eq 6 ] || error "(7) Expect 6 fixed on mds1, but got: $count"
+
+ # The rebuilt file shows up under .lustre/lost+found named by its FID.
+ local name=$MOUNT/.lustre/lost+found/MDT0000/${fid}-R-0
+ count=$($LFS getstripe --mirror-count $name)
+ [ $count -eq 2 ] || error "(8) $name has $count mirrors"
+
+ count=$($LFS getstripe --component-count $name)
+ [ $count -eq 4 ] || error "(9) $name has $count components"
+
+ # The rebuilt mirrors must carry the same lcme_flags as before the
+ # loss (one mirror was left stale by the 2nd dd).
+ local flags=$($LFS getstripe $name | head -n 10 |
+ awk '/lcme_flags/ { print $2 }')
+ [ "$flags" == "$saved_flags1" ] || {
+ $LFS getstripe $name
+ error "(10) expect flags $saved_flags1, got $flags"
+ }
+
+ flags=$($LFS getstripe $name | tail -n 10 |
+ awk '/lcme_flags/ { print $2 }')
+ [ "$flags" == "$saved_flags2" ] || {
+ $LFS getstripe $name
+ error "(11) expect flags $saved_flags2, got $flags"
+ }
+}
+run_test 36c "rebuild LOV EA for mirrored file (3)"
+
+test_37()
+{
+ # Verify that LFSCK skips an orphan (open-unlinked) directory: the
+ # directory is removed while a multiop process keeps it open, then a
+ # namespace LFSCK runs; afterwards the name must still be gone.
+ local PID
+ # rc must start at 0: on the expected path "stat" fails and leaves rc
+ # untouched, and "return $rc" with an unset rc would expand to a bare
+ # "return", silently propagating the status of the preceding kill.
+ local rc=0
+ local t_dir="$DIR/$tdir/d0"
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $t_dir || error "(2) Fail to mkdir $t_dir on MDT0"
+ # Hold the directory open in the background so the rmdir below turns
+ # it into an orphan; PID is the multiop process to resume later.
+ multiop_bg_pause $t_dir D_c || { error "multiop failed: $?"; return 1; }
+ PID=$!
+ rmdir $t_dir
+
+ # Kill the background multiop BEFORE calling error: error aborts the
+ # test, so a kill placed after it would never run and would leak the
+ # multiop process.
+ $START_NAMESPACE -r -A || {
+ kill -USR1 $PID
+ error "(3) Fail to start LFSCK for namespace!"
+ }
+
+ wait_all_targets_blocked namespace completed 4
+ # The orphan's name entry must NOT reappear; a successful stat is a
+ # failure of this test.
+ stat $t_dir && rc=1
+ kill -USR1 $PID
+ return $rc
+}
+run_test 37 "LFSCK must skip a ORPHAN"
+