+ for k in $(seq $MDSCOUNT); do
+ # The LFSCK status query internal is 30 seconds. For the case
+ # of some LFSCK_NOTIFY RPCs failure/lost, we will wait enough
+ # time to guarantee the status sync up.
+ wait_update_facet mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_layout |
+ awk '/^status/ { print \\\$2 }'" "completed" $LTIME ||
+ error "(8) MDS${k} is not the expected 'completed'"
+ done
+
+ for k in $(seq $OSTCOUNT); do
+ cur_status=$(do_facet ost${k} $LCTL get_param -n \
+ obdfilter.$(facet_svc ost${k}).lfsck_layout |
+ awk '/^status/ { print $2 }')
+ [ "$cur_status" == "completed" ] ||
+ error "(9) OST${k} Expect 'completed', but got '$cur_status'"
+
+ done
+
+ local repaired=$(do_facet mds1 $LCTL get_param -n \
+ mdd.$(facet_svc mds1).lfsck_layout |
+ awk '/^repaired_orphan/ { print $2 }')
+ [ $repaired -eq 2 ] ||
+ error "(10) Expect 2 fixed on mds{1}, but got: $repaired"
+
+ if [ $MDSCOUNT -ge 2 ]; then
+ repaired=$(do_facet mds2 $LCTL get_param -n \
+ mdd.$(facet_svc mds2).lfsck_layout |
+ awk '/^repaired_orphan/ { print $2 }')
+ [ $repaired -eq 2 ] ||
+ error "(11) Expect 2 fixed on mds{2}, but got: $repaired"
+ fi
+}
+run_test 18f "Skip the failed OST(s) when handle orphan OST-objects"
+
+test_18g() {
+ echo "#####"
+ echo "The target MDT-object is lost, but related OI mapping is there"
+ echo "The LFSCK should recreate the lost MDT-object without affected"
+ echo "by the stale OI mapping."
+ echo "#####"
+
+ check_mount_and_prep
+ $LFS mkdir -i 0 $DIR/$tdir/a1
+ # Stripe over all OSTs (-c -1) so every OST ends up holding one
+ # orphan OST-object after the MDT-object is lost.
+ $LFS setstripe -c -1 -i 0 -S 1M $DIR/$tdir/a1
+ dd if=/dev/zero of=$DIR/$tdir/a1/f1 bs=1M count=$OSTCOUNT
+ # Record the FID: the LFSCK-recreated MDT-object is exposed later as
+ # .lustre/lost+found/MDT0000/<fid>-R-0.
+ local fid1=$($LFS path2fid $DIR/$tdir/a1/f1)
+ echo ${fid1}
+ $LFS getstripe $DIR/$tdir/a1/f1
+ cancel_lru_locks osc
+
+ echo "Inject failure to simulate lost MDT-object but keep OI mapping"
+ #define OBD_FAIL_LFSCK_LOST_MDTOBJ2 0x162e
+ do_facet mds1 $LCTL set_param fail_loc=0x162e
+ # With the fail_loc set, the unlink drops the MDT-object but leaves
+ # the stale OI mapping and the OST-objects behind as orphans.
+ rm -f $DIR/$tdir/a1/f1
+
+ do_facet mds1 $LCTL set_param fail_loc=0
+ cancel_lru_locks mdc
+ cancel_lru_locks osc
+
+ echo "Trigger layout LFSCK on all devices to find out orphan OST-object"
+ $START_LAYOUT -r -o || error "(1) Fail to start LFSCK for layout!"
+
+ for k in $(seq $MDSCOUNT); do
+ # The LFSCK status query interval is 30 seconds. For the case
+ # of some LFSCK_NOTIFY RPCs failure/lost, we will wait enough
+ # time to guarantee the status sync up.
+ wait_update_facet mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_layout |
+ awk '/^status/ { print \\\$2 }'" "completed" $LTIME ||
+ error "(2) MDS${k} is not the expected 'completed'"
+ done
+
+ for k in $(seq $OSTCOUNT); do
+ local cur_status=$(do_facet ost${k} $LCTL get_param -n \
+ obdfilter.$(facet_svc ost${k}).lfsck_layout |
+ awk '/^status/ { print $2 }')
+ [ "$cur_status" == "completed" ] ||
+ error "(3) OST${k} Expect 'completed', but got '$cur_status'"
+ done
+
+ # One orphan OST-object per OST should have been re-attached.
+ local repaired=$(do_facet mds1 $LCTL get_param -n \
+ mdd.$(facet_svc mds1).lfsck_layout |
+ awk '/^repaired_orphan/ { print $2 }')
+ [ $repaired -eq $OSTCOUNT ] ||
+ error "(4) Expect $OSTCOUNT fixed, but got: $repaired"
+
+ echo "Move the files from ./lustre/lost+found/MDTxxxx to namespace"
+ mv $MOUNT/.lustre/lost+found/MDT0000/${fid1}-R-0 $DIR/$tdir/a1/f1 ||
+ error "(5) Fail to move $MOUNT/.lustre/lost+found/MDT0000/${fid1}-R-0"
+
+ $LFS path2fid $DIR/$tdir/a1/f1
+ $LFS getstripe $DIR/$tdir/a1/f1
+}
+run_test 18g "Find out orphan OST-object and repair it (7)"
+
+test_18h() {
+ echo "#####"
+ echo "The PFL extent crashed. During the first cycle LFSCK scanning,"
+ echo "the layout LFSCK will keep the bad PFL file(s) there without"
+ echo "scanning its OST-object(s). Then in the second stage scanning,"
+ echo "the OST will return related OST-object(s) to the MDT as orphan."
+ echo "And then the LFSCK on the MDT can rebuild the PFL extent with"
+ echo "the 'orphan(s)' stripe information."
+ echo "#####"
+
+ check_mount_and_prep
+
+ # Two-component PFL file: first 2MB, then the remainder to EOF.
+ $LFS setstripe -E 2M -c 1 -E -1 $DIR/$tdir/f0 ||
+ error "(0) Fail to create PFL $DIR/$tdir/f0"
+
+ cat $LUSTRE/tests/test-framework.sh > $DIR/$tdir/f0 ||
+ error "(1.1) Fail to write $DIR/$tdir/f0"
+
+ # Write past the 2MB boundary so both components are instantiated.
+ dd if=$LUSTRE/tests/test-framework.sh of=$DIR/$tdir/f0 bs=1M seek=2 ||
+ error "(1.2) Fail to write $DIR/$tdir/f0"
+
+ # Keep a pristine copy to verify data integrity after the repair.
+ cp $DIR/$tdir/f0 $DIR/$tdir/guard
+
+ echo "Inject failure stub to simulate bad PFL extent range"
+ #define OBD_FAIL_LFSCK_BAD_PFL_RANGE 0x162f
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x162f
+
+ # The chown forces an attribute update that hits the fail_loc and
+ # leaves a corrupted PFL extent range behind.
+ chown 1.1 $DIR/$tdir/f0
+
+ cancel_lru_locks mdc
+ cancel_lru_locks osc
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ dd if=/dev/zero of=$DIR/$tdir/f0 bs=1M count=1 &&
+ error "(2) Write to bad PFL file should fail"
+
+ echo "Trigger layout LFSCK to find out the bad lmm_oi and fix them"
+ $START_LAYOUT -r -o || error "(3) Fail to start LFSCK for layout!"
+
+ for k in $(seq $MDSCOUNT); do
+ # The LFSCK status query interval is 30 seconds. For the case
+ # of some LFSCK_NOTIFY RPCs failure/lost, we will wait enough
+ # time to guarantee the status sync up.
+ wait_update_facet mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_layout |
+ awk '/^status/ { print \\\$2 }'" "completed" $LTIME ||
+ error "(4.1) MDS${k} is not the expected 'completed'"
+ done
+
+ for k in $(seq $OSTCOUNT); do
+ local cur_status=$(do_facet ost${k} $LCTL get_param -n \
+ obdfilter.$(facet_svc ost${k}).lfsck_layout |
+ awk '/^status/ { print $2 }')
+ [ "$cur_status" == "completed" ] ||
+ error "(4.2) OST${k} Expect 'completed', but got '$cur_status'"
+ done
+
+ local repaired=$($SHOW_LAYOUT |
+ awk '/^repaired_orphan/ { print $2 }')
+ [ $repaired -eq 2 ] ||
+ error "(5) Fail to repair crashed PFL range: $repaired"
+
+ echo "Data in $DIR/$tdir/f0 should not be broken"
+ diff $DIR/$tdir/f0 $DIR/$tdir/guard ||
+ error "(6) Data in $DIR/$tdir/f0 is broken"
+
+ echo "Write should succeed after LFSCK repairing the bad PFL range"
+ dd if=/dev/zero of=$DIR/$tdir/f0 bs=1M count=1 ||
+ error "(7) Write should succeed after LFSCK"
+}
+run_test 18h "LFSCK can repair crashed PFL extent range"
+
+$LCTL set_param debug=-cache > /dev/null
+
+test_19a() {
+ check_mount_and_prep
+ $LFS setstripe -c 1 -i 0 $DIR/$tdir
+
+ # Disable parent-FID verification on OST0000 while the files are
+ # created; it is re-enabled below before the read checks.
+ do_nodes $(comma_list $(osts_nodes)) $LCTL set_param -n \
+ obdfilter.${FSNAME}-OST0000.lfsck_verify_pfid 0
+
+ echo "foo1" > $DIR/$tdir/a0
+ # a1 is a two-component PFL file so both plain and PFL layouts are
+ # covered by the denial check.
+ $LFS setstripe -E 512K -S 512K -o 0 -E -1 -S 1M $DIR/$tdir/a1 ||
+ error "(0) Fail to create PFL $DIR/$tdir/a1"
+ echo "foo2" > $DIR/$tdir/a1
+ echo "guard" > $DIR/$tdir/a2
+ cancel_lru_locks osc
+
+ echo "Inject failure, then client will offer wrong parent FID when read"
+ do_nodes $(comma_list $(osts_nodes)) $LCTL set_param -n \
+ obdfilter.${FSNAME}-OST0000.lfsck_verify_pfid 1
+
+ #define OBD_FAIL_LFSCK_INVALID_PFID 0x1619
+ $LCTL set_param fail_loc=0x1619
+
+ echo "Read RPC with wrong parent FID should be denied"
+ cat $DIR/$tdir/a0 && error "(3.1) Read a0 should be denied!"
+ cat $DIR/$tdir/a1 && error "(3.2) Read a1 should be denied!"
+ $LCTL set_param fail_loc=0
+}
+run_test 19a "OST-object inconsistency self detect"
+
+test_19b() {
+ check_mount_and_prep
+ $LFS setstripe -c 1 -i 0 $DIR/$tdir
+
+ echo "Inject failure stub to make the OST-object to back point to"
+ echo "non-exist MDT-object"
+
+ do_nodes $(comma_list $(osts_nodes)) $LCTL set_param -n \
+ obdfilter.${FSNAME}-OST0000.lfsck_verify_pfid 0
+
+ #define OBD_FAIL_LFSCK_UNMATCHED_PAIR1 0x1611
+ do_nodes $(comma_list $(osts_nodes)) $LCTL set_param fail_loc=0x1611
+ echo "foo1" > $DIR/$tdir/f0
+ # f1 is a PFL file so the self-repair path is exercised for both
+ # plain and composite layouts.
+ $LFS setstripe -E 1M -S 1M -o 0 -E 4M -S 256K $DIR/$tdir/f1 ||
+ error "(0) Fail to create PFL $DIR/$tdir/f1"
+ echo "foo2" > $DIR/$tdir/f1
+ cancel_lru_locks osc
+ do_nodes $(comma_list $(osts_nodes)) $LCTL set_param fail_loc=0
+
+ do_facet ost1 $LCTL set_param -n \
+ obdfilter.${FSNAME}-OST0000.lfsck_verify_pfid 0
+ echo "Nothing should be fixed since self detect and repair is disabled"
+ local repaired=$(do_facet ost1 $LCTL get_param -n \
+ obdfilter.${FSNAME}-OST0000.lfsck_verify_pfid |
+ awk '/^repaired/ { print $2 }')
+ [ $repaired -eq 0 ] ||
+ error "(1) Expected 0 repaired, but got $repaired"
+
+ echo "Read RPC with right parent FID should be accepted,"
+ echo "and cause parent FID on OST to be fixed"
+
+ do_nodes $(comma_list $(osts_nodes)) $LCTL set_param -n \
+ obdfilter.${FSNAME}-OST0000.lfsck_verify_pfid 1
+
+ cat $DIR/$tdir/f0 || error "(2.1) Read f0 should not be denied!"
+ cat $DIR/$tdir/f1 || error "(2.2) Read f1 should not be denied!"
+
+ # Both f0 and f1 were written with bad parent FIDs above, so two
+ # repairs are expected.  (The old message said "Expected 1", which
+ # contradicted the '-eq 2' check.)
+ repaired=$(do_facet ost1 $LCTL get_param -n \
+ obdfilter.${FSNAME}-OST0000.lfsck_verify_pfid |
+ awk '/^repaired/ { print $2 }')
+ [ $repaired -eq 2 ] ||
+ error "(3) Expected 2 repaired, but got $repaired"
+}
+run_test 19b "OST-object inconsistency self repair"
+
+PATTERN_WITH_HOLE="40000001"
+PATTERN_WITHOUT_HOLE="1"
+
+test_20a() {
+ [ $OSTCOUNT -lt 2 ] && skip "needs >= 2 OSTs" && return
+
+ echo "#####"
+ echo "The target MDT-object and some of its OST-object are lost."
+ echo "The LFSCK should find out the left OST-objects and re-create"
+ echo "the MDT-object under the directory .lustre/lost+found/MDTxxxx/"
+ echo "with the partial OST-objects (LOV EA hole)."
+
+ echo "New client can access the file with LOV EA hole via normal"
+ echo "system tools or commands without crash the system."
+
+ echo "For old client, even though it cannot access the file with"
+ echo "LOV EA hole, it should not cause the system crash."
+ echo "#####"
+
+ check_mount_and_prep
+ $LFS mkdir -i 0 $DIR/$tdir/a1
+ if [ $OSTCOUNT -gt 2 ]; then
+ $LFS setstripe -c 3 -i 0 -S 1M $DIR/$tdir/a1
+ bcount=513
+ else
+ $LFS setstripe -c 2 -i 0 -S 1M $DIR/$tdir/a1
+ bcount=257
+ fi
+
+ # 256 blocks on the stripe0.
+ # 1 block on the stripe1 for 2 OSTs case.
+ # 256 blocks on the stripe1 for other cases.
+ # 1 block on the stripe2 if OSTs > 2
+ dd if=/dev/zero of=$DIR/$tdir/a1/f0 bs=4096 count=$bcount
+ dd if=/dev/zero of=$DIR/$tdir/a1/f1 bs=4096 count=$bcount
+ dd if=/dev/zero of=$DIR/$tdir/a1/f2 bs=4096 count=$bcount
+
+ # Record the FIDs: LFSCK re-creates each lost MDT-object as
+ # .lustre/lost+found/MDT0000/<fid>-R-0.
+ local fid0=$($LFS path2fid $DIR/$tdir/a1/f0)
+ local fid1=$($LFS path2fid $DIR/$tdir/a1/f1)
+ local fid2=$($LFS path2fid $DIR/$tdir/a1/f2)
+
+ echo ${fid0}
+ $LFS getstripe $DIR/$tdir/a1/f0
+ echo ${fid1}
+ $LFS getstripe $DIR/$tdir/a1/f1
+ echo ${fid2}
+ $LFS getstripe $DIR/$tdir/a1/f2
+
+ if [ $OSTCOUNT -gt 2 ]; then
+ dd if=/dev/zero of=$DIR/$tdir/a1/f3 bs=4096 count=$bcount
+ fid3=$($LFS path2fid $DIR/$tdir/a1/f3)
+ echo ${fid3}
+ $LFS getstripe $DIR/$tdir/a1/f3
+ fi
+
+ cancel_lru_locks osc
+
+ echo "Inject failure..."
+ echo "To simulate f0 lost MDT-object"
+ #define OBD_FAIL_LFSCK_LOST_MDTOBJ 0x1616
+ do_facet mds1 $LCTL set_param fail_loc=0x1616
+ rm -f $DIR/$tdir/a1/f0
+
+ echo "To simulate f1 lost MDT-object and OST-object0"
+ #define OBD_FAIL_LFSCK_LOST_SPEOBJ 0x161a
+ do_facet mds1 $LCTL set_param fail_loc=0x161a
+ rm -f $DIR/$tdir/a1/f1
+
+ echo "To simulate f2 lost MDT-object and OST-object1"
+ # fail_val selects which stripe's OST-object is also dropped while
+ # fail_loc 0x161a is still in effect.
+ do_facet mds1 $LCTL set_param fail_val=1
+ rm -f $DIR/$tdir/a1/f2
+
+ if [ $OSTCOUNT -gt 2 ]; then
+ echo "To simulate f3 lost MDT-object and OST-object2"
+ do_facet mds1 $LCTL set_param fail_val=2
+ rm -f $DIR/$tdir/a1/f3
+ fi
+
+ umount_client $MOUNT
+ sync
+ sleep 2
+ do_facet mds1 $LCTL set_param fail_loc=0 fail_val=0
+
+ echo "Trigger layout LFSCK on all devices to find out orphan OST-object"
+ $START_LAYOUT -r -o || error "(1) Fail to start LFSCK for layout!"
+
+ for k in $(seq $MDSCOUNT); do
+ # The LFSCK status query interval is 30 seconds. For the case
+ # of some LFSCK_NOTIFY RPCs failure/lost, we will wait enough
+ # time to guarantee the status sync up.
+ wait_update_facet mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_layout |
+ awk '/^status/ { print \\\$2 }'" "completed" 32 ||
+ error "(2) MDS${k} is not the expected 'completed'"
+ done
+
+ for k in $(seq $OSTCOUNT); do
+ local cur_status=$(do_facet ost${k} $LCTL get_param -n \
+ obdfilter.$(facet_svc ost${k}).lfsck_layout |
+ awk '/^status/ { print $2 }')
+ [ "$cur_status" == "completed" ] ||
+ error "(3) OST${k} Expect 'completed', but got '$cur_status'"
+ done
+
+ local repaired=$(do_facet mds1 $LCTL get_param -n \
+ mdd.$(facet_svc mds1).lfsck_layout |
+ awk '/^repaired_orphan/ { print $2 }')
+ if [ $OSTCOUNT -gt 2 ]; then
+ [ $repaired -eq 9 ] ||
+ error "(4.1) Expect 9 fixed on mds1, but got: $repaired"
+ else
+ [ $repaired -eq 4 ] ||
+ error "(4.2) Expect 4 fixed on mds1, but got: $repaired"
+ fi
+
+ mount_client $MOUNT || error "(5.0) Fail to start client!"
+
+ LOV_PATTERN_F_HOLE=0x40000000
+
+ #
+ # ${fid0}-R-0 is the old f0
+ #
+ local name="$MOUNT/.lustre/lost+found/MDT0000/${fid0}-R-0"
+ echo "Check $name, which is the old f0"
+
+ $LFS getstripe -v $name || error "(5.1) cannot getstripe on $name"
+
+ local pattern=$($LFS getstripe -L $name)
+ [[ "$pattern" = "$PATTERN_WITHOUT_HOLE" ]] ||
+ error "(5.2) NOT expect pattern flag hole, but got $pattern"
+
+ local stripes=$($LFS getstripe -c $name)
+ if [ $OSTCOUNT -gt 2 ]; then
+ [ $stripes -eq 3 ] ||
+ error "(5.3.1) expect the stripe count is 3, but got $stripes"
+ else
+ [ $stripes -eq 2 ] ||
+ error "(5.3.2) expect the stripe count is 2, but got $stripes"
+ fi
+
+ local size=$(stat $name | awk '/Size:/ { print $2 }')
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(5.4) expect the size $((4096 * $bcount)), but got $size"
+
+ cat $name > /dev/null || error "(5.5) cannot read $name"
+
+ echo "dummy" >> $name || error "(5.6) cannot write $name"
+
+ chown $RUNAS_ID:$RUNAS_GID $name || error "(5.7) cannot chown on $name"
+
+ touch $name || error "(5.8) cannot touch $name"
+
+ rm -f $name || error "(5.9) cannot unlink $name"
+
+ #
+ # ${fid1}-R-0 contains the old f1's stripe1 (and stripe2 if OSTs > 2)
+ #
+ name="$MOUNT/.lustre/lost+found/MDT0000/${fid1}-R-0"
+ if [ $OSTCOUNT -gt 2 ]; then
+ echo "Check $name, it contains the old f1's stripe1 and stripe2"
+ else
+ echo "Check $name, it contains the old f1's stripe1"
+ fi
+
+ $LFS getstripe -v $name || error "(6.1) cannot getstripe on $name"
+
+ pattern=$($LFS getstripe -L $name)
+ [[ "$pattern" = "$PATTERN_WITH_HOLE" ]] ||
+ error "(6.2) expect pattern flag hole, but got $pattern"
+
+ stripes=$($LFS getstripe -c $name)
+ if [ $OSTCOUNT -gt 2 ]; then
+ [ $stripes -eq 3 ] ||
+ error "(6.3.1) expect the stripe count is 3, but got $stripes"
+ else
+ [ $stripes -eq 2 ] ||
+ error "(6.3.2) expect the stripe count is 2, but got $stripes"
+ fi
+
+ size=$(stat $name | awk '/Size:/ { print $2 }')
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(6.4) expect the size $((4096 * $bcount)), but got $size"
+
+ cat $name > /dev/null && error "(6.5) normal read $name should fail"
+
+ local failures=$(dd if=$name of=$DIR/$tdir/dump conv=sync,noerror \
+ bs=4096 2>&1 | grep "Input/output error" | wc -l)
+
+ # stripe0 is dummy
+ [ $failures -eq 256 ] ||
+ error "(6.6) expect 256 IO failures, but get $failures"
+
+ size=$(stat $DIR/$tdir/dump | awk '/Size:/ { print $2 }')
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(6.7) expect the size $((4096 * $bcount)), but got $size"
+
+ dd if=/dev/zero of=$name conv=sync,notrunc bs=4096 count=1 &&
+ error "(6.8) write to the LOV EA hole should fail"
+
+ dd if=/dev/zero of=$name conv=sync,notrunc bs=4096 count=1 seek=300 ||
+ error "(6.9) write to normal stripe should NOT fail"
+
+ echo "foo" >> $name && error "(6.10) append write $name should fail"
+
+ chown $RUNAS_ID:$RUNAS_GID $name || error "(6.11) cannot chown on $name"
+
+ touch $name || error "(6.12) cannot touch $name"
+
+ rm -f $name || error "(6.13) cannot unlink $name"
+
+ #
+ # ${fid2}-R-0 contains the old f2's stripe0 (and stripe2 if OSTs > 2)
+ #
+ name="$MOUNT/.lustre/lost+found/MDT0000/${fid2}-R-0"
+ if [ $OSTCOUNT -gt 2 ]; then
+ echo "Check $name, it contains the old f2's stripe0 and stripe2"
+ else
+ echo "Check $name, it contains the old f2's stripe0"
+ fi
+
+ $LFS getstripe -v $name || error "(7.1) cannot getstripe on $name"
+
+ pattern=$($LFS getstripe -L $name)
+ [[ "$pattern" = "$PATTERN_WITH_HOLE" ]] ||
+ error "(7.2) expect pattern flag hole, but got $pattern"
+
+ stripes=$($LFS getstripe -c $name)
+ size=$(stat $name | awk '/Size:/ { print $2 }')
+ if [ $OSTCOUNT -gt 2 ]; then
+ [ $stripes -eq 3 ] ||
+ error "(7.3.1) expect the stripe count is 3, but got $stripes"
+
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(7.4.1) expect size $((4096 * $bcount)), but got $size"
+
+ cat $name > /dev/null &&
+ error "(7.5.1) normal read $name should fail"
+
+ failures=$(dd if=$name of=$DIR/$tdir/dump conv=sync,noerror \
+ bs=4096 2>&1 | grep "Input/output error" | wc -l)
+ # stripe1 is dummy
+ [ $failures -eq 256 ] ||
+ error "(7.6) expect 256 IO failures, but get $failures"
+
+ size=$(stat $DIR/$tdir/dump | awk '/Size:/ { print $2 }')
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(7.7) expect the size $((4096 * $bcount)), but got $size"
+
+ dd if=/dev/zero of=$name conv=sync,notrunc bs=4096 count=1 \
+ seek=300 && error "(7.8.0) write to the LOV EA hole should fail"
+
+ dd if=/dev/zero of=$name conv=sync,notrunc bs=4096 count=1 ||
+ error "(7.8.1) write to normal stripe should NOT fail"
+
+ echo "foo" >> $name &&
+ error "(7.8.3) append write $name should fail"
+
+ chown $RUNAS_ID:$RUNAS_GID $name ||
+ error "(7.9.1) cannot chown on $name"
+
+ touch $name || error "(7.10.1) cannot touch $name"
+ else
+ [ $stripes -eq 2 ] ||
+ error "(7.3.2) expect the stripe count is 2, but got $stripes"
+
+ # stripe1 is dummy
+ [ $size -eq $((4096 * (256 + 0))) ] ||
+ error "(7.4.2) expect the size $((4096 * 256)), but got $size"
+
+ cat $name > /dev/null &&
+ error "(7.5.2) normal read $name should fail"
+
+ failures=$(dd if=$name of=$DIR/$tdir/dump conv=sync,noerror \
+ bs=4096 2>&1 | grep "Input/output error" | wc -l)
+ [ $failures -eq 256 ] ||
+ error "(7.6.2) expect 256 IO failures, but get $failures"
+
+ bcount=$((256 * 2))
+ size=$(stat $DIR/$tdir/dump | awk '/Size:/ { print $2 }')
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(7.7.2) expect the size $((4096 * $bcount)), got $size"
+
+ dd if=/dev/zero of=$name conv=sync,notrunc bs=4096 count=1 \
+ seek=256 && error "(7.8.2) write to the LOV EA hole should fail"
+
+ chown $RUNAS_ID:$RUNAS_GID $name ||
+ error "(7.9.2) cannot chown on $name"
+
+ touch $name || error "(7.10.2) cannot touch $name"
+ fi
+
+ rm -f $name || error "(7.11) cannot unlink $name"
+
+ [ $OSTCOUNT -le 2 ] && return
+
+ #
+ # ${fid3}-R-0 should contains the old f3's stripe0 and stripe1
+ #
+ name="$MOUNT/.lustre/lost+found/MDT0000/${fid3}-R-0"
+ echo "Check $name, which contains the old f3's stripe0 and stripe1"
+
+ $LFS getstripe -v $name || error "(8.1) cannot getstripe on $name"
+
+ pattern=$($LFS getstripe -L $name)
+ [[ "$pattern" = "$PATTERN_WITH_HOLE" ]] ||
+ error "(8.2) expect pattern flag hole, but got $pattern"
+
+ stripes=$($LFS getstripe -c $name)
+ [ $stripes -eq 3 ] ||
+ error "(8.3) expect the stripe count is 3, but got $stripes"
+
+ size=$(stat $name | awk '/Size:/ { print $2 }')
+ # stripe2 is lost
+ [ $size -eq $((4096 * (256 + 256 + 0))) ] ||
+ error "(8.4) expect the size $((4096 * 512)), but got $size"
+
+ cat $name > /dev/null &&
+ error "(8.5) normal read $name should fail"
+
+ failures=$(dd if=$name of=$DIR/$tdir/dump conv=sync,noerror \
+ bs=4096 2>&1 | grep "Input/output error" | wc -l)
+ # stripe2 is dummy
+ [ $failures -eq 256 ] ||
+ error "(8.6) expect 256 IO failures, but get $failures"
+
+ bcount=$((256 * 3))
+ size=$(stat $DIR/$tdir/dump | awk '/Size:/ { print $2 }')
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(8.7) expect the size $((4096 * $bcount)), but got $size"
+
+ dd if=/dev/zero of=$name conv=sync,notrunc bs=4096 count=1 \
+ seek=512 && error "(8.8) write to the LOV EA hole should fail"
+
+ chown $RUNAS_ID:$RUNAS_GID $name ||
+ error "(8.9) cannot chown on $name"
+
+ touch $name || error "(8.10) cannot touch $name"
+
+ rm -f $name || error "(8.11) cannot unlink $name"
+}
+run_test 20a "Handle the orphan with dummy LOV EA slot properly"
+
+test_20b() {
+ [ $OSTCOUNT -lt 2 ] && skip "needs >= 2 OSTs" && return
+
+ echo "#####"
+ echo "The target MDT-object and some of its OST-object are lost."
+ echo "The LFSCK should find out the left OST-objects and re-create"
+ echo "the MDT-object under the directory .lustre/lost+found/MDTxxxx/"
+ echo "with the partial OST-objects (LOV EA hole)."
+
+ echo "New client can access the file with LOV EA hole via normal"
+ echo "system tools or commands without crash the system - PFL case."
+ echo "#####"
+
+ check_mount_and_prep
+
+ # Two-component PFL files: [0, 2M) and [2M, EOF), 2 stripes each.
+ $LFS setstripe -E 2M -S 1M -c 2 -E -1 -S 1M -c 2 $DIR/$tdir/f0 ||
+ error "(0) Fail to create PFL file $DIR/$tdir/f0"
+ $LFS setstripe -E 2M -S 1M -c 2 -E -1 -S 1M -c 2 $DIR/$tdir/f1 ||
+ error "(1) Fail to create PFL file $DIR/$tdir/f1"
+ $LFS setstripe -E 2M -S 1M -c 2 -E -1 -S 1M -c 2 $DIR/$tdir/f2 ||
+ error "(2) Fail to create PFL file $DIR/$tdir/f2"
+
+ local bcount=$((256 * 3 + 1))
+
+ dd if=/dev/zero of=$DIR/$tdir/f0 bs=4096 count=$bcount
+ dd if=/dev/zero of=$DIR/$tdir/f1 bs=4096 count=$bcount
+ dd if=/dev/zero of=$DIR/$tdir/f2 bs=4096 count=$bcount
+
+ # Record the FIDs: LFSCK re-creates each lost MDT-object as
+ # .lustre/lost+found/MDT0000/<fid>-R-0.
+ local fid0=$($LFS path2fid $DIR/$tdir/f0)
+ local fid1=$($LFS path2fid $DIR/$tdir/f1)
+ local fid2=$($LFS path2fid $DIR/$tdir/f2)
+
+ echo ${fid0}
+ $LFS getstripe $DIR/$tdir/f0
+ echo ${fid1}
+ $LFS getstripe $DIR/$tdir/f1
+ echo ${fid2}
+ $LFS getstripe $DIR/$tdir/f2
+
+ cancel_lru_locks mdc
+ cancel_lru_locks osc
+
+ echo "Inject failure..."
+ echo "To simulate f0 lost MDT-object"
+ #define OBD_FAIL_LFSCK_LOST_MDTOBJ 0x1616
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1616
+ rm -f $DIR/$tdir/f0
+
+ echo "To simulate the case of f1 lost MDT-object and "
+ echo "the first OST-object in each PFL component"
+ #define OBD_FAIL_LFSCK_LOST_SPEOBJ 0x161a
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x161a
+ rm -f $DIR/$tdir/f1
+
+ echo "To simulate the case of f2 lost MDT-object and "
+ echo "the second OST-object in each PFL component"
+ # fail_val selects which stripe's OST-object is also dropped while
+ # fail_loc 0x161a is still in effect.
+ do_facet $SINGLEMDS $LCTL set_param fail_val=1
+ rm -f $DIR/$tdir/f2
+
+ sync
+ sleep 2
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0 fail_val=0
+
+ echo "Trigger layout LFSCK on all devices to find out orphan OST-object"
+ $START_LAYOUT -r -o || error "(3) Fail to start LFSCK for layout!"
+
+ for k in $(seq $MDSCOUNT); do
+ # The LFSCK status query interval is 30 seconds. For the case
+ # of some LFSCK_NOTIFY RPCs failure/lost, we will wait enough
+ # time to guarantee the status sync up.
+ wait_update_facet mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_layout |
+ awk '/^status/ { print \\\$2 }'" "completed" 32 ||
+ error "(4) MDS${k} is not the expected 'completed'"
+ done
+
+ for k in $(seq $OSTCOUNT); do
+ local cur_status=$(do_facet ost${k} $LCTL get_param -n \
+ obdfilter.$(facet_svc ost${k}).lfsck_layout |
+ awk '/^status/ { print $2 }')
+ [ "$cur_status" == "completed" ] ||
+ error "(5) OST${k} Expect 'completed', but got '$cur_status'"
+ done
+
+ local repaired=$(do_facet mds1 $LCTL get_param -n \
+ mdd.$(facet_svc mds1).lfsck_layout |
+ awk '/^repaired_orphan/ { print $2 }')
+ [ $repaired -eq 8 ] ||
+ error "(6) Expect 8 fixed on mds1, but got: $repaired"
+
+ #
+ # ${fid0}-R-0 is the old f0
+ #
+ local name="$MOUNT/.lustre/lost+found/MDT0000/${fid0}-R-0"
+ echo "Check $name, which is the old f0"
+
+ $LFS getstripe -v $name || error "(7.1) cannot getstripe on $name"
+
+ local pattern=$($LFS getstripe -L -I1 $name)
+ [[ "$pattern" = "$PATTERN_WITHOUT_HOLE" ]] ||
+ error "(7.2.1) NOT expect pattern flag hole, but got $pattern"
+
+ pattern=$($LFS getstripe -L -I2 $name)
+ [[ "$pattern" = "$PATTERN_WITHOUT_HOLE" ]] ||
+ error "(7.2.2) NOT expect pattern flag hole, but got $pattern"
+
+ local stripes=$($LFS getstripe -c -I1 $name)
+ [ $stripes -eq 2 ] ||
+ error "(7.3.1) expect 2 stripes, but got $stripes"
+
+ stripes=$($LFS getstripe -c -I2 $name)
+ [ $stripes -eq 2 ] ||
+ error "(7.3.2) expect 2 stripes, but got $stripes"
+
+ local e_start=$($LFS getstripe -I1 $name |
+ awk '/lcme_extent.e_start:/ { print $2 }')
+ [ $e_start -eq 0 ] ||
+ error "(7.4.1) expect the COMP1 start at 0, got $e_start"
+
+ local e_end=$($LFS getstripe -I1 $name |
+ awk '/lcme_extent.e_end:/ { print $2 }')
+ [ $e_end -eq 2097152 ] ||
+ error "(7.4.2) expect the COMP1 end at 2097152, got $e_end"
+
+ e_start=$($LFS getstripe -I2 $name |
+ awk '/lcme_extent.e_start:/ { print $2 }')
+ [ $e_start -eq 2097152 ] ||
+ error "(7.5.1) expect the COMP2 start at 2097152, got $e_start"
+
+ e_end=$($LFS getstripe -I2 $name |
+ awk '/lcme_extent.e_end:/ { print $2 }')
+ [ "$e_end" = "EOF" ] ||
+ error "(7.5.2) expect the COMP2 end at (EOF), got $e_end"
+
+ local size=$(stat $name | awk '/Size:/ { print $2 }')
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(7.6) expect the size $((4096 * $bcount)), but got $size"
+
+ cat $name > /dev/null || error "(7.7) cannot read $name"
+
+ echo "dummy" >> $name || error "(7.8) cannot write $name"
+
+ chown $RUNAS_ID:$RUNAS_GID $name || error "(7.9) cannot chown on $name"
+
+ touch $name || error "(7.10) cannot touch $name"
+
+ rm -f $name || error "(7.11) cannot unlink $name"
+
+ #
+ # ${fid1}-R-0 contains the old f1's second stripe in each COMP
+ #
+ name="$MOUNT/.lustre/lost+found/MDT0000/${fid1}-R-0"
+ echo "Check $name, it contains f1's second OST-object in each COMP"
+
+ $LFS getstripe -v $name || error "(8.1) cannot getstripe on $name"
+
+ pattern=$($LFS getstripe -L -I1 $name)
+ [[ "$pattern" = "$PATTERN_WITH_HOLE" ]] ||
+ error "(8.2.1) expect pattern flag hole, but got $pattern"
+
+ pattern=$($LFS getstripe -L -I2 $name)
+ [[ "$pattern" = "$PATTERN_WITH_HOLE" ]] ||
+ error "(8.2.2) expect pattern flag hole, but got $pattern"
+
+ stripes=$($LFS getstripe -c -I1 $name)
+ [ $stripes -eq 2 ] ||
+ error "(8.3.1) expect 2 stripes, but got $stripes"
+
+ stripes=$($LFS getstripe -c -I2 $name)
+ [ $stripes -eq 2 ] ||
+ error "(8.3.2) expect 2 stripes, but got $stripes"
+
+ e_start=$($LFS getstripe -I1 $name |
+ awk '/lcme_extent.e_start:/ { print $2 }')
+ [ $e_start -eq 0 ] ||
+ error "(8.4.1) expect the COMP1 start at 0, got $e_start"
+
+ e_end=$($LFS getstripe -I1 $name |
+ awk '/lcme_extent.e_end:/ { print $2 }')
+ [ $e_end -eq 2097152 ] ||
+ error "(8.4.2) expect the COMP1 end at 2097152, got $e_end"
+
+ e_start=$($LFS getstripe -I2 $name |
+ awk '/lcme_extent.e_start:/ { print $2 }')
+ [ $e_start -eq 2097152 ] ||
+ error "(8.5.1) expect the COMP2 start at 2097152, got $e_start"
+
+ e_end=$($LFS getstripe -I2 $name |
+ awk '/lcme_extent.e_end:/ { print $2 }')
+ [ "$e_end" = "EOF" ] ||
+ error "(8.5.2) expect the COMP2 end at (EOF), got $e_end"
+
+ size=$(stat $name | awk '/Size:/ { print $2 }')
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(8.6) expect the size $((4096 * $bcount)), but got $size"
+
+ cat $name > /dev/null && error "(8.7) normal read $name should fail"
+
+ local failures=$(dd if=$name of=$DIR/$tdir/dump conv=sync,noerror \
+ bs=4096 2>&1 | grep "Input/output error" | wc -l)
+
+ # The first stripe in each COMP was lost
+ [ $failures -eq 512 ] ||
+ error "(8.8) expect 512 IO failures, but get $failures"
+
+ size=$(stat $DIR/$tdir/dump | awk '/Size:/ { print $2 }')
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(8.9) expect the size $((4096 * $bcount)), but got $size"
+
+ dd if=/dev/zero of=$name conv=sync,notrunc bs=4096 count=1 &&
+ error "(8.10) write to the LOV EA hole should fail"
+
+ dd if=/dev/zero of=$name conv=sync,notrunc bs=4096 count=1 seek=300 ||
+ error "(8.11) write to normal stripe should NOT fail"
+
+ echo "foo" >> $name && error "(8.12) append write $name should fail"
+
+ chown $RUNAS_ID:$RUNAS_GID $name || error "(8.13) cannot chown on $name"
+
+ touch $name || error "(8.14) cannot touch $name"
+
+ rm -f $name || error "(8.15) cannot unlink $name"
+
+ #
+ # ${fid2}-R-0 contains the old f2's first stripe in each COMP
+ #
+ name="$MOUNT/.lustre/lost+found/MDT0000/${fid2}-R-0"
+ echo "Check $name, it contains f2's first stripe in each COMP"
+
+ $LFS getstripe -v $name || error "(9.1) cannot getstripe on $name"
+
+ pattern=$($LFS getstripe -L -I1 $name)
+ [[ "$pattern" = "$PATTERN_WITH_HOLE" ]] ||
+ error "(9.2.1) expect pattern flag hole, but got $pattern"
+
+ pattern=$($LFS getstripe -L -I2 $name)
+ [[ "$pattern" = "$PATTERN_WITH_HOLE" ]] ||
+ error "(9.2.2) expect pattern flag hole, but got $pattern"
+
+ stripes=$($LFS getstripe -c -I1 $name)
+ [ $stripes -eq 2 ] ||
+ error "(9.3.1) expect 2 stripes, but got $stripes"
+
+ stripes=$($LFS getstripe -c -I2 $name)
+ [ $stripes -eq 2 ] ||
+ error "(9.3.2) expect 2 stripes, but got $stripes"
+
+ e_start=$($LFS getstripe -I1 $name |
+ awk '/lcme_extent.e_start:/ { print $2 }')
+ [ $e_start -eq 0 ] ||
+ error "(9.4.1) expect the COMP1 start at 0, got $e_start"
+
+ e_end=$($LFS getstripe -I1 $name |
+ awk '/lcme_extent.e_end:/ { print $2 }')
+ [ $e_end -eq 2097152 ] ||
+ error "(9.4.2) expect the COMP1 end at 2097152, got $e_end"
+
+ e_start=$($LFS getstripe -I2 $name |
+ awk '/lcme_extent.e_start:/ { print $2 }')
+ [ $e_start -eq 2097152 ] ||
+ error "(9.5.1) expect the COMP2 start at 2097152, got $e_start"
+
+ e_end=$($LFS getstripe -I2 $name |
+ awk '/lcme_extent.e_end:/ { print $2 }')
+ [ "$e_end" = "EOF" ] ||
+ error "(9.5.2) expect the COMP2 end at (EOF), got $e_end"
+
+ size=$(stat $name | awk '/Size:/ { print $2 }')
+ # The second stripe in COMP was lost, so we do not know there
+ # have ever been some data before. 'stat' will regard it as
+ # no data on the lost stripe.
+ bcount=$((256 * 3))
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(9.6) expect size $((4096 * $bcount)), but got $size"
+
+ cat $name > /dev/null &&
+ error "(9.7) normal read $name should fail"
+
+ # One 256-block stripe is missing from each of the two components.
+ failures=$(dd if=$name of=$DIR/$tdir/dump conv=sync,noerror \
+ bs=4096 2>&1 | grep "Input/output error" | wc -l)
+ [ $failures -eq 512 ] ||
+ error "(9.8) expect 512 IO failures, but get $failures"
+
+ size=$(stat $DIR/$tdir/dump | awk '/Size:/ { print $2 }')
+ # The second stripe in COMP was lost, so we do not know there
+ # have ever been some data before. Since 'dd' skip failure,
+ # it will regard the lost stripe contains data.
+ bcount=$((256 * 4))
+ [ $size -eq $((4096 * $bcount)) ] ||
+ error "(9.9) expect the size $((4096 * $bcount)), but got $size"
+
+ dd if=/dev/zero of=$name conv=sync,notrunc bs=4096 count=1 \
+ seek=300 && error "(9.10) write to the LOV EA hole should fail"
+
+ dd if=/dev/zero of=$name conv=sync,notrunc bs=4096 count=1 ||
+ error "(9.11) write to normal stripe should NOT fail"
+
+ echo "foo" >> $name &&
+ error "(9.12) append write $name should fail"
+
+ chown $RUNAS_ID:$RUNAS_GID $name ||
+ error "(9.13) cannot chown on $name"
+
+ touch $name || error "(9.14) cannot touch $name"
+
+ rm -f $name || error "(9.15) cannot unlink $name"
+}
+run_test 20b "Handle the orphan with dummy LOV EA slot properly - PFL case"
+
+test_21() {
+ [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.5.59) ]] &&
+ skip "ignore the test if MDS is older than 2.5.59" && return
+
+ check_mount_and_prep
+ createmany -o $DIR/$tdir/f 100 || error "(0) Fail to create 100 files"
+
+ echo "Start all LFSCK components by default (-s 1)"
+ # NOTE(review): '-s 1' appears to slow the scan so that both the
+ # namespace and layout components are still in scanning-phase1 when
+ # sampled below — confirm against the lfsck_start documentation.
+ do_facet mds1 $LCTL lfsck_start -M ${FSNAME}-MDT0000 -s 1 -r ||
+ error "Fail to start LFSCK"
+
+ echo "namespace LFSCK should be in 'scanning-phase1' status"
+ local STATUS=$($SHOW_NAMESPACE | awk '/^status/ { print $2 }')
+ [ "$STATUS" == "scanning-phase1" ] ||
+ error "Expect namespace 'scanning-phase1', but got '$STATUS'"
+
+ echo "layout LFSCK should be in 'scanning-phase1' status"
+ STATUS=$($SHOW_LAYOUT | awk '/^status/ { print $2 }')
+ [ "$STATUS" == "scanning-phase1" ] ||
+ error "Expect layout 'scanning-phase1', but got '$STATUS'"
+
+ echo "Stop all LFSCK components by default"
+ do_facet mds1 $LCTL lfsck_stop -M ${FSNAME}-MDT0000 ||
+ error "Fail to stop LFSCK"
+}
+run_test 21 "run all LFSCK components by default"
+
+test_22a() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ echo "#####"
+ echo "The parent_A references the child directory via some name entry,"
+ echo "but the child directory back references another parent_B via its"
+ echo "".." name entry. The parent_B does not exist. Then the namespace"
+ echo "LFSCK will repair the child directory's ".." name entry."
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 1 $DIR/$tdir/guard || error "(1) Fail to mkdir on MDT1"
+ $LFS mkdir -i 1 $DIR/$tdir/foo || error "(2) Fail to mkdir on MDT1"
+
+ echo "Inject failure stub on MDT0 to simulate bad dotdot name entry"
+ echo "The dummy's dotdot name entry references the guard."
+ #define OBD_FAIL_LFSCK_BAD_PARENT 0x161e
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x161e
+ # With the fail_loc set, dummy is created under foo but its ".."
+ # entry points at guard instead of foo.
+ $LFS mkdir -i 0 $DIR/$tdir/foo/dummy ||
+ error "(3) Fail to mkdir on MDT0"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ # Remove the bogus ".." target so the referenced parent no longer
+ # exists when the LFSCK runs.
+ rmdir $DIR/$tdir/guard || error "(4) Fail to rmdir $DIR/$tdir/guard"
+
+ echo "Trigger namespace LFSCK to repair unmatched pairs"
+ $START_NAMESPACE -A -r ||
+ error "(5) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 6
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^unmatched_pairs_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(7) Fail to repair unmatched pairs: $repaired"
+
+ echo "'ls' should success after namespace LFSCK repairing"
+ ls -ail $DIR/$tdir/foo/dummy > /dev/null ||
+ error "(8) ls should success."
+}
+run_test 22a "LFSCK can repair unmatched pairs (1)"
+
+test_22b() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ echo "#####"
+ echo "The parent_A references the child directory via the name entry_B,"
+ echo "but the child directory back references another parent_C via its"
+ echo "".." name entry. The parent_C exists, but there is no the name"
+ echo "entry_B under the parent_C. Then the namespace LFSCK will repair"
+ echo "the child directory's ".." name entry and its linkEA."
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 1 $DIR/$tdir/guard || error "(1) Fail to mkdir on MDT1"
+ $LFS mkdir -i 1 $DIR/$tdir/foo || error "(2) Fail to mkdir on MDT1"
+
+ echo "Inject failure stub on MDT0 to simulate bad dotdot name entry"
+ echo "and bad linkEA. The dummy's dotdot name entry references the"
+ echo "guard. The dummy's linkEA references n non-exist name entry."
+ #define OBD_FAIL_LFSCK_BAD_PARENT 0x161e
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x161e
+ $LFS mkdir -i 0 $DIR/$tdir/foo/dummy ||
+ error "(3) Fail to mkdir on MDT0"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ local dummyfid=$($LFS path2fid $DIR/$tdir/foo/dummy)
+ echo "fid2path should NOT work on the dummy's FID $dummyfid"
+ local dummyname=$($LFS fid2path $DIR $dummyfid)
+ [ "$dummyname" != "$DIR/$tdir/foo/dummy" ] ||
+ error "(4) fid2path works unexpectedly."
+
+ echo "Trigger namespace LFSCK to repair unmatched pairs"
+ $START_NAMESPACE -A -r ||
+ error "(5) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 6
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^unmatched_pairs_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(7) Fail to repair unmatched pairs: $repaired"
+
+ echo "fid2path should work on the dummy's FID $dummyfid after LFSCK"
+ local dummyname=$($LFS fid2path $DIR $dummyfid)
+ [ "$dummyname" == "$DIR/$tdir/foo/dummy" ] ||
+ error "(8) fid2path does not work"
+}
+run_test 22b "LFSCK can repair unmatched pairs (2)"
+
+# Verify the namespace LFSCK detects a dangling name entry (entry exists,
+# referenced MDT-object does not) and, with -C, re-creates the object.
+test_23a() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ echo "#####"
+ echo "The name entry is there, but the MDT-object for such name "
+ echo "entry does not exist. The namespace LFSCK should find out "
+ echo "and repair the inconsistency as required."
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0 on MDT0"
+ $LFS mkdir -i 1 $DIR/$tdir/d0/d1 || error "(2) Fail to mkdir d1 on MDT1"
+
+ echo "Inject failure stub on MDT1 to simulate dangling name entry"
+ #define OBD_FAIL_LFSCK_DANGLING2 0x1620
+ # The fail_loc makes rmdir destroy d1's object but keep the name entry.
+ do_facet mds2 $LCTL set_param fail_loc=0x1620
+ rmdir $DIR/$tdir/d0/d1 || error "(3) Fail to rmdir d1"
+ do_facet mds2 $LCTL set_param fail_loc=0
+
+ echo "'ls' should fail because of dangling name entry"
+ ls -ail $DIR/$tdir/d0/d1 > /dev/null 2>&1 && error "(4) ls should fail."
+
+ echo "Trigger namespace LFSCK to find out dangling name entry"
+ $START_NAMESPACE -A -r ||
+ error "(5) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 6
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^dangling_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(7) Fail to repair dangling name entry: $repaired"
+
+ echo "'ls' should fail because not re-create MDT-object by default"
+ ls -ail $DIR/$tdir/d0/d1 > /dev/null 2>&1 && error "(8) ls should fail."
+
+ echo "Trigger namespace LFSCK again to repair dangling name entry"
+ # -C: re-create the lost MDT-object for the dangling name entry.
+ $START_NAMESPACE -A -r -C ||
+ error "(9) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 10
+
+ repaired=$($SHOW_NAMESPACE |
+ awk '/^dangling_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(11) Fail to repair dangling name entry: $repaired"
+
+ echo "'ls' should success after namespace LFSCK repairing"
+ ls -ail $DIR/$tdir/d0/d1 > /dev/null || error "(12) ls should success."
+}
+run_test 23a "LFSCK can repair dangling name entry (1)"
+
+# Verify that when a dangling name entry actually belongs to an existing
+# multi-linked object, the LFSCK first re-creates a stub object (stage 1)
+# and then replaces the stub with the real object (stage 2).
+test_23b() {
+ echo "#####"
+ echo "The objectA has multiple hard links, one of them corresponding"
+ echo "to the name entry_B. But there is something wrong for the name"
+ echo "entry_B and cause entry_B to references non-exist object_C."
+ echo "In the first-stage scanning, the LFSCK will think the entry_B"
+ echo "as dangling, and re-create the lost object_C. When the LFSCK"
+ echo "comes to the second-stage scanning, it will find that the"
+ echo "former re-creating object_C is not proper, and will try to"
+ echo "replace the object_C with the real object_A."
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0 on MDT0"
+ $LFS path2fid $DIR/$tdir/d0
+
+ createmany -o $DIR/$tdir/d0/t 10 || error "(1.5) Fail to creatmany"
+
+ echo "dummy" > $DIR/$tdir/d0/f0 || error "(2) Fail to touch on MDT0"
+ $LFS path2fid $DIR/$tdir/d0/f0
+
+ echo "dead" > $DIR/$tdir/d0/f1 || error "(3) Fail to touch on MDT0"
+ $LFS path2fid $DIR/$tdir/d0/f1
+
+ # Compare the FID sequence numbers (first ':'-separated field).
+ local SEQ0=$($LFS path2fid $DIR/$tdir/d0/f0 | awk -F':' '{print $1}')
+ local SEQ1=$($LFS path2fid $DIR/$tdir/d0/f1 | awk -F':' '{print $1}')
+
+ if [ "$SEQ0" != "$SEQ1" ]; then
+ # To guarantee that the f0 and f1 are in the same FID seq
+ rm -f $DIR/$tdir/d0/f0 ||
+ error "(3.1) Fail to unlink $DIR/$tdir/d0/f0"
+ echo "dummy" > $DIR/$tdir/d0/f0 ||
+ error "(3.2) Fail to touch on MDT0"
+ $LFS path2fid $DIR/$tdir/d0/f0
+ fi
+
+ # f1's object id (hex), converted to decimal for use as fail_val.
+ local OID=$($LFS path2fid $DIR/$tdir/d0/f1 | awk -F':' '{print $2}')
+ OID=$(printf %d $OID)
+
+ echo "Inject failure stub on MDT0 to simulate dangling name entry"
+ #define OBD_FAIL_LFSCK_DANGLING3 0x1621
+ # fail_val carries f1's OID so the injected link references f1's slot.
+ do_facet $SINGLEMDS $LCTL set_param fail_val=$OID fail_loc=0x1621
+ ln $DIR/$tdir/d0/f0 $DIR/$tdir/d0/foo || error "(4) Fail to hard link"
+ do_facet $SINGLEMDS $LCTL set_param fail_val=0 fail_loc=0
+
+ # If there is creation after the dangling injection, it may re-use
+ # the just released local object (inode) that is referenced by the
+ # dangling name entry. It will fail the dangling injection.
+ # So before deleting the target object for the dangling name entry,
+ # remove some other objects to avoid the target object being reused
+ # by some potential creations. LU-7429
+ unlinkmany $DIR/$tdir/d0/t 10 || error "(5.0) Fail to unlinkmany"
+
+ rm -f $DIR/$tdir/d0/f1 || error "(5) Fail to unlink $DIR/$tdir/d0/f1"
+
+ echo "'ls' should fail because of dangling name entry"
+ ls -ail $DIR/$tdir/d0/foo > /dev/null 2>&1 &&
+ error "(6) ls should fail."
+
+ echo "Trigger namespace LFSCK to find out dangling name entry"
+ $START_NAMESPACE -r -C ||
+ error "(7) Fail to start LFSCK for namespace"
+
+ wait_update_facet $SINGLEMDS "$LCTL get_param -n \
+ mdd.${MDT_DEV}.lfsck_namespace |
+ awk '/^status/ { print \\\$2 }'" "completed" 32 || {
+ $SHOW_NAMESPACE
+ error "(8) unexpected status"
+ }
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^dangling_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(9) Fail to repair dangling name entry: $repaired"
+
+ repaired=$($SHOW_NAMESPACE |
+ awk '/^multiple_linked_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(10) Fail to drop the former created object: $repaired"
+
+ # After stage 2, 'foo' must reference the real object (f0's content).
+ local data=$(cat $DIR/$tdir/d0/foo)
+ [ "$data" == "dummy" ] ||
+ error "(11) The $DIR/$tdir/d0/foo is not recovered: $data"
+}
+run_test 23b "LFSCK can repair dangling name entry (2)"
+
+# Like test_23b, but the re-created stub object is modified by a client
+# between LFSCK's two scanning stages, so the LFSCK must NOT replace it
+# with the real object.
+test_23c() {
+ echo "#####"
+ echo "The objectA has multiple hard links, one of them corresponding"
+ echo "to the name entry_B. But there is something wrong for the name"
+ echo "entry_B and cause entry_B to references non-exist object_C."
+ echo "In the first-stage scanning, the LFSCK will think the entry_B"
+ echo "as dangling, and re-create the lost object_C. And then others"
+ echo "modified the re-created object_C. When the LFSCK comes to the"
+ echo "second-stage scanning, it will find that the former re-creating"
+ echo "object_C maybe wrong and try to replace the object_C with the"
+ echo "real object_A. But because object_C has been modified, so the"
+ echo "LFSCK cannot replace it."
+ echo "#####"
+
+ start_full_debug_logging
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0 on MDT0"
+ $LFS path2fid $DIR/$tdir/d0
+
+ createmany -o $DIR/$tdir/d0/t 10 || error "(1.5) Fail to creatmany"
+
+ echo "dummy" > $DIR/$tdir/d0/f0 || error "(2) Fail to touch on MDT0"
+ $LFS path2fid $DIR/$tdir/d0/f0
+
+ echo "dead" > $DIR/$tdir/d0/f1 || error "(3) Fail to touch on MDT0"
+ $LFS path2fid $DIR/$tdir/d0/f1
+
+ # Compare the FID sequence numbers (first ':'-separated field).
+ local SEQ0=$($LFS path2fid $DIR/$tdir/d0/f0 | awk -F':' '{print $1}')
+ local SEQ1=$($LFS path2fid $DIR/$tdir/d0/f1 | awk -F':' '{print $1}')
+
+ if [ "$SEQ0" != "$SEQ1" ]; then
+ # To guarantee that the f0 and f1 are in the same FID seq
+ rm -f $DIR/$tdir/d0/f0 ||
+ error "(3.1) Fail to unlink $DIR/$tdir/d0/f0"
+ echo "dummy" > $DIR/$tdir/d0/f0 ||
+ error "(3.2) Fail to touch on MDT0"
+ $LFS path2fid $DIR/$tdir/d0/f0
+ fi
+
+ # f1's object id (hex), converted to decimal for use as fail_val.
+ local OID=$($LFS path2fid $DIR/$tdir/d0/f1 | awk -F':' '{print $2}')
+ OID=$(printf %d $OID)
+
+ echo "Inject failure stub on MDT0 to simulate dangling name entry"
+ #define OBD_FAIL_LFSCK_DANGLING3 0x1621
+ do_facet $SINGLEMDS $LCTL set_param fail_val=$OID fail_loc=0x1621
+ ln $DIR/$tdir/d0/f0 $DIR/$tdir/d0/foo || error "(4) Fail to hard link"
+ do_facet $SINGLEMDS $LCTL set_param fail_val=0 fail_loc=0
+
+ # If there is creation after the dangling injection, it may re-use
+ # the just released local object (inode) that is referenced by the
+ # dangling name entry. It will fail the dangling injection.
+ # So before deleting the target object for the dangling name entry,
+ # remove some other objects to avoid the target object being reused
+ # by some potential creations. LU-7429
+ unlinkmany $DIR/$tdir/d0/t 10 || error "(5.0) Fail to unlinkmany"
+
+ rm -f $DIR/$tdir/d0/f1 || error "(5) Fail to unlink $DIR/$tdir/d0/f1"
+
+ echo "'ls' should fail because of dangling name entry"
+ ls -ail $DIR/$tdir/d0/foo > /dev/null 2>&1 &&
+ error "(6) ls should fail."
+
+ #define OBD_FAIL_LFSCK_DELAY3 0x1602
+ # Delay the LFSCK (fail_val=10) so the client can modify the stub
+ # object re-created in stage 1 before stage 2 runs.
+ do_facet $SINGLEMDS $LCTL set_param fail_val=10 fail_loc=0x1602
+
+ echo "Trigger namespace LFSCK to find out dangling name entry"
+ $START_NAMESPACE -r -C ||
+ error "(7) Fail to start LFSCK for namespace"
+
+ # Wait until the stage-1 re-created (empty) stub is visible.
+ wait_update_facet client "stat $DIR/$tdir/d0/foo |
+ awk '/Size/ { print \\\$2 }'" "0" $LTIME || {
+ stat $DIR/$tdir/d0/foo
+ $SHOW_NAMESPACE
+ error "(8) unexpected size"
+ }
+
+ echo "data" >> $DIR/$tdir/d0/foo || error "(9) Fail to write"
+ cancel_lru_locks osc
+
+ do_facet $SINGLEMDS $LCTL set_param fail_val=0 fail_loc=0
+ wait_update_facet $SINGLEMDS "$LCTL get_param -n \
+ mdd.${MDT_DEV}.lfsck_namespace |
+ awk '/^status/ { print \\\$2 }'" "completed" 32 || {
+ $SHOW_NAMESPACE
+ error "(10) unexpected status"
+ }
+
+ stop_full_debug_logging
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^dangling_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(11) Fail to repair dangling name entry: $repaired"
+
+ # The modified stub must be kept; it must NOT contain f0's content.
+ local data=$(cat $DIR/$tdir/d0/foo)
+ [ "$data" != "dummy" ] ||
+ error "(12) The $DIR/$tdir/d0/foo should not be recovered"
+}
+run_test 23c "LFSCK can repair dangling name entry (3)"
+
+# Verify the namespace LFSCK removes a bogus linkEA entry when two
+# MDT-objects back-reference the same name entry, and moves the
+# no-longer-referenced object under .lustre/lost+found/MDTxxxx/.
+test_24() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ echo "#####"
+ echo "Two MDT-objects back reference the same name entry via their"
+ echo "each own linkEA entry, but the name entry only references one"
+ echo "MDT-object. The namespace LFSCK will remove the linkEA entry"
+ echo "for the MDT-object that is not recognized. If such MDT-object"
+ echo "has no other linkEA entry after the removing, then the LFSCK"
+ echo "will add it as orphan under the .lustre/lost+found/MDTxxxx/."
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 1 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0"
+
+ mkdir $DIR/$tdir/d0/guard || error "(1) Fail to mkdir guard"
+ $LFS path2fid $DIR/$tdir/d0/guard
+
+ mkdir $DIR/$tdir/d0/dummy || error "(2) Fail to mkdir dummy"
+ $LFS path2fid $DIR/$tdir/d0/dummy
+
+ # The parent FID used in the expected orphan name differs by backend
+ # filesystem type — presumably because the recorded "parent" of the
+ # orphan depends on the backend; TODO confirm against LFSCK behavior.
+ local pfid
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ pfid=$($LFS path2fid $DIR/$tdir/d0/guard)
+ else
+ pfid=$($LFS path2fid $DIR/$tdir/d0/dummy)
+ fi
+
+ touch $DIR/$tdir/d0/guard/foo ||
+ error "(3) Fail to touch $DIR/$tdir/d0/guard/foo"
+
+ echo "Inject failure stub on MDT0 to simulate the case that"
+ echo "the $DIR/$tdir/d0/dummy/foo has the 'bad' linkEA entry"
+ echo "that references $DIR/$tdir/d0/guard/foo."
+ echo "Then remove the name entry $DIR/$tdir/d0/dummy/foo."
+ echo "So the MDT-object $DIR/$tdir/d0/dummy/foo will be left"
+ echo "there with the same linkEA entry as another MDT-object"
+ echo "$DIR/$tdir/d0/guard/foo has"
+
+ #define OBD_FAIL_LFSCK_MUL_REF 0x1622
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1622
+ $LFS mkdir -i 0 $DIR/$tdir/d0/dummy/foo ||
+ error "(4) Fail to mkdir $DIR/$tdir/d0/dummy/foo"
+ $LFS path2fid $DIR/$tdir/d0/dummy/foo
+ local cfid=$($LFS path2fid $DIR/$tdir/d0/dummy/foo)
+ rmdir $DIR/$tdir/d0/dummy/foo ||
+ error "(5) Fail to remove $DIR/$tdir/d0/dummy/foo name entry"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ echo "stat $DIR/$tdir/d0/dummy/foo should fail"
+ stat $DIR/$tdir/d0/dummy/foo > /dev/null 2>&1 &&
+ error "(6) stat successfully unexpectedly"
+
+ echo "Trigger namespace LFSCK to repair multiple-referenced name entry"
+ $START_NAMESPACE -A -r ||
+ error "(7) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 8
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^multiple_referenced_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(9) Fail to repair multiple referenced name entry: $repaired"
+
+ echo "There should be an orphan under .lustre/lost+found/MDT0000/"
+ [ -d $MOUNT/.lustre/lost+found/MDT0000 ] ||
+ error "(10) $MOUNT/.lustre/lost+found/MDT0000/ should be there"
+
+ # Expected orphan name: "<child FID>-<parent FID>-D-0" — presumably
+ # "D" marks a directory-type orphan; verify against LFSCK naming.
+ local cname="$cfid-$pfid-D-0"
+ ls -ail $MOUNT/.lustre/lost+found/MDT0000/$cname ||
+ error "(11) .lustre/lost+found/MDT0000/ should not be empty"
+}
+run_test 24 "LFSCK can repair multiple-referenced name entry"
+
+# Verify the namespace LFSCK fixes a wrong file type stored in a name
+# entry (ldiskfs-only: the dirent file-type byte is an ldiskfs feature).
+test_25() {
+ [ $(facet_fstype $SINGLEMDS) != ldiskfs ] &&
+ skip "ldiskfs only test" && return
+
+ echo "#####"
+ echo "The file type in the name entry does not match the file type"
+ echo "claimed by the referenced object. Then the LFSCK will update"
+ echo "the file type in the name entry."
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0"
+
+ echo "Inject failure stub on MDT0 to simulate the case that"
+ echo "the file type stored in the name entry is wrong."
+
+ #define OBD_FAIL_LFSCK_BAD_TYPE 0x1623
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1623
+ touch $DIR/$tdir/d0/foo || error "(2) Fail to touch $DIR/$tdir/d0/foo"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ echo "Trigger namespace LFSCK to repair bad file type in the name entry"
+ $START_NAMESPACE -r || error "(3) Fail to start LFSCK for namespace"
+
+ wait_update_facet $SINGLEMDS "$LCTL get_param -n \
+ mdd.${MDT_DEV}.lfsck_namespace |
+ awk '/^status/ { print \\\$2 }'" "completed" 32 || {
+ $SHOW_NAMESPACE
+ error "(4) unexpected status"
+ }
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^bad_file_type_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(5) Fail to repair bad file type in name entry: $repaired"
+
+ ls -ail $DIR/$tdir/d0 || error "(6) Fail to 'ls' the $DIR/$tdir/d0"
+}
+run_test 25 "LFSCK can repair bad file type in the name entry"
+
+# Verify the namespace LFSCK re-inserts a lost *local* name entry that is
+# still back-referenced by the object's linkEA, keeping the original FID.
+test_26a() {
+ echo "#####"
+ echo "The local name entry back referenced by the MDT-object is lost."
+ echo "The namespace LFSCK will add the missing local name entry back"
+ echo "to the normal namespace."
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0"
+ touch $DIR/$tdir/d0/foo || error "(2) Fail to create foo"
+ local foofid=$($LFS path2fid $DIR/$tdir/d0/foo)
+
+ # A second hard link keeps foo's object alive after 'foo' is removed.
+ ln $DIR/$tdir/d0/foo $DIR/$tdir/d0/dummy ||
+ error "(3) Fail to hard link to $DIR/$tdir/d0/foo"
+
+ echo "Inject failure stub on MDT0 to simulate the case that"
+ echo "foo's name entry will be removed, but the foo's object"
+ echo "and its linkEA are kept in the system."
+
+ #define OBD_FAIL_LFSCK_NO_NAMEENTRY 0x1624
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1624
+ rm -f $DIR/$tdir/d0/foo || error "(4) Fail to unlink $DIR/$tdir/d0/foo"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ ls -ail $DIR/$tdir/d0/foo > /dev/null 2>&1 &&
+ error "(5) 'ls' should fail"
+
+ echo "Trigger namespace LFSCK to repair the missing remote name entry"
+ $START_NAMESPACE -r -A ||
+ error "(6) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 7
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^lost_dirent_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(8) Fail to repair lost dirent: $repaired"
+
+ ls -ail $DIR/$tdir/d0/foo ||
+ error "(9) Fail to 'ls' $DIR/$tdir/d0/foo"
+
+ # The repaired entry must reference the same object (FID unchanged).
+ local foofid2=$($LFS path2fid $DIR/$tdir/d0/foo)
+ [ "$foofid" == "$foofid2" ] ||
+ error "(10) foo's FID changed: $foofid, $foofid2"
+}
+run_test 26a "LFSCK can add the missing local name entry back to the namespace"
+
+# Verify the namespace LFSCK re-inserts a lost *remote* name entry (the
+# directory resides on a different MDT than its parent).
+test_26b() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ echo "#####"
+ echo "The remote name entry back referenced by the MDT-object is lost."
+ echo "The namespace LFSCK will add the missing remote name entry back"
+ echo "to the normal namespace."
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 1 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0"
+ $LFS mkdir -i 0 $DIR/$tdir/d0/foo || error "(2) Fail to mkdir foo"
+ local foofid=$($LFS path2fid $DIR/$tdir/d0/foo)
+
+ echo "Inject failure stub on MDT0 to simulate the case that"
+ echo "foo's name entry will be removed, but the foo's object"
+ echo "and its linkEA are kept in the system."
+
+ #define OBD_FAIL_LFSCK_NO_NAMEENTRY 0x1624
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1624
+ rmdir $DIR/$tdir/d0/foo || error "(3) Fail to rmdir $DIR/$tdir/d0/foo"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ ls -ail $DIR/$tdir/d0/foo > /dev/null 2>&1 &&
+ error "(4) 'ls' should fail"
+
+ echo "Trigger namespace LFSCK to repair the missing remote name entry"
+ $START_NAMESPACE -r -A ||
+ error "(5) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 6
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^lost_dirent_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(7) Fail to repair lost dirent: $repaired"
+
+ ls -ail $DIR/$tdir/d0/foo ||
+ error "(8) Fail to 'ls' $DIR/$tdir/d0/foo"
+
+ # The repaired entry must reference the same object (FID unchanged).
+ local foofid2=$($LFS path2fid $DIR/$tdir/d0/foo)
+ [ "$foofid" == "$foofid2" ] ||
+ error "(9) foo's FID changed: $foofid, $foofid2"
+}
+run_test 26b "LFSCK can add the missing remote name entry back to the namespace"
+
+test_27a() {
+ echo "#####"
+ echo "The local parent referenced by the MDT-object linkEA is lost."
+ echo "The namespace LFSCK will re-create the lost parent as orphan."
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0"
+ touch $DIR/$tdir/d0/foo || error "(2) Fail to create foo"
+ ln $DIR/$tdir/d0/foo $DIR/$tdir/d0/dummy ||
+ error "(3) Fail to hard link to $DIR/$tdir/d0/foo"
+
+ echo "Inject failure stub on MDT0 to simulate the case that"
+ echo "foo's name entry will be removed, but the foo's object"
+ echo "and its linkEA are kept in the system. And then remove"
+ echo "another hard link and the parent directory."
+
+ #define OBD_FAIL_LFSCK_NO_NAMEENTRY 0x1624
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1624
+ rm -f $DIR/$tdir/d0/foo ||
+ error "(4) Fail to unlink $DIR/$tdir/d0/foo"
+ rm -f $DIR/$tdir/d0/dummy ||
+ error "(5) Fail to unlink $DIR/$tdir/d0/dummy"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ rm -rf $DIR/$tdir/d0 || error "(5) Fail to unlink the dir d0"
+ ls -ail $DIR/$tdir/d0 > /dev/null 2>&1 && error "(6) 'ls' should fail"
+
+ echo "Trigger namespace LFSCK to repair the lost parent"
+ $START_NAMESPACE -r -A ||
+ error "(6) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 7
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^lost_dirent_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(8) Fail to repair lost dirent: $repaired"
+
+ echo "There should be an orphan under .lustre/lost+found/MDT0000/"
+ [ -d $MOUNT/.lustre/lost+found/MDT0000 ] ||
+ error "(9) $MOUNT/.lustre/lost+found/MDT0000/ should be there"
+
+ ls -ail $MOUNT/.lustre/lost+found/MDT0000/
+
+ cname=$(find $MOUNT/.lustre/lost+found/MDT0000/ -name *-P-*)
+ [ ! -z "$cname" ] ||
+ error "(10) .lustre/lost+found/MDT0000/ should not be empty"
+}
+run_test 27a "LFSCK can recreate the lost local parent directory as orphan"
+
+test_27b() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ echo "#####"
+ echo "The remote parent referenced by the MDT-object linkEA is lost."
+ echo "The namespace LFSCK will re-create the lost parent as orphan."
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 1 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0"
+ $LFS mkdir -i 0 $DIR/$tdir/d0/foo || error "(2) Fail to mkdir foo"
+
+ $LFS path2fid $DIR/$tdir/d0
+
+ echo "Inject failure stub on MDT0 to simulate the case that"
+ echo "foo's name entry will be removed, but the foo's object"
+ echo "and its linkEA are kept in the system. And then remove"
+ echo "the parent directory."
+
+ #define OBD_FAIL_LFSCK_NO_NAMEENTRY 0x1624
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1624
+ rmdir $DIR/$tdir/d0/foo || error "(3) Fail to rmdir $DIR/$tdir/d0/foo"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ rmdir $DIR/$tdir/d0 || error "(4) Fail to unlink the dir d0"
+ ls -ail $DIR/$tdir/d0 > /dev/null 2>&1 && error "(5) 'ls' should fail"
+
+ echo "Trigger namespace LFSCK to repair the missing remote name entry"
+ $START_NAMESPACE -r -A ||
+ error "(6) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 7
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^lost_dirent_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(8) Fail to repair lost dirent: $repaired"
+
+ ls -ail $MOUNT/.lustre/lost+found/
+
+ echo "There should be an orphan under .lustre/lost+found/MDT0001/"
+ [ -d $MOUNT/.lustre/lost+found/MDT0001 ] ||
+ error "(9) $MOUNT/.lustre/lost+found/MDT0001/ should be there"
+
+ ls -ail $MOUNT/.lustre/lost+found/MDT0001/
+
+ cname=$(find $MOUNT/.lustre/lost+found/MDT0001/ -name *-P-*)
+ [ ! -z "$cname" ] ||
+ error "(10) .lustre/lost+found/MDT0001/ should not be empty"
+}
+run_test 27b "LFSCK can recreate the lost remote parent directory as orphan"
+
+# Verify that when an MDT failed to answer name-entry verification RPCs
+# during first-stage scanning, the LFSCK skips orphan handling on that MDT
+# ('partial' status) while other MDTs still complete and repair theirs.
+test_28() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ echo "#####"
+ echo "The target name entry is lost. The LFSCK should insert the"
+ echo "orphan MDT-object under .lustre/lost+found/MDTxxxx. But if"
+ echo "the MDT (on which the orphan MDT-object resides) has ever"
+ echo "failed to respond some name entry verification during the"
+ echo "first stage-scanning, then the LFSCK should skip to handle"
+ echo "orphan MDT-object on this MDT. But other MDTs should not"
+ echo "be affected."
+ echo "#####"
+
+ check_mount_and_prep
+ $LFS mkdir -i 0 $DIR/$tdir/d1
+ $LFS mkdir -i 1 $DIR/$tdir/d1/a1
+ $LFS mkdir -i 1 $DIR/$tdir/d1/a2
+
+ $LFS mkdir -i 1 $DIR/$tdir/d2
+ $LFS mkdir -i 0 $DIR/$tdir/d2/a1
+ $LFS mkdir -i 0 $DIR/$tdir/d2/a2
+
+ echo "Inject failure stub on MDT0 to simulate the case that"
+ echo "d1/a1's name entry will be removed, but the d1/a1's object"
+ echo "and its linkEA are kept in the system. And the case that"
+ echo "d2/a2's name entry will be removed, but the d2/a2's object"
+ echo "and its linkEA are kept in the system."
+
+ #define OBD_FAIL_LFSCK_NO_NAMEENTRY 0x1624
+ do_facet mds1 $LCTL set_param fail_loc=0x1624
+ do_facet mds2 $LCTL set_param fail_loc=0x1624
+ rmdir $DIR/$tdir/d1/a1 || error "(1) Fail to rmdir $DIR/$tdir/d1/a1"
+ rmdir $DIR/$tdir/d2/a2 || error "(2) Fail to rmdir $DIR/$tdir/d2/a2"
+ do_facet mds1 $LCTL set_param fail_loc=0
+ do_facet mds2 $LCTL set_param fail_loc=0
+
+ cancel_lru_locks mdc
+ cancel_lru_locks osc
+
+ echo "Inject failure, to simulate the MDT0 fail to handle"
+ echo "MDT1 LFSCK request during the first-stage scanning."
+ #define OBD_FAIL_LFSCK_BAD_NETWORK 0x161c
+ do_facet mds2 $LCTL set_param fail_loc=0x161c fail_val=0
+
+ echo "Trigger namespace LFSCK on all devices to find out orphan object"
+ $START_NAMESPACE -r -A ||
+ error "(3) Fail to start LFSCK for namespace"
+
+ # mds1 must end 'partial': its verification RPCs failed, so its
+ # orphan (d1/a1) is skipped on this run.
+ wait_update_facet mds1 "$LCTL get_param -n \
+ mdd.$(facet_svc mds1).lfsck_namespace |
+ awk '/^status/ { print \\\$2 }'" "partial" 32 || {
+ error "(4) mds1 is not the expected 'partial'"
+ }
+
+ wait_update_facet mds2 "$LCTL get_param -n \
+ mdd.$(facet_svc mds2).lfsck_namespace |
+ awk '/^status/ { print \\\$2 }'" "completed" 32 || {
+ error "(5) mds2 is not the expected 'completed'"
+ }
+
+ do_facet mds2 $LCTL set_param fail_loc=0 fail_val=0
+
+ local repaired=$(do_facet mds1 $LCTL get_param -n \
+ mdd.$(facet_svc mds1).lfsck_namespace |
+ awk '/^lost_dirent_repaired/ { print $2 }')
+ [ $repaired -eq 0 ] ||
+ error "(6) Expect 0 fixed on mds1, but got: $repaired"
+
+ repaired=$(do_facet mds2 $LCTL get_param -n \
+ mdd.$(facet_svc mds2).lfsck_namespace |
+ awk '/^lost_dirent_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(7) Expect 1 fixed on mds2, but got: $repaired"
+
+ echo "Trigger namespace LFSCK on all devices again to cleanup"
+ $START_NAMESPACE -r -A ||
+ error "(8) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 9
+
+ # Second run (no injected network failure) repairs the orphan that
+ # mds1 skipped the first time; mds2 has nothing left to fix.
+ local repaired=$(do_facet mds1 $LCTL get_param -n \
+ mdd.$(facet_svc mds1).lfsck_namespace |
+ awk '/^lost_dirent_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(10) Expect 1 fixed on mds1, but got: $repaired"
+
+ repaired=$(do_facet mds2 $LCTL get_param -n \
+ mdd.$(facet_svc mds2).lfsck_namespace |
+ awk '/^lost_dirent_repaired/ { print $2 }')
+ [ $repaired -eq 0 ] ||
+ error "(11) Expect 0 fixed on mds2, but got: $repaired"
+}
+run_test 28 "Skip the failed MDT(s) when handle orphan MDT-objects"
+
+# (Disabled below) Would verify the LFSCK decreases an nlink attribute
+# that is larger than the known name-entry count.
+test_29a() {
+ echo "#####"
+ echo "The object's nlink attribute is larger than the object's known"
+ echo "name entries count. The LFSCK will repair the object's nlink"
+ echo "attribute to match the known name entries count"
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0"
+ touch $DIR/$tdir/d0/foo || error "(2) Fail to create foo"
+
+ echo "Inject failure stub on MDT0 to simulate the case that foo's"
+ echo "nlink attribute is larger than its name entries count."
+
+ #define OBD_FAIL_LFSCK_MORE_NLINK 0x1625
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1625
+ ln $DIR/$tdir/d0/foo $DIR/$tdir/d0/h1 ||
+ error "(3) Fail to hard link to $DIR/$tdir/d0/foo"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ # Drop cached attributes so stat sees the injected on-disk nlink.
+ cancel_lru_locks mdc
+ local count=$(stat --format=%h $DIR/$tdir/d0/foo)
+ [ $count -eq 3 ] || error "(4) Cannot inject error: $count"
+
+ echo "Trigger namespace LFSCK to repair the nlink count"
+ $START_NAMESPACE -r -A ||
+ error "(5) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 6
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^nlinks_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(7) Fail to repair nlink count: $repaired"
+
+ cancel_lru_locks mdc
+ count=$(stat --format=%h $DIR/$tdir/d0/foo)
+ [ $count -eq 2 ] || error "(8) Fail to repair nlink count: $count"
+}
+# Disable 29a, we only allow nlink to be updated if the known linkEA
+# entries is larger than nlink count.
+#
+#run_test 29a "LFSCK can repair bad nlink count (1)"
+
+# Verify the LFSCK increases an nlink attribute that is smaller than the
+# object's known name-entry (linkEA) count.
+test_29b() {
+ echo "#####"
+ echo "The object's nlink attribute is smaller than the object's known"
+ echo "name entries count. The LFSCK will repair the object's nlink"
+ echo "attribute to match the known name entries count"
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $DIR/$tdir/d0 || error "(1) Fail to mkdir d0"
+ touch $DIR/$tdir/d0/foo || error "(2) Fail to create foo"
+
+ echo "Inject failure stub on MDT0 to simulate the case that foo's"
+ echo "nlink attribute is smaller than its name entries count."
+
+ #define OBD_FAIL_LFSCK_LESS_NLINK 0x1626
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x1626
+ ln $DIR/$tdir/d0/foo $DIR/$tdir/d0/h1 ||
+ error "(3) Fail to hard link to $DIR/$tdir/d0/foo"
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+
+ # Drop cached attributes so stat sees the injected on-disk nlink.
+ cancel_lru_locks mdc
+ local count=$(stat --format=%h $DIR/$tdir/d0/foo)
+ [ $count -eq 1 ] || error "(4) Cannot inject error: $count"
+
+ echo "Trigger namespace LFSCK to repair the nlink count"
+ $START_NAMESPACE -r -A ||
+ error "(5) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 6
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^nlinks_repaired/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(7) Fail to repair nlink count: $repaired"
+
+ cancel_lru_locks mdc
+ count=$(stat --format=%h $DIR/$tdir/d0/foo)
+ [ $count -eq 2 ] || error "(8) Fail to repair nlink count: $count"
+}
+run_test 29b "LFSCK can repair bad nlink count (2)"
+
+# Verify linkEA overflow handling: overflowed linkEA blocks migration;
+# after enough links are removed, the namespace LFSCK clears the overflow
+# timestamp so migration works again.
+test_29c()
+{
+ echo "#####"
+ echo "The namespace LFSCK will create many hard links to the target"
+ echo "file as to exceed the linkEA size limitation. Under such case"
+ echo "the linkEA will be marked as overflow that will prevent the"
+ echo "target file to be migrated. Then remove some hard links to"
+ echo "make the left hard links to be held within the linkEA size"
+ echo "limitation. But before the namespace LFSCK adding all the"
+ echo "missed linkEA entries back, the overflow mark (timestamp)"
+ echo "will not be cleared."
+ echo "#####"
+
+ check_mount_and_prep
+
+ mkdir -p $DIR/$tdir/guard || error "(0.1) Fail to mkdir"
+ $LFS mkdir -i $((MDSCOUNT - 1)) $DIR/$tdir/foo ||
+ error "(0.2) Fail to mkdir"
+ touch $DIR/$tdir/guard/f0 || error "(1) Fail to create"
+ local oldfid=$($LFS path2fid $DIR/$tdir/guard/f0)
+
+ # define MAX_LINKEA_SIZE 4096
+ # sizeof(link_ea_header) = 24
+ # sizeof(link_ea_entry) = 18
+ # nlink_min=$(((MAX_LINKEA_SIZE - sizeof(link_ea_header)) /
+ # (sizeof(link_ea_entry) + name_length))
+ # If the average name length is 12 bytes, then 150 hard links
+ # is totally enough to overflow the linkEA
+ echo "Create 150 hard links should succeed although the linkEA overflow"
+ createmany -l $DIR/$tdir/guard/f0 $DIR/$tdir/foo/ttttttttttt 150 ||
+ error "(2) Fail to hard link"
+
+ cancel_lru_locks mdc
+ if [ $MDSCOUNT -ge 2 ]; then
+ # 'migrate' itself succeeds, but the overflowed file must be
+ # left behind — verified below by its unchanged FID.
+ $LFS migrate -m 1 $DIR/$tdir/guard 2>/dev/null ||
+ error "(3.1) Migrate failure"
+
+ echo "The object with linkEA overflow should NOT be migrated"
+ local newfid=$($LFS path2fid $DIR/$tdir/guard/f0)
+ [ "$newfid" == "$oldfid" ] ||
+ error "(3.2) Migrate should fail: $newfid != $oldfid"
+ fi
+
+ # Remove 100 hard links, then the linkEA should have space
+ # to hold the missed linkEA entries.
+ echo "Remove 100 hard links to save space for the missed linkEA entries"
+ unlinkmany $DIR/$tdir/foo/ttttttttttt 100 || error "(4) Fail to unlink"
+
+ if [ $MDSCOUNT -ge 2 ]; then
+ $LFS migrate -m 1 $DIR/$tdir/guard 2>/dev/null ||
+ error "(5.1) Migrate failure"
+
+ # The overflow timestamp is still there, so migration will fail.
+ local newfid=$($LFS path2fid $DIR/$tdir/guard/f0)
+ [ "$newfid" == "$oldfid" ] ||
+ error "(5.2) Migrate should fail: $newfid != $oldfid"
+ fi
+
+ # sleep 3 seconds to guarantee that the overflow is recognized
+ sleep 3
+
+ echo "Trigger namespace LFSCK to clear the overflow timestamp"
+ $START_NAMESPACE -r -A ||
+ error "(6) Fail to start LFSCK for namespace"
+
+ wait_all_targets_blocked namespace completed 7
+
+ local repaired=$($SHOW_NAMESPACE |
+ awk '/^linkea_overflow_cleared/ { print $2 }')
+ [ $repaired -eq 1 ] ||
+ error "(8) Fail to clear linkea overflow: $repaired"
+
+ repaired=$($SHOW_NAMESPACE |
+ awk '/^nlinks_repaired/ { print $2 }')
+ [ $repaired -eq 0 ] ||
+ error "(9) Unexpected nlink repaired: $repaired"
+
+ if [ $MDSCOUNT -ge 2 ]; then
+ $LFS migrate -m 1 $DIR/$tdir/guard 2>/dev/null ||
+ error "(10.1) Migrate failure"
+
+ # Migration should succeed after clear the overflow timestamp.
+ local newfid=$($LFS path2fid $DIR/$tdir/guard/f0)
+ [ "$newfid" != "$oldfid" ] ||
+ error "(10.2) Migrate should succeed"
+
+ ls -l $DIR/$tdir/foo > /dev/null ||
+ error "(11) 'ls' failed after migration"
+ fi
+
+ rm -f $DIR/$tdir/guard/f0 || error "(12) Fail to unlink f0"
+ rm -rf $DIR/$tdir/foo || error "(13) Fail to rmdir foo"
+}
+run_test 29c "verify linkEA size limitation"
+
+test_30() {
+ [ $(facet_fstype $SINGLEMDS) != ldiskfs ] &&
+ skip "ldiskfs only test" && return
+
+ echo "#####"
+ echo "The namespace LFSCK will move the orphans from backend"
+ echo "/lost+found directory to normal client visible namespace"
+ echo "or to global visible ./lustre/lost+found/MDTxxxx/ directory"
+ echo "#####"
+
+ check_mount_and_prep
+
+ $LFS mkdir -i 0 $DIR/$tdir/foo || error "(1) Fail to mkdir foo"
+ touch $DIR/$tdir/foo/f0 || error "(2) Fail to touch f1"