X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fconf-sanity.sh;h=f8ad8adb3bec677a3b2ef7f201a8c927d7f0cd13;hp=78bbc2f3daa52b4ed48142f923cb1bc37fb23198;hb=b78fb445555916e380b1661546c821df14098596;hpb=14b23d67a71cf2aa0b571553171a0894c73f11e6 diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh index 78bbc2f..f8ad8ad 100755 --- a/lustre/tests/conf-sanity.sh +++ b/lustre/tests/conf-sanity.sh @@ -4,8 +4,8 @@ set -e ONLY=${ONLY:-"$*"} -# bug number for skipped test: LU-7428 -ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 84" +# bug number for skipped test: LU-8972 +ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 101" # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT! is_sles11() # LU-2181 @@ -78,8 +78,8 @@ if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then fi [ $(facet_fstype $SINGLEMDS) = "zfs" ] && -# bug number for skipped test: LU-4444 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 69" +# bug number for skipped test: + ALWAYS_EXCEPT="$ALWAYS_EXCEPT" init_logging @@ -1749,7 +1749,8 @@ t32_test() { ! $mdt2_is_available || poolname_list+=" t32fs-mdt2" for poolname in $poolname_list; do - $r "$ZPOOL list -H $poolname >/dev/null 2>&1 || + $r "modprobe zfs; + $ZPOOL list -H $poolname >/dev/null 2>&1 || $ZPOOL import -f -d $tmp $poolname" done @@ -2274,7 +2275,8 @@ t32_test() { if [[ $fstype == zfs ]]; then local poolname=t32fs-mdt1 - $r "$ZPOOL list -H $poolname >/dev/null 2>&1 || + $r "modprobe zfs; + $ZPOOL list -H $poolname >/dev/null 2>&1 || $ZPOOL import -f -d $tmp $poolname" fi @@ -2879,24 +2881,45 @@ test_41c() { $server_version -lt $(version_code 2.5.11) ]] || { skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+"; return; } + # ensure mds1 ost1 have been created even if running sub-test standalone cleanup + setup + cleanup || error "cleanup failed" + + # using directly mount command instead of start() function to avoid + # any side effect of // with others/externals tools/features + # ("zpool import", ...) + # MDT concurrent start LOAD_MODULES_REMOTE=true load_modules do_facet $SINGLEMDS "lsmod | grep -q libcfs" || error "MDT concurrent start: libcfs module not loaded" + local mds1dev=$(mdsdevname 1) + local mds1mnt=$(facet_mntpt mds1) + local mds1fstype=$(facet_fstype mds1) + local mds1opts=$MDS_MOUNT_OPTS + + if [ $mds1fstype == ldiskfs ] && + ! do_facet mds1 test -b $mds1dev; then + mds1opts=$(csa_add "$mds1opts" -o loop) + fi + if [[ $mds1fstype == zfs ]]; then + import_zpool mds1 || return ${PIPESTATUS[0]} + fi + #define OBD_FAIL_TGT_MOUNT_RACE 0x716 - do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x716" - start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS & + do_facet mds1 "$LCTL set_param fail_loc=0x80000716" + + do_facet mds1 mount -t lustre $mds1dev $mds1mnt $mds1opts & local pid=$! - start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS & - do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x0" - local pid2=$! - wait $pid2 + + do_facet mds1 mount -t lustre $mds1dev $mds1mnt $mds1opts local rc2=$? wait $pid local rc=$? + do_facet mds1 "$LCTL set_param fail_loc=0x0" if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then echo "1st MDT start succeed" echo "2nd MDT start failed with $rc2" @@ -2919,17 +2942,30 @@ test_41c() { do_rpc_nodes $oss_list "lsmod | grep -q libcfs" || error "OST concurrent start: libcfs module not loaded" + local ost1dev=$(ostdevname 1) + local ost1mnt=$(facet_mntpt ost1) + local ost1fstype=$(facet_fstype ost1) + local ost1opts=$OST_MOUNT_OPTS + + if [ $ost1fstype == ldiskfs ] && + ! 
do_facet ost1 test -b $ost1dev; then + ost1opts=$(csa_add "$ost1opts" -o loop) + fi + if [[ $ost1fstype == zfs ]]; then + import_zpool ost1 || return ${PIPESTATUS[0]} + fi + #define OBD_FAIL_TGT_MOUNT_RACE 0x716 - do_facet ost1 "$LCTL set_param fail_loc=0x716" - start ost1 $(ostdevname 1) $OST_MOUNT_OPTS & + do_facet ost1 "$LCTL set_param fail_loc=0x80000716" + + do_facet ost1 mount -t lustre $ost1dev $ost1mnt $ost1opts & pid=$! - start ost1 $(ostdevname 1) $OST_MOUNT_OPTS & - do_facet ost1 "$LCTL set_param fail_loc=0x0" - pid2=$! - wait $pid2 + + do_facet ost1 mount -t lustre $ost1dev $ost1mnt $ost1opts rc2=$? wait $pid rc=$? + do_facet ost1 "$LCTL set_param fail_loc=0x0" if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then echo "1st OST start succeed" echo "2nd OST start failed with $rc2" @@ -3009,7 +3045,7 @@ test_43a() { setup chmod ugo+x $DIR || error "chmod 0 failed" - set_conf_param_and_check mds \ + set_conf_param_and_check mds1 \ "$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash" \ "$FSNAME.mdt.root_squash" \ "0:0" @@ -3017,7 +3053,7 @@ test_43a() { "$LCTL get_param -n llite.${FSNAME}*.root_squash" \ "0:0" || error "check llite root_squash failed!" - set_conf_param_and_check mds \ + set_conf_param_and_check mds1 \ "$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \ "$FSNAME.mdt.nosquash_nids" \ "NONE" @@ -3049,7 +3085,7 @@ test_43a() { # set root squash UID:GID to RUNAS_ID # root should be able to access only files owned by RUNAS_ID # - set_conf_param_and_check mds \ + set_conf_param_and_check mds1 \ "$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash" \ "$FSNAME.mdt.root_squash" \ "$RUNAS_ID:$RUNAS_ID" @@ -3119,7 +3155,7 @@ test_43a() { local NIDLIST=$($LCTL list_nids all | tr '\n' ' ') NIDLIST="2@gni $NIDLIST 192.168.0.[2,10]@tcp" NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ') - set_conf_param_and_check mds \ + set_conf_param_and_check mds1 \ "$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \ "$FSNAME-MDTall.mdt.nosquash_nids" \ "$NIDLIST" @@ -3209,8 +3245,8 @@ test_45() { #17310 df -h $MOUNT & log "sleep 60 sec" sleep 60 -#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f - do_facet client "$LCTL set_param fail_loc=0x50f fail_val=0" + #define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f + do_facet client "$LCTL set_param fail_loc=0x8000050f" log "sleep 10 sec" sleep 10 manual_umount_client --force || error "manual_umount_client failed" @@ -3669,7 +3705,7 @@ test_50i() { # prepare MDT/OST, make OSC inactive for OST1 [ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return - [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 + [ $(facet_fstype mds2) == zfs ] && import_zpool mds2 do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" || error "tunefs MDT2 failed" start_mds || error "Unable to start MDT" @@ -4048,6 +4084,10 @@ test_55() { for i in 1023 2048 do + if ! 
combined_mgs_mds; then + stop_mgs || error "stopping MGS service failed" + format_mgs || error "formatting MGT failed" + fi add mds1 $(mkfs_opts mds1 ${mdsdev}) --reformat $mdsdev \ $mdsvdev || exit 10 add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \ @@ -4071,7 +4111,7 @@ test_55() { } run_test 55 "check lov_objid size" -test_56() { +test_56a() { local server_version=$(lustre_version_code $SINGLEMDS) local mds_journal_size_orig=$MDSJOURNALSIZE local n @@ -4109,7 +4149,104 @@ test_56() { MDSJOURNALSIZE=$mds_journal_size_orig reformat } -run_test 56 "check big OST indexes and out-of-index-order start" +run_test 56a "check big OST indexes and out-of-index-order start" + +cleanup_56b() { + trap 0 + + umount_client $MOUNT -f || error "unmount client failed" + stop mds1 + stop mds2 + stop mds3 + stopall + reformat +} + +test_56b() { + [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return + + trap cleanup_56b EXIT RETURN ERR + stopall + + if ! combined_mgs_mds ; then + format_mgs + start_mgs + fi + + add mds1 $(mkfs_opts mds1 $(mdsdevname 1)) --index=0 --reformat \ + $(mdsdevname 1) $(mdsvdevname 1) + add mds2 $(mkfs_opts mds2 $(mdsdevname 2)) --index=1 --reformat \ + $(mdsdevname 2) $(mdsvdevname 2) + add mds3 $(mkfs_opts mds3 $(mdsdevname 3)) --index=1000 --reformat \ + $(mdsdevname 3) $(mdsvdevname 3) + format_ost 1 + format_ost 2 + + start_mdt 1 || error "MDT 1 (idx 0) start failed" + start_mdt 2 || error "MDT 2 (idx 1) start failed" + start_mdt 3 || error "MDT 3 (idx 1000) start failed" + start_ost || error "Unable to start first ost" + start_ost2 || error "Unable to start second ost" + + do_nodes $(comma_list $(mdts_nodes)) \ + "$LCTL set_param mdt.*.enable_remote_dir=1 \ + mdt.*.enable_remote_dir_gid=-1" + + mount_client $MOUNT || error "Unable to mount client" + + $LFS mkdir -c3 $MOUNT/$tdir || error "failed to make testdir" + + echo "This is test file 1!" > $MOUNT/$tdir/$tfile.1 || + error "failed to make test file 1" + echo "This is test file 2!" > $MOUNT/$tdir/$tfile.2 || + error "failed to make test file 2" + echo "This is test file 1000!" > $MOUNT/$tdir/$tfile.1000 || + error "failed to make test file 1000" + + rm -rf $MOUNT/$tdir || error "failed to remove testdir" + + $LFS mkdir -i1000 $MOUNT/$tdir.1000 || + error "create remote dir at idx 1000 failed" + + output=$($LFS df) + echo "=== START lfs df OUTPUT ===" + echo -e "$output" + echo "==== END lfs df OUTPUT ====" + + mdtcnt=$(echo -e "$output" | grep $FSNAME-MDT | wc -l) + ostcnt=$(echo -e "$output" | grep $FSNAME-OST | wc -l) + + echo "lfs df returned mdt count $mdtcnt and ost count $ostcnt" + [ $mdtcnt -eq 3 ] || error "lfs df returned wrong mdt count" + [ $ostcnt -eq 2 ] || error "lfs df returned wrong ost count" + + echo "This is test file 1!" > $MOUNT/$tdir.1000/$tfile.1 || + error "failed to make test file 1" + echo "This is test file 2!" > $MOUNT/$tdir.1000/$tfile.2 || + error "failed to make test file 2" + echo "This is test file 1000!" > $MOUNT/$tdir.1000/$tfile.1000 || + error "failed to make test file 1000" + rm -rf $MOUNT/$tdir.1000 || error "failed to remove remote_dir" + + output=$($LFS mdts) + echo "=== START lfs mdts OUTPUT ===" + echo -e "$output" + echo "==== END lfs mdts OUTPUT ====" + + echo -e "$output" | grep -v "MDTS:" | awk '{print $1}' | + sed 's/://g' > $TMP/mdts-actual.txt + sort $TMP/mdts-actual.txt -o $TMP/mdts-actual.txt + + echo -e "0\n1\n1000" > $TMP/mdts-expected.txt + + diff $TMP/mdts-expected.txt $TMP/mdts-actual.txt + result=$? 
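+	# a nonzero diff result means the client-visible MDT index list
+	# does not match the expected sparse set of indices 0, 1 and 1000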
+
+	rm $TMP/mdts-expected.txt $TMP/mdts-actual.txt
+
+	[ $result -eq 0 ] || error "target_obd proc file is incorrect!"
+}
+run_test 56b "test target_obd correctness with nonconsecutive MDTs"
 
 test_57a() { # bug 22656
 	do_rpc_nodes $(facet_active_host ost1) load_modules_local
@@ -4144,6 +4281,7 @@ count_osts() {
 }
 
 test_58() { # bug 22658
+	combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
 	setup_noconfig
 	mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
 	createmany -o $DIR/$tdir/$tfile-%d 100
@@ -4164,7 +4302,7 @@ test_58() { # bug 22658
 	unmount_fstype $SINGLEMDS
 	# restart MDS with missing llog files
 	start_mds || error "unable to start MDS"
-	do_facet mds "$LCTL set_param fail_loc=0"
+	do_facet $SINGLEMDS "$LCTL set_param fail_loc=0"
 	reformat
 }
 run_test 58 "missing llog files must not prevent MDT from mounting"
@@ -4247,6 +4385,7 @@ test_61() { # LU-80
 		done
 	fi
 
+	combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
 	setup_noconfig || error "setting up the filesystem failed"
 	client_up || error "starting client failed"
 
@@ -4327,7 +4466,7 @@ test_62() {
 		{ skip "Need MDS version at least 2.2.51"; return 0; }
 
 	echo "disable journal for mds"
-	do_facet mds $TUNE2FS -O ^has_journal $mdsdev || error "tune2fs failed"
+	do_facet mds1 $TUNE2FS -O ^has_journal $mdsdev || error "tune2fs failed"
 	start_mds && error "MDT start should fail"
 	echo "disable journal for ost"
 	do_facet ost1 $TUNE2FS -O ^has_journal $ostdev || error "tune2fs failed"
@@ -4351,8 +4490,16 @@ test_63() {
 	fi
 
 	echo "$inode_slab ldisk inodes per page"
-	[ "$inode_slab" -ge "3" ] ||
-		error "ldisk inode size is too big, $inode_slab objs per page"
+	if [ "$inode_slab" -lt "3" ] ; then
+		# If kmalloc-128 is also 1 per page - this is a debug kernel
+		# and so this is not an error.
+		local kmalloc128=$(do_facet $SINGLEMDS \
+			"awk '/^(kmalloc|size)-128 / { print \\\$5 }' /proc/slabinfo")
+		# 32 128-byte chunks in 4k
+		[ "$kmalloc128" -ne "32" ] ||
+			error "ldisk inode size is too big, $inode_slab objs per page"
+	fi
+	return
 }
 run_test 63 "Verify each page can at least hold 3 ldisk inodes"
 
@@ -4364,7 +4511,7 @@ test_64() {
 	mount_client $MOUNT || error "Unable to mount client"
 	stop_ost2 || error "Unable to stop second ost"
 	echo "$LFS df"
-	$LFS df --lazy || error "lfs df failed"
+	$LFS df --lazy
 	umount_client $MOUNT -f || error "unmount $MOUNT failed"
 	cleanup_nocli || error "cleanup_nocli failed with $?"
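+	# note: "$LFS df --lazy" above is deliberately unchecked, since df
+	# may return nonzero while ost2 is stopped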
	#writeconf to remove all ost2 traces for subsequent tests
@@ -6790,6 +6937,109 @@ test_97() {
 }
 run_test 97 "ldev returns correct output when querying based on role"
 
+test_98()
+{
+	local mountopt
+	local temp=$MDS_MOUNT_OPTS
+
+	setup
+	check_mount || error "mount failed"
+	mountopt="user_xattr"
+	for ((x = 1; x <= 400; x++)); do
+		mountopt="$mountopt,user_xattr"
+	done
+	remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+		error "Buffer overflow check failed"
+	cleanup || error "cleanup failed"
+}
+run_test 98 "Buffer-overflow check while parsing mount_opts"
+
+test_99()
+{
+	[[ $(facet_fstype ost1) != ldiskfs ]] &&
+		{ skip "Only applicable to ldiskfs-based OSTs" && return; }
+	[[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] ||
+		{ skip "Need OST version at least 2.8.57" && return 0; }
+
+	local ost_opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+		--reformat $(ostdevname 1) $(ostvdevname 1)"
+	do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" &&
+		skip "meta_bg already set" && return
+
+	local opts=ost_opts
+	if [[ ${!opts} != *mkfsoptions* ]]; then
+		eval opts=\"${!opts} \
+			--mkfsoptions='\\\"-O ^resize_inode,meta_bg\\\"'\"
+	else
+		local val=${!opts//--mkfsoptions=\\\"/ \
+			--mkfsoptions=\\\"-O ^resize_inode,meta_bg }
+		eval opts='${val}'
+	fi
+
+	echo "params: $opts"
+
+	add ost1 $opts || error "add ost1 failed with new params"
+
+	do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" ||
+		error "meta_bg is not set"
+
+	return 0
+}
+run_test 99 "Adding meta_bg option"
+
+test_100() {
+	reformat
+	start_mds || error "MDS start failed"
+	start_ost || error "unable to start OST"
+	mount_client $MOUNT || error "client start failed"
+	check_mount || error "check_mount failed"
+
+	# Desired output
+	# MGS:
+	#     0@lo
+	# lustre-MDT0000:
+	#     0@lo
+	# lustre-OST0000:
+	#     0@lo
+	do_facet mgs 'lshowmount -v' | awk 'BEGIN {NR == 0; rc=1} /MGS:/ {rc=0}
+		END {exit rc}' || error "lshowmount has no output for MGS"
+
+	do_facet mds1 'lshowmount -v' | awk 'BEGIN {NR == 2; rc=1} /-MDT0000:/
+		{rc=0} END {exit rc}' || error "lshowmount has no output for MDT0"
+
+	do_facet ost1 'lshowmount -v' | awk 'BEGIN {NR == 4; rc=1} /-OST0000:/
+		{rc=0} END {exit rc}' || error "lshowmount has no output for OST0"
+
+	cleanup || error "cleanup failed with $?"
+}
+run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
+
+test_101() {
+	local createmany_oid
+	local dev=$FSNAME-OST0000-osc-MDT0000
+	setup
+
+	createmany -o $DIR1/$tfile-%d 50000 &
+	createmany_oid=$!
+	# MDT->OST reconnection causes MDT<->OST last_id synchronisation
+	# via osp_precreate_cleanup_orphans.
+	for ((i = 0; i < 100; i++)); do
+		for ((k = 0; k < 10; k++)); do
+			do_facet $SINGLEMDS "$LCTL --device $dev deactivate;" \
+					    "$LCTL --device $dev activate"
+		done
+
+		ls -asl $MOUNT | grep '???' &&
+			(kill -9 $createmany_oid &>/dev/null; \
+			 error "File has no object on OST")
+
+		kill -s 0 $createmany_oid || break
+	done
+	wait $createmany_oid
+	cleanup
}
+run_test 101 "Race MDT->OST reconnection with create"
+
 if ! combined_mgs_mds ; then
 	stop mgs
 fi
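
For reference, the concurrent-start changes to test_41c above all follow one fail_loc race pattern: arm a one-shot fail point on the target, launch a first mount in the background so it parks at the race point, run a second mount in the foreground, then require that exactly one of the two succeeded. Below is a minimal standalone sketch of that pattern; the race_two_mounts helper name and its arguments are illustrative only and are not part of conf-sanity.sh:

	#!/bin/bash
	# Sketch only: assumes lctl is in PATH and $dev is an already
	# formatted Lustre target. OBD_FAIL_TGT_MOUNT_RACE is 0x716;
	# OR-ing in 0x80000000 (the one-shot flag) makes the fail point
	# fire for the first mount only and then clear itself.
	race_two_mounts() {
		local dev=$1 mnt=$2

		lctl set_param fail_loc=0x80000716

		mount -t lustre "$dev" "$mnt" &	# 1st mount, delayed at race point
		local pid=$!

		mount -t lustre "$dev" "$mnt"	# 2nd mount races the 1st
		local rc2=$?

		wait $pid			# reap the backgrounded mount
		local rc=$?
		lctl set_param fail_loc=0x0

		# exactly one of the two mounts may win, in either order
		[ $rc -eq 0 ] && [ $rc2 -ne 0 ] && return 0
		[ $rc -ne 0 ] && [ $rc2 -eq 0 ] && return 0
		echo "unexpected result: rc=$rc rc2=$rc2" >&2
		return 1
	}

test_41c applies the same accept-either-order check to both the MDT and the OST race, and resets fail_loc afterwards so that later tests start clean.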