X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Fconf-sanity.sh;h=4bf271cdd2193475d341e7f50b6cd977ba0360e9;hb=aa3300dddd42bc0a1cc6eeeeeebf850c6349a7b6;hp=3b4060170f12c6e5e74608ce7fa99bbf1f91987f;hpb=28497055cdac33293122b73e62c00555a32a5f88;p=fs%2Flustre-release.git

diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh
index 3b40601..4bf271cd 100755
--- a/lustre/tests/conf-sanity.sh
+++ b/lustre/tests/conf-sanity.sh
@@ -4,8 +4,8 @@ set -e
 
 ONLY=${ONLY:-"$*"}
 
-# bug number for skipped test: LU-7428
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 84"
+# bug number for skipped test: LU-8972
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
 
 is_sles11()						# LU-2181
@@ -78,8 +78,8 @@ if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then
 fi
 
 [ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
-# bug number for skipped test:	LU-4444
-	ALWAYS_EXCEPT="$ALWAYS_EXCEPT 69"
+# bug number for skipped test:
+	ALWAYS_EXCEPT="$ALWAYS_EXCEPT"
 
 init_logging
 
@@ -455,17 +455,16 @@ run_test 5c "cleanup after failed mount (bug 2712) (should return errs)"
 
 test_5d() {
 	grep " $MOUNT " /etc/mtab &&
-		error false "unexpected entry in mtab before mount" && return 10
+		error "unexpected entry in mtab before mount"
 
 	start_ost || error "OST start failed"
 	start_mds || error "MDS start failed"
-	stop_ost || error "Unable to stop OST1"
+	stop_ost -f || error "Unable to stop OST1"
 	mount_client $MOUNT || error "mount_client $MOUNT failed"
 	umount_client $MOUNT -f || error "umount_client $MOUNT failed"
 	cleanup_nocli || error "cleanup_nocli failed with $?"
-	grep " $MOUNT " /etc/mtab &&
+	! grep " $MOUNT " /etc/mtab ||
 		error "$MOUNT entry in mtab after unmount"
-	pass
 }
 run_test 5d "mount with ost down"
 
@@ -523,6 +522,17 @@ test_5f() {
 }
 run_test 5f "mds down, cleanup after failed mount (bug 2712)"
 
+test_5g() {
+	modprobe lustre
+	[ $(lustre_version_code client) -lt $(version_code 2.9.53) ] &&
+		{ skip "automount of debugfs missing before 2.9.53" && return 0; }
+	umount /sys/kernel/debug
+	$LCTL get_param -n devices | egrep -v "error" && \
+		error "lctl can't access debugfs data"
+	grep " debugfs " /etc/mtab || error "debugfs failed to remount"
+}
+run_test 5g "handle missing debugfs"
+
 test_6() {
 	setup
 	manual_umount_client
@@ -702,7 +712,7 @@ run_test 19a "start/stop MDS without OSTs"
 
 test_19b() {
 	start_ost || error "Unable to start OST1"
-	stop_ost || error "Unable to stop OST1"
+	stop_ost -f || error "Unable to stop OST1"
 }
 run_test 19b "start/stop OSTs without MDS"
 
@@ -1531,6 +1541,7 @@ t32_test_cleanup() {
 			destroy_zpool $facet $poolname
 		done
 	fi
+	combined_mgs_mds || start_mgs || rc=$?
 	return $rc
 }
 
@@ -1679,7 +1690,6 @@ t32_test() {
 	local mdt2_is_available=false
 	local node=$(facet_active_host $SINGLEMDS)
 	local r="do_node $node"
-	local node2=$(facet_active_host mds2)
 	local tmp=$TMP/t32
 	local img_commit
 	local img_kernel
@@ -1703,6 +1713,7 @@ t32_test() {
 	local stripe_count
 	local dir
 
+	combined_mgs_mds || stop_mgs || error "Unable to stop MGS"
 	trap 'trap - RETURN; t32_test_cleanup' RETURN
 
 	load_modules
@@ -1749,7 +1760,8 @@ t32_test() {
 		! $mdt2_is_available || poolname_list+=" t32fs-mdt2"
 
 		for poolname in $poolname_list; do
-			$r "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+			$r "modprobe zfs;
+				$ZPOOL list -H $poolname >/dev/null 2>&1 ||
 				$ZPOOL import -f -d $tmp $poolname"
 		done
 
@@ -1852,13 +1864,13 @@ t32_test() {
 			mkfsoptions="--mkfsoptions=\\\"-J size=8\\\""
 		fi
 
-		add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
+		add $SINGLEMDS $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
 			$mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || {
 			error_noexit "Mkfs new MDT failed"
 			return 1
 		}
 
-		[[ $(facet_fstype mds1) != zfs ]] || import_zpool fs2mds
+		[[ $(facet_fstype mds1) != zfs ]] || import_zpool mds1
 
 		$r $TUNEFS --dryrun $fs2mdsdev || {
 			error_noexit "tunefs.lustre before mounting the MDT"
@@ -2274,7 +2286,8 @@ t32_test() {
 	if [[ $fstype == zfs ]]; then
 		local poolname=t32fs-mdt1
 
-		$r "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+		$r "modprobe zfs;
+			$ZPOOL list -H $poolname >/dev/null 2>&1 ||
 			$ZPOOL import -f -d $tmp $poolname"
 	fi
 
@@ -2810,7 +2823,8 @@ test_41a() { #bug 14134
 		return
 	fi
 
-	local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+	combined_mgs_mds ||
+		{ skip "needs combined MGT and MDT device" && return 0; }
 
 	start_mdt 1 -o nosvc -n
 	if [ $MDSCOUNT -ge 2 ]; then
@@ -2869,6 +2883,7 @@ run_test 41b "mount mds with --nosvc and --nomgs on first mount"
 
 test_41c() {
 	local server_version=$(lustre_version_code $SINGLEMDS)
+	local oss_list=$(comma_list $(osts_nodes))
 
 	[[ $server_version -ge $(version_code 2.6.52) ]] ||
 	[[ $server_version -ge $(version_code 2.5.26) &&
@@ -2877,19 +2892,45 @@ test_41c() {
 	   $server_version -lt $(version_code 2.5.11) ]] ||
 		{ skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+"; return; }
 
+	# ensure mds1 and ost1 have been created even when a sub-test
+	# is run standalone
 	cleanup
+	setup
+	cleanup || error "cleanup failed"
+
+	# use the mount command directly instead of the start() function, to
+	# avoid any side effect of running in parallel with other/external
+	# tools or features ("zpool import", ...)
+
 	# MDT concurrent start
+
+	LOAD_MODULES_REMOTE=true load_modules
+	do_facet $SINGLEMDS "lsmod | grep -q libcfs" ||
+		error "MDT concurrent start: libcfs module not loaded"
+
+	local mds1dev=$(mdsdevname 1)
+	local mds1mnt=$(facet_mntpt mds1)
+	local mds1fstype=$(facet_fstype mds1)
+	local mds1opts=$MDS_MOUNT_OPTS
+
+	if [ $mds1fstype == ldiskfs ] &&
+	   ! do_facet mds1 test -b $mds1dev; then
+		mds1opts=$(csa_add "$mds1opts" -o loop)
+	fi
+	if [[ $mds1fstype == zfs ]]; then
+		import_zpool mds1 || return ${PIPESTATUS[0]}
+	fi
+
 	#define OBD_FAIL_TGT_MOUNT_RACE 0x716
-	do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x716"
-	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
+	do_facet mds1 "$LCTL set_param fail_loc=0x80000716"
+
+	do_facet mds1 mount -t lustre $mds1dev $mds1mnt $mds1opts &
 	local pid=$!
-	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
-	do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x0"
-	local pid2=$!
-	wait $pid2
+
+	do_facet mds1 mount -t lustre $mds1dev $mds1mnt $mds1opts
 	local rc2=$?
 	wait $pid
 	local rc=$?
+	do_facet mds1 "$LCTL set_param fail_loc=0x0"
 	if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then
 		echo "1st MDT start succeeded"
 		echo "2nd MDT start failed with $rc2"
@@ -2909,17 +2950,33 @@ test_41c() {
 	# OST concurrent start
 
+	do_rpc_nodes $oss_list "lsmod | grep -q libcfs" ||
+		error "OST concurrent start: libcfs module not loaded"
+
+	local ost1dev=$(ostdevname 1)
+	local ost1mnt=$(facet_mntpt ost1)
+	local ost1fstype=$(facet_fstype ost1)
+	local ost1opts=$OST_MOUNT_OPTS
+
+	if [ $ost1fstype == ldiskfs ] &&
+	   ! do_facet ost1 test -b $ost1dev; then
+		ost1opts=$(csa_add "$ost1opts" -o loop)
+	fi
+	if [[ $ost1fstype == zfs ]]; then
+		import_zpool ost1 || return ${PIPESTATUS[0]}
+	fi
+
 	#define OBD_FAIL_TGT_MOUNT_RACE 0x716
-	do_facet ost1 "$LCTL set_param fail_loc=0x716"
-	start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
+	do_facet ost1 "$LCTL set_param fail_loc=0x80000716"
+
+	do_facet ost1 mount -t lustre $ost1dev $ost1mnt $ost1opts &
 	pid=$!
-	start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
-	do_facet ost1 "$LCTL set_param fail_loc=0x0"
-	pid2=$!
-	wait $pid2
+
+	do_facet ost1 mount -t lustre $ost1dev $ost1mnt $ost1opts
 	rc2=$?
 	wait $pid
 	rc=$?
+	do_facet ost1 "$LCTL set_param fail_loc=0x0"
 	if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then
 		echo "1st OST start succeeded"
 		echo "2nd OST start failed with $rc2"
@@ -2999,7 +3056,7 @@ test_43a() {
 	setup
 	chmod ugo+x $DIR || error "chmod 0 failed"
 
-	set_conf_param_and_check mds					\
+	set_conf_param_and_check mds1					\
 		"$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash"	\
 		"$FSNAME.mdt.root_squash"				\
 		"0:0"
@@ -3007,7 +3064,7 @@ test_43a() {
 		"$LCTL get_param -n llite.${FSNAME}*.root_squash"	\
 		"0:0" ||
 		error "check llite root_squash failed!"
-	set_conf_param_and_check mds					\
+	set_conf_param_and_check mds1					\
 		"$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids"	\
 		"$FSNAME.mdt.nosquash_nids"				\
 		"NONE"
@@ -3039,7 +3096,7 @@ test_43a() {
 	# set root squash UID:GID to RUNAS_ID
 	# root should be able to access only files owned by RUNAS_ID
 	#
-	set_conf_param_and_check mds					\
+	set_conf_param_and_check mds1					\
 		"$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash"	\
 		"$FSNAME.mdt.root_squash"				\
 		"$RUNAS_ID:$RUNAS_ID"
@@ -3109,7 +3166,7 @@ test_43a() {
 	local NIDLIST=$($LCTL list_nids all | tr '\n' ' ')
 	NIDLIST="2@gni $NIDLIST 192.168.0.[2,10]@tcp"
 	NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
-	set_conf_param_and_check mds					\
+	set_conf_param_and_check mds1					\
 		"$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids"	\
 		"$FSNAME-MDTall.mdt.nosquash_nids"			\
 		"$NIDLIST"
@@ -3199,8 +3256,8 @@ test_45() { #17310
 	df -h $MOUNT &
 	log "sleep 60 sec"
 	sleep 60
-	#define OBD_FAIL_PTLRPC_LONG_UNLINK	0x50f
-	do_facet client "$LCTL set_param fail_loc=0x50f"
+	#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK	0x50f
+	do_facet client "$LCTL set_param fail_loc=0x8000050f"
 	log "sleep 10 sec"
 	sleep 10
 	manual_umount_client --force || error "manual_umount_client failed"
@@ -3659,7 +3716,7 @@ test_50i() {
 	# prepare MDT/OST, make OSC inactive for OST1
 	[ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return
 
-	[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
+	[ $(facet_fstype mds2) == zfs ] && import_zpool mds2
 	do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" ||
 		error "tunefs MDT2 failed"
 	start_mds  || error "Unable to start MDT"
@@ -4038,6 +4095,10 @@ test_55() {
 
 	for i in 1023 2048
 	do
+		if ! combined_mgs_mds; then
+			stop_mgs || error "stopping MGS service failed"
+			format_mgs || error "formatting MGT failed"
+		fi
 		add mds1 $(mkfs_opts mds1 ${mdsdev}) --reformat $mdsdev \
 			$mdsvdev || exit 10
 		add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \
@@ -4061,7 +4122,7 @@ test_55() {
 }
 run_test 55 "check lov_objid size"
 
-test_56() {
+test_56a() {
 	local server_version=$(lustre_version_code $SINGLEMDS)
 	local mds_journal_size_orig=$MDSJOURNALSIZE
 	local n
@@ -4069,7 +4130,7 @@ test_56() {
 
 	MDSJOURNALSIZE=16
 
 	for num in $(seq 1 $MDSCOUNT); do
-		reformat_mdt $num
+		format_mdt $num
 	done
 	add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=10000 --reformat \
 		$(ostdevname 1) $(ostvdevname 1)
@@ -4099,7 +4160,104 @@ test_56() {
 	MDSJOURNALSIZE=$mds_journal_size_orig
 	reformat
 }
-run_test 56 "check big OST indexes and out-of-index-order start"
+run_test 56a "check big OST indexes and out-of-index-order start"
+
+cleanup_56b() {
+	trap 0
+
+	umount_client $MOUNT -f || error "unmount client failed"
+	stop mds1
+	stop mds2
+	stop mds3
+	stopall
+	reformat
+}
+
+test_56b() {
+	[ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return
+
+	trap cleanup_56b EXIT RETURN ERR
+	stopall
+
+	if ! combined_mgs_mds ; then
+		format_mgs
+		start_mgs
+	fi
+
+	add mds1 $(mkfs_opts mds1 $(mdsdevname 1)) --index=0 --reformat \
+		$(mdsdevname 1) $(mdsvdevname 1)
+	add mds2 $(mkfs_opts mds2 $(mdsdevname 2)) --index=1 --reformat \
+		$(mdsdevname 2) $(mdsvdevname 2)
+	add mds3 $(mkfs_opts mds3 $(mdsdevname 3)) --index=1000 --reformat \
+		$(mdsdevname 3) $(mdsvdevname 3)
+	format_ost 1
+	format_ost 2
+
+	start_mdt 1 || error "MDT 1 (idx 0) start failed"
+	start_mdt 2 || error "MDT 2 (idx 1) start failed"
+	start_mdt 3 || error "MDT 3 (idx 1000) start failed"
+	start_ost || error "Unable to start first ost"
+	start_ost2 || error "Unable to start second ost"
+
+	do_nodes $(comma_list $(mdts_nodes)) \
+		"$LCTL set_param mdt.*.enable_remote_dir=1 \
+		mdt.*.enable_remote_dir_gid=-1"
+
+	mount_client $MOUNT || error "Unable to mount client"
+
+	$LFS mkdir -c3 $MOUNT/$tdir || error "failed to make testdir"
+
+	echo "This is test file 1!" > $MOUNT/$tdir/$tfile.1 ||
+		error "failed to make test file 1"
+	echo "This is test file 2!" > $MOUNT/$tdir/$tfile.2 ||
+		error "failed to make test file 2"
+	echo "This is test file 1000!" > $MOUNT/$tdir/$tfile.1000 ||
+		error "failed to make test file 1000"
+
+	rm -rf $MOUNT/$tdir || error "failed to remove testdir"
+
+	$LFS mkdir -i1000 $MOUNT/$tdir.1000 ||
+		error "create remote dir at idx 1000 failed"
+
+	output=$($LFS df)
+	echo "=== START lfs df OUTPUT ==="
+	echo -e "$output"
+	echo "==== END lfs df OUTPUT ===="
+
+	mdtcnt=$(echo -e "$output" | grep $FSNAME-MDT | wc -l)
+	ostcnt=$(echo -e "$output" | grep $FSNAME-OST | wc -l)
+
+	echo "lfs df returned mdt count $mdtcnt and ost count $ostcnt"
+	[ $mdtcnt -eq 3 ] || error "lfs df returned wrong mdt count"
+	[ $ostcnt -eq 2 ] || error "lfs df returned wrong ost count"
+
+	echo "This is test file 1!" > $MOUNT/$tdir.1000/$tfile.1 ||
+		error "failed to make test file 1"
+	echo "This is test file 2!" > $MOUNT/$tdir.1000/$tfile.2 ||
+		error "failed to make test file 2"
+	echo "This is test file 1000!" > $MOUNT/$tdir.1000/$tfile.1000 ||
+		error "failed to make test file 1000"
+
+	rm -rf $MOUNT/$tdir.1000 || error "failed to remove remote_dir"
+
+	output=$($LFS mdts)
+	echo "=== START lfs mdts OUTPUT ==="
+	echo -e "$output"
+	echo "==== END lfs mdts OUTPUT ===="
+
+	echo -e "$output" | grep -v "MDTS:" | awk '{print $1}' |
+		sed 's/://g' > $TMP/mdts-actual.txt
+	sort $TMP/mdts-actual.txt -o $TMP/mdts-actual.txt
+
+	echo -e "0\n1\n1000" > $TMP/mdts-expected.txt
+
+	diff $TMP/mdts-expected.txt $TMP/mdts-actual.txt
+	result=$?
+
+	rm $TMP/mdts-expected.txt $TMP/mdts-actual.txt
+
+	[ $result -eq 0 ] || error "target_obd proc file is incorrect!"
+}
+run_test 56b "test target_obd correctness with nonconsecutive MDTs"
 
 test_57a() { # bug 22656
 	do_rpc_nodes $(facet_active_host ost1) load_modules_local
@@ -4134,6 +4292,7 @@ count_osts() {
 }
 
 test_58() { # bug 22658
+	combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
 	setup_noconfig
 	mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
 	createmany -o $DIR/$tdir/$tfile-%d 100
@@ -4154,7 +4313,7 @@ test_58() { # bug 22658
 	unmount_fstype $SINGLEMDS
 	# restart MDS with missing llog files
 	start_mds || error "unable to start MDS"
-	do_facet mds "$LCTL set_param fail_loc=0"
+	do_facet $SINGLEMDS "$LCTL set_param fail_loc=0"
 	reformat
 }
 run_test 58 "missing llog files must not prevent MDT from mounting"
@@ -4237,6 +4396,7 @@ test_61() { # LU-80
 		done
 	fi
 
+	combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
 	setup_noconfig || error "setting up the filesystem failed"
 	client_up || error "starting client failed"
@@ -4317,7 +4477,7 @@ test_62() {
 		{ skip "Need MDS version at least 2.2.51"; return 0; }
 
 	echo "disable journal for mds"
-	do_facet mds $TUNE2FS -O ^has_journal $mdsdev || error "tune2fs failed"
+	do_facet mds1 $TUNE2FS -O ^has_journal $mdsdev || error "tune2fs failed"
 	start_mds && error "MDT start should fail"
 	echo "disable journal for ost"
 	do_facet ost1 $TUNE2FS -O ^has_journal $ostdev || error "tune2fs failed"
@@ -4333,19 +4493,27 @@ test_63() {
 		return
 	fi
 
-	local inode_slab=$(do_facet $SINGLEMDS \
-		"awk '/ldiskfs_inode_cache/ { print \\\$5 }' /proc/slabinfo")
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) load_module ldiskfs
+	local inode_slab=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+			   awk '/ldiskfs_inode_cache/ { print $5 / $6 }')
 	if [ -z "$inode_slab" ]; then
 		skip "ldiskfs module has not been loaded"
 		return
 	fi
 
-	echo "$inode_slab ldisk inodes per page"
-	[ "$inode_slab" -ge "3" ] ||
-		error "ldisk inode size is too big, $inode_slab objs per page"
-	return
+	echo "$inode_slab ldiskfs inodes per page"
+	[ "${inode_slab%.*}" -ge "3" ] && return 0
+
+	# If kmalloc-128 is also 1 per page - this is a debug kernel
+	# and so this is not an error.
+	local kmalloc128=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+			   awk '/^(kmalloc|size)-128 / { print $5 / $6 }')
+	# 32 128-byte chunks in 4k
+	[ "${kmalloc128%.*}" -lt "32" ] ||
+		error "ldiskfs inode too big, only $inode_slab objs/page, " \
+		      "kmalloc128 = $kmalloc128 objs/page"
 }
-run_test 63 "Verify each page can at least hold 3 ldisk inodes"
+run_test 63 "Verify each page can at least hold 3 ldiskfs inodes"
 
 test_64() {
 	start_mds || error "unable to start MDS"
@@ -4354,7 +4522,7 @@ test_64() {
 	mount_client $MOUNT || error "Unable to mount client"
 	stop_ost2 || error "Unable to stop second ost"
 	echo "$LFS df"
-	$LFS df --lazy || error "lfs df failed"
+	$LFS df --lazy
 	umount_client $MOUNT -f || error "unmount $MOUNT failed"
 	cleanup_nocli || error "cleanup_nocli failed with $?"
 	#writeconf to remove all ost2 traces for subsequent tests
@@ -4966,23 +5134,30 @@ test_72() { #LU-2634
 	local ostdev=$(ostdevname 1)
 	local cmd="$E2FSCK -fnvd $mdsdev"
 	local fn=3
+	local add_options
 
 	[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
 		skip "ldiskfs only test" && return
 
+	if combined_mgs_mds; then
+		add_options='--reformat'
+	else
+		add_options='--reformat --replace'
+	fi
+
 	#tune MDT with "-O extents"
 
 	for num in $(seq $MDSCOUNT); do
 		add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
-			--reformat $(mdsdevname $num) $(mdsvdevname $num) ||
+			$add_options $(mdsdevname $num) $(mdsvdevname $num) ||
 			error "add mds $num failed"
 		do_facet mds${num} "$TUNE2FS -O extents $(mdsdevname $num)" ||
 			error "$TUNE2FS failed on mds${num}"
 	done
 
-	add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev ||
+	add ost1 $(mkfs_opts ost1 $ostdev) $add_options $ostdev ||
 		error "add $ostdev failed"
-	start_mgsmds || error "start mds failed"
+	start_mds || error "start mds failed"
 	start_ost || error "start ost failed"
 	mount_client $MOUNT || error "mount client failed"
@@ -5117,12 +5292,32 @@ test_76b() { # LU-4783
 }
 run_test 76b "verify params log setup correctly"
 
+test_76c() {
+	[[ $(lustre_version_code mgs) -ge $(version_code 2.8.54) ]] ||
+		{ skip "Need MGS version at least 2.8.54" && return 0; }
+	setupall
+	local MASK_PARAM="mdd.*.changelog_mask"
+	echo "Change changelog_mask"
+	do_facet mgs $LCTL set_param -P $MASK_PARAM=-CLOSE ||
+		error "Can't change changelog_mask"
+	wait_update $(facet_host mds) "$LCTL get_param -n $MASK_PARAM |
+		grep 'CLOSE'" ""
+
+	echo "Check the value is stored after mds remount"
+	stop_mds || error "Failed to stop MDS"
+	start_mds || error "Failed to start MDS"
+	local CHANGELOG_MASK=$(do_facet mgs $LCTL get_param -n $MASK_PARAM)
+	echo $CHANGELOG_MASK | grep CLOSE > /dev/null &&
+		error "changelog_mask is not changed"
+
+	stopall
+}
+run_test 76c "verify changelog_mask is applied with set_param -P"
+
 test_77() { # LU-3445
 	local server_version=$(lustre_version_code $SINGLEMDS)
-
-	[[ $server_version -ge $(version_code 2.2.60) ]] &&
-	[[ $server_version -le $(version_code 2.4.0) ]] &&
-		skip "Need MDS version < 2.2.60 or > 2.4.0" && return
+	[[ $server_version -ge $(version_code 2.8.55) ]] ||
+		{ skip "Need MDS version 2.8.55+"; return; }
 
 	if [[ -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then
 		is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) &&
@@ -5143,7 +5338,7 @@ test_77() { # LU-3445
 		error "start fs2mds failed"
 
 	mgsnid=$(do_facet fs2mds $LCTL list_nids | xargs | tr ' ' ,)
-	[[ $mgsnid = *,* ]] || mgsnid+=",$mgsnid"
+	mgsnid="$mgsnid,$mgsnid:$mgsnid"
 
 	add fs2ost $(mkfs_opts ost1 $fs2ostdev) --mgsnode=$mgsnid \
 		--failnode=$failnid --fsname=$fsname \
@@ -5164,8 +5359,12 @@ test_78() {
 		skip "only applicable to ldiskfs-based MDTs and OSTs" && return
 
 	# reformat the Lustre filesystem with a smaller size
+	local saved_MDSCOUNT=$MDSCOUNT
 	local saved_MDSSIZE=$MDSSIZE
+	local saved_OSTCOUNT=$OSTCOUNT
 	local saved_OSTSIZE=$OSTSIZE
+	MDSCOUNT=1
+	OSTCOUNT=1
 	MDSSIZE=$((MDSSIZE - 20000))
 	OSTSIZE=$((OSTSIZE - 20000))
 	reformat || error "(1) reformat Lustre filesystem failed"
@@ -5180,11 +5379,26 @@ test_78() {
 	local i
 	local file
 	local num_files=100
+
 	mkdir $MOUNT/$tdir || error "(3) mkdir $MOUNT/$tdir failed"
+	$LFS df; $LFS df -i
 	for i in $(seq $num_files); do
 		file=$MOUNT/$tdir/$tfile-$i
-		dd if=/dev/urandom of=$file count=1 bs=1M ||
+		dd if=/dev/urandom of=$file count=1 bs=1M || {
+			$LCTL get_param osc.*.cur*grant*
+			$LFS df; $LFS df -i;
+			# stop creating files if there is no more space
+			if [ ! -e $file ]; then
+				num_files=$((i - 1))
+				break
+			fi
+
+			$LFS getstripe -v $file
+			local ost_idx=$($LFS getstripe -i $file)
+			do_facet ost$((ost_idx + 1)) \
+				$LCTL get_param obdfilter.*.*grant*
 			error "(4) create $file failed"
+		}
 	done
 
 	# unmount the Lustre filesystem
@@ -5296,6 +5510,9 @@ test_78() {
 
 	# unmount and reformat the Lustre filesystem
 	cleanup || error "(12) cleanup Lustre filesystem failed"
 	combined_mgs_mds || stop_mgs || error "(13) stop mgs failed"
+
+	MDSCOUNT=$saved_MDSCOUNT
+	OSTCOUNT=$saved_OSTCOUNT
 	reformat || error "(14) reformat Lustre filesystem failed"
 }
 run_test 78 "run resize2fs on MDT and OST filesystems"
@@ -5443,19 +5660,6 @@ test_81() { # LU-4665
 }
 run_test 81 "sparse OST indexing"
 
-# Wait OSTs to be active on both client and MDT side.
-wait_osts_up() {
-	local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd |
-		awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
-	wait_update $HOSTNAME "eval $cmd" $OSTCOUNT ||
-		error "wait_update OSTs up on client failed"
-
-	cmd="$LCTL get_param -n lod.$FSNAME-MDT*-*.target_obd | sort -u |
-		awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
-	wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT ||
-		error "wait_update OSTs up on MDT failed"
-}
-
 # Here we exercise the stripe placement functionality on a file system that
 # has formatted the OST with a random index. With the file system the following
 # functionality is tested:
@@ -5640,7 +5844,8 @@ test_82b() { # LU-4665
 	wait_update $HOSTNAME "$LCTL get_param -n lov.$FSNAME-*.pools.$TESTNAME|
 			       sort -u | tr '\n' ' ' " "$ost_targets_uuid" ||
 		error "wait_update $ost_pool failed"
-	pool_list $ost_pool || error "list OST pool $ost_pool failed"
+	[[ -z $(list_pool $ost_pool) ]] &&
+		error "list OST pool $ost_pool failed"
 
 	# If [--pool|-p <pool_name>] is set with [--ost-list|-o <ost_indices>],
 	# then the OSTs must be the members of the pool.
@@ -5795,9 +6000,21 @@ test_85() {
 }
 run_test 85 "osd_ost init: fail ea_fid_set"
 
+cleanup_86() {
+	trap 0
+
+	# ost1 has already registered with the MGS before the reformat.
+	# So after reformatting it with the "-G" option, it cannot be
+	# mounted against the MGS. Clean up the system for subsequent tests.
+	reformat_and_config
+}
+
 test_86() {
+	local server_version=$(lustre_version_code $SINGLEMDS)
 	[ "$(facet_fstype ost1)" = "zfs" ] &&
 		skip "LU-6442: no such mkfs params for ZFS OSTs" && return
+	[[ $server_version -ge $(version_code 2.7.56) ]] ||
+		{ skip "Need server version newer than 2.7.55"; return 0; }
 
 	local OST_OPTS="$(mkfs_opts ost1 $(ostdevname 1)) \
 		--reformat $(ostdevname 1) $(ostvdevname 1)"
@@ -5818,6 +6035,9 @@ test_86() {
 	echo "params: $opts"
 
+	trap cleanup_86 EXIT ERR
+
+	stopall
 	add ost1 $opts || error "add ost1 failed with new params"
 
 	local FOUNDSIZE=$(do_facet ost1 "$DEBUGFS -c -R stats $(ostdevname 1)" |
@@ -5825,7 +6045,8 @@ test_86() {
 	[[ $FOUNDSIZE == $NEWSIZE ]] ||
 		error "Flex block group size: $FOUNDSIZE, expected: $NEWSIZE"
 
-	return 0
+
+	cleanup_86
 }
 run_test 86 "Replacing mkfs.lustre -G option"
@@ -6297,11 +6518,22 @@ run_test 91 "evict-by-nid support"
 
 generate_ldev_conf() {
 	# generate an ldev.conf file
 	local ldevconfpath=$1
+	local fstype=
+	local fsldevformat=""
 	touch $ldevconfpath
-	printf "%s\t-\t%s-MGS0000\t%s\n" \
+
+	fstype=$(facet_fstype mgs)
+	if [ "$fstype" == "zfs" ]; then
+		fsldevformat="$fstype:"
+	else
+		fsldevformat=""
+	fi
+
+	printf "%s\t-\t%s-MGS0000\t%s%s\n" \
 		$mgs_HOST \
 		$FSNAME \
-		$(mgsdevname) >> $ldevconfpath
+		$fsldevformat \
+		$(mgsdevname) > $ldevconfpath
 
 	local mdsfo_host=$mdsfailover_HOST;
 	if [ -z "$mdsfo_host" ]; then
@@ -6309,11 +6541,19 @@ generate_ldev_conf() {
 	fi
 
 	for num in $(seq $MDSCOUNT); do
-		printf "%s\t%s\t%s-MDT%04d\t%s\n" \
+		fstype=$(facet_fstype mds$num)
+		if [ "$fstype" == "zfs" ]; then
+			fsldevformat="$fstype:"
+		else
+			fsldevformat=""
+		fi
+
+		printf "%s\t%s\t%s-MDT%04d\t%s%s\n" \
 			$mds_HOST \
 			$mdsfo_host \
 			$FSNAME \
 			$num \
+			$fsldevformat \
 			$(mdsdevname $num) >> $ldevconfpath
 	done
 
@@ -6323,13 +6563,26 @@ generate_ldev_conf() {
 	fi
 
 	for num in $(seq $OSTCOUNT); do
-		printf "%s\t%s\t%s-OST%04d\t%s\n" \
+		fstype=$(facet_fstype ost$num)
+		if [ "$fstype" == "zfs" ]; then
+			fsldevformat="$fstype:"
+		else
+			fsldevformat=""
+		fi
+
+		printf "%s\t%s\t%s-OST%04d\t%s%s\n" \
 			$ost_HOST \
 			$ostfo_host \
 			$FSNAME \
 			$num \
+			$fsldevformat \
 			$(ostdevname $num) >> $ldevconfpath
 	done
+
+	echo "----- $ldevconfpath -----"
+	cat $ldevconfpath
+	echo "--- END $ldevconfpath ---"
+
 }
 
 generate_nids() {
@@ -6337,11 +6590,37 @@ generate_nids() {
 	# looks like we only have the MGS nid available to us
 	# so just echo that to a file
 	local nidspath=$1
-	touch $nidspath
-	echo -e "${mgs_HOST}\t${MGSNID}" >> $nidspath
+	echo -e "${mgs_HOST}\t${MGSNID}" > $nidspath
+
+	echo "----- $nidspath -----"
+	cat $nidspath
+	echo "--- END $nidspath ---"
+}
+
+compare_ldev_output() {
+	ldev_output=$1
+	expected_output=$2
+
+	sort $expected_output -o $expected_output
+	sort $ldev_output -o $ldev_output
+
+	echo "-- START OF LDEV OUTPUT --"
+	cat $ldev_output
+	echo "--- END OF LDEV OUTPUT ---"
+
+	echo "-- START OF EXPECTED OUTPUT --"
+	cat $expected_output
+	echo "--- END OF EXPECTED OUTPUT ---"
+
+	diff $expected_output $ldev_output
+	return $?
+}
 
 test_92() {
+	if [ -z "$LDEV" ]; then
+		error "ldev is missing!"
+	fi
+
 	local LDEVCONFPATH=$TMP/ldev.conf
 	local NIDSPATH=$TMP/nids
 
@@ -6350,32 +6629,11 @@ test_92() {
 	generate_ldev_conf $LDEVCONFPATH
 	generate_nids $NIDSPATH
 
-	echo "----- ldev.conf -----"
-	cat $LDEVCONFPATH
-	echo "--- END ldev.conf ---"
-
-	echo "----- /etc/nids -----"
-	cat $NIDSPATH
-	echo "--- END /etc/nids ---"
-
-	# ldev can be in our build tree and if we aren't in a
-	# build tree, use 'which' to try and find it
-	local LDEV=$LUSTRE/scripts/ldev
-	[ ! -f "$LDEV" ] && local LDEV=$(which ldev 2> /dev/null)
-
-	echo "ldev path is $LDEV"
-
-	if [ ! -f "$LDEV" ]; then
-		rm $LDEVCONFPATH $NIDSPATH
-		error "failed to find ldev!"
-	fi
-
 	# echo the mgs nid and compare it to environment variable MGSNID
 	# also, ldev.conf and nids is a server side thing, use the OSS
 	# hostname
 	local output
-	output=$(perl $LDEV -c $LDEVCONFPATH -H \
-		$ost_HOST -n $NIDSPATH echo %m)
+	output=$($LDEV -c $LDEVCONFPATH -H $ost_HOST -n $NIDSPATH echo %m)
 
 	echo "-- START OF LDEV OUTPUT --"
 	echo -e "$output"
@@ -6399,6 +6657,557 @@ test_92() {
 }
 run_test 92 "ldev returns MGS NID correctly in command substitution"
 
+test_93() {
+	[ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return
+
+	reformat
+	#start mgs or mgs/mdt0
+	if ! combined_mgs_mds ; then
+		start_mgs
+		start_mdt 1
+	else
+		start_mdt 1
+	fi
+
+	start_ost || error "OST0 start fail"
+
+	#define OBD_FAIL_MGS_WRITE_TARGET_DELAY	 0x90e
+	do_facet mgs "$LCTL set_param fail_val=10 fail_loc=0x8000090e"
+	for num in $(seq 2 $MDSCOUNT); do
+		start_mdt $num &
+	done
+
+	mount_client $MOUNT || error "mount client fails"
+	wait_osc_import_state mds ost FULL
+	wait_osc_import_state client ost FULL
+	check_mount || error "check_mount failed"
+
+	cleanup || error "cleanup failed with $?"
+}
+run_test 93 "register multiple MDTs at the same time"
+
+test_94() {
+	if [ -z "$LDEV" ]; then
+		error "ldev is missing!"
+	fi
+
+	local LDEVCONFPATH=$TMP/ldev.conf
+	local NIDSPATH=$TMP/nids
+
+	generate_ldev_conf $LDEVCONFPATH
+	generate_nids $NIDSPATH
+
+	local LDEV_OUTPUT=$TMP/ldev-output.txt
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME > $LDEV_OUTPUT
+
+	# ldev failed, error
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+		error "ldev failed to execute!"
+	fi
+
+	# expected output
+	local EXPECTED_OUTPUT=$TMP/ldev-expected.txt
+
+	printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+	for num in $(seq $MDSCOUNT); do
+		printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+	done
+
+	for num in $(seq $OSTCOUNT); do
+		printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+	done
+
+	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+		error "ldev failed to produce the correct hostlist!"
+	fi
+
+	rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 94 "ldev outputs correct labels for file system name query"
+
+test_95() {
+	if [ -z "$LDEV" ]; then
+		error "ldev is missing!"
+	fi
+
+	local LDEVCONFPATH=$TMP/ldev.conf
+	local NIDSPATH=$TMP/nids
+
+	generate_ldev_conf $LDEVCONFPATH
+	generate_nids $NIDSPATH
+
+	# SUCCESS CASES
+	# file sys filter
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME &>/dev/null
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev label filtering w/ -F failed!"
+	fi
+
+	# local filter
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -l &>/dev/null
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev label filtering w/ -l failed!"
+	fi
+
+	# foreign filter
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -f &>/dev/null
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev label filtering w/ -f failed!"
+	fi
+
+	# all filter
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -a &>/dev/null
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev label filtering w/ -a failed!"
+	fi
+
+	# FAILURE CASES
+	# all & file sys
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -F $FSNAME &>/dev/null
+	if [ $? -eq 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev label filtering w/ -a and -F incorrectly succeeded"
+	fi
+
+	# all & foreign
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -f &>/dev/null
+	if [ $? -eq 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev label filtering w/ -a and -f incorrectly succeeded"
+	fi
+
+	# all & local
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -l &>/dev/null
+	if [ $? -eq 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev label filtering w/ -a and -l incorrectly succeeded"
+	fi
+
+	# foreign & local
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -f -l &>/dev/null
+	if [ $? -eq 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev label filtering w/ -f and -l incorrectly succeeded"
+	fi
+
+	# file sys & local
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -l &>/dev/null
+	if [ $? -eq 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev label filtering w/ -F and -l incorrectly succeeded"
+	fi
+
+	# file sys & foreign
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -f &>/dev/null
+	if [ $? -eq 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev label filtering w/ -F and -f incorrectly succeeded"
+	fi
+
+	rm $LDEVCONFPATH $NIDSPATH
+}
+run_test 95 "ldev should only allow one label filter"
+
+test_96() {
+	if [ -z "$LDEV" ]; then
+		error "ldev is missing!"
+	fi
+
+	local LDEVCONFPATH=$TMP/ldev.conf
+	local NIDSPATH=$TMP/nids
+
+	generate_ldev_conf $LDEVCONFPATH
+	generate_nids $NIDSPATH
+
+	local LDEV_OUTPUT=$TMP/ldev-output.txt
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -H $mgs_HOST \
+		echo %H-%b | \
+		awk '{print $2}' > $LDEV_OUTPUT
+
+	# ldev failed, error
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+		error "ldev failed to execute!"
+	fi
+
+	# expected output
+	local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+	echo "$mgs_HOST-$(facet_fstype mgs)" > $EXPECTED_OUTPUT
+
+	if [ "$mgs_HOST" == "$mds_HOST" ]; then
+		for num in $(seq $MDSCOUNT); do
+			echo "$mds_HOST-$(facet_fstype mds$num)" \
+				>> $EXPECTED_OUTPUT
+		done
+	fi
+
+	if [ "$mgs_HOST" == "$ost_HOST" ]; then
+		for num in $(seq $OSTCOUNT); do
+			echo "$ost_HOST-$(facet_fstype ost$num)" \
+				>> $EXPECTED_OUTPUT
+		done
+	fi
+
+	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+		error "ldev failed to produce the correct output!"
+	fi
+
+	rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 96 "ldev returns hostname and backend fs correctly in command sub"
+
+test_97() {
+	if [ -z "$LDEV" ]; then
+		error "ldev is missing!"
+	fi
+
+	local LDEVCONFPATH=$TMP/ldev.conf
+	local NIDSPATH=$TMP/nids
+
+	generate_ldev_conf $LDEVCONFPATH
+	generate_nids $NIDSPATH
+
+	local LDEV_OUTPUT=$TMP/ldev-output.txt
+	local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+	echo -e "\nMDT role"
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mdt > $LDEV_OUTPUT
+
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+		error "ldev failed to execute for mdt role!"
+	fi
+
+	for num in $(seq $MDSCOUNT); do
+		printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+	done
+
+	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+		error "ldev failed to produce the correct output for mdt role!"
+	fi
+
+	echo -e "\nOST role"
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R ost > $LDEV_OUTPUT
+
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+		error "ldev failed to execute for ost role!"
+	fi
+
+	rm $EXPECTED_OUTPUT
+	for num in $(seq $OSTCOUNT); do
+		printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+	done
+
+	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+		error "ldev failed to produce the correct output for ost role!"
+	fi
+
+	echo -e "\nMGS role"
+	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mgs > $LDEV_OUTPUT
+
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+		error "ldev failed to execute for mgs role!"
+	fi
+
+	printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+		error "ldev failed to produce the correct output for mgs role!"
+	fi
+
+	rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 97 "ldev returns correct output when querying based on role"
+
+test_98()
+{
+	local mountopt
+	local temp=$MDS_MOUNT_OPTS
+
+	setup
+	check_mount || error "mount failed"
+	mountopt="user_xattr"
+	for ((x = 1; x <= 400; x++)); do
+		mountopt="$mountopt,user_xattr"
+	done
+	remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+		error "Buffer overflow check failed"
+	cleanup || error "cleanup failed"
+}
+run_test 98 "Buffer-overflow check while parsing mount_opts"
+
+test_99()
+{
+	[[ $(facet_fstype ost1) != ldiskfs ]] &&
+		{ skip "Only applicable to ldiskfs-based OSTs" && return; }
+	[[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] ||
+		{ skip "Need OST version at least 2.8.57" && return 0; }
+
+	local ost_opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+		--reformat $(ostdevname 1) $(ostvdevname 1)"
+	do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" &&
+		skip "meta_bg already set" && return
+
+	local opts=ost_opts
+	if [[ ${!opts} != *mkfsoptions* ]]; then
+		eval opts=\"${!opts} \
+			--mkfsoptions='\\\"-O ^resize_inode,meta_bg\\\"'\"
+	else
+		local val=${!opts//--mkfsoptions=\\\"/ \
+			--mkfsoptions=\\\"-O ^resize_inode,meta_bg }
+		eval opts='${val}'
+	fi
+
+	echo "params: $opts"
+
+	add ost1 $opts || error "add ost1 failed with new params"
+
+	do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" ||
+		error "meta_bg is not set"
+
+	return 0
+}
+run_test 99 "Adding meta_bg option"
+
+test_100() {
+	reformat
+	start_mds || error "MDS start failed"
+	start_ost || error "unable to start OST"
+	mount_client $MOUNT || error "client start failed"
+	check_mount || error "check_mount failed"
+
+	# Desired output:
+	# MGS:
+	#     0@lo
+	# lustre-MDT0000:
+	#     0@lo
+	# lustre-OST0000:
+	#     0@lo
+	do_facet mgs 'lshowmount -v' | awk 'BEGIN {NR == 0; rc=1} /MGS:/ {rc=0}
+		END {exit rc}' || error "lshowmount has no output for MGS"
+
+	do_facet mds1 'lshowmount -v' | awk 'BEGIN {NR == 2; rc=1} /-MDT0000:/
+		{rc=0} END {exit rc}' || error "lshowmount has no output for MDT0"
+
+	do_facet ost1 'lshowmount -v' | awk 'BEGIN {NR == 4; rc=1} /-OST0000:/
+		{rc=0} END {exit rc}' || error "lshowmount has no output for OST0"
+
+	cleanup || error "cleanup failed with $?"
+}
+run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
+
+test_101() {
+	local createmany_oid
+	local dev=$FSNAME-OST0000-osc-MDT0000
+	setup
+
+	createmany -o $DIR1/$tfile-%d 50000 &
+	createmany_oid=$!
+	# MDT->OST reconnection causes MDT<->OST last_id synchronisation
+	# via osp_precreate_cleanup_orphans.
+	for ((i = 0; i < 100; i++)); do
+		for ((k = 0; k < 10; k++)); do
+			do_facet $SINGLEMDS "$LCTL --device $dev deactivate;" \
+					    "$LCTL --device $dev activate"
+		done
+
+		ls -asl $MOUNT | grep '???' &&
+			(kill -9 $createmany_oid &>/dev/null; \
+			 error "File has no object on OST")
+
+		kill -s 0 $createmany_oid || break
+	done
+	wait $createmany_oid
+	cleanup
+}
+run_test 101 "Race MDT->OST reconnection with create"
+
+test_102() {
+	cleanup || error "cleanup failed with $?"
+
+	local mds1dev=$(mdsdevname 1)
+	local mds1mnt=$(facet_mntpt mds1)
+	local mds1fstype=$(facet_fstype mds1)
+	local mds1opts=$MDS_MOUNT_OPTS
+
+	if [ $mds1fstype == ldiskfs ] &&
+	   ! do_facet mds1 test -b $mds1dev; then
+		mds1opts=$(csa_add "$mds1opts" -o loop)
+	fi
+	if [[ $mds1fstype == zfs ]]; then
+		import_zpool mds1 || return ${PIPESTATUS[0]}
+	fi
+
+	# unload all and only load libcfs to allow fail_loc setting
+	do_facet mds1 lustre_rmmod || error "unable to unload modules"
+	do_facet mds1 modprobe libcfs || error "libcfs not loaded"
+	do_facet mds1 lsmod \| grep libcfs || error "libcfs not loaded"
+
+	#define OBD_FAIL_OBDCLASS_MODULE_LOAD		0x60a
+	do_facet mds1 "$LCTL set_param fail_loc=0x8000060a"
+
+	do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts &&
+		error "mdt start must fail"
+	do_facet mds1 lsmod \| grep obdclass && error "obdclass must not load"
+
+	do_facet mds1 "$LCTL set_param fail_loc=0x0"
+
+	do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts ||
+		error "mdt start must not fail"
+
+	cleanup || error "cleanup failed with $?"
+}
+run_test 102 "obdclass module cleanup upon error"
+
+test_renamefs() {
+	local newname=$1
+
+	echo "rename $FSNAME to $newname"
+
+	if ! combined_mgs_mds; then
+		local facet=$(mgsdevname)
+
+		do_facet mgs \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet" ||
+			error "(7) Fail to rename MGS"
+		if [ "$(facet_fstype $facet)" = "zfs" ]; then
+			reimport_zpool mgs $newname-mgs
+		fi
+	fi
+
+	for num in $(seq $MDSCOUNT); do
+		local facet=$(mdsdevname $num)
+
+		do_facet mds${num} \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet" ||
+			error "(8) Fail to rename MDT $num"
+		if [ "$(facet_fstype $facet)" = "zfs" ]; then
+			reimport_zpool mds${num} $newname-mdt${num}
+		fi
+	done
+
+	for num in $(seq $OSTCOUNT); do
+		local facet=$(ostdevname $num)
+
+		do_facet ost${num} \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet" ||
+			error "(9) Fail to rename OST $num"
+		if [ "$(facet_fstype $facet)" = "zfs" ]; then
+			reimport_zpool ost${num} $newname-ost${num}
+		fi
+	done
+}
+
+test_103_set_pool() {
+	local pname=$1
+	local ost_x=$2
+
+	do_facet mgs $LCTL pool_add $FSNAME.$pname ${FSNAME}-$ost_x ||
+		error "Fail to add $ost_x to $FSNAME.$pname"
+	wait_update $HOSTNAME \
+		"lctl get_param -n lov.$FSNAME-clilov-*.pools.$pname |
+			grep $ost_x" "$FSNAME-${ost_x}_UUID" ||
+		error "$ost_x is NOT in pool $FSNAME.$pname"
+}
+
+test_103_check_pool() {
+	local save_fsname=$1
+	local errno=$2
+
+	stat $DIR/$tdir/test-framework.sh ||
+		error "($errno) Fail to stat"
+	do_facet mgs $LCTL pool_list $FSNAME.pool1 ||
+		error "($errno) Fail to list $FSNAME.pool1"
+	do_facet mgs $LCTL pool_list $FSNAME.$save_fsname ||
+		error "($errno) Fail to list $FSNAME.$save_fsname"
+	do_facet mgs $LCTL pool_list $FSNAME.$save_fsname |
+		grep ${FSNAME}-OST0000 ||
+		error "($errno) List $FSNAME.$save_fsname is invalid"
+
+	local pname=$($LFS getstripe --pool $DIR/$tdir/d0)
+	[ "$pname" = "$save_fsname" ] ||
+		error "($errno) Unexpected pool name $pname"
+}
+
+test_103() {
+	check_mount_and_prep
+	rm -rf $DIR/$tdir
+	mkdir $DIR/$tdir || error "(1) Fail to mkdir $DIR/$tdir"
+	cp $LUSTRE/tests/test-framework.sh $DIR/$tdir ||
+		error "(2) Fail to copy test-framework.sh"
+
+	do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
+		error "(3) Fail to create $FSNAME.pool1"
+	# use the fsname itself as a pool name
+	do_facet mgs $LCTL pool_new $FSNAME.$FSNAME ||
+		error "(4) Fail to create $FSNAME.$FSNAME"
+
+	test_103_set_pool $FSNAME OST0000
+
+	$SETSTRIPE -p $FSNAME $DIR/$tdir/d0 ||
+		error "(6) Fail to setstripe on $DIR/$tdir/d0"
+
+	KEEP_ZPOOL=true
+	stopall
+
+	test_renamefs mylustre
+
+	local save_fsname=$FSNAME
+	FSNAME="mylustre"
+	setupall
+
+	test_103_check_pool $save_fsname 7
+
+	if [ $OSTCOUNT -ge 2 ]; then
+		test_103_set_pool $save_fsname OST0001
+	fi
+
+	$SETSTRIPE -p $save_fsname $DIR/$tdir/f0 ||
+		error "(16) Fail to setstripe on $DIR/$tdir/f0"
+
+	stopall
+
+	test_renamefs tfs
+
+	FSNAME="tfs"
+	setupall
+
+	test_103_check_pool $save_fsname 17
+
+	stopall
+
+	test_renamefs $save_fsname
+
+	FSNAME=$save_fsname
+	setupall
+	KEEP_ZPOOL=false
+}
+run_test 103 "rename filesystem name"
+
 if ! combined_mgs_mds ; then
 	stop mgs
 fi
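
Note on the fail-injection values this patch switches to (0x80000716, 0x8000050f, 0x8000090e, 0x8000060a): each is the fault code from obd_support.h ORed with the OBD_FAIL_ONCE bit (0x80000000), so the injected failure fires only on the first pass through the check rather than on every pass. A minimal sketch of the arming pattern, reusing the mds1 facet and OBD_FAIL_TGT_MOUNT_RACE from test_41c above:

#define OBD_FAIL_TGT_MOUNT_RACE 0x716
do_facet mds1 "$LCTL set_param fail_loc=0x80000716"	# arm a one-shot fault
# ... exercise the racing code path under test ...
do_facet mds1 "$LCTL set_param fail_loc=0x0"		# disarm explicitly when done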