X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fconf-sanity.sh;h=a251e2345a54fa286b4a92526765c8c4581be368;hp=95b7892955768581ab355e95853b58f1864db7bc;hb=75eb91aeabcd167fe586e5e0f707cee5e8966133;hpb=b0d1be42118274d608ed171855de9c00a30c0d22

diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh
old mode 100755
new mode 100644
index 95b7892..a251e23
--- a/lustre/tests/conf-sanity.sh
+++ b/lustre/tests/conf-sanity.sh
@@ -4,8 +4,8 @@ set -e
 
 ONLY=${ONLY:-"$*"}
 
-# bug number for skipped test:
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
+# bug number for skipped test: LU-8972
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 101"
 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
 
 is_sles11()						# LU-2181
@@ -455,17 +455,16 @@ run_test 5c "cleanup after failed mount (bug 2712) (should return errs)"
 
 test_5d() {
 	grep " $MOUNT " /etc/mtab &&
-		error false "unexpected entry in mtab before mount" && return 10
+		error "unexpected entry in mtab before mount"
 
 	start_ost || error "OST start failed"
 	start_mds || error "MDS start failed"
-	stop_ost || error "Unable to stop OST1"
+	stop_ost -f || error "Unable to stop OST1"
 	mount_client $MOUNT || error "mount_client $MOUNT failed"
 	umount_client $MOUNT -f || error "umount_client $MOUNT failed"
 	cleanup_nocli || error "cleanup_nocli failed with $?"
-	grep " $MOUNT " /etc/mtab &&
+	! grep " $MOUNT " /etc/mtab ||
 		error "$MOUNT entry in mtab after unmount"
-	pass
 }
 run_test 5d "mount with ost down"
 
@@ -523,6 +522,17 @@ test_5f() {
 }
 run_test 5f "mds down, cleanup after failed mount (bug 2712)"
 
+test_5g() {
+	modprobe lustre
+	[ $(lustre_version_code client) -lt $(version_code 2.9.53) ] &&
+		{ skip "automount of debugfs missing before 2.9.53" && return 0; }
+	umount /sys/kernel/debug
+	$LCTL get_param -n devices | egrep -v "error" ||
+		error "lctl can't access debugfs data"
+	grep " debugfs " /etc/mtab || error "debugfs failed to remount"
+}
+run_test 5g "handle missing debugfs"
+
 test_6() {
 	setup
 	manual_umount_client
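For context: the new test_5g depends on the client transparently remounting debugfs when lctl next touches /sys/kernel/debug (Lustre >= 2.9.53). A minimal stand-alone sketch of the same check, outside the test framework (assumes a node with the lustre modules loadable and lctl in PATH):

    #!/bin/bash
    # Sketch: verify debugfs comes back after an explicit unmount.
    modprobe lustre
    umount /sys/kernel/debug || true    # drop the debugfs mount
    # any lctl access should trigger the automount
    lctl get_param -n devices | grep -v error ||
            echo "lctl could not read debugfs data" >&2
    grep -q " debugfs " /etc/mtab &&
            echo "debugfs was remounted as expected"
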
@@ -702,7 +712,7 @@ run_test 19a "start/stop MDS without OSTs"
 
 test_19b() {
 	start_ost || error "Unable to start OST1"
-	stop_ost || error "Unable to stop OST1"
+	stop_ost -f || error "Unable to stop OST1"
 }
 run_test 19b "start/stop OSTs without MDS"
 
@@ -1197,27 +1207,8 @@ test_29() {
 	fi
 
 	# check MDTs too
-	for num in $(seq $MDSCOUNT); do
-		local mdtosc=$(get_mdtosc_proc_path mds${num} $FSNAME-OST0001)
-		local MPROC="osc.$mdtosc.active"
-		local MAX=30
-		local WAIT=0
-		while [ 1 ]; do
-			sleep 5
-			RESULT=$(do_facet mds${num} "$LCTL get_param -n $MPROC")
-			[ ${PIPESTATUS[0]} = 0 ] || error "Can't read $MPROC"
-			if [ $RESULT -eq $DEAC ]; then
-				echo -n "MDT deactivated also after"
-				echo "$WAIT sec (got $RESULT)"
-				break
-			fi
-			WAIT=$((WAIT + 5))
-			if [ $WAIT -eq $MAX ]; then
-				error "MDT active: wanted $DEAC got $RESULT"
-			fi
-			echo "Waiting $(($MAX - $WAIT))secs for MDT deactivated"
-		done
-	done
+	wait_osp_active ost ${FSNAME}-OST0001 1 0
+
 	# test new client starts deactivated
 	umount_client $MOUNT || error "umount_client $MOUNT failed"
 	mount_client $MOUNT || error "mount_client $MOUNT failed"
@@ -1376,7 +1367,7 @@ test_32newtarball() {
 	chown $T32_QID.$T32_QID $tmp/src/t32_qf_old
 
 	# format ost with comma-separated NIDs to verify LU-4460
-	local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)"
+	local failnid="$(h2nettype 1.2.3.4),$(h2nettype 4.3.2.1)"
 	MGSNID="$MGSNID,$MGSNID" OSTOPT="--failnode=$failnid" formatall
 
 	setupall
@@ -1492,7 +1483,8 @@ t32_check() {
 
 	local IMGTYPE=$(facet_fstype $SINGLEMDS)
 
-	tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\')
+	tarballs=$($r find $RLUSTRE/tests -maxdepth 1 \
+		-name \'disk*-$IMGTYPE.tar.bz2\')
 
 	if [ -z "$tarballs" ]; then
 		skip "No applicable tarballs found"
@@ -1531,6 +1523,7 @@ t32_test_cleanup() {
 			destroy_zpool $facet $poolname
 		done
 	fi
+	combined_mgs_mds || start_mgs || rc=$?
 	return $rc
 }
 
@@ -1595,6 +1588,19 @@ t32_verify_quota() {
 	local qval
 	local cmd
 
+	# LU-2435: if the underlying zfs doesn't support userobj_accounting,
+	# lustre will estimate the object count usage. This fails quota
+	# verification in 32b. The object quota usage should be accurate after
+	# zfs-0.7.0 is released.
+	[ $fstype == "zfs" ] && {
+		local zfs_version=$(do_node $node cat /sys/module/zfs/version)
+
+		[ $(version_code $zfs_version) -lt $(version_code 0.7.0) ] && {
+			echo "Skip quota verify for zfs: $zfs_version"
+			return 0
+		}
+	}
+
 	$LFS quota -u $T32_QID -v $mnt
 
 	qval=$($LFS quota -v -u $T32_QID $mnt |
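The gate added to t32_verify_quota compares the running zfs release against 0.7.0 with version_code(), the test-framework.sh helper that packs a dotted version string into one integer so versions compare with -lt/-ge. A minimal sketch of the same comparison, with version_code reimplemented locally as a stand-in (the packing scheme below is an assumption for illustration):

    #!/bin/bash
    # Sketch: skip a check when the loaded zfs module predates 0.7.0.
    version_code() {
            local major minor patch
            IFS='.-' read -r major minor patch _ <<< "$1"
            echo $(((major << 16) + (minor << 8) + ${patch:-0}))
    }
    zfs_version=$(cat /sys/module/zfs/version 2>/dev/null) || zfs_version=0.0.0
    if [ "$(version_code "$zfs_version")" -lt "$(version_code 0.7.0)" ]; then
            echo "zfs $zfs_version predates userobj accounting, skipping"
    fi
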
@@ -1679,7 +1685,6 @@ t32_test() {
 	local mdt2_is_available=false
 	local node=$(facet_active_host $SINGLEMDS)
 	local r="do_node $node"
-	local node2=$(facet_active_host mds2)
 	local tmp=$TMP/t32
 	local img_commit
 	local img_kernel
@@ -1689,7 +1694,7 @@ t32_test() {
 	local img_blimit
 	local img_ilimit
 	local fsname=t32fs
-	local nid=$($r $LCTL list_nids | head -1)
+	local nid
 	local mopts
 	local uuid
 	local nrpcs_orig
@@ -1703,9 +1708,12 @@ t32_test() {
 	local stripe_count
 	local dir
 
+	combined_mgs_mds || stop_mgs || error "Unable to stop MGS"
 	trap 'trap - RETURN; t32_test_cleanup' RETURN
 
 	load_modules
+	nid=$($r $LCTL list_nids | head -1)
+
 	mkdir -p $tmp/mnt/lustre || error "mkdir $tmp/mnt/lustre failed"
 	$r mkdir -p $tmp/mnt/{mdt,mdt1,ost}
 	$r tar xjvf $tarball -S -C $tmp || {
@@ -1754,6 +1762,10 @@ t32_test() {
 			$ZPOOL import -f -d $tmp $poolname"
 	done
 
+	# upgrade zpool to latest supported features, including
+	# dnode quota accounting in 0.7.0
+	$r "$ZPOOL upgrade -a"
+
 	mdt_dev=t32fs-mdt1/mdt1
 	ost_dev=t32fs-ost1/ost1
 	! $mdt2_is_available || mdt2_dev=t32fs-mdt2/mdt2
@@ -1853,13 +1865,13 @@ t32_test() {
 			mkfsoptions="--mkfsoptions=\\\"-J size=8\\\""
 		fi
 
-		add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
+		add $SINGLEMDS $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
			   $mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || {
 			error_noexit "Mkfs new MDT failed"
 			return 1
 		}
 
-		[[ $(facet_fstype mds1) != zfs ]] || import_zpool fs2mds
+		[[ $(facet_fstype mds1) != zfs ]] || import_zpool mds1
 
 		$r $TUNEFS --dryrun $fs2mdsdev || {
 			error_noexit "tunefs.lustre before mounting the MDT"
@@ -1907,7 +1919,8 @@ t32_test() {
 			mopts="loop,$mopts"
 		fi
 	fi
-	$r $MOUNT_CMD -o $mopts $ost_dev $tmp/mnt/ost || {
+
+	$r $MOUNT_CMD -onomgs -o$mopts $ost_dev $tmp/mnt/ost || {
 		error_noexit "Mounting the OST"
 		return 1
 	}
@@ -2276,8 +2289,12 @@ t32_test() {
 		if [[ $fstype == zfs ]]; then
 			local poolname=t32fs-mdt1
 			$r "modprobe zfs;
-			    $ZPOOL list -H $poolname >/dev/null 2>&1 ||
+				$ZPOOL list -H $poolname >/dev/null 2>&1 ||
 				$ZPOOL import -f -d $tmp $poolname"
+
+			# upgrade zpool to latest supported features,
+			# including dnode quota accounting in 0.7.0
+			$r "$ZPOOL upgrade $poolname"
 		fi
 
 		# mount a second time to make sure we didnt leave upgrade flag on
@@ -2464,9 +2481,9 @@ test_35a() { # bug 12459
 	local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
		awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
 	do_facet mgs "$LCTL conf_param \
-			${device}.failover.node=$(h2$NETTYPE $FAKENID)" ||
+			${device}.failover.node=$(h2nettype $FAKENID)" ||
 		error "Setting ${device}.failover.node=\
-			$(h2$NETTYPE $FAKENID) failed."
+			$(h2nettype $FAKENID) failed."
 
 	log "Wait for RECONNECT_INTERVAL seconds (10s)"
 	sleep 10
@@ -2522,9 +2539,9 @@ test_35b() { # bug 18674
 	local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
		awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
 	do_facet mgs "$LCTL conf_param \
-			${device}.failover.node=$(h2$NETTYPE $FAKENID)" ||
+			${device}.failover.node=$(h2nettype $FAKENID)" ||
 		error "Set ${device}.failover.node=\
-			$(h2$NETTYPE $FAKENID) failed"
+			$(h2nettype $FAKENID) failed"
 
 	local at_max_saved=0
 	# adaptive timeouts may prevent seeing the issue
@@ -3208,7 +3225,7 @@ test_43b() { # LU-5690
 	local client_ip=$(host_nids_address $HOSTNAME $NETTYPE)
 	local host=${client_ip//*./}
 	local net=${client_ip/%$host/}
-	local nosquash_nids=$(h2$NETTYPE $net[$host,$host,$host])
+	local nosquash_nids=$(h2nettype $net[$host,$host,$host])
 
 	add $fs2mgs $(mkfs_opts mgs $fs2mgsdev) --fsname=$fsname \
 		--param mdt.root_squash=$RUNAS_ID:$RUNAS_ID \
@@ -3366,8 +3383,9 @@ cleanup_48() {
 	reformat_and_config
 }
 
-test_48() { # bug 17636
-	reformat
+test_48() { # bz-17636 LU-7473
+	local count
+
 	setup_noconfig
 	check_mount || error "check_mount failed"
 
@@ -3378,14 +3396,36 @@ test_48() { # bug 17636
 	$GETSTRIPE $MOUNT/widestripe ||
 		error "$GETSTRIPE $MOUNT/widestripe failed"
 
-	trap cleanup_48 EXIT ERR
+	# In the future, we may introduce more EAs, such as selinux, enlarged
+	# LOV EA, and so on. These EAs will use some EA space that is shared
+	# by ACL entries. So here we only check a reasonable ACL entry count,
+	# instead of the max number that is calculated from the max_ea_size.
+	if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.8.57) ];
+	then
+		count=28	# hard-coded in the RPC protocol
+	elif [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+		count=4000	# max_num 4091 max_ea_size = 32768
+	elif ! large_xattr_enabled; then
+		count=450	# max_num 497 max_ea_size = 4012
+	else
+		count=4500	# max_num 8187 max_ea_size = 1048492
+				# do not create too many (>5000) to save time
+	fi
 
-	# fill acl buffer for avoid expand lsm to them
-	getent passwd | awk -F : '{ print "u:"$1":rwx" }' | while read acl; do
-		setfacl -m $acl $MOUNT/widestripe
+	echo "It is expected to hold at least $count ACL entries"
+	trap cleanup_48 EXIT ERR
+	for ((i = 0; i < $count; i++)) do
+		setfacl -m u:$((i + 100)):rw $MOUNT/widestripe ||
+			error "Fail to setfacl for $MOUNT/widestripe at $i"
 	done
+
+	cancel_lru_locks mdc
 	stat $MOUNT/widestripe || error "stat $MOUNT/widestripe failed"
+	local r_count=$(getfacl $MOUNT/widestripe | grep "user:" | wc -l)
+	count=$((count + 1))	# for the entry "user::rw-"
+
+	[ $count -eq $r_count ] ||
+		error "Expected ACL entries $count, but got $r_count"
 
 	cleanup_48
 }
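The counts hard-coded in the rewritten test_48 follow from the on-disk cost of a qualified POSIX ACL entry against the usable EA space, capped by the kernel's 64KiB xattr ceiling. As a rough model of the max_num figures quoted in the comments (the 8-bytes-per-entry cost and the 64KiB cap are assumptions for illustration, not framework code):

    #!/bin/bash
    # Sketch: approximate ACL capacity from max_ea_size.
    # Assumes ~8 bytes per qualified ACL entry, 64KiB xattr ceiling.
    acl_capacity() {
            local ea_size=$1 cap=$((64 * 1024))
            [ $ea_size -gt $cap ] && ea_size=$cap
            echo $((ea_size / 8))
    }
    acl_capacity 4012       # ~501  (hunk above: max_num 497, count 450)
    acl_capacity 32768      # ~4096 (max_num 4091, count 4000)
    acl_capacity 1048492    # ~8192 (max_num 8187, count 4500)
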
@@ -3705,6 +3745,7 @@ test_50i() {
 	# prepare MDT/OST, make OSC inactive for OST1
 	[ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return
 
+	load_modules
 	[ $(facet_fstype mds2) == zfs ] && import_zpool mds2
 	do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" ||
 		error "tunefs MDT2 failed"
@@ -3740,9 +3781,18 @@ test_50i() {
 		"$TEST" "${FSNAME}-MDT0001.mdc.active" 0 ||
 		error "Unable to deactivate MDT2"
 
+	wait_osp_active mds ${FSNAME}-MDT0001 1 0
+
 	$LFS mkdir -i1 $DIR/$tdir/2 &&
 		error "mkdir $DIR/$tdir/2 succeeds after deactive MDT"
 
+	$LFS mkdir -i0 -c$MDSCOUNT $DIR/$tdir/striped_dir ||
+		error "mkdir $DIR/$tdir/striped_dir fails after deactive MDT2"
+
+	local stripe_count=$($LFS getdirstripe -c $DIR/$tdir/striped_dir)
+	[ $stripe_count -eq $((MDSCOUNT - 1)) ] ||
+		error "wrong $stripe_count != $((MDSCOUNT -1)) for striped_dir"
+
 	# cleanup
 	umount_client $MOUNT || error "Unable to umount client"
 	stop_mds
@@ -4098,7 +4148,8 @@ test_55() {
 		sync
 
 		echo checking size of lov_objid for ost index $i
-		LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | grep ^User | awk '{print $6}')
+		LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" |
+				 grep ^User | awk -F 'Size: ' '{print $2}')
 		if [ "$LOV_OBJID_SIZE" != $(lov_objid_size $i) ]; then
 			error "lov_objid size has to be $(lov_objid_size $i), not $LOV_OBJID_SIZE"
 		else
@@ -4118,9 +4169,7 @@ test_56a() {
 	MDSJOURNALSIZE=16
 
-	for num in $(seq 1 $MDSCOUNT); do
-		format_mdt $num
-	done
+	formatall
 	add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=10000 --reformat \
 		$(ostdevname 1) $(ostvdevname 1)
 	add ost2 $(mkfs_opts ost2 $(ostdevname 2)) --index=1000 --reformat \
@@ -4482,27 +4531,27 @@ test_63() {
 		return
 	fi
 
-	local inode_slab=$(do_facet $SINGLEMDS \
-		"awk '/ldiskfs_inode_cache/ { print \\\$5 }' /proc/slabinfo")
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) load_module ldiskfs
+	local inode_slab=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+			   awk '/ldiskfs_inode_cache/ { print $5 / $6 }')
 	if [ -z "$inode_slab" ]; then
 		skip "ldiskfs module has not been loaded"
 		return
 	fi
 
-	echo "$inode_slab ldisk inodes per page"
-	if [ "$inode_slab" -ge "3" ] ; then
-		# If kmalloc-128 is also 1 per page - this is a debug kernel
-		# and so this is not an error.
-		local kmalloc128=$(do_facet $SINGLEMDS \
-			"awk '/^(kmalloc|size)-128 / { print \\\$5 }' /proc/slabinfo")
-		# 32 128-byte chunks in 4k
-		[ "$kmalloc128" -eq "32" ] ||
-			error "ldisk inode size is too big, $inode_slab objs per page"
-	fi
+	echo "$inode_slab ldiskfs inodes per page"
+	[ "${inode_slab%.*}" -ge "3" ] && return 0
 
-	return
+	# If kmalloc-128 is also 1 per page - this is a debug kernel
+	# and so this is not an error.
+	local kmalloc128=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+			   awk '/^(kmalloc|size)-128 / { print $5 / $6 }')
+	# 32 128-byte chunks in 4k
+	[ "${kmalloc128%.*}" -lt "32" ] ||
+		error "ldiskfs inode too big, only $inode_slab objs/page, " \
+		      "kmalloc128 = $kmalloc128 objs/page"
 }
-run_test 63 "Verify each page can at least hold 3 ldisk inodes"
+run_test 63 "Verify each page can at least hold 3 ldiskfs inodes"
 
 test_64() {
 	start_mds || error "unable to start MDS"
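The updated test_63 computes inodes per page as objperslab divided by pagesperslab (fields 5 and 6 of /proc/slabinfo) instead of reading objperslab alone, which misleads when a slab spans several pages. The same calculation by hand:

    #!/bin/bash
    # Sketch: objects per page for the ldiskfs inode slab.
    # /proc/slabinfo: name active num objsize objperslab pagesperslab ...
    awk '/ldiskfs_inode_cache/ { printf "%.1f inodes/page\n", $5 / $6 }' \
            /proc/slabinfo
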
@@ -4698,6 +4747,10 @@ test_68() {
 
 	umount_client $MOUNT || error "umount client failed"
 
+	if ! combined_mgs_mds; then
+		start_mgs || error "start mgs failed"
+	fi
+
 	start_mdt 1 || error "MDT start failed"
 	start_ost || error "Unable to start OST1"
 
@@ -5123,23 +5176,30 @@ test_72() { #LU-2634
 	local ostdev=$(ostdevname 1)
 	local cmd="$E2FSCK -fnvd $mdsdev"
 	local fn=3
+	local add_options
 
 	[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
 		skip "ldiskfs only test" && return
 
+	if combined_mgs_mds; then
+		add_options='--reformat'
+	else
+		add_options='--reformat --replace'
+	fi
+
 	#tune MDT with "-O extents"
 
 	for num in $(seq $MDSCOUNT); do
 		add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
-			--reformat $(mdsdevname $num) $(mdsvdevname $num) ||
+			$add_options $(mdsdevname $num) $(mdsvdevname $num) ||
 			error "add mds $num failed"
 		do_facet mds${num} "$TUNE2FS -O extents $(mdsdevname $num)" ||
 			error "$TUNE2FS failed on mds${num}"
 	done
 
-	add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev ||
+	add ost1 $(mkfs_opts ost1 $ostdev) $add_options $ostdev ||
 		error "add $ostdev failed"
-	start_mgsmds || error "start mds failed"
+	start_mds || error "start mds failed"
 	start_ost || error "start ost failed"
 	mount_client $MOUNT || error "mount client failed"
@@ -5200,6 +5260,10 @@ test_75() { # LU-2374
 	add mds1 $opts_mds || error "add mds1 failed for new params"
 	add ost1 $opts_ost || error "add ost1 failed for new params"
+
+	if ! combined_mgs_mds; then
+		stop_mgs || error "stop mgs failed"
+	fi
 	reformat
 	return 0
 }
 run_test 75 "The order of --index should be irrelevant"
@@ -5207,6 +5271,10 @@ run_test 75 "The order of --index should be irrelevant"
 test_76a() {
 	[[ $(lustre_version_code mgs) -ge $(version_code 2.4.52) ]] ||
 		{ skip "Need MDS version at least 2.4.52" && return 0; }
+
+	if ! combined_mgs_mds; then
+		start_mgs || error "start mgs failed"
+	fi
 	setup
 	local MDMB_PARAM="osc.*.max_dirty_mb"
 	echo "Change MGS params"
@@ -5296,6 +5364,34 @@ test_76c() {
 }
 run_test 76c "verify changelog_mask is applied with set_param -P"
 
+test_76d() { #LU-9399
+	setupall
+
+	local xattr_cache="llite.*.xattr_cache"
+	local cmd="$LCTL get_param -n $xattr_cache | head -1"
+	local new=$((($(eval $cmd) + 1) % 2))
+
+	echo "lctl set_param -P llite.*.xattr_cache=$new"
+	do_facet mgs $LCTL set_param -P $xattr_cache=$new ||
+		error "Can't change xattr_cache"
+	wait_update $HOSTNAME "$cmd" "$new"
+
+	echo "Check $xattr_cache on client $MOUNT"
+	umount_client $MOUNT || error "umount $MOUNT failed"
+	mount_client $MOUNT || error "mount $MOUNT failed"
+	[ $(eval $cmd) -eq $new ] ||
+		error "$xattr_cache != $new on client $MOUNT"
+
+	echo "Check $xattr_cache on the new client $MOUNT2"
+	mount_client $MOUNT2 || error "mount $MOUNT2 failed"
+	[ $(eval $cmd) -eq $new ] ||
+		error "$xattr_cache != $new on client $MOUNT2"
+	umount_client $MOUNT2 || error "umount $MOUNT2 failed"
+
+	stopall
+}
+run_test 76d "verify llite.*.xattr_cache can be set by 'set_param -P' correctly"
+
 test_77() { # LU-3445
 	local server_version=$(lustre_version_code $SINGLEMDS)
 	[[ $server_version -ge $(version_code 2.8.55) ]] ||
@@ -5312,7 +5408,9 @@ test_77() { # LU-3445
 	local fs2ostvdev=$(ostvdevname 1_2)
 	local fsname=test1234
 	local mgsnid
-	local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)"
+	local failnid="$(h2nettype 1.2.3.4),$(h2nettype 4.3.2.1)"
+
+	combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
 
 	add fs2mds $(mkfs_opts mds1 $fs2mdsdev) --mgs --fsname=$fsname \
 		--reformat $fs2mdsdev $fs2mdsvdev || error "add fs2mds failed"
@@ -5320,9 +5418,9 @@ test_77() { # LU-3445
 		error "start fs2mds failed"
 
 	mgsnid=$(do_facet fs2mds $LCTL list_nids | xargs | tr ' ' ,)
-	mgsnid="$mgsnid,$mgsnid:$mgsnid"
+	mgsnid="0.0.0.0@tcp,$mgsnid,$mgsnid:$mgsnid"
 
-	add fs2ost $(mkfs_opts ost1 $fs2ostdev) --mgsnode=$mgsnid \
+	add fs2ost --mgsnode=$mgsnid $(mkfs_opts ost1 $fs2ostdev) \
 		--failnode=$failnid --fsname=$fsname \
 		--reformat $fs2ostdev $fs2ostvdev ||
 		error "add fs2ost failed"
@@ -5642,19 +5740,6 @@ test_81() { # LU-4665
 }
 run_test 81 "sparse OST indexing"
 
-# Wait OSTs to be active on both client and MDT side.
-wait_osts_up() {
-	local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd |
-		awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
-	wait_update $HOSTNAME "eval $cmd" $OSTCOUNT ||
-		error "wait_update OSTs up on client failed"
-
-	cmd="$LCTL get_param -n lod.$FSNAME-MDT*-*.target_obd | sort -u |
-		awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
-	wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT ||
-		error "wait_update OSTs up on MDT failed"
-}
-
 # Here we exercise the stripe placement functionality on a file system that
 # has formatted the OST with a random index. With the file system the following
 # functionality is tested:
@@ -5839,7 +5924,8 @@ test_82b() { # LU-4665
 	wait_update $HOSTNAME "$LCTL get_param -n lov.$FSNAME-*.pools.$TESTNAME|
			       sort -u | tr '\n' ' ' " "$ost_targets_uuid" ||
 		error "wait_update $ost_pool failed"
-	pool_list $ost_pool || error "list OST pool $ost_pool failed"
+	wait_update_facet $SINGLEMDS "$LCTL pool_list $ost_pool | wc -l" 4 ||
+		error "wait_update pool_list $ost_pool failed"
 
 	# If [--pool|-p <pool_name>] is set with [--ost-list|-o <ost_indices>],
 	# then the OSTs must be the members of the pool.
@@ -5869,8 +5955,8 @@ run_test 82b "specify OSTs for file with --pool and --ost-list options"
 test_83() {
 	[[ $(lustre_version_code ost1) -ge $(version_code 2.6.91) ]] ||
 		{ skip "Need OST version at least 2.6.91" && return 0; }
-	if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
-		skip "Only applicable to ldiskfs-based MDTs"
+	if [ $(facet_fstype ost1) != ldiskfs ]; then
+		skip "Only applicable to ldiskfs-based OSTs"
 		return
 	fi
 
@@ -5886,7 +5972,7 @@ test_83() {
 	# Mount the OST as an ldiskfs filesystem.
 	log "mount the OST $dev as a $fstype filesystem"
 	add ost1 $(mkfs_opts ost1 $dev) $FSTYPE_OPT \
-		--reformat $dev $dev > /dev/null ||
+		--reformat $dev > /dev/null ||
 		error "format ost1 error"
 
 	if ! test -b $dev; then
@@ -6045,11 +6131,11 @@ test_86() {
 run_test 86 "Replacing mkfs.lustre -G option"
 
 test_87() { #LU-6544
-	[[ $(lustre_version_code $SINGLEMDS1) -ge $(version_code 2.7.56) ]] ||
-		{ skip "Need MDS version at least 2.7.56" && return; }
+	[[ $(lustre_version_code $SINGLEMDS1) -ge $(version_code 2.9.51) ]] ||
+		{ skip "Need MDS version at least 2.9.51" && return; }
 	[[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
 		{ skip "Only applicable to ldiskfs-based MDTs" && return; }
-	[[ $OSTCOUNT -gt 69 ]] &&
+	[[ $OSTCOUNT -gt 59 ]] &&
 		{ skip "Ignore wide striping situation" && return; }
 
 	local mdsdev=$(mdsdevname 1)
@@ -6057,23 +6143,22 @@ test_87() { #LU-6544
 	local file=$DIR/$tfile
 	local mntpt=$(facet_mntpt $SINGLEMDS)
 	local used_xattr_blk=0
-	local inode_size=${1:-512}
+	local inode_size=${1:-1024}
 	local left_size=0
 	local xtest="trusted.test"
 	local value
 	local orig
 	local i
+	local stripe_cnt=$(($OSTCOUNT + 2))
 
-	#Please see LU-6544 for MDT inode size calculation
-	if [ $OSTCOUNT -gt 26 ]; then
+	#Please see ldiskfs_make_lustre() for MDT inode size calculation
+	if [ $stripe_cnt -gt 16 ]; then
 		inode_size=2048
-	elif [ $OSTCOUNT -gt 5 ]; then
-		inode_size=1024
 	fi
 	left_size=$(expr $inode_size - \
			156 - \
			32 - \
-			32 - $OSTCOUNT \* 24 - 16 - 3 - \
+			32 - 40 \* 3 - 32 \* 3 - $stripe_cnt \* 24 - 16 - 3 - \
			24 - 16 - 3 - \
			24 - 18 - $(expr length $tfile) - 16 - 4)
 	if [ $left_size -le 0 ]; then
@@ -6087,7 +6172,7 @@ test_87() { #LU-6544
 	unload_modules
 	reformat
 
-	add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$OSTCOUNT \
+	add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$stripe_cnt \
 		--reformat $mdsdev $mdsvdev || error "add mds1 failed"
 	start_mdt 1 > /dev/null || error "start mdt1 failed"
 	for i in $(seq $OSTCOUNT); do
@@ -6098,9 +6183,10 @@ test_87() { #LU-6544
 	check_mount || error "check client $MOUNT failed"
 
 	#set xattr
-	$SETSTRIPE -c -1 $file || error "$SETSTRIPE -c -1 $file failed"
-	$GETSTRIPE $file || error "$GETSTRIPE $file failed"
-	i=$($GETSTRIPE -c $file)
+	$SETSTRIPE -E 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
+		error "Create file with 3 components failed"
+	$TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed"
+	i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed"
 	if [ $i -ne $OSTCOUNT ]; then
 		left_size=$(expr $left_size + $(expr $OSTCOUNT - $i) \* 24)
 		echo -n "Since only $i out $OSTCOUNT OSTs are used, "
@@ -6152,6 +6238,81 @@ test_88() {
 }
 run_test 88 "check the default mount options can be overridden"
 
+test_89() { # LU-7131
+	[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.54) ]] ||
+		{ skip "Need MDT version at least 2.9.54" && return 0; }
+
+	local key=failover.node
+	local val1=192.0.2.254@tcp0 # Reserved IPs, see RFC 5735
+	local val2=192.0.2.255@tcp0
+	local mdsdev=$(mdsdevname 1)
+	local params
+
+	stopall
+
+	[ $(facet_fstype mds1) == zfs ] && import_zpool mds1
+	# Check that parameters are added correctly
+	echo "tunefs --param $key=$val1"
+	do_facet mds "$TUNEFS --param $key=$val1 $mdsdev >/dev/null" ||
+		error "tunefs --param $key=$val1 failed"
+	params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+		error "tunefs --dryrun failed"
+	params=${params##*Parameters:}
+	params=${params%%exiting*}
+	[ $(echo $params | tr ' ' '\n' | grep -c $key=$val1) = "1" ] ||
+		error "on-disk parameter not added correctly via tunefs"
+
+	# Check that parameters replace existing instances when added
+	echo "tunefs --param $key=$val2"
+	do_facet mds "$TUNEFS --param $key=$val2 $mdsdev >/dev/null" ||
+		error "tunefs --param $key=$val2 failed"
+	params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+		error "tunefs --dryrun failed"
+	params=${params##*Parameters:}
+	params=${params%%exiting*}
+	[ $(echo $params | tr ' ' '\n' | grep -c $key=) = "1" ] ||
+		error "on-disk parameter not replaced via tunefs"
+	[ $(echo $params | tr ' ' '\n' | grep -c $key=$val2) = "1" ] ||
+		error "on-disk parameter not replaced correctly via tunefs"
+
+	# Check that a parameter is erased properly
+	echo "tunefs --erase-param $key"
+	do_facet mds "$TUNEFS --erase-param $key $mdsdev >/dev/null" ||
+		error "tunefs --erase-param $key failed"
+	params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+		error "tunefs --dryrun failed"
+	params=${params##*Parameters:}
+	params=${params%%exiting*}
+	[ $(echo $params | tr ' ' '\n' | grep -c $key=) = "0" ] ||
+		error "on-disk parameter not erased correctly via tunefs"
+
+	# Check that all the parameters are erased
+	echo "tunefs --erase-params"
+	do_facet mds "$TUNEFS --erase-params $mdsdev >/dev/null" ||
+		error "tunefs --erase-params failed"
+	params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+		error "tunefs --dryrun failed"
+	params=${params##*Parameters:}
+	params=${params%%exiting*}
+	[ -z $params ] ||
+		error "all on-disk parameters not erased correctly via tunefs"
+
+	# Check the order of options --erase-params and --param
+	echo "tunefs --param $key=$val1 --erase-params"
+	do_facet mds \
+		"$TUNEFS --param $key=$val1 --erase-params $mdsdev >/dev/null"||
+		error "tunefs --param $key=$val1 --erase-params failed"
+	params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+		error "tunefs --dryrun failed"
+	params=${params##*Parameters:}
+	params=${params%%exiting*}
+	[ $(echo $params | tr ' ' '\n') == "$key=$val1" ] ||
+		error "on-disk param not added correctly with --erase-params"
+
+	reformat
+}
+run_test 89 "check tunefs --param and --erase-param{s} options"
+
 # $1 test directory
 # $2 (optional) value of max_mod_rpcs_in_flight to set
 check_max_mod_rpcs_in_flight() {
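test_89 repeatedly trims the tunefs.lustre --dryrun output down to the parameter list with two prefix/suffix expansions. Given a captured $params, the trimming works like this (sketch with canned output standing in for the real dryrun text):

    #!/bin/bash
    # Sketch: extract the parameter list from tunefs.lustre --dryrun output.
    params='   Read previous values:
    Parameters: failover.node=192.0.2.254@tcp0
    exiting before disk write.'
    params=${params##*Parameters:}   # drop everything through "Parameters:"
    params=${params%%exiting*}       # drop the trailing "exiting..." notice
    echo "parsed: $params"           # -> failover.node=192.0.2.254@tcp0
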
@@ -7014,6 +7175,303 @@ test_100() {
 }
 run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
 
+test_101() {
+	local createmany_oid
+	local dev=$FSNAME-OST0000-osc-MDT0000
+	setup
+
+	createmany -o $DIR1/$tfile-%d 50000 &
+	createmany_oid=$!
+	# MDT->OST reconnection causes MDT<->OST last_id synchronisation
+	# via osp_precreate_cleanup_orphans.
+	for ((i = 0; i < 100; i++)); do
+		for ((k = 0; k < 10; k++)); do
+			do_facet $SINGLEMDS "$LCTL --device $dev deactivate;" \
+					    "$LCTL --device $dev activate"
+		done
+
+		ls -asl $MOUNT | grep '???' &&
+			(kill -9 $createmany_oid &>/dev/null; \
+			 error "File has no objects on OST")
+
+		kill -s 0 $createmany_oid || break
+	done
+	wait $createmany_oid
+	cleanup
+}
+run_test 101 "Race MDT->OST reconnection with create"
+
+test_102() {
+	cleanup || error "cleanup failed with $?"
+
+	local mds1dev=$(mdsdevname 1)
+	local mds1mnt=$(facet_mntpt mds1)
+	local mds1fstype=$(facet_fstype mds1)
+	local mds1opts=$MDS_MOUNT_OPTS
+
+	if [ $mds1fstype == ldiskfs ] &&
+	   ! do_facet mds1 test -b $mds1dev; then
+		mds1opts=$(csa_add "$mds1opts" -o loop)
+	fi
+	if [[ $mds1fstype == zfs ]]; then
+		import_zpool mds1 || return ${PIPESTATUS[0]}
+	fi
+
+	# unload all and only load libcfs to allow fail_loc setting
+	do_facet mds1 lustre_rmmod || error "unable to unload modules"
+	do_facet mds1 modprobe libcfs || error "libcfs not loaded"
+	do_facet mds1 lsmod \| grep libcfs || error "libcfs not loaded"
+
+	#define OBD_FAIL_OBDCLASS_MODULE_LOAD		0x60a
+	do_facet mds1 "$LCTL set_param fail_loc=0x8000060a"
+
+	do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts &&
+		error "mdt start must fail"
+	do_facet mds1 lsmod \| grep obdclass && error "obdclass must not load"
+
+	do_facet mds1 "$LCTL set_param fail_loc=0x0"
+
+	do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts ||
+		error "mdt start must not fail"
+
+	cleanup || error "cleanup failed with $?"
+}
+run_test 102 "obdclass module cleanup upon error"
+
+test_renamefs() {
+	local newname=$1
+
+	echo "rename $FSNAME to $newname"
+
+	if ! combined_mgs_mds; then
+		local facet=$(mgsdevname)
+
+		do_facet mgs \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
+			error "(7) Fail to rename MGS"
+		if [ "$(facet_fstype $facet)" = "zfs" ]; then
+			reimport_zpool mgs $newname-mgs
+		fi
+	fi
+
+	for num in $(seq $MDSCOUNT); do
+		local facet=$(mdsdevname $num)
+
+		do_facet mds${num} \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
+			error "(8) Fail to rename MDT $num"
+		if [ "$(facet_fstype $facet)" = "zfs" ]; then
+			reimport_zpool mds${num} $newname-mdt${num}
+		fi
+	done
+
+	for num in $(seq $OSTCOUNT); do
+		local facet=$(ostdevname $num)
+
+		do_facet ost${num} \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
+			error "(9) Fail to rename OST $num"
+		if [ "$(facet_fstype $facet)" = "zfs" ]; then
+			reimport_zpool ost${num} $newname-ost${num}
+		fi
+	done
+}
+
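test_renamefs drives the rename with tunefs.lustre's --fsname/--rename pair on every stopped target, exactly as in the function above. Reduced to a single invocation, the shape is (the device path is illustrative, and the target must be unmounted first):

    #!/bin/bash
    # Sketch: rename one target's filesystem from "lustre" to "mylustre".
    # /dev/sdb is a placeholder device; --rename names the old fsname.
    tunefs.lustre --fsname=mylustre --rename=lustre -v /dev/sdb
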
error "(6) Fail to setstripe on $DIR/$tdir/d0" + + KEEP_ZPOOL=true + stopall + + test_renamefs mylustre + + local save_fsname=$FSNAME + FSNAME="mylustre" + setupall + + test_103_check_pool $save_fsname 7 + + if [ $OSTCOUNT -ge 2 ]; then + test_103_set_pool $save_fsname OST0001 + fi + + $SETSTRIPE -p $save_fsname $DIR/$tdir/f0 || + error "(16) Fail to setstripe on $DIR/$tdir/f0" + + stopall + + test_renamefs tfs + + FSNAME="tfs" + setupall + + test_103_check_pool $save_fsname 17 + + stopall + + test_renamefs $save_fsname + + FSNAME=$save_fsname + setupall + KEEP_ZPOOL=false +} +run_test 103 "rename filesystem name" + +test_104() { # LU-6952 + local mds_mountopts=$MDS_MOUNT_OPTS + local ost_mountopts=$OST_MOUNT_OPTS + local mds_mountfsopts=$MDS_MOUNT_FS_OPTS + local lctl_ver=$(do_facet $SINGLEMDS $LCTL --version | + awk '{ print $2 }') + + [[ $(version_code $lctl_ver) -lt $(version_code 2.9.55) ]] && + { skip "this test needs utils above 2.9.55" && return 0; } + + # specify "acl" in mount options used by mkfs.lustre + if [ -z "$MDS_MOUNT_FS_OPTS" ]; then + MDS_MOUNT_FS_OPTS="acl,user_xattr" + else + + MDS_MOUNT_FS_OPTS="${MDS_MOUNT_FS_OPTS},acl,user_xattr" + fi + + echo "mountfsopt: $MDS_MOUNT_FS_OPTS" + + #reformat/remount the MDT to apply the MDT_MOUNT_FS_OPT options + formatall + if [ -z "$MDS_MOUNT_OPTS" ]; then + MDS_MOUNT_OPTS="-o noacl" + else + MDS_MOUNT_OPTS="${MDS_MOUNT_OPTS},noacl" + fi + + for num in $(seq $MDSCOUNT); do + start mds$num $(mdsdevname $num) $MDS_MOUNT_OPTS || + error "Failed to start MDS" + done + + for num in $(seq $OSTCOUNT); do + start ost$num $(ostdevname $num) $OST_MOUNT_OPTS || + error "Failed to start OST" + done + + mount_client $MOUNT + setfacl -m "d:$RUNAS_ID:rwx" $MOUNT && + error "ACL is applied when FS is mounted with noacl." + + MDS_MOUNT_OPTS=$mds_mountopts + OST_MOUNT_OPTS=$ost_mountopts + MDS_MOUNT_FS_OPTS=$mds_mountfsopts + + formatall + setupall +} +run_test 104 "Make sure user defined options are reflected in mount" + +error_and_umount() { + umount $TMP/$tdir + rmdir $TMP/$tdir + error $* +} + +test_105() { + cleanup + reformat + setup + mkdir -p $TMP/$tdir + mount --bind $DIR $TMP/$tdir || error "mount bind mnt pt failed" + rm -f $TMP/$tdir/$tfile + rm -f $TMP/$tdir/${tfile}1 + + # Files should not be created in ro bind mount point + # remounting from rw to ro + mount -o remount,ro $TMP/$tdir || + error_and_umount "readonly remount of bind mnt pt failed" + touch $TMP/$tdir/$tfile && + error_and_umount "touch succeeds on ro bind mnt pt" + [ -e $TMP/$tdir/$tfile ] && + error_and_umount "file created on ro bind mnt pt" + + # Files should be created in rw bind mount point + # remounting from ro to rw + mount -o remount,rw $TMP/$tdir || + error_and_umount "read-write remount of bind mnt pt failed" + touch $TMP/$tdir/${tfile}1 || + error_and_umount "touch fails on rw bind mnt pt" + [ -e $TMP/$tdir/${tfile}1 ] || + error_and_umount "file not created on rw bind mnt pt" + umount $TMP/$tdir || error "umount of bind mnt pt failed" + rmdir $TMP/$tdir + cleanup || error "cleanup failed with $?" +} +run_test 105 "check file creation for ro and rw bind mnt pt" + +test_107() { + [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.50) ]] || + { skip "Need MDS version > 2.10.50"; return; } + + start_mgsmds || error "start_mgsmds failed" + start_ost || error "unable to start OST" + + # add unknown configuration parameter. 
+test_107() {
+	[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.50) ]] ||
+		{ skip "Need MDS version at least 2.10.50"; return; }
+
+	start_mgsmds || error "start_mgsmds failed"
+	start_ost || error "unable to start OST"
+
+	# add unknown configuration parameter.
+	local PARAM="$FSNAME-OST0000.ost.unknown_param=50"
+	do_facet mgs "$LCTL conf_param $PARAM"
+	cleanup_nocli || error "cleanup_nocli failed with $?"
+	load_modules
+
+	# unknown param should be ignored while mounting.
+	start_ost || error "unable to start OST after unknown param set"
+
+	cleanup || error "cleanup failed with $?"
+}
+run_test 107 "Unknown config param should not fail target mounting"
+
 if ! combined_mgs_mds ; then
 	stop mgs
 fi