ONLY=${ONLY:-"$*"}
-# bug number for skipped test:
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
+# bug number for skipped test: LU-2828
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 59 64"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
is_sles11() # LU-2181
SAVE_PWD=$PWD
LUSTRE=${LUSTRE:-`dirname $0`/..}
RLUSTRE=${RLUSTRE:-$LUSTRE}
+LUSTRE_TESTS_API_DIR=${LUSTRE_TESTS_API_DIR:-${LUSTRE}/tests/clientapi}
export MULTIOP=${MULTIOP:-multiop}
. $LUSTRE/tests/test-framework.sh
LDISKFS_MKFS_OPTS=$(csa_add "$LDISKFS_MKFS_OPTS" -E lazy_itable_init)
fi
+[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
+# bug number for skipped test: LU-2778 LU-2059
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 57b 50h"
+
init_logging
#
# Better reformat if it fails...
writeconf_all $MDSCOUNT 2 ||
{ echo "tunefs failed, reformatting instead" &&
- reformat_and_config && return 1; }
+ reformat_and_config && return 0; }
return 0
}
return
fi
- setup
- check_mount || return 41
- cleanup || return $?
+ setup
+ check_mount || return 41
+ cleanup || return $?
- echo "Remove mds config log"
- if ! combined_mgs_mds ; then
- stop mgs
- fi
+ echo "Remove mds config log"
+ if ! combined_mgs_mds ; then
+ stop mgs
+ fi
- do_facet mgs "$DEBUGFS -w -R 'unlink CONFIGS/$FSNAME-MDT0000' $MGSDEV || return \$?" || return $?
+ do_facet mgs "$DEBUGFS -w -R 'unlink CONFIGS/$FSNAME-MDT0000' \
+ $(mgsdevname) || return \$?" || return $?
- if ! combined_mgs_mds ; then
- start_mgs
- fi
+ if ! combined_mgs_mds ; then
+ start_mgs
+ fi
- start_ost
- start_mds && return 42
- reformat_and_config
+ start_ost
+ start_mds && return 42
+ reformat_and_config
}
run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should return errs)"
run_test 21d "start mgs then ost and then mds"
test_22() {
- local num
-
start_mds
echo Client mount with ost in logs, but none running
start_ost
# wait until mds connected to ost and open client connection
- for num in $(seq 1 $MDSCOUNT); do
- wait_osc_import_state mds${num} ost FULL
- done
+ wait_osc_import_state mds ost FULL
stop_ost
mount_client $MOUNT
# check_mount will block trying to contact ost
sleep $((TIMEOUT + TIMEOUT + TIMEOUT))
fi
mount_client $MOUNT
- for num in $(seq 1 $MDSCOUNT); do
- wait_osc_import_state mds${num} ost FULL
- done
+ wait_osc_import_state mds ost FULL
wait_osc_import_state client ost FULL
check_mount || return 41
pass
}
run_test 31 "Connect to non-existent node (shouldn't crash)"
+
+T32_QID=60000
+T32_BLIMIT=20480 # Kbytes
+T32_ILIMIT=2
+
#
# This is not really a test but a tool to create new disk
# image tarballs for the upgrade tests.
mkdir $tmp/src
tar cf - -C $src . | tar xf - -C $tmp/src
+ dd if=/dev/zero of=$tmp/src/t32_qf_old bs=1M \
+ count=$(($T32_BLIMIT / 1024 / 2))
+ chown $T32_QID.$T32_QID $tmp/src/t32_qf_old
formatall
setupall
+
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] &&
+ $LFS quotacheck -ug /mnt/$FSNAME
+ $LFS setquota -u $T32_QID -b 0 -B $T32_BLIMIT -i 0 -I $T32_ILIMIT \
+ /mnt/$FSNAME
+
tar cf - -C $tmp/src . | tar xf - -C /mnt/$FSNAME
stopall
popd
$LCTL get_param -n version | head -n 1 |
sed -e 's/^lustre: *//' >$tmp/img/commit
+
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] &&
+ $LFS quotaon -ug /mnt/$FSNAME
+ $LFS quota -u $T32_QID -v /mnt/$FSNAME
+ $LFS quota -v -u $T32_QID /mnt/$FSNAME |
+ awk 'BEGIN { num='1' } { if ($1 == "'/mnt/$FSNAME'") \
+ { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
+ | tr -d "*" > $tmp/img/bspace
+ $LFS quota -v -u $T32_QID /mnt/$FSNAME |
+ awk 'BEGIN { num='5' } { if ($1 == "'/mnt/$FSNAME'") \
+ { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
+ | tr -d "*" > $tmp/img/ispace
+
stopall
pushd $tmp/src
}
t32_test_cleanup() {
- local node=$(facet_active_host $SINGLEMDS)
- local r="do_node $node"
local tmp=$TMP/t32
local rc=$?
if $shall_cleanup_ost; then
$r umount -d $tmp/mnt/ost || rc=$?
fi
- $r rm -rf $tmp || rc=$?
- rm -rf $tmp || rc=$?
+
+ $r rm -rf $tmp
+ rm -rf $tmp
return $rc
}
all_removed=true
do_rpc_nodes $node check_mem_leak || return 1
if $all_removed; then
- load_modules
+ do_rpc_nodes $node load_modules
return 0
fi
sleep 5
echo wait for devices to go
while ((i < 20)); do
devices=$(do_rpc_nodes $node $LCTL device_list | wc -l)
- echo $device
- ((devices == 0)) && return 1
+ ((devices == 0)) && return 0
sleep 5
i=$((i + 1))
done
return 1
}
+# t32_verify_quota <node> <fsname> <mnt>
+#
+# Check that quota survived the disk-image upgrade: compare block/inode
+# usage and limits reported for $T32_QID against the values saved in the
+# image ($img_bspace/$img_ispace, read by the caller) and the limits set
+# at image-creation time ($T32_BLIMIT/$T32_ILIMIT), then re-enable quota
+# enforcement on MDT0000/OST0000 and verify both limits are enforced.
+t32_verify_quota() {
+	local node=$1
+	local fsname=$2
+	local mnt=$3
+	local fstype=$(facet_fstype $SINGLEMDS)
+	local qval
+	local cmd
+
+	$LFS quota -u $T32_QID -v $mnt
+
+	# The awk scripts below pick one column out of the "lfs quota -v"
+	# row for $mnt; when the device name is long the row wraps onto two
+	# lines (NF == 1), in which case the value is read from the next
+	# line.  num=1 selects the block usage (kbytes) column.
+	qval=$($LFS quota -v -u $T32_QID $mnt |
+		awk 'BEGIN { num='1' } { if ($1 == "'$mnt'") \
+		{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
+		| tr -d "*")
+	[ $qval -eq $img_bspace ] || {
+		echo "bspace, act:$qval, exp:$img_bspace"
+		return 1
+	}
+
+	# num=5: inode (file) usage column.
+	qval=$($LFS quota -v -u $T32_QID $mnt |
+		awk 'BEGIN { num='5' } { if ($1 == "'$mnt'") \
+		{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
+		| tr -d "*")
+	[ $qval -eq $img_ispace ] || {
+		echo "ispace, act:$qval, exp:$img_ispace"
+		return 1
+	}
+
+	# num=3: block hard limit, set to T32_BLIMIT when the image was made.
+	qval=$($LFS quota -v -u $T32_QID $mnt |
+		awk 'BEGIN { num='3' } { if ($1 == "'$mnt'") \
+		{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
+		| tr -d "*")
+	[ $qval -eq $T32_BLIMIT ] || {
+		echo "blimit, act:$qval, exp:$T32_BLIMIT"
+		return 1
+	}
+
+	# num=7: inode hard limit, set to T32_ILIMIT when the image was made.
+	qval=$($LFS quota -v -u $T32_QID $mnt |
+		awk 'BEGIN { num='7' } { if ($1 == "'$mnt'") \
+		{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
+		| tr -d "*")
+	[ $qval -eq $T32_ILIMIT ] || {
+		echo "ilimit, act:$qval, exp:$T32_ILIMIT"
+		return 1
+	}
+
+	# Re-enable quota enforcement and wait until each slave reports "ug".
+	do_node $node $LCTL conf_param $fsname.quota.mdt=ug
+	cmd="$LCTL get_param -n osd-$fstype.$fsname-MDT0000"
+	cmd=$cmd.quota_slave.enabled
+	wait_update $node "$cmd" "ug" || {
+		echo "Enable mdt quota failed"
+		return 1
+	}
+
+	do_node $node $LCTL conf_param $fsname.quota.ost=ug
+	cmd="$LCTL get_param -n osd-$fstype.$fsname-OST0000"
+	cmd=$cmd.quota_slave.enabled
+	wait_update $node "$cmd" "ug" || {
+		echo "Enable ost quota failed"
+		return 1
+	}
+
+	# Writing T32_BLIMIT kbytes on top of the usage carried over from the
+	# image must exceed the block hard limit, so dd is expected to fail
+	# with -EDQUOT.
+	chmod 0777 $mnt
+	runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \
+		bs=1M count=$(($T32_BLIMIT / 1024)) oflag=sync && {
+		echo "Write succeed, but expect -EDQUOT"
+		return 1
+	}
+	rm -f $mnt/t32_qf_new
+
+	# Creating T32_ILIMIT more files must exceed the inode hard limit.
+	runas -u $T32_QID -g $T32_QID createmany -m $mnt/t32_qf_ \
+		$T32_ILIMIT && {
+		echo "Create succeed, but expect -EDQUOT"
+		return 1
+	}
+	unlinkmany $mnt/t32_qf_ $T32_ILIMIT
+
+	return 0
+}
+
t32_test() {
local tarball=$1
local writeconf=$2
local node=$(facet_active_host $SINGLEMDS)
local r="do_node $node"
local node2=$(facet_active_host mds2)
- local r2="do_node $node2"
local tmp=$TMP/t32
local img_commit
local img_kernel
local img_arch
+ local img_bspace
+ local img_ispace
local fsname=t32fs
local nid=$($r $LCTL list_nids | head -1)
local mopts
local nrpcs_orig
local nrpcs
local list
+ local fstype=$(facet_fstype $SINGLEMDS)
trap 'trap - RETURN; t32_test_cleanup' RETURN
img_commit=$($r cat $tmp/commit)
img_kernel=$($r cat $tmp/kernel)
img_arch=$($r cat $tmp/arch)
+ img_bspace=$($r cat $tmp/bspace)
+ img_ispace=$($r cat $tmp/ispace)
echo "Upgrading from $(basename $tarball), created with:"
echo " Commit: $img_commit"
echo " Kernel: $img_kernel"
}
if [ "$writeconf" ]; then
mopts=loop,writeconf
+ if [ $fstype == "ldiskfs" ]; then
+ $r $TUNEFS --quota $tmp/mdt || {
+ error_noexit "Enable mdt quota feature"
+ return 1
+ }
+ fi
else
mopts=loop,exclude=$fsname-OST0000
fi
shall_cleanup_mdt=true
if [ "$dne_upgrade" != "no" ]; then
- echo "mkfs new MDT...."
- add mds2 $(mkfs_opts mds2 $(mdsdevname 2) $fsname) --reformat \
- $(mdsdevname 2) $(mdsvdevname 2) > /dev/null || {
+ local fs2mdsdev=$(mdsdevname 1_2)
+ local fs2mdsvdev=$(mdsvdevname 1_2)
+
+ echo "mkfs new MDT on ${fs2mdsdev}...."
+ if [ $(facet_fstype mds1) == ldiskfs ]; then
+ mkfsoptions="--mkfsoptions=\\\"-J size=8\\\""
+ fi
+
+ add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
+ $mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || {
error_noexit "Mkfs new MDT failed"
return 1
}
- $r2 $TUNEFS --dryrun $(mdsdevname 2) || {
+ $r $TUNEFS --dryrun $fs2mdsdev || {
error_noexit "tunefs.lustre before mounting the MDT"
return 1
}
- echo "mount new MDT...."
- $r2 mkdir -p $tmp/mnt/mdt1
- $r2 mount -t lustre -o $mopts $(mdsdevname 2) $tmp/mnt/mdt1 || {
+ echo "mount new MDT....$fs2mdsdev"
+ $r mkdir -p $tmp/mnt/mdt1
+ $r mount -t lustre -o $mopts $fs2mdsdev $tmp/mnt/mdt1 || {
error_noexit "mount mdt1 failed"
return 1
}
}
if [ "$writeconf" ]; then
mopts=loop,mgsnode=$nid,$writeconf
+ if [ $fstype == "ldiskfs" ]; then
+ $r $TUNEFS --quota $tmp/ost || {
+ error_noexit "Enable ost quota feature"
+ return 1
+ }
+ fi
else
mopts=loop,mgsnode=$nid
fi
}
if [ "$dne_upgrade" != "no" ]; then
- $r2 $LCTL conf_param \
+ $r $LCTL conf_param \
$fsname-MDT0001.mdc.max_rpcs_in_flight=9 || {
error_noexit "Setting MDT1 \"max_rpcs_in_flight\""
return 1
}
- $r2 $LCTL conf_param $fsname-MDT0001.failover.node=$nid || {
+ $r $LCTL conf_param $fsname-MDT0001.failover.node=$nid || {
error_noexit "Setting MDT1 \"failover.node\""
return 1
}
- $r2 $LCTL conf_param $fsname-MDT0001.lov.stripesize=4M || {
+ $r $LCTL conf_param $fsname-MDT0001.lov.stripesize=4M || {
error_noexit "Setting MDT1 \"lov.stripesize\""
return 1
}
}
shall_cleanup_lustre=true
$LCTL set_param debug="$PTLDEBUG"
+
+ t32_verify_quota $node $fsname $tmp/mnt/lustre || {
+ error_noexit "verify quota failed"
+ return 1
+ }
+
if [ "$dne_upgrade" != "no" ]; then
$LFS mkdir -i 1 $tmp/mnt/lustre/remote_dir || {
error_noexit "set remote dir failed"
popd
fi
+ dd if=/dev/zero of=$tmp/mnt/lustre/tmp_file bs=10k count=10 || {
+ error_noexit "dd failed"
+ return 1
+ }
+ rm -rf $tmp/mnt/lustre/tmp_file || {
+ error_noexit "rm failed"
+ return 1
+ }
+
if $r test -f $tmp/sha1sums; then
# LU-2393 - do both sorts on same node to ensure locale
# is identical
}
shall_cleanup_lustre=false
else
+ if [ "$dne_upgrade" != "no" ]; then
+ $r umount -d $tmp/mnt/mdt1 || {
+ error_noexit "Unmounting the MDT2"
+ return 1
+ }
+ shall_cleanup_mdt1=false
+ fi
+
$r umount -d $tmp/mnt/mdt || {
error_noexit "Unmounting the MDT"
return 1
}
shall_cleanup_mdt=false
+
$r umount -d $tmp/mnt/ost || {
error_noexit "Unmounting the OST"
return 1
run_test 41b "mount mds with --nosvc and --nomgs on first mount"
test_42() { #bug 14693
-	setup
-	check_mount || return 2
-	do_facet mgs $LCTL conf_param lustre.llite.some_wrong_param=10
-	umount_client $MOUNT
-	mount_client $MOUNT || return 1
-	cleanup
-	return 0
+	setup
+	check_mount || error "client was not mounted"
+
+	# an invalid client-side (llite) parameter must not prevent the
+	# client from unmounting and remounting
+	do_facet mgs $LCTL conf_param $FSNAME.llite.some_wrong_param=10
+	umount_client $MOUNT ||
+		error "unmounting client failed with invalid llite param"
+	mount_client $MOUNT ||
+		error "mounting client failed with invalid llite param"
+
+	# an invalid global (sys) parameter must not prevent the servers
+	# from stopping and restarting either
+	do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
+	cleanup || error "stopping $FSNAME failed with invalid sys param"
+	load_modules
+	setup
+	check_mount || error "client was not mounted with invalid sys param"
+	cleanup || error "stopping $FSNAME failed with invalid sys param"
+	return 0
}
-run_test 42 "invalid config param should not prevent client from mounting"
+run_test 42 "allow client/server mount/unmount with invalid config param"
test_43() {
[ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
}
run_test 50g "deactivated OST should not cause panic====================="
+# LU-642
+# An OST deactivated on disk via tunefs (osc.active=0) must start cleanly
+# and become fully usable again after being re-activated with conf_param.
+test_50h() {
+	# prepare MDT/OST, make OSC inactive for OST1
+	[ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
+	do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" ||
+		error "tunefs OST1 failed"
+	start_mds || error "Unable to start MDT"
+	start_ost || error "Unable to start OST1"
+	start_ost2 || error "Unable to start OST2"
+	mount_client $MOUNT || error "client start failed"
+
+	mkdir -p $DIR/$tdir
+
+	# activate OSC for OST1
+	local TEST="$LCTL get_param -n osc.${FSNAME}-OST0000-osc-[!M]*.active"
+	set_conf_param_and_check client \
+		"$TEST" "${FSNAME}-OST0000.osc.active" 1 ||
+		error "Unable to activate OST1"
+
+	mkdir -p $DIR/$tdir/2
+	# stripe over all OSTs starting at index 0, so new files must use
+	# the just re-activated OST0000
+	$LFS setstripe -c -1 -i 0 $DIR/$tdir/2
+	sleep 1 && echo "create a file after OST1 is activated"
+	# create some file
+	createmany -o $DIR/$tdir/2/$tfile-%d 1
+
+	# check OSC import is working
+	stat $DIR/$tdir/2/* >/dev/null 2>&1 ||
+		error "some OSC imports are still not connected"
+
+	# cleanup
+	umount_client $MOUNT || error "Unable to umount client"
+	stop_ost2 || error "Unable to stop OST2"
+	cleanup_nocli
+}
+run_test 50h "LU-642: activate deactivated OST ==="
+
test_51() {
local LOCAL_TIMEOUT=20
local nthrs
shift 4
- setup
check_mount || return 41
# We need to expand $parampat, but it may match multiple parameters, so
load_modules
setup
- cleanup
}
test_53a() {
+ setup
thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads' '16'
+ cleanup
}
run_test 53a "check OSS thread count params"
test_53b() {
- thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' '16'
+ setup
+ local mds=$(do_facet $SINGLEMDS "lctl get_param -N mds.*.*.threads_max \
+ 2>/dev/null")
+ if [ -z "$mds" ]; then
+ #running this on an old MDT
+ thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' 16
+ else
+ thread_sanity MDT $SINGLEMDS 'mds.*.*.' 'mds_num_threads' 16
+ fi
+ cleanup
}
-run_test 53b "check MDT thread count params"
+run_test 53b "check MDS thread count params"
test_54a() {
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
createmany -o $DIR/$tdir/$tfile-%d 100
# make sure that OSTs do not cancel llog cookies before we unmount the MDS
#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
- do_facet mds "lctl set_param fail_loc=0x601"
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x601"
unlinkmany $DIR/$tdir/$tfile-%d 100
- stop mds
- local MNTDIR=$(facet_mntpt mds)
+ stop_mds
+
+ local MNTDIR=$(facet_mntpt $SINGLEMDS)
+ local devname=$(mdsdevname ${SINGLEMDS//mds/})
+ local opts=""
+ if ! do_facet $SINGLEMDS "test -b $devname"; then
+ opts="-o loop"
+ fi
+
# remove all files from the OBJECTS dir
- do_facet mds "mount -t ldiskfs $MDSDEV $MNTDIR"
- do_facet mds "find $MNTDIR/OBJECTS -type f -delete"
- do_facet mds "umount $MNTDIR"
+ do_facet $SINGLEMDS "mount -t ldiskfs $opts $devname $MNTDIR"
+ do_facet $SINGLEMDS "find $MNTDIR/O/1/d* -type f -delete"
+ do_facet $SINGLEMDS "umount $MNTDIR"
# restart MDS with missing llog files
start_mds
do_facet mds "lctl set_param fail_loc=0"
echo "$LFS df"
$LFS df --lazy || error "lfs df failed"
cleanup || return $?
+ #writeconf to remove all ost2 traces for subsequent tests
+ writeconf_or_reformat
}
run_test 64 "check lfs df --lazy "
+test_65() { # LU-2237
+	# Verify that the server recreates the "last_rcvd" file on mount
+	# after it has been removed from the backend filesystem.
+	# Currently, the test is only valid for ldiskfs backend
+	[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
+		skip "non-ldiskfs backend" && return
+
+	local devname=$(mdsdevname ${SINGLEMDS//mds/})
+	local brpt=$(facet_mntpt brpt)
+	local opts=""
+
+	# file-backed targets need "-o loop" for the raw backend mount below
+	if ! do_facet $SINGLEMDS "test -b $devname"; then
+		opts="-o loop"
+	fi
+
+	stop_mds
+	local obj=$(do_facet $SINGLEMDS \
+		"$DEBUGFS -c -R \\\"stat last_rcvd\\\" $devname" |
+		grep Inode)
+	if [ -z "$obj" ]; then
+		# The MDT may be just re-formatted, mount the MDT for the
+		# first time to guarantee the "last_rcvd" file is there.
+		start_mds || error "fail to mount the MDS for the first time"
+		stop_mds
+	fi
+
+	# remove the "last_rcvd" file
+	do_facet $SINGLEMDS "mkdir -p $brpt"
+	do_facet $SINGLEMDS \
+		"mount -t $(facet_fstype $SINGLEMDS) $opts $devname $brpt"
+	do_facet $SINGLEMDS "rm -f ${brpt}/last_rcvd"
+	do_facet $SINGLEMDS "umount $brpt"
+
+	# restart MDS, the "last_rcvd" file should be recreated.
+	start_mds || error "fail to restart the MDS"
+	stop_mds
+	obj=$(do_facet $SINGLEMDS \
+		"$DEBUGFS -c -R \\\"stat last_rcvd\\\" $devname" | grep Inode)
+	[ -n "$obj" ] || error "fail to re-create the last_rcvd"
+}
+run_test 65 "re-create the lost last_rcvd file when server mount"
+
+test_66() {
+	# Exercise "lctl replace_nids": it must refuse to run while targets
+	# or clients are mounted, reject bad arguments/device names without
+	# damaging the config, and successfully rewrite the OST and MDT nids
+	# when only the MGS is up.
+	[[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] ||
+		{ skip "Need MGS version at least 2.3.59"; return 0; }
+
+	setup
+	local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1)
+	local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -1)
+
+	echo "replace_nids should fail if MDS, OSTs and clients are UP"
+	do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
+		error "replace_nids fail"
+
+	umount_client $MOUNT || error "unmounting client failed"
+	echo "replace_nids should fail if MDS and OSTs are UP"
+	do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
+		error "replace_nids fail"
+
+	stop_ost
+	echo "replace_nids should fail if MDS is UP"
+	do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
+		error "replace_nids fail"
+
+	stop_mds || error "stopping mds failed"
+
+	# with a combined MGS/MDS, restart the MDT with only the MGS service
+	# ("-o nosvc") so replace_nids can operate on the config logs
+	if combined_mgs_mds; then
+		start_mdt 1 "-o nosvc" ||
+			error "starting mds with nosvc option failed"
+	fi
+
+	echo "command should accept two parameters"
+	do_facet mgs $LCTL replace_nids $FSNAME-OST0000 &&
+		error "command should accept two params"
+
+	echo "correct device name should be passed"
+	do_facet mgs $LCTL replace_nids $FSNAME-WRONG0000 $OST1_NID &&
+		error "wrong devname"
+
+	echo "wrong nids list should not destroy the system"
+	do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "wrong nids list" &&
+		error "wrong parse"
+
+	echo "replace OST nid"
+	do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID ||
+		error "replace nids failed"
+
+	echo "command should accept two parameters"
+	do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 &&
+		error "command should accept two params"
+
+	echo "wrong nids list should not destroy the system"
+	do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 "wrong nids list" &&
+		error "wrong parse"
+
+	echo "replace MDS nid"
+	do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $MDS_NID ||
+		error "replace nids failed"
+
+	if ! combined_mgs_mds ; then
+		stop_mgs
+	else
+		stop_mds
+	fi
+
+	# remount everything and make sure the fs still works with the
+	# (re)written nids
+	setup_noconfig
+	check_mount || error "error after nid replace"
+	cleanup || error "cleanup failed"
+	reformat
+}
+run_test 66 "replace nids"
+
test_70a() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
local MDTIDX=1
}
run_test 71e "start OST0, MDT1, OST1, MDT0"
+test_72() { #LU-2634
+	# Verify that ldiskfs fast symlinks work on an MDT formatted with
+	# the "extents" feature enabled, and that e2fsck is clean afterwards.
+	local mdsdev=$(mdsdevname 1)
+	local ostdev=$(ostdevname 1)
+	local fn=3
+
+	[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
+		skip "ldiskfs only test" && return
+
+	#tune MDT with "-O extents"
+
+	for num in $(seq $MDSCOUNT); do
+		add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
+			--reformat $(mdsdevname $num) $(mdsvdevname $num) ||
+			error "add mds $num failed"
+		$TUNE2FS -O extents $(mdsdevname $num)
+	done
+
+	add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev ||
+		error "add $ostdev failed"
+	start_mgsmds || error "start mds failed"
+	start_ost || error "start ost failed"
+	mount_client $MOUNT || error "mount client failed"
+
+	#create some short symlinks
+	mkdir -p $DIR/$tdir
+	createmany -o $DIR/$tdir/$tfile-%d $fn
+	echo "create $fn short symlinks"
+	# createmany numbers files from 0, so link to 0..fn-1; the previous
+	# "seq -w 1 $fn" left the last symlink pointing at a missing file
+	for i in $(seq 0 $((fn - 1))); do
+		ln -s $DIR/$tdir/$tfile-$i $MOUNT/$tfile-$i
+	done
+	ls -al $MOUNT
+
+	#umount
+	umount_client $MOUNT || error "umount client failed"
+	stop_mds || error "stop mds failed"
+	stop_ost || error "stop ost failed"
+
+	#run e2fsck on the MDT in read-only mode; any inconsistency fails
+	run_e2fsck $(facet_active_host $SINGLEMDS) $mdsdev "-n"
+}
+run_test 72 "test fast symlink with extents flag enabled"
+
+test_73() { #LU-3006
+	# A --failnode added by tunefs.lustre must be propagated from the
+	# on-disk mount data into the client's failover nid list.
+	load_modules
+	do_facet ost1 "$TUNEFS --failnode=1.2.3.4@tcp $(ostdevname 1)" ||
+		error "1st tunefs failed"
+	start_mgsmds || error "start mds failed"
+	start_ost || error "start ost failed"
+	mount_client $MOUNT || error "mount client failed"
+	# use $LCTL like the rest of this script instead of relying on PATH
+	$LCTL get_param -n osc.*OST0000-osc-[^M]*.import | grep failover_nids |
+		grep 1.2.3.4@tcp || error "failover nids haven't changed"
+	umount_client $MOUNT || error "umount client failed"
+	stopall
+	reformat
+}
+run_test 73 "failnode to update from mountdata properly"
+
+test_74() { # LU-1606
+	# Every sample program in the client API directory must compile and
+	# link against liblustreapi.
+	for TESTPROG in $LUSTRE_TESTS_API_DIR/*.c; do
+		# compile the file selected by the loop; the previous version
+		# ignored $TESTPROG and built simple_test.c every iteration
+		gcc -Wall -Werror $TESTPROG \
+			-I$LUSTRE/include \
+			-L$LUSTRE/utils -llustreapi ||
+			error "client api broken"
+	done
+	cleanup || return $?
+}
+run_test 74 "Lustre client api program can compile and link"
+
if ! combined_mgs_mds ; then
stop mgs
fi