# bug number for skipped test:
# a tool to create lustre filesystem images
ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
+if $SHARED_KEY; then
+# bug number for skipped tests: LU-9795 (all below)
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 0 31 32a 32d 35a"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 53a 53b 54b 76a 76b"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 76c 76d 78 103"
+fi
SRCDIR=$(dirname $0)
PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+get_lustre_env
# use small MDS + OST size to speed formatting time
# do not use too small MDSSIZE/OSTSIZE, which affect the default journal size
ALWAYS_EXCEPT="$ALWAYS_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
+[ $MDSCOUNT -ge 2 ] &&
+# bug number for skipped test: LU-11915
+ ALWAYS_EXCEPT+=" 110"
+# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
+
init_logging
#
require_dsh_mds || exit 0
require_dsh_ost || exit 0
-# 8 22 (min)"
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="45 69"
+# 8 22 40 165 (min)
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="45 69 106 111"
assert_DIR
echo "setup double mount lustre success"
}
# generate_name LEN
# Print a pseudo-random string of exactly LEN alphanumeric
# ([a-zA-Z0-9]) characters, sourced from /dev/urandom.
# Fixes: useless use of cat (redirect instead) and quotes "$1"
# so an accidental empty/whitespace argument fails loudly in
# fold rather than silently changing the fold width.
generate_name() {
	tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w "$1" | head -n 1
}
+
build_test_filter
if [ "$ONLY" == "setup" ]; then
start_mds || error "Unable to start MDS"
echo "Requeue thread should have started: "
ps -e | grep ll_cfg_requeue
- set_conf_param_and_check ost1 \
- "$LCTL get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" \
- "$FSNAME-OST0000.ost.client_cache_seconds" ||
- error "set_conf_param_and_check ost1 failed"
+ set_persistent_param_and_check ost1 \
+ "obdfilter.$FSNAME-OST0000.client_cache_seconds" \
+ "$FSNAME-OST0000.ost.client_cache_seconds"
cleanup_nocli || error "cleanup_nocli failed with rc $?"
}
run_test 27a "Reacquire MGS lock if OST started first"
awk '($3 ~ "mdt" && $4 ~ "MDT0000") { print $4 }')
facet_failover $SINGLEMDS
- set_conf_param_and_check $SINGLEMDS \
- "$LCTL get_param -n mdt.$device.identity_acquire_expire" \
- "$device.mdt.identity_acquire_expire" ||
- error "set_conf_param_and_check $SINGLEMDS failed"
- set_conf_param_and_check client \
- "$LCTL get_param -n mdc.$device-mdc-*.max_rpcs_in_flight"\
- "$device.mdc.max_rpcs_in_flight" ||
- error "set_conf_param_and_check client failed"
+ set_persistent_param_and_check $SINGLEMDS \
+ "mdt.$device.identity_acquire_expire" \
+ "$device.mdt.identity_acquire_expire"
+ set_persistent_param_and_check client \
+ "mdc.$device-mdc-*.max_rpcs_in_flight" \
+ "$device.mdc.max_rpcs_in_flight"
check_mount
cleanup || error "cleanup failed with $?"
}
# test_28A: verify a permanently-set parameter survives a client
# remount, then restore the original value.
# Reconstructed to runnable shell from interleaved diff residue:
# only the post-patch ('+' and context) lines are kept; the
# superseded set_conf_param_and_check lines are dropped.
test_28A() { # was test_28
	setup
	# TEST is the get_param pattern; PARAM is the persistent name.
	TEST="llite.$FSNAME-*.max_read_ahead_whole_mb"
	PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
	ORIG=$($LCTL get_param -n $TEST)
	FINAL=$(($ORIG + 1))
	# set_persistent_param_and_check error()s internally on failure,
	# so no explicit "|| error" is needed here.
	set_persistent_param_and_check client "$TEST" "$PARAM" $FINAL
	FINAL=$(($FINAL + 1))
	set_persistent_param_and_check client "$TEST" "$PARAM" $FINAL
	# Remount the client and confirm the new value persisted.
	umount_client $MOUNT || error "umount_client $MOUNT failed"
	mount_client $MOUNT || error "mount_client $MOUNT failed"
	RESULT=$($LCTL get_param -n $TEST)
	if [ $RESULT -ne $FINAL ]; then
		error "New config not seen: wanted $FINAL got $RESULT"
	else
		echo "New config success: got $RESULT"
	fi
	# Restore the pre-test value so later tests see the default.
	set_persistent_param_and_check client "$TEST" "$PARAM" $ORIG
	cleanup || error "cleanup failed with rc $?"
}
run_test 28A "permanent parameter setting"
setup
# In this test we will set three kinds of proc parameters with
- # lctl conf_param:
- # 1. the ones moved from the OFD to the OSD, and only their
- # symlinks kept in obdfilter
- # 2. non-symlink ones in the OFD
- # 3. non-symlink ones in the OSD
+ # lctl set_param -P or lctl conf_param:
+ # 1. non-symlink ones in the OFD
+ # 2. non-symlink ones in the OSD
# Check 1.
- # prepare a symlink parameter in the OFD
- name="writethrough_cache_enable"
- param="$device.ost.$name"
- cmd="$LCTL get_param -n obdfilter.$device.$name"
-
- # conf_param the symlink parameter in the OFD
- old=$(do_facet ost1 $cmd)
- new=$(((old + 1) % 2))
- set_conf_param_and_check ost1 "$cmd" "$param" $new ||
- error "lctl conf_param $device.ost.$param=$new failed"
-
- # conf_param the target parameter in the OSD
- param="$device.osd.$name"
- cmd="$LCTL get_param -n osd-*.$device.$name"
- set_conf_param_and_check ost1 "$cmd" "$param" $old ||
- error "lctl conf_param $device.osd.$param=$old failed"
-
- # Check 2.
# prepare a non-symlink parameter in the OFD
name="client_cache_seconds"
param="$device.ost.$name"
- cmd="$LCTL get_param -n obdfilter.$device.$name"
+ cmd="obdfilter.$device.$name"
- # conf_param the parameter in the OFD
- old=$(do_facet ost1 $cmd)
+ # permanently setting the parameter in the OFD
+ old=$(do_facet ost1 $LCTL get_param -n $cmd)
new=$((old * 2))
- set_conf_param_and_check ost1 "$cmd" "$param" $new ||
- error "lctl conf_param $device.ost.$param=$new failed"
- set_conf_param_and_check ost1 "$cmd" "$param" $old ||
- error "lctl conf_param $device.ost.$param=$old failed"
+ set_persistent_param_and_check ost1 "$cmd" "$param" $new
+ set_persistent_param_and_check ost1 "$cmd" "$param" $old
- # Check 3.
+ # Check 2.
# prepare a non-symlink parameter in the OSD
name="auto_scrub"
param="$device.osd.$name"
- cmd="$LCTL get_param -n osd-*.$device.$name"
+ cmd="osd-*.$device.$name"
# conf_param the parameter in the OSD
- old=$(do_facet ost1 $cmd)
+ old=$(do_facet ost1 $LCTL get_param -n $cmd)
new=$(((old + 1) % 2))
- set_conf_param_and_check ost1 "$cmd" "$param" $new ||
- error "lctl conf_param $device.osd.$param=$new failed"
- set_conf_param_and_check ost1 "$cmd" "$param" $old ||
- error "lctl conf_param $device.osd.$param=$old failed"
+ set_persistent_param_and_check ost1 "$cmd" "$param" $new
+ set_persistent_param_and_check ost1 "$cmd" "$param" $old
cleanup || error "cleanup failed with $?"
}
-run_test 28a "set symlink parameters permanently with conf_param"
+run_test 28a "set symlink parameters permanently with lctl"
test_29() {
[ "$OSTCOUNT" -lt "2" ] && skip_env "needs >= 2 OSTs" && return
sleep 10
local PARAM="$FSNAME-OST0001.osc.active"
- local PROC_ACT="osc.$FSNAME-OST0001-osc-[^M]*.active"
- local PROC_UUID="osc.$FSNAME-OST0001-osc-[^M]*.ost_server_uuid"
+ # With lctl set_param -P the value $PROC_ACT will be sent to
+ # all nodes. The [!M] filter out the ability to set active
+ # on the MDS servers which is tested with wait_osp_* below.
+ # For ost_server_uuid that only exist on client so filtering
+ # is safe.
+ local PROC_ACT="osc.$FSNAME-OST0001-osc-*.active"
+ local PROC_UUID="osc.$FSNAME-OST0001-osc-[!M]*.ost_server_uuid"
ACTV=$($LCTL get_param -n $PROC_ACT)
DEAC=$((1 - $ACTV))
- set_conf_param_and_check client \
- "$LCTL get_param -n $PROC_ACT" "$PARAM" $DEAC ||
- error "set_conf_param_and_check client failed"
+ set_persistent_param_and_check client $PROC_ACT $PARAM $DEAC
# also check ost_server_uuid status
RESULT=$($LCTL get_param -n $PROC_UUID | grep DEACTIV)
if [ -z "$RESULT" ]; then
# test new client starts deactivated
umount_client $MOUNT || error "umount_client $MOUNT failed"
mount_client $MOUNT || error "mount_client $MOUNT failed"
- RESULT=$($LCTL get_param -n $PROC_UUID | grep DEACTIV | grep NEW)
- if [ -z "$RESULT" ]; then
- error "New client start active: $(lctl get_param -n $PROC_UUID)"
- else
- echo "New client success: got $RESULT"
- fi
+
+ # the 2nd and 3rd field of ost_server_uuid do not update at the same
+ # time when using lctl set_param -P
+ wait_update_facet client \
+ "$LCTL get_param -n $PROC_UUID | awk '{print \\\$3 }'" \
+ "DEACTIVATED" ||
+ error "New client start active: $($LCTL get_param -n $PROC_UUID)"
+
+ echo "New client success: got '$($LCTL get_param -n $PROC_UUID)'"
# make sure it reactivates
- set_conf_param_and_check client \
- "$LCTL get_param -n $PROC_ACT" "$PARAM" $ACTV ||
- error "lctl get_param $PROC_ACT $PARAM $ACTV failed"
+ set_persistent_param_and_check client $PROC_ACT $PARAM $ACTV
umount_client $MOUNT
stop_ost2 || error "Unable to stop OST2"
setup
echo Big config llog
- TEST="$LCTL get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
- ORIG=$($TEST)
+ TEST="llite.$FSNAME-*.max_read_ahead_whole_mb"
+ ORIG=$($LCTL get_param -n $TEST)
LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5)
for i in ${LIST[@]}; do
- set_conf_param_and_check client "$TEST" \
- "$FSNAME.llite.max_read_ahead_whole_mb" $i ||
- error "Set $FSNAME.llite.max_read_ahead_whole_mb failed"
+ set_persistent_param_and_check client "$TEST" \
+ "$FSNAME.llite.max_read_ahead_whole_mb" $i
done
# make sure client restart still works
umount_client $MOUNT
mount_client $MOUNT || error "mount_client $MOUNT failed"
- [ "$($TEST)" -ne "$i" ] &&
+ [ "$($LCTL get_param -n $TEST)" -ne "$i" ] &&
error "Param didn't stick across restart $($TEST) != $i"
pass
echo Erase parameter setting
- do_facet mgs "$LCTL conf_param \
- -d $FSNAME.llite.max_read_ahead_whole_mb" ||
- error "Erase param $FSNAME.llite.max_read_ahead_whole_mb failed"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs "$PERM_CMD -d $TEST" ||
+ error "Erase param $TEST failed"
+ else
+ do_facet mgs "$PERM_CMD \
+ -d $FSNAME.llite.max_read_ahead_whole_mb" ||
+ error "Erase param $FSNAME.llite.max_read_ahead_whole_mb failed"
+ fi
umount_client $MOUNT
mount_client $MOUNT || error "mount_client $MOUNT failed"
- FINAL=$($TEST)
+ FINAL=$($LCTL get_param -n $TEST)
echo "deleted (default) value=$FINAL, orig=$ORIG"
# assumes this parameter started at the default value
[ "$FINAL" -eq "$ORIG" ] || fail "Deleted value=$FINAL, orig=$ORIG"
cleanup || error "cleanup failed with rc $?"
}
-run_test 30a "Big config llog and conf_param deletion"
+run_test 30a "Big config llog and permanent parameter deletion"
test_30b() {
setup
local TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
- set_conf_param_and_check client "$TEST" \
- "$FSNAME-OST0000.failover.node" $NEW ||
- error "didn't add failover nid $NEW"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ PARAM="osc.$FSNAME-OST0000-osc-[^M]*.import"
+ echo "Setting $PARAM from $TEST to $NEW"
+ do_facet mgs "$PERM_CMD $PARAM='connection=$NEW'" ||
+ error "$PERM_CMD $PARAM failed"
+ else
+ PARAM="$FSNAME-OST0000.failover.node"
+ echo "Setting $PARAM from $TEST to $NEW"
+ do_facet mgs "$PERM_CMD $PARAM='$NEW'" ||
+ error "$PARAM $PARAM failed"
+ fi
+ wait_update_facet client "$TEST" "$NEW" ||
+ error "check $PARAM failed!"
+
local NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
grep failover_nids)
- echo $NIDS
local NIDCOUNT=$(echo "$NIDS" | wc -w)
echo "should have $((orignidcount + 1)) entries \
in failover nids string, have $NIDCOUNT"
[ $NIDCOUNT -eq $((orignidcount + 1)) ] ||
error "Failover nid not added"
- do_facet mgs "$LCTL conf_param -d $FSNAME-OST0000.failover.node" ||
- error "conf_param delete failed"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs "$PERM_CMD -d osc.$FSNAME-OST0000-osc-*.import"
+ else
+ do_facet mgs "$PERM_CMD -d $FSNAME-OST0000.failover.node" ||
+ error "$PERM_CMD delete failed"
+ fi
umount_client $MOUNT
mount_client $MOUNT || error "mount_client $MOUNT failed"
NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
grep failover_nids)
- echo $NIDS
NIDCOUNT=$(echo "$NIDS" | wc -w)
echo "only $orignidcount final entries should remain \
in failover nids string, have $NIDCOUNT"
}
t32_verify_quota() {
- local node=$1
+ local facet=$1
local fsname=$2
local mnt=$3
local fstype=$(facet_fstype $SINGLEMDS)
# verification in 32b. The object quota usage should be accurate after
# zfs-0.7.0 is released.
[ $fstype == "zfs" ] && {
- local zfs_version=$(do_node $node cat /sys/module/zfs/version)
+ local zfs_version=$(do_facet $facet cat /sys/module/zfs/version)
[ $(version_code $zfs_version) -lt $(version_code 0.7.0) ] && {
echo "Skip quota verify for zfs: $zfs_version"
return 1
}
- do_node $node $LCTL conf_param $fsname.quota.mdt=ug
- cmd="$LCTL get_param -n osd-$fstype.$fsname-MDT0000"
- cmd=$cmd.quota_slave.enabled
- wait_update $node "$cmd" "ug" || {
- echo "Enable mdt quota failed"
- return 1
- }
+ set_persistent_param_and_check $facet \
+ "osd-$fstype.$fsname-MDT0000.quota_slave.enabled" \
+ "$fsname.quota.mdt" ug
- do_node $node $LCTL conf_param $fsname.quota.ost=ug
- cmd="$LCTL get_param -n osd-$fstype.$fsname-OST0000"
- cmd=$cmd.quota_slave.enabled
- wait_update $node "$cmd" "ug" || {
- echo "Enable ost quota failed"
- return 1
- }
+ set_persistent_param_and_check $facet \
+ "osd-$fstype.$fsname-OST0000.quota_slave.enabled" \
+ "$fsname.quota.ost" ug
chmod 0777 $mnt
runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \
return 1
fi
- $r $LCTL conf_param $fsname-OST0000.osc.max_dirty_mb=15 || {
- error_noexit "Setting \"max_dirty_mb\""
- return 1
- }
- $r $LCTL conf_param $fsname-OST0000.failover.node=$nid || {
- error_noexit "Setting OST \"failover.node\""
- return 1
- }
- $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=9 || {
- error_noexit "Setting \"max_rpcs_in_flight\""
- return 1
- }
- $r $LCTL conf_param $fsname-MDT0000.failover.node=$nid || {
- error_noexit "Setting MDT \"failover.node\""
- return 1
- }
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ $r $PERM_CMD osc.$fsname-OST0000*.import=connection=$nid || {
+ error_noexit "Setting OST \"failover.node\""
+ return 1
+ }
+ $r $PERM_CMD mdc.$fsname-MDT0000*.import=connection=$nid || {
+ error_noexit "Setting MDT \"failover.node\""
+ return 1
+ }
+ $r $PERM_CMD osc.$fsname-OST0000-*.max_dirty_mb=15 || {
+ error_noexit "Setting \"max_dirty_mb\""
+ return 1
+ }
+ $r $PERM_CMD mdc.$fsname-MDT0000-*.max_rpcs_in_flight=9 || {
+ error_noexit "Setting \"max_rpcs_in_flight\""
+ return 1
+ }
+ $r $PERM_CMD lov.$fsname-MDT0000-*.stripesize=4M || {
+ error_noexit "Setting \"lov.stripesize\""
+ return 1
+ }
+ $r $PERM_CMD mdd.$fsname-MDT0000-*.atime_diff=70 || {
+ error_noexit "Setting \"mdd.atime_diff\""
+ return 1
+ }
+ else
+ $r $PERM_CMD $fsname-OST0000.failover.node=$nid || {
+ error_noexit "Setting OST \"failover.node\""
+ return 1
+ }
+
+ $r $PERM_CMD $fsname-MDT0000.failover.node=$nid || {
+ error_noexit "Setting MDT \"failover.node\""
+ return 1
+ }
+
+ $r $PERM_CMD $fsname-OST0000.osc.max_dirty_mb=15 || {
+ error_noexit "Setting \"max_dirty_mb\""
+ return 1
+ }
+ $r $PERM_CMD $fsname-MDT0000.mdc.max_rpcs_in_flight=9 || {
+ error_noexit "Setting \"max_rpcs_in_flight\""
+ return 1
+ }
+ $r $PERM_CMD $fsname-MDT0000.lov.stripesize=4M || {
+ error_noexit "Setting \"lov.stripesize\""
+ return 1
+ }
+ $r $PERM_CMD $fsname-MDT0000.mdd.atime_diff=70 || {
+ error_noexit "Setting \"mdd.atime_diff\""
+ return 1
+ }
+ fi
+
$r $LCTL pool_new $fsname.interop || {
error_noexit "Setting \"interop\""
return 1
}
- $r $LCTL conf_param $fsname-MDT0000.lov.stripesize=4M || {
- error_noexit "Setting \"lov.stripesize\""
- return 1
- }
- $r $LCTL conf_param $fsname-MDT0000.mdd.atime_diff=70 || {
- error_noexit "Setting \"mdd.atime_diff\""
- return 1
- }
if [ "$ff_convert" != "no" -a $(facet_fstype ost1) == "ldiskfs" ]; then
$r $LCTL lfsck_start -M $fsname-OST0000 || {
fi
if [ "$dne_upgrade" != "no" ]; then
- $r $LCTL conf_param \
- $fsname-MDT0001.mdc.max_rpcs_in_flight=9 || {
- error_noexit "Setting MDT1 \"max_rpcs_in_flight\""
- return 1
- }
- $r $LCTL conf_param $fsname-MDT0001.failover.node=$nid || {
- error_noexit "Setting MDT1 \"failover.node\""
- return 1
- }
- $r $LCTL conf_param $fsname-MDT0001.lov.stripesize=4M || {
- error_noexit "Setting MDT1 \"lov.stripesize\""
- return 1
- }
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ $r $PERM_CMD mdc.$fsname-MDT0001*.import=connection=$nid || {
+ error_noexit "Setting MDT1 \"failover.node\""
+ return 1
+ }
+
+ $r $PERM_CMD mdc.$fsname-MDT0001-*.max_rpcs_in_flight=9 || {
+ error_noexit "Setting MDT1 \"max_rpcs_in_flight\""
+ return 1
+ }
+ $r $PERM_CMD lov.$fsname-MDT0001-*.stripesize=4M || {
+ error_noexit "Setting MDT1 \"lov.stripesize\""
+ return 1
+ }
+ else
+ $r $PERM_CMD $fsname-MDT0001.failover.node=$nid || {
+ error_noexit "Setting MDT1 \"failover.node\""
+ return 1
+ }
+ $r $PERM_CMD $fsname-MDT0001.mdc.max_rpcs_in_flight=9 || {
+ error_noexit "Setting MDT1 \"max_rpcs_in_flight\""
+ return 1
+ }
+ $r $PERM_CMD $fsname-MDT0001.lov.stripesize=4M || {
+ error_noexit "Setting MDT1 \"lov.stripesize\""
+ return 1
+ }
+ fi
fi
if [ "$writeconf" ]; then
shall_cleanup_lustre=true
$r $LCTL set_param debug="$PTLDEBUG"
+ # Leave re-enabling this to a separate patch for LU-11558
+ # t32_verify_quota $SINGLEMDS $fsname $tmp/mnt/lustre || {
+ # error_noexit "verify quota failed"
+ # return 1
+ #}
+
if $r test -f $tmp/list; then
#
# There is not a Test Framework API to copy files to or
}
rm $tmp/mnt/lustre/dom
- $r $LCTL get_param -n lod.*MDT0000*.dom_stripesize || {
- error_noexit "Getting \"dom_stripesize\""
- return 1
- }
- $r $LCTL conf_param \
- $fsname-MDT0000.lod.dom_stripesize=0 || {
+ set_persistent_param_and_check mds \
+ "lod.*MDT0000*.dom_stripesize" \
+ "$fsname-MDT0000.lod.dom_stripesize" 0 || {
error_noexit "Changing \"dom_stripesize\""
return 1
}
- wait_update $(facet_host mds) "$LCTL get_param \
- -n lod.*MDT0000*.dom_stripesize" 0 || {
- error_noexit "Verifying \"dom_stripesize\""
- return 1
- }
fi
if [ "$dne_upgrade" != "no" ]; then
return 1
}
nrpcs=$((nrpcs_orig + 5))
- $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=$nrpcs || {
+
+ set_persistent_param_and_check client \
+ "mdc.$fsname-MDT0000*.max_rpcs_in_flight" \
+ "$fsname-MDT0000.mdc.max_rpcs_in_flight" $nrpcs || {
error_noexit "Changing \"max_rpcs_in_flight\""
return 1
}
- wait_update $HOSTNAME "$LCTL get_param \
- -n mdc.*MDT0000*.max_rpcs_in_flight" $nrpcs || {
- error_noexit "Verifying \"max_rpcs_in_flight\""
- return 1
- }
umount $tmp/mnt/lustre || {
error_noexit "Unmounting the client"
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
start fs2ost $fs2ostdev $OST_MOUNT_OPTS
- do_facet mgs "$LCTL conf_param $FSNAME2.sys.timeout=200" ||
- error "$LCTL conf_param $FSNAME2.sys.timeout=200 failed"
+
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs "$PERM_CMD timeout=200" ||
+ error "$PERM_CMD timeout=200 failed"
+ else
+ do_facet mgs "$PERM_CMD $FSNAME2.sys.timeout=200" ||
+ error "$PERM_CMD $FSNAME2.sys.timeout=200 failed"
+ fi
mkdir -p $MOUNT2 || error "mkdir $MOUNT2 failed"
$MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || error "$MOUNT_CMD failed"
echo "ok."
cp /etc/hosts $MOUNT2/ || error "copy /etc/hosts $MOUNT2/ failed"
- $GETSTRIPE $MOUNT2/hosts || error "$GETSTRIPE $MOUNT2/hosts failed"
+ $LFS getstripe $MOUNT2/hosts ||
+ error "$LFS getstripe $MOUNT2/hosts failed"
umount $MOUNT2
stop fs2ost -f
FAKENID="127.0.0.2"
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
- do_facet mgs "$LCTL conf_param \
- ${device}.failover.node=$(h2nettype $FAKENID)" ||
- error "Setting ${device}.failover.node=\
- $(h2nettype $FAKENID) failed."
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs "$PERM_CMD \
+ mdc.*${device}*.import=connection=$(h2nettype $FAKENID)" ||
+ error "Setting mdc.*${device}*.import=connection=\
+ $(h2nettype $FAKENID) failed."
+ else
+ do_facet mgs "$PERM_CMD \
+ ${device}.failover.node=$(h2nettype $FAKENID)" ||
+ error "Setting ${device}.failover.node=\
+ $(h2nettype $FAKENID) failed."
+ fi
log "Wait for RECONNECT_INTERVAL seconds (10s)"
sleep 10
FAKENID="127.0.0.2"
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
- do_facet mgs "$LCTL conf_param \
- ${device}.failover.node=$(h2nettype $FAKENID)" ||
- error "Set ${device}.failover.node=\
- $(h2nettype $FAKENID) failed"
+
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs "$PERM_CMD \
+ mdc.*${device}*.import=connection=$(h2nettype $FAKENID)" ||
+ error "Set mdc.*${device}*.import=connection=\
+ $(h2nettype $FAKENID) failed"
+ else
+ do_facet mgs "$PERM_CMD \
+ ${device}.failover.node=$(h2nettype $FAKENID)" ||
+ error "Set ${device}.failover.node=\
+ $(h2nettype $FAKENID) failed"
+ fi
local at_max_saved=0
# adaptive timeouts may prevent seeing the issue
run_test 41c "concurrent mounts of MDT/OST should all fail but one"
test_42() { #bug 14693
+ local PARAM
+
setup
check_mount || error "client was not mounted"
- do_facet mgs $LCTL conf_param $FSNAME.llite.some_wrong_param=10
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ PARAM="llite.$FSNAME-*.some_wrong_param"
+ else
+ PARAM="$FSNAME.llite.some_wrong_param"
+ fi
+
+ do_facet mgs $PERM_CMD $PARAM=10
umount_client $MOUNT ||
error "unmounting client failed with invalid llite param"
mount_client $MOUNT ||
error "mounting client failed with invalid llite param"
- do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
+ do_facet mgs $PERM_CMD $PARAM=20
cleanup || error "stopping $FSNAME failed with invalid sys param"
setup
check_mount || error "client was not mounted with invalid sys param"
setup
chmod ugo+x $DIR || error "chmod 0 failed"
- set_conf_param_and_check mds1 \
- "$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+ set_persistent_param_and_check mds1 \
+ "mdt.$FSNAME-MDT0000.root_squash" \
"$FSNAME.mdt.root_squash" \
"0:0"
wait_update $HOSTNAME \
"$LCTL get_param -n llite.${FSNAME}*.root_squash" \
"0:0" ||
error "check llite root_squash failed!"
- set_conf_param_and_check mds1 \
- "$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+ set_persistent_param_and_check mds1 \
+ "mdt.$FSNAME-MDT0000.nosquash_nids" \
"$FSNAME.mdt.nosquash_nids" \
"NONE"
wait_update $HOSTNAME \
"NONE" ||
error "check llite nosquash_nids failed!"
- #
- # create set of test files
- #
- echo "111" > $DIR/$tfile-userfile || error "write 1 failed"
- chmod go-rw $DIR/$tfile-userfile || error "chmod 1 failed"
- chown $RUNAS_ID.$RUNAS_ID $DIR/$tfile-userfile || error "chown failed"
+ #
+ # create set of test files
+ #
+ echo "111" > $DIR/$tfile-userfile || error "write 1 failed"
+ chmod go-rw $DIR/$tfile-userfile || error "chmod 1 failed"
+ chown $RUNAS_ID.$RUNAS_ID $DIR/$tfile-userfile || error "chown failed"
- echo "222" > $DIR/$tfile-rootfile || error "write 2 failed"
- chmod go-rw $DIR/$tfile-rootfile || error "chmod 2 faield"
+ echo "222" > $DIR/$tfile-rootfile || error "write 2 failed"
+ chmod go-rw $DIR/$tfile-rootfile || error "chmod 2 faield"
mkdir $DIR/$tdir-rootdir || error "mkdir failed"
chmod go-rwx $DIR/$tdir-rootdir || error "chmod 3 failed"
# set root squash UID:GID to RUNAS_ID
# root should be able to access only files owned by RUNAS_ID
#
- set_conf_param_and_check mds1 \
- "$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+ set_persistent_param_and_check mds1 \
+ "mdt.$FSNAME-MDT0000.root_squash" \
"$FSNAME.mdt.root_squash" \
"$RUNAS_ID:$RUNAS_ID"
wait_update $HOSTNAME \
local NIDLIST=$($LCTL list_nids all | tr '\n' ' ')
NIDLIST="2@gni $NIDLIST 192.168.0.[2,10]@tcp"
NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
- set_conf_param_and_check mds1 \
- "$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+ set_persistent_param_and_check mds1 \
+ "mdt.$FSNAME-MDT0000.nosquash_nids" \
"$FSNAME-MDTall.mdt.nosquash_nids" \
"$NIDLIST"
wait_update $HOSTNAME \
#second client see all ost's
mount_client $MOUNT2 || error "mount_client failed"
- $SETSTRIPE -c -1 $MOUNT2 || error "$SETSTRIPE -c -1 $MOUNT2 failed"
- $GETSTRIPE $MOUNT2 || error "$GETSTRIPE $MOUNT2 failed"
+ $LFS setstripe -c -1 $MOUNT2 ||
+ error "$LFS setstripe -c -1 $MOUNT2 failed"
+ $LFS getstripe $MOUNT2 || error "$LFS getstripe $MOUNT2 failed"
echo "ok" > $MOUNT2/widestripe
- $GETSTRIPE $MOUNT2/widestripe ||
- error "$GETSTRIPE $MOUNT2/widestripe failed"
+ $LFS getstripe $MOUNT2/widestripe ||
+ error "$LFS getstripe $MOUNT2/widestripe failed"
# fill acl buffer for avoid expand lsm to them
awk -F : '{if (FNR < 25) { print "u:"$1":rwx" }}' /etc/passwd |
while read acl; do
setup_noconfig
check_mount || error "check_mount failed"
- $SETSTRIPE -c -1 $MOUNT || error "$SETSTRIPE -c -1 $MOUNT failed"
- $GETSTRIPE $MOUNT || error "$GETSTRIPE $MOUNT failed"
+ $LFS setstripe -c -1 $MOUNT ||
+ error "$LFS setstripe -c -1 $MOUNT failed"
+ $LFS getstripe $MOUNT || error "$LFS getstripe $MOUNT failed"
echo "ok" > $MOUNT/widestripe
- $GETSTRIPE $MOUNT/widestripe ||
- error "$GETSTRIPE $MOUNT/widestripe failed"
+ $LFS getstripe $MOUNT/widestripe ||
+ error "$LFS getstripe $MOUNT/widestripe failed"
# In the future, we may introduce more EAs, such as selinux, enlarged
# LOV EA, and so on. These EA will use some EA space that is shared by
run_test 49b "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre"
# lazystatfs MNT
# Exercise both statfs(2) (via multiop) and "lfs df" against MNT
# while some targets may be down; return non-zero if either hangs
# or fails.  Reconstructed to runnable shell from interleaved diff
# residue: only the post-patch ('+' and context) lines are kept.
lazystatfs() {
	# wait long enough to exceed OBD_STATFS_CACHE_SECONDS = 1
	sleep 2
	# Test both statfs and lfs df and fail if either one fails
	multiop_bg_pause $1 f_
	RC=$?
	PID=$!
	killall -USR1 multiop
	[ $RC -ne 0 ] && log "lazystatfs multiop failed"
	wait $PID || { RC=$?; log "multiop return error "; }

	# wait long enough to exceed OBD_STATFS_CACHE_SECONDS = 1
	sleep 2
	$LFS df -l &
	PID=$!
	sleep 5
	# If "lfs df" is still running after 5s it hung on a dead
	# target: kill it and report failure.
	if kill -s 0 $PID; then
		RC=1
		kill -s 9 $PID
		log "lazystatfs lfs df failed to complete in 5s"
	fi

	return $RC
}
# Wait for client to detect down OST
stop_ost || error "Unable to stop OST1"
- wait_osc_import_state mds ost DISCONN
+ wait_osc_import_state client ost DISCONN
+ $LCTL dl
+ log "OSCs should all be DISCONN"
lazystatfs $MOUNT || error "lazystatfs should not return EIO"
wait_osc_import_state mds ost2 FULL
wait_osc_import_ready client ost2
- local PARAM="${FSNAME}-OST0001.osc.active"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ local PARAM="osc.${FSNAME}-OST0001*.active"
+ else
+ local PARAM="${FSNAME}-OST0001.osc.active"
+ fi
- $SETSTRIPE -c -1 $DIR/$tfile || error "$SETSTRIPE failed"
- do_facet mgs $LCTL conf_param $PARAM=0 ||
- error "Unable to deactivate OST"
+ $LFS setstripe -c -1 $DIR/$tfile || error "$LFS setstripe failed"
+ do_facet mgs $PERM_CMD $PARAM=0 || error "Unable to deactivate OST"
umount_client $MOUNT || error "Unable to unmount client"
mount_client $MOUNT || error "Unable to mount client"
# This df should not cause a panic
df -k $MOUNT
- do_facet mgs $LCTL conf_param $PARAM=1 || error "Unable to activate OST"
+ do_facet mgs $PERM_CMD $PARAM=1 || error "Unable to activate OST"
rm -f $DIR/$tfile || error "unable to remove file $DIR/$tfile"
umount_client $MOUNT || error "Unable to unmount client"
stop_ost2 || error "Unable to stop OST2"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
# activatate OSC for OST1
- local TEST="$LCTL get_param -n osc.${FSNAME}-OST0000-osc-[!M]*.active"
- set_conf_param_and_check client \
- "$TEST" "${FSNAME}-OST0000.osc.active" 1 ||
- error "Unable to activate OST1"
+ set_persistent_param_and_check client \
+ "osc.${FSNAME}-OST0000-osc-[!M]*.active" \
+ "${FSNAME}-OST0000.osc.active" 1
mkdir $DIR/$tdir/2 || error "mkdir $DIR/$tdir/2 failed"
- $SETSTRIPE -c -1 -i 0 $DIR/$tdir/2 ||
- error "$SETSTRIPE $DIR/$tdir/2 failed"
+ $LFS setstripe -c -1 -i 0 $DIR/$tdir/2 ||
+ error "$LFS setstripe $DIR/$tdir/2 failed"
sleep 1 && echo "create a file after OST1 is activated"
- # create some file
- createmany -o $DIR/$tdir/2/$tfile-%d 1
+ # doing some io, shouldn't crash
+ dd if=/dev/zero of=$DIR/$tdir/2/$tfile-io bs=1M count=10
# check OSC import is working
stat $DIR/$tdir/2/* >/dev/null 2>&1 ||
error "some OSC imports are still not connected"
# cleanup
+ rm -rf $DIR/$tdir
umount_client $MOUNT || error "Unable to umount client"
stop_ost2 || error "Unable to stop OST2"
cleanup_nocli || error "cleanup_nocli failed with $?"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
- $LCTL conf_param ${FSNAME}-MDT0000.mdc.active=0 &&
- error "deactive MDC0 succeeds"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ $PERM_CMD mdc.${FSNAME}-MDT0001-mdc-*.active=0 &&
+ error "deactive MDC0 succeeds"
+ else
+ $PERM_CMD ${FSNAME}-MDT0000.mdc.active=0 &&
+ error "deactive MDC0 succeeds"
+ fi
+
# activate MDC for MDT2
- local TEST="$LCTL get_param -n mdc.${FSNAME}-MDT0001-mdc-[!M]*.active"
- set_conf_param_and_check client \
- "$TEST" "${FSNAME}-MDT0001.mdc.active" 1 ||
- error "Unable to activate MDT2"
+ set_persistent_param_and_check client \
+ "mdc.${FSNAME}-MDT0001-mdc-*.active" \
+ "${FSNAME}-MDT0001.mdc.active" 1
wait_clients_import_state ${CLIENTS:-$HOSTNAME} mds2 FULL
if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.60) ]
rm -rf $DIR/$tdir/2 || error "unlink dir failed"
# deactivate MDC for MDT2
- local TEST="$LCTL get_param -n mdc.${FSNAME}-MDT0001-mdc-[!M]*.active"
- set_conf_param_and_check client \
- "$TEST" "${FSNAME}-MDT0001.mdc.active" 0 ||
- error "Unable to deactivate MDT2"
+ set_persistent_param_and_check client \
+ "mdc.${FSNAME}-MDT0001-mdc-*.active" \
+ "${FSNAME}-MDT0001.mdc.active" 0
wait_osp_active mds ${FSNAME}-MDT0001 1 0
check_mount || error "check_mount failed"
mkdir $MOUNT/$tdir || error "mkdir $MOUNT/$tdir failed"
- $SETSTRIPE -c -1 $MOUNT/$tdir ||
- error "$SETSTRIPE -c -1 $MOUNT/$tdir failed"
+ $LFS setstripe -c -1 $MOUNT/$tdir ||
+ error "$LFS setstripe -c -1 $MOUNT/$tdir failed"
#define OBD_FAIL_MDS_REINT_DELAY 0x142
do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x142"
touch $MOUNT/$tdir/$tfile &
error "Unable to create temporary file"
sleep 1
- $SETSTRIPE -c -1 -S 1M $DIR/$tdir || error "$SETSTRIPE failed"
+ $LFS setstripe -c -1 -S 1M $DIR/$tdir || error "$LFS setstripe failed"
for (( i=0; i < nrfiles; i++ )); do
multiop $DIR/$tdir/$tfile-$i Ow1048576w1048576w524288c ||
setmodopts $modname "$oldvalue"
# Check that $opts took
- tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min")
- tmax=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max")
+ tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" ||
+ echo 0)
+ tmax=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" ||
+ echo 0)
tstarted=$(do_facet $facet \
- "$LCTL get_param -n ${paramp}.threads_started")
+ "$LCTL get_param -n ${paramp}.threads_started" || echo 0)
lassert 28 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' ||
return $?
cleanup
$server_version -lt $(version_code 2.5.11) ]]; then
wait_osc_import_state mds ost1 FULL
wait_osc_import_state mds ost2 FULL
- $SETSTRIPE --stripe-count=-1 $DIR/$tfile ||
+ $LFS setstripe --stripe-count=-1 $DIR/$tfile ||
error "Unable to setstripe $DIR/$tfile"
n=$($LFS getstripe --stripe-count $DIR/$tfile)
[ "$n" -eq 2 ] || error "Stripe count not two: $n"
lxattr=true
for num in $(seq $MDSCOUNT); do
- do_facet mds${num} $TUNE2FS -O large_xattr \
+ do_facet mds${num} $TUNE2FS -O ea_inode \
$(mdsdevname $num) ||
error "tune2fs on mds $num failed"
done
# need to delete this file to avoid problems in other tests
rm -f $file
- stopall || error "stopping systems to turn off large_xattr"
- if $lxattr; then
- for num in $(seq $MDSCOUNT); do
- do_facet mds${num} $TUNE2FS -O ^large_xattr \
- $(mdsdevname $num) ||
- error "tune2fs on mds $num failed"
- done
- fi
+ stopall || error "stopping systems failed"
}
run_test 61 "large xattr"
echo "wrong nids list should not destroy the system"
do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "wrong nids list" &&
error "wrong parse"
+ do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "asdfasdf, asdfadf" &&
+ error "wrong parse"
echo "replace OST nid"
do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID ||
do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 "wrong nids list" &&
error "wrong parse"
+ local FAKE_NIDS="192.168.0.112@tcp1,192.168.0.112@tcp2"
+ local FAKE_FAILOVER="192.168.0.113@tcp1,192.168.0.113@tcp2"
+ local NIDS_AND_FAILOVER="$MDS_NID,$FAKE_NIDS:$FAKE_FAILOVER"
+ echo "set NIDs with failover"
+ do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $NIDS_AND_FAILOVER ||
+ error "replace nids failed"
+
+
echo "replace MDS nid"
do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $MDS_NID ||
error "replace nids failed"
local ifree=$($LFS df -i $MOUNT | awk '/OST0000/ { print $4 }')
log "On OST0, $ifree inodes available. Want $num_create."
- $SETSTRIPE -i 0 $DIR/$tdir ||
- error "$SETSTRIPE -i 0 $DIR/$tdir failed"
+ $LFS setstripe -i 0 $DIR/$tdir ||
+ error "$LFS setstripe -i 0 $DIR/$tdir failed"
if [ $ifree -lt 10000 ]; then
files=$(( ifree - 50 ))
else
mount_client $MOUNT || error "mount client failed"
touch $DIR/$tdir/$tfile-last || error "create file after reformat"
- local idx=$($GETSTRIPE -i $DIR/$tdir/$tfile-last)
+ local idx=$($LFS getstripe -i $DIR/$tdir/$tfile-last)
[ $idx -ne 0 ] && error "$DIR/$tdir/$tfile-last on $idx not 0" || true
local iused=$($LFS df -i $MOUNT | awk '/OST0000/ { print $3 }')
error "client_cache_count is not saved after remount"
stopall
}
-run_test 76a "set permanent params set_param -P"
+run_test 76a "set permanent params with lctl across mounts"
test_76b() { # LU-4783
[[ $(lustre_version_code mgs) -ge $(version_code 2.5.57) ]] ||
stopall
}
-run_test 76c "verify changelog_mask is applied with set_param -P"
+run_test 76c "verify changelog_mask is applied with lctl set_param -P"
test_76d() { #LU-9399
setupall
stopall
}
-run_test 76d "verify llite.*.xattr_cache can be set by 'set_param -P' correctly"
+run_test 76d "verify llite.*.xattr_cache can be set by 'lctl set_param -P' correctly"
test_77() { # LU-3445
local server_version=$(lustre_version_code $SINGLEMDS)
done
ost_indices=$(comma_list $ost_indices)
- trap "restore_ostindex" EXIT
+ stack_trap "restore_ostindex" EXIT
echo -e "\nFormat $OSTCOUNT OSTs with sparse indices $ost_indices"
OST_INDEX_LIST=[$ost_indices] formatall
error "start ost$i failed"
done
+ # Collect debug information - start of test
+ do_nodes $(comma_list $(mdts_nodes)) \
+ $LCTL get_param osc.*.prealloc_*_id
+
mount_client $MOUNT || error "mount client $MOUNT failed"
wait_osts_up
$LFS df $MOUNT || error "$LFS df $MOUNT failed"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+ stack_trap "do_nodes $(comma_list $(mdts_nodes)) \
+ $LCTL get_param osc.*.prealloc_*_id" EXIT
+
# 1. If the file does not exist, new file will be created
# with specified OSTs.
local file=$DIR/$tdir/$tfile-1
- local cmd="$SETSTRIPE -o $ost_indices $file"
+ local cmd="$LFS setstripe -o $ost_indices $file"
echo -e "\n$cmd"
eval $cmd || error "$cmd failed"
check_stripe_count $file $OSTCOUNT
# will be attached with specified layout.
file=$DIR/$tdir/$tfile-2
mcreate $file || error "mcreate $file failed"
- cmd="$SETSTRIPE -o $ost_indices $file"
+ cmd="$LFS setstripe -o $ost_indices $file"
echo -e "\n$cmd"
eval $cmd || error "$cmd failed"
dd if=/dev/urandom of=$file count=1 bs=1M > /dev/null 2>&1 ||
# be in the OST indices list.
local start_ost_idx=${ost_indices##*,}
file=$DIR/$tdir/$tfile-3
- cmd="$SETSTRIPE -o $ost_indices -i $start_ost_idx $file"
+ cmd="$LFS setstripe -o $ost_indices -i $start_ost_idx $file"
echo -e "\n$cmd"
eval $cmd || error "$cmd failed"
check_stripe_count $file $OSTCOUNT
check_start_ost_idx $file $start_ost_idx
file=$DIR/$tdir/$tfile-4
- cmd="$SETSTRIPE"
+ cmd="$LFS setstripe"
cmd+=" -o $(exclude_items_from_list $ost_indices $start_ost_idx)"
cmd+=" -i $start_ost_idx $file"
echo -e "\n$cmd"
eval $cmd && error "index $start_ost_idx should be in $ost_indices"
- # 5. Specifying OST indices for directory should fail with ENOSUPP.
+ # 5. Specifying OST indices for directory should succeed.
local dir=$DIR/$tdir/$tdir
mkdir $dir || error "mkdir $dir failed"
- cmd="$SETSTRIPE -o $ost_indices $dir"
- echo -e "\n$cmd"
- eval $cmd && error "$cmd should fail, specifying OST indices" \
- "for directory is not supported"
+ cmd="$LFS setstripe -o $ost_indices $dir"
+ if [[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.11.53) &&
+ $(lustre_version_code client -gt $(version_code 2.11.53)) ]]; then
+ echo -e "\n$cmd"
+ eval $cmd || error "unable to specify OST indices on directory"
+ else
+ echo "need MDS+client version at least 2.11.53"
+ fi
restore_ostindex
}
-run_test 82a "specify OSTs for file (succeed) or directory (fail)"
+run_test 82a "specify OSTs for file (succeed) or directory (succeed)"
cleanup_82b() {
trap 0
# If [--pool|-p <pool_name>] is set with [--ost-list|-o <ost_indices>],
# then the OSTs must be the members of the pool.
local file=$DIR/$tdir/$tfile
- cmd="$SETSTRIPE -p $ost_pool -o $ost_idx_in_list $file"
+ cmd="$LFS setstripe -p $ost_pool -o $ost_idx_in_list $file"
echo -e "\n$cmd"
eval $cmd && error "OST with index $ost_idx_in_list should be" \
"in OST pool $ost_pool"
# Only select OST $ost_idx_in_list from $ost_pool for file.
ost_idx_in_list=${ost_idx_in_pool#*,}
- cmd="$SETSTRIPE -p $ost_pool -o $ost_idx_in_list $file"
+ cmd="$LFS setstripe -p $ost_pool -o $ost_idx_in_list $file"
echo -e "\n$cmd"
eval $cmd || error "$cmd failed"
- cmd="$GETSTRIPE $file"
+ cmd="$LFS getstripe $file"
echo -e "\n$cmd"
eval $cmd || error "$cmd failed"
check_stripe_count $file 2
check_mount || error "check client $MOUNT failed"
#set xattr
- $SETSTRIPE -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
+ $LFS setstripe -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
error "Create file with 3 components failed"
$TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed"
- i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed"
+ i=$($LFS getstripe -I3 -c $file) || error "get 3rd stripe count failed"
if [ $i -ne $OSTCOUNT ]; then
left_size=$(expr $left_size + $(expr $OSTCOUNT - $i) \* 24)
echo -n "Since only $i out $OSTCOUNT OSTs are used, "
start_ost || error "OST0 start fail"
#define OBD_FAIL_MGS_WRITE_TARGET_DELAY 0x90e
- do_facet mgs "$LCTL set_param fail_val = 10 fail_loc=0x8000090e"
+ do_facet mgs "$LCTL set_param fail_val=10 fail_loc=0x8000090e"
for num in $(seq 2 $MDSCOUNT); do
start_mdt $num &
done
run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
test_101() {
- local createmany_oid
+ local createmany_pid
local dev=$FSNAME-OST0000-osc-MDT0000
setup
- createmany -o $DIR1/$tfile-%d 50000 &
- createmany_oid=$!
+ mkdir $DIR1/$tdir
+ createmany -o $DIR1/$tdir/$tfile-%d 50000 &
+ createmany_pid=$!
# MDT->OST reconnection causes MDT<->OST last_id synchornisation
# via osp_precreate_cleanup_orphans.
for ((i = 0; i < 100; i++)); do
done
ls -asl $MOUNT | grep '???' &&
- (kill -9 $createmany_oid &>/dev/null; \
- error "File hasn't object on OST")
+ { kill -9 $createmany_pid &>/dev/null;
+ error "File has no object on OST"; }
- kill -s 0 $createmany_oid || break
+ kill -s 0 $createmany_pid || break
done
- wait $createmany_oid
+ wait $createmany_pid
+
+ unlinkmany $DIR1/$tdir/$tfile-%d 50000
cleanup
}
run_test 101 "Race MDT->OST reconnection with create"
test_102() {
+ [[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.9.53) ]] ||
+ skip "Need server version greater than 2.9.53"
cleanup || error "cleanup failed with $?"
local mds1dev=$(mdsdevname 1)
test_103_set_pool $FSNAME OST0000
- $SETSTRIPE -p $FSNAME $DIR/$tdir/d0 ||
+ $LFS setstripe -p $FSNAME $DIR/$tdir/d0 ||
error "(6) Fail to setstripe on $DIR/$tdir/d0"
if ! combined_mgs_mds ; then
test_103_set_pool $save_fsname OST0001
fi
- $SETSTRIPE -p $save_fsname $DIR/$tdir/f0 ||
+ $LFS setstripe -p $save_fsname $DIR/$tdir/f0 ||
error "(16) Fail to setstripe on $DIR/$tdir/f0"
if ! combined_mgs_mds ; then
umount_mgs_client
test_107() {
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.50) ]] ||
{ skip "Need MDS version > 2.10.50"; return; }
+ local cmd
start_mgsmds || error "start_mgsmds failed"
start_ost || error "unable to start OST"
# add unknown configuration parameter.
- local PARAM="$FSNAME-OST0000.ost.unknown_param=50"
- do_facet mgs "$LCTL conf_param $PARAM"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ cmd="$PERM_CMD ost.$FSNAME-OST0000*.unknown_param"
+ else
+ cmd="$PERM_CMD $FSNAME-OST0000*.ost.unknown_param"
+ fi
+ do_facet mgs "$cmd=50"
cleanup_nocli || error "cleanup_nocli failed with $?"
load_modules
$rcmd mount -t ldiskfs -o loop $tmp/images/$facet \
$tmp/mnt/$facet ||
error "failed to local mount $facet"
+
$rcmd tar jxf $LUSTRE/tests/zfs_${facet}_2_11.tar.bz2 \
--xattrs --xattrs-include="*.*" \
-C $tmp/mnt/$facet/ > /dev/null 2>&1 ||
test_109_set_params() {
local fsname=$1
- set_conf_param_and_check mds \
- "$LCTL get_param -n mdd.$fsname-MDT0000.atime_diff" \
+ set_persistent_param_and_check mds \
+ "mdd.$fsname-MDT0000.atime_diff" \
"$fsname-MDT0000.mdd.atime_diff" \
"62"
- set_conf_param_and_check mds \
- "$LCTL get_param -n mdd.$fsname-MDT0000.atime_diff" \
+ set_persistent_param_and_check mds \
+ "mdd.$fsname-MDT0000.atime_diff" \
"$fsname-MDT0000.mdd.atime_diff" \
"63"
- set_conf_param_and_check client \
- "$LCTL get_param -n llite.$fsname*.max_read_ahead_mb" \
+ set_persistent_param_and_check client \
+ "llite.$fsname*.max_read_ahead_mb" \
"$fsname.llite.max_read_ahead_mb" \
"32"
- set_conf_param_and_check client \
- "$LCTL get_param -n llite.$fsname*.max_read_ahead_mb" \
+ set_persistent_param_and_check client \
+ "llite.$fsname*.max_read_ahead_mb" \
"$fsname.llite.max_read_ahead_mb" \
"64"
create_pool $fsname.pool1 || error "create pool failed"
}
run_test 109b "test lctl clear_conf one config"
+# Test 110: verify that an ldiskfs MDT formatted with "-O large_dir" can
+# hold enough hard links in one directory to grow the htree to 3 levels,
+# and that e2fsck finds the result consistent.
+test_110()
+{
+	[[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
+		skip "Only applicable to ldiskfs-based MDTs"
+
+	do_facet $SINGLEMDS $DEBUGFS -w -R supported_features |grep large_dir ||
+		skip "large_dir option is not supported on MDS"
+	do_facet ost1 $DEBUGFS -w -R supported_features | grep large_dir ||
+		skip "large_dir option is not supported on OSS"
+
+	stopall # stop all targets before modifying the target counts
+	# restore the original target counts when the test exits
+	stack_trap "MDSCOUNT=$MDSCOUNT OSTCOUNT=$OSTCOUNT" EXIT
+	MDSCOUNT=1
+	OSTCOUNT=1
+
+	# ext4_dir_entry_2 struct size:264
+	# dx_root struct size:8
+	# dx_node struct size:8
+	# dx_entry struct size:8
+	# For 1024 bytes block size.
+	# First level directory entries: 126
+	# Second level directory entries: 127
+	# Entries in leaf: 3
+	# For 2 levels limit: 48006
+	# For 3 levels limit : 6096762
+	# Create 80000 files to safely exceed 2-level htree limit.
+	CONF_SANITY_110_LINKS=${CONF_SANITY_110_LINKS:-80000}
+
+	# can fit at most 3 filenames per 1KB leaf block, but each
+	# leaf/index block will only be 3/4 full before split at each level
+	(( MDSSIZE < CONF_SANITY_110_LINKS / 3 * 4/3 * 4/3 )) &&
+		CONF_SANITY_110_LINKS=$((MDSSIZE * 3 * 3/4 * 3/4))
+
+	# reformat the MDT with large_dir enabled and 1KB blocks so the
+	# htree levels fill at a much smaller directory size
+	local opts="$(mkfs_opts mds1 $(mdsdevname 1)) \
+		--reformat $(mdsdevname 1) $(mdsvdevname 1)"
+	if [[ $opts != *mkfsoptions* ]]; then
+		opts+=" --mkfsoptions=\\\"-O large_dir -b 1024 -i 65536\\\""
+	else
+		opts="${opts//--mkfsoptions=\\\"/ \
+			--mkfsoptions=\\\"-O large_dir -b 1024 -i 65536 }"
+	fi
+	echo "MDT params: $opts"
+	add mds1 $opts || error "add mds1 failed with new params"
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+
+	opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+		--reformat $(ostdevname 1) $(ostvdevname 1)"
+
+	if [[ $opts != *mkfsoptions* ]]; then
+		opts+=" --mkfsoptions=\\\"-O large_dir\\\" "
+	else
+		opts="${opts//--mkfsoptions=\\\"/ \
+			--mkfsoptions=\\\"-O large_dir }"
+	fi
+	echo "OST params: $opts"
+	add ost1 $opts || error "add ost1 failed with new params"
+	start ost1 $(ostdevname 1) $OST_MOUNT_OPTS
+
+	MOUNT_2=yes mountcli || error "mount clients failed"
+
+	mkdir -v $DIR/$tdir || error "cannot create $DIR/$tdir"
+	local pids count=0 group=0
+
+	# create links in batches of up to 40000, alternating between the
+	# two client mounts ($DIR1/$DIR2 via the ${!dir} indirection), with
+	# long link names so the directory grows quickly
+	echo "creating $CONF_SANITY_110_LINKS in total"
+	while (( count < CONF_SANITY_110_LINKS )); do
+		local len=$((253 - $(wc -c <<<"$tfile-$group-40000-")))
+		local dir=DIR$((group % 2 + 1))
+		local target=${!dir}/$tdir/$tfile-$group
+		local long=$target-$(generate_name $len)-
+		local create=$((CONF_SANITY_110_LINKS - count))
+
+		(( create > 40000 )) && create=40000
+		touch $target || error "creating $target failed"
+		echo "creating $create hard links to $target"
+		createmany -l $target $long $create &
+		pids+=" $!"
+
+		count=$((count + create))
+		group=$((group + 1))
+	done
+	echo "waiting for PIDs$pids to complete"
+	wait $pids || error "createmany failed after $group groups"
+
+	cleanup
+
+	# read-only e2fsck must find the large_dir MDT consistent
+	run_e2fsck $(facet_active_host mds1) $(mdsdevname 1) -n
+}
+run_test 110 "Adding large_dir with 3-level htree"
+
+# Test 111: grow a single directory on a large_dir MDT past 2GB of
+# directory blocks (hard links created from all clients on both mounts),
+# then verify the filesystem with read-only e2fsck.
+test_111() {
+	[[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
+		skip "Only applicable to ldiskfs-based MDTs"
+
+	is_dm_flakey_dev $SINGLEMDS $(mdsdevname 1) &&
+		skip "This test can not be executed on flakey dev"
+
+	do_facet $SINGLEMDS $DEBUGFS -w -R supported_features |grep large_dir ||
+		skip "large_dir option is not supported on MDS"
+
+	do_facet ost1 $DEBUGFS -w -R supported_features | grep large_dir ||
+		skip "large_dir option is not supported on OSS"
+
+	# cleanup before changing target counts
+	cleanup
+	stack_trap "MDSSIZE=$MDSSIZE MDSCOUNT=$MDSCOUNT OSTCOUNT=$OSTCOUNT" EXIT
+	MDSCOUNT=1
+	OSTCOUNT=1
+	(( MDSSIZE < 2400000 )) && MDSSIZE=2400000 # need at least 2.4GB
+
+	local mdsdev=$(mdsdevname 1)
+
+	# format the MDT with large_dir; "-i 1048576" (bytes per inode)
+	# keeps the inode count low since only link names are created
+	local opts="$(mkfs_opts mds1 $(mdsdevname 1)) \
+		--reformat $(mdsdevname 1) $(mdsvdevname 1)"
+	if [[ $opts != *mkfsoptions* ]]; then
+		opts+=" --mkfsoptions=\\\"-O large_dir -i 1048576 \\\" "
+	else
+		opts="${opts//--mkfsoptions=\\\"/ \
+			--mkfsoptions=\\\"-O large_dir -i 1048576 }"
+	fi
+	echo "MDT params: $opts"
+	__touch_device mds 1
+	add mds1 $opts || error "add mds1 failed with new params"
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+
+	opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+		--reformat $(ostdevname 1) $(ostvdevname 1)"
+	if [[ $opts != *mkfsoptions* ]]; then
+		opts+=" --mkfsoptions=\\\"-O large_dir \\\""
+	else
+		opts="${opts//--mkfsoptions=\\\"/ --mkfsoptions=\\\"-O large_dir }"
+	fi
+	echo "OST params: $opts"
+	__touch_device ost 1
+	add ost1 $opts || error "add ost1 failed with new params"
+	start ost1 $(ostdevname 1) $OST_MOUNT_OPTS
+
+	MOUNT_2=yes mountcli
+	mkdir $DIR/$tdir || error "cannot create $DIR/$tdir"
+	$LFS df $DIR/$tdir
+	$LFS df -i $DIR/$tdir
+
+	local group=0
+
+	local start=$SECONDS
+	local dirsize=0
+	local dirmax=$((2 << 30))
+	local needskip=0
+	local taken=0
+	local rate=0
+	local left=0
+	local num=0
+	# use logical "&&", not bitwise "&", to combine the loop conditions
+	while (( !needskip && dirsize < dirmax )); do
+		local pids=""
+
+		for cli in ${CLIENTS//,/ }; do
+			local len=$((253 - $(wc -c <<<"$cli-$group-60000-")))
+			local target=$cli-$group
+			local long=$DIR/$tdir/$target-$(generate_name $len)-
+
+			RPWD=$DIR/$tdir do_node $cli touch $target ||
+				error "creating $target failed"
+			echo "creating 60000 hardlinks to $target"
+			RPWD=$DIR/$tdir do_node $cli createmany -l $target $long 60000 &
+			pids+=" $!"
+
+			group=$((group + 1))
+			target=$cli-$group
+			long=$DIR2/$tdir/$target-$(generate_name $len)-
+
+			RPWD=$DIR2/$tdir do_node $cli touch $target ||
+				error "creating $target failed"
+			echo "creating 60000 hardlinks to $target"
+			RPWD=$DIR2/$tdir do_node $cli createmany -l $target $long 60000 &
+			pids+=" $!"
+
+			group=$((group + 1))
+		done
+		echo "waiting for PIDs$pids to complete"
+		wait $pids || error "createmany failed after $group groups"
+		dirsize=$(stat -c %s $DIR/$tdir)
+		taken=$((SECONDS - start))
+		rate=$((dirsize / taken))
+		left=$(((dirmax - dirsize) / rate))
+		num=$((group * 60000))
+		echo "estimate ${left}s left after $num files / ${taken}s"
+		# if the estimated time remaining is too large (it may change
+		# over time as the create rate is not constant) then exit
+		# without declaring a failure.
+		(( left > 1200 )) && needskip=1
+	done
+
+	cleanup
+
+	(( needskip )) && skip "ETA ${left}s after $num files / ${taken}s is too long"
+
+	run_e2fsck $(facet_active_host mds1) $(mdsdevname 1) -n
+}
+run_test 111 "Adding large_dir with over 2GB directory"
+
+
cleanup_115()
{
trap 0
do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname"
local mds_opts="$(mkfs_opts mds1 ${mdsdev}) --device-size=$IMAGESIZE \
- --mkfsoptions='-O lazy_itable_init,large_xattr,^resize_inode,meta_bg \
+ --mkfsoptions='-O lazy_itable_init,ea_inode,^resize_inode,meta_bg \
-i 1024'"
add mds1 $mds_opts --mgs --reformat $mdsdev ||
{ skip_env "format large MDT failed"; return 0; }
}
run_test 116 "big size MDT support"
+test_117() {
+ setup
+ do_facet ost1 "$LCTL set_param ost.OSS.ost_io.nrs_policies=fifo"
+ do_facet ost1 "$LCTL get_param -n ost.OSS.ost_io.nrs_tbf_rule" &&
+ error "get_param should fail"
+ cleanup || error "cleanup failed with rc $?"
+}
+run_test 117 "lctl get_param return errors properly"
+
+test_120() { # LU-11130
+	[ "$MDSCOUNT" -lt 2 ] && skip "mdt count < 2"
+	[ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+		skip "ldiskfs only test"
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.56) ] &&
+		skip "Need DNE2 capable MD target with LU-11130 fix"
+
+	setup
+
+	local mds1host=$(facet_active_host mds1)
+	local mds1dev=$(mdsdevname 1)
+
+	# parent directory on MDT1, subdirectory back on MDT0
+	$LFS mkdir -i 1 $DIR/$tdir
+	$LFS mkdir -i 0 $DIR/$tdir/mds1dir
+
+	# rename a symlink across MDTs (LU-11130: this must not leave a
+	# bad symlink behind on the MDT)
+	ln -s foo $DIR/$tdir/bar
+	mv $DIR/$tdir/bar $DIR/$tdir/mds1dir/bar2 ||
+		error "cross-target rename failed"
+
+	stopall
+
+	# read-only e2fsck must find the MDT consistent after the rename
+	run_e2fsck $mds1host $mds1dev "-n"
+}
+run_test 120 "cross-target rename should not create bad symlinks"
+
+test_122() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.11.53) ]] ||
+ { skip "Need OST version at least 2.11.53" && return 0; }
+
+
+ reformat
+ LOAD_MODULES_REMOTE=true load_modules
+#define OBD_FAIL_OFD_SET_OID 0x1e0
+ do_facet ost1 $LCTL set_param fail_loc=0x00001e0
+
+ setupall
+ $LFS mkdir -i1 -c1 $DIR/$tdir
+ $LFS setstripe -i0 -c1 $DIR/$tdir
+ do_facet ost1 $LCTL set_param fail_loc=0
+ createmany -o $DIR/$tdir/file_ 1000 ||
+ error "Fail to create a new sequence"
+
+ reformat
+}
+run_test 122 "Check OST sequence update"
+
+test_123aa() {
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	[ -d $MOUNT/.lustre ] || setupall
+
+	# test old logid format until removal from llog_ioctl.c::str2logid()
+	if [ $MGS_VERSION -lt $(version_code 3.1.53) ]; then
+		do_facet mgs $LCTL dl | grep MGS
+		# the multiple backslashes are needed so a literal '$' in
+		# the log name survives the do_facet remote-shell quoting
+		do_facet mgs "$LCTL --device %MGS llog_print \
+			      \\\\\\\$$FSNAME-client 1 10" ||
+			error "old llog_print failed"
+	fi
+
+	# test new logid format
+	if [ $MGS_VERSION -ge $(version_code 2.9.53) ]; then
+		do_facet mgs "$LCTL --device MGS llog_print $FSNAME-client" ||
+			error "new llog_print failed"
+	fi
+}
+run_test 123aa "llog_print works with FIDs and simple names"
+
+# Verify that a value stored via "lctl set_param -P" shows up in the
+# "params" llog with the expected parameter name and value.
+test_123ab() {
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	[[ $MGS_VERSION -gt $(version_code 2.11.51) ]] ||
+		skip "Need server with working llog_print support"
+
+	[ -d $MOUNT/.lustre ] || setupall
+
+	local yaml
+	local orig_val
+
+	orig_val=$(do_facet mgs $LCTL get_param jobid_name)
+	do_facet mgs $LCTL set_param -P jobid_name="testname"
+
+	# take the most recent jobid_name record from the params llog
+	yaml=$(do_facet mgs $LCTL --device MGS llog_print params |
+	       grep jobid_name | tail -n 1)
+
+	local param=$(awk '{ print $10 }' <<< "$yaml")
+	local val=$(awk '{ print $12 }' <<< "$yaml")
+	#return to the default
+	do_facet mgs $LCTL set_param -P jobid_name=$orig_val
+	# quote the expansions: an empty awk result would otherwise turn
+	# the test into a shell syntax error instead of a clean failure
+	[ "$val" = "testname" ] || error "bad value: $val"
+	[ "$param" = "jobid_name," ] || error "Bad param: $param"
+}
+run_test 123ab "llog_print params output values from set_param -P"
+
+test_123ac() { # LU-11566
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start ||
+		skip "Need 'lctl llog_print --start' on MGS"
+
+	local start=10
+	local end=50
+
+	[ -d $MOUNT/.lustre ] || setupall
+
+	# - { index: 10, event: add_uuid, nid: 192.168.20.1@tcp(0x20000c0a81401,
+	# node: 192.168.20.1@tcp }
+	# check that every printed record index lies in [start, end];
+	# "tr -d ," strips the trailing comma from the index field
+	# NOTE(review): the while loop runs in a pipeline subshell, so
+	# error() only exits the subshell here — confirm failures propagate
+	do_facet mgs $LCTL --device MGS \
+		llog_print --start $start --end $end $FSNAME-client | tr -d , |
+	while read DASH BRACE INDEX idx EVENT BLAH BLAH BLAH; do
+		(( idx >= start )) || error "llog_print index $idx < $start"
+		(( idx <= end )) || error "llog_print index $idx > $end"
+	done
+}
+run_test 123ac "llog_print with --start and --end"
+
+test_123ad() { # LU-11566
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	# older versions of lctl may not print all records properly
+	do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start ||
+		skip "Need 'lctl llog_print --start' on MGS"
+
+	[ -d $MOUNT/.lustre ] || setupall
+
+	# append a new record, to avoid issues if last record was cancelled
+	local old=$($LCTL get_param -n osc.*-OST0000-*.max_dirty_mb | head -1)
+	do_facet mgs $LCTL conf_param $FSNAME-OST0000.osc.max_dirty_mb=$old
+
+	# llog_info reports the catalog state, e.g.:
+	# logid: [0x3:0xa:0x0]:0
+	# flags: 4 (plain)
+	# records_count: 72
+	# last_index: 72
+	local num=$(do_facet mgs $LCTL --device MGS llog_info $FSNAME-client |
+		    awk '/last_index:/ { print $2 - 1 }')
+
+	# the index of the last record printed must match llog_info's count
+	# - { index: 71, event: set_timeout, num: 0x14, param: sys.timeout=20 }
+	local last=$(do_facet mgs $LCTL --device MGS llog_print $FSNAME-client |
+		     tail -1 | awk '{ print $4 }' | tr -d , )
+	(( last == num )) || error "llog_print only showed $last/$num records"
+}
+run_test 123ad "llog_print shows all records"
+
+# Dump all config llogs to a YAML file, wipe the configuration with
+# writeconf, remount, and re-apply the dump with "lctl set_param -F",
+# then verify a persistent parameter survived the round trip.
+test_123F() {
+	setupall
+	local yaml_file="$TMP/$tfile.yaml"
+	# -f: the file may not exist on the first run of this test
+	do_facet mgs rm -f "$yaml_file"
+	local cfgfiles=$(do_facet mgs "lctl --device MGS llog_catlist" |
+			 sed 's/config_log://')
+
+	# set jobid_var to a different value for test
+	local orig_val=$(do_facet mgs $LCTL get_param jobid_var)
+	do_facet mgs $LCTL set_param -P jobid_var="testname"
+
+	for i in $cfgfiles params; do
+		do_facet mgs "lctl --device MGS llog_print ${i} >> $yaml_file"
+	done
+
+	echo "Unmounting FS"
+	stopall
+	echo "Writeconf"
+	writeconf_all
+	echo "Remounting"
+	mountmgs
+	mountmds
+	mountoss
+	mountcli
+
+	# Reapply the config from before
+	echo "Setting configuration parameters"
+	do_facet mgs "lctl set_param -F $yaml_file"
+
+	local set_val=$(do_facet mgs $LCTL get_param jobid_var)
+	do_facet mgs $LCTL set_param -P $orig_val
+
+	# quoted so an empty get_param result gives a clean error message
+	# instead of a shell syntax error
+	[ "$set_val" == "jobid_var=testname" ] ||
+		error "$set_val is not testname"
+
+	do_facet mgs rm -f "$yaml_file"
+}
+run_test 123F "clear and reset all parameters using set_param -F"
+
+test_124()
+{
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+ [ -z $mds2failover_HOST ] && skip "needs MDT failover setup" && return
+
+ setup
+ cleanup
+
+ load_modules
+ if combined_mgs_mds; then
+ start_mdt 1 "-o nosvc" ||
+ error "starting mds with nosvc option failed"
+ fi
+ local nid=$(do_facet mds2 $LCTL list_nids | head -1)
+ local failover_nid=$(do_node $mds2failover_HOST $LCTL list_nids | head -1)
+ do_facet mgs $LCTL replace_nids $FSNAME-MDT0001 $nid:$failover_nid ||
+ error "replace_nids execution error"
+
+ if combined_mgs_mds; then
+ stop_mdt 1
+ fi
+
+ setup
+ fail mds2
+ echo "lfs setdirstripe"
+ $LFS setdirstripe -i 1 $MOUNT/$tdir || error "setdirstirpe error"
+ echo ok
+}
+run_test 124 "check failover after replace_nids"
+
if ! combined_mgs_mds ; then
stop mgs
fi