ONLY=${ONLY:-"$*"}
-# bug number for skipped test: LU-8972
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 101"
+# bug number for skipped test:
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-is_sles11() # LU-2181
-{
- if [ -r /etc/SuSE-release ]
- then
- local vers=$(grep VERSION /etc/SuSE-release | awk '{print $3}')
- local patchlev=$(grep PATCHLEVEL /etc/SuSE-release |
- awk '{ print $3 }')
- if [ $vers -eq 11 ] && [ $patchlev -eq 2 ]
- then
- return 0
- fi
- fi
- return 1
-}
-
-if [ "$FAILURE_MODE" = "HARD" ]; then
- CONFIG_EXCEPTIONS="24a " &&
- echo "Except the tests: $CONFIG_EXCEPTIONS for " \
- "FAILURE_MODE=$FAILURE_MODE, b=23573" &&
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
-fi
-
# bug number for skipped test:
# a tool to create lustre filesystem images
ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
+if $SHARED_KEY; then
+# bug number for skipped tests: LU-9795 (all below)
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 0 31 32a 32d 35a"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 53a 53b 54b 76a 76b"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 76c 76d 78 103"
+fi
SRCDIR=$(dirname $0)
PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
STORED_MDSSIZE=$MDSSIZE
STORED_OSTSIZE=$OSTSIZE
MDSSIZE=200000
+[ $(facet_fstype $SINGLEMDS) = "zfs" ] && MDSSIZE=400000
OSTSIZE=200000
+[ $(facet_fstype ost1) = "zfs" ] && OSTSIZE=400000
fs2mds_HOST=$mds_HOST
fs2ost_HOST=$ost_HOST
[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
# bug number for skipped test:
ALWAYS_EXCEPT="$ALWAYS_EXCEPT"
+# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
init_logging
}
start_mgs () {
- echo "start mgs"
- start mgs $(mgsdevname) $MGS_MOUNT_OPTS
+ echo "start mgs service on $(facet_active_host mgs)"
+ start mgs $(mgsdevname) $MGS_MOUNT_OPTS $@
}
start_mdt() {
}
run_test 9 "test ptldebug and subsystem for mkfs"
-is_blkdev () {
- local facet=$1
- local dev=$2
- local size=${3:-""}
-
- local rc=0
- do_facet $facet "test -b $dev" || rc=1
- if [[ "$size" ]]; then
- local in=$(do_facet $facet "dd if=$dev of=/dev/null bs=1k \
- count=1 skip=$size 2>&1" |
- awk '($3 == "in") { print $1 }')
- [[ $in = "1+0" ]] || rc=1
- fi
- return $rc
-}
-
#
# Test 16 was to "verify that lustre will correct the mode of OBJECTS".
# But with new MDS stack we don't care about the mode of local objects
[ -e $DIR/$tfile ] && error "$DIR/$tfile exists incorrectly"
remount_client rw $MOUNT || error "remount_client with rw failed"
touch $DIR/$tfile || error "touch $DIR/$tfile failed"
- MCNT=$(grep -c $MOUNT /etc/mtab)
+ MCNT=$(grep -c $MOUNT' ' /etc/mtab)
[ "$MCNT" -ne 1 ] && error "$MOUNT in /etc/mtab $MCNT times"
umount_client $MOUNT
stop_mds || error "Unable to stop MDS"
fi
mount_client $MOUNT || error "mount_client $MOUNT failed"
wait_osc_import_state mds ost FULL
- wait_osc_import_state client ost FULL
+ wait_osc_import_ready client ost
check_mount || error "check_mount failed"
pass
"MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs"
ps -ef | grep mount
fi
- stop_mds || error "stopping MDSes failed"
- stop_ost || error "stopping OSSes failed"
+ cleanup || error "cleanup failed with rc $?"
}
run_test 23a "interrupt client during recovery mount delay"
-umount_client $MOUNT
-cleanup_nocli
-
test_23b() { # was test_23
start_mds || error "MDS start failed"
start_ost || error "Unable to start OST1"
start_mds || error "Unable to start MDS"
echo "Requeue thread should have started: "
ps -e | grep ll_cfg_requeue
- set_conf_param_and_check ost1 \
- "$LCTL get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" \
- "$FSNAME-OST0000.ost.client_cache_seconds" ||
- error "set_conf_param_and_check ost1 failed"
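+	# set_persistent_param_and_check is assumed to be the test-framework
+	# helper that applies the value via $PERM_CMD and waits until the
+	# live parameter matches, calling error itself on failure, so no
+	# explicit "|| error" is needed here.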
+ set_persistent_param_and_check ost1 \
+ "obdfilter.$FSNAME-OST0000.client_cache_seconds" \
+ "$FSNAME-OST0000.ost.client_cache_seconds"
cleanup_nocli || error "cleanup_nocli failed with rc $?"
}
run_test 27a "Reacquire MGS lock if OST started first"
awk '($3 ~ "mdt" && $4 ~ "MDT0000") { print $4 }')
facet_failover $SINGLEMDS
- set_conf_param_and_check $SINGLEMDS \
- "$LCTL get_param -n mdt.$device.identity_acquire_expire" \
- "$device.mdt.identity_acquire_expire" ||
- error "set_conf_param_and_check $SINGLEMDS failed"
- set_conf_param_and_check client \
- "$LCTL get_param -n mdc.$device-mdc-*.max_rpcs_in_flight"\
- "$device.mdc.max_rpcs_in_flight" ||
- error "set_conf_param_and_check client failed"
+ set_persistent_param_and_check $SINGLEMDS \
+ "mdt.$device.identity_acquire_expire" \
+ "$device.mdt.identity_acquire_expire"
+ set_persistent_param_and_check client \
+ "mdc.$device-mdc-*.max_rpcs_in_flight" \
+ "$device.mdc.max_rpcs_in_flight"
check_mount
cleanup || error "cleanup failed with $?"
}
run_test 27b "Reacquire MGS lock after failover"
-test_28() {
+test_28A() { # was test_28
setup
- TEST="$LCTL get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
+ TEST="llite.$FSNAME-*.max_read_ahead_whole_mb"
PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
- ORIG=$($TEST)
+ ORIG=$($LCTL get_param -n $TEST)
FINAL=$(($ORIG + 1))
- set_conf_param_and_check client "$TEST" "$PARAM" $FINAL ||
- error "first set_conf_param_and_check client failed"
+ set_persistent_param_and_check client "$TEST" "$PARAM" $FINAL
FINAL=$(($FINAL + 1))
- set_conf_param_and_check client "$TEST" "$PARAM" $FINAL ||
- error "second set_conf_param_and_check client failed"
+ set_persistent_param_and_check client "$TEST" "$PARAM" $FINAL
umount_client $MOUNT || error "umount_client $MOUNT failed"
mount_client $MOUNT || error "mount_client $MOUNT failed"
- RESULT=$($TEST)
+ RESULT=$($LCTL get_param -n $TEST)
if [ $RESULT -ne $FINAL ]; then
error "New config not seen: wanted $FINAL got $RESULT"
else
echo "New config success: got $RESULT"
fi
- set_conf_param_and_check client "$TEST" "$PARAM" $ORIG ||
- error "third set_conf_param_and_check client failed"
+ set_persistent_param_and_check client "$TEST" "$PARAM" $ORIG
cleanup || error "cleanup failed with rc $?"
}
-run_test 28 "permanent parameter setting"
+run_test 28A "permanent parameter setting"
test_28a() { # LU-4221
[[ $(lustre_version_code ost1) -ge $(version_code 2.5.52) ]] ||
setup
-	# In this test we will set three kinds of proc parameters with
+	# In this test we will set two kinds of proc parameters with
- # lctl conf_param:
- # 1. the ones moved from the OFD to the OSD, and only their
- # symlinks kept in obdfilter
- # 2. non-symlink ones in the OFD
- # 3. non-symlink ones in the OSD
+ # lctl set_param -P or lctl conf_param:
+ # 1. non-symlink ones in the OFD
+ # 2. non-symlink ones in the OSD
# Check 1.
- # prepare a symlink parameter in the OFD
- name="writethrough_cache_enable"
- param="$device.ost.$name"
- cmd="$LCTL get_param -n obdfilter.$device.$name"
-
- # conf_param the symlink parameter in the OFD
- old=$(do_facet ost1 $cmd)
- new=$(((old + 1) % 2))
- set_conf_param_and_check ost1 "$cmd" "$param" $new ||
- error "lctl conf_param $device.ost.$param=$new failed"
-
- # conf_param the target parameter in the OSD
- param="$device.osd.$name"
- cmd="$LCTL get_param -n osd-*.$device.$name"
- set_conf_param_and_check ost1 "$cmd" "$param" $old ||
- error "lctl conf_param $device.osd.$param=$old failed"
-
- # Check 2.
# prepare a non-symlink parameter in the OFD
name="client_cache_seconds"
param="$device.ost.$name"
- cmd="$LCTL get_param -n obdfilter.$device.$name"
+ cmd="obdfilter.$device.$name"
- # conf_param the parameter in the OFD
- old=$(do_facet ost1 $cmd)
+ # permanently setting the parameter in the OFD
+ old=$(do_facet ost1 $LCTL get_param -n $cmd)
new=$((old * 2))
- set_conf_param_and_check ost1 "$cmd" "$param" $new ||
- error "lctl conf_param $device.ost.$param=$new failed"
- set_conf_param_and_check ost1 "$cmd" "$param" $old ||
- error "lctl conf_param $device.ost.$param=$old failed"
+ set_persistent_param_and_check ost1 "$cmd" "$param" $new
+ set_persistent_param_and_check ost1 "$cmd" "$param" $old
- # Check 3.
+ # Check 2.
# prepare a non-symlink parameter in the OSD
name="auto_scrub"
param="$device.osd.$name"
- cmd="$LCTL get_param -n osd-*.$device.$name"
+ cmd="osd-*.$device.$name"
# conf_param the parameter in the OSD
- old=$(do_facet ost1 $cmd)
+ old=$(do_facet ost1 $LCTL get_param -n $cmd)
new=$(((old + 1) % 2))
- set_conf_param_and_check ost1 "$cmd" "$param" $new ||
- error "lctl conf_param $device.osd.$param=$new failed"
- set_conf_param_and_check ost1 "$cmd" "$param" $old ||
- error "lctl conf_param $device.osd.$param=$old failed"
+ set_persistent_param_and_check ost1 "$cmd" "$param" $new
+ set_persistent_param_and_check ost1 "$cmd" "$param" $old
cleanup || error "cleanup failed with $?"
}
-run_test 28a "set symlink parameters permanently with conf_param"
+run_test 28a "set symlink parameters permanently with lctl"
test_29() {
[ "$OSTCOUNT" -lt "2" ] && skip_env "needs >= 2 OSTs" && return
sleep 10
local PARAM="$FSNAME-OST0001.osc.active"
- local PROC_ACT="osc.$FSNAME-OST0001-osc-[^M]*.active"
- local PROC_UUID="osc.$FSNAME-OST0001-osc-[^M]*.ost_server_uuid"
+	# With lctl set_param -P the value of $PROC_ACT will be sent to
+	# all nodes. The [!M] filter would remove the ability to set
+	# active on the MDS servers, which is tested with wait_osp_*
+	# below. ost_server_uuid only exists on the client, so
+	# filtering it with [!M] is safe.
+ local PROC_ACT="osc.$FSNAME-OST0001-osc-*.active"
+ local PROC_UUID="osc.$FSNAME-OST0001-osc-[!M]*.ost_server_uuid"
ACTV=$($LCTL get_param -n $PROC_ACT)
DEAC=$((1 - $ACTV))
- set_conf_param_and_check client \
- "$LCTL get_param -n $PROC_ACT" "$PARAM" $DEAC ||
- error "set_conf_param_and_check client failed"
+ set_persistent_param_and_check client $PROC_ACT $PARAM $DEAC
# also check ost_server_uuid status
RESULT=$($LCTL get_param -n $PROC_UUID | grep DEACTIV)
if [ -z "$RESULT" ]; then
# test new client starts deactivated
umount_client $MOUNT || error "umount_client $MOUNT failed"
mount_client $MOUNT || error "mount_client $MOUNT failed"
- RESULT=$($LCTL get_param -n $PROC_UUID | grep DEACTIV | grep NEW)
- if [ -z "$RESULT" ]; then
- error "New client start active: $(lctl get_param -n $PROC_UUID)"
- else
- echo "New client success: got $RESULT"
- fi
+
+ # the 2nd and 3rd field of ost_server_uuid do not update at the same
+ # time when using lctl set_param -P
+ wait_update_facet client \
+ "$LCTL get_param -n $PROC_UUID | awk '{print \\\$3 }'" \
+ "DEACTIVATED" ||
+ error "New client start active: $($LCTL get_param -n $PROC_UUID)"
+
+ echo "New client success: got '$($LCTL get_param -n $PROC_UUID)'"
# make sure it reactivates
- set_conf_param_and_check client \
- "$LCTL get_param -n $PROC_ACT" "$PARAM" $ACTV ||
- error "lctl get_param $PROC_ACT $PARAM $ACTV failed"
+ set_persistent_param_and_check client $PROC_ACT $PARAM $ACTV
umount_client $MOUNT
stop_ost2 || error "Unable to stop OST2"
setup
echo Big config llog
- TEST="$LCTL get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
- ORIG=$($TEST)
+ TEST="llite.$FSNAME-*.max_read_ahead_whole_mb"
+ ORIG=$($LCTL get_param -n $TEST)
LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5)
for i in ${LIST[@]}; do
- set_conf_param_and_check client "$TEST" \
- "$FSNAME.llite.max_read_ahead_whole_mb" $i ||
- error "Set $FSNAME.llite.max_read_ahead_whole_mb failed"
+ set_persistent_param_and_check client "$TEST" \
+ "$FSNAME.llite.max_read_ahead_whole_mb" $i
done
# make sure client restart still works
umount_client $MOUNT
mount_client $MOUNT || error "mount_client $MOUNT failed"
- [ "$($TEST)" -ne "$i" ] &&
+ [ "$($LCTL get_param -n $TEST)" -ne "$i" ] &&
error "Param didn't stick across restart $($TEST) != $i"
pass
echo Erase parameter setting
- do_facet mgs "$LCTL conf_param \
- -d $FSNAME.llite.max_read_ahead_whole_mb" ||
- error "Erase param $FSNAME.llite.max_read_ahead_whole_mb failed"
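+	# PERM_CMD is presumably set by the test framework to either
+	# "$LCTL set_param -P" or "$LCTL conf_param" depending on what the
+	# MGS supports; the two use different parameter names, hence the
+	# branches below.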
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs "$PERM_CMD -d $TEST" ||
+ error "Erase param $TEST failed"
+ else
+ do_facet mgs "$PERM_CMD \
+ -d $FSNAME.llite.max_read_ahead_whole_mb" ||
+ error "Erase param $FSNAME.llite.max_read_ahead_whole_mb failed"
+ fi
umount_client $MOUNT
mount_client $MOUNT || error "mount_client $MOUNT failed"
- FINAL=$($TEST)
+ FINAL=$($LCTL get_param -n $TEST)
echo "deleted (default) value=$FINAL, orig=$ORIG"
# assumes this parameter started at the default value
[ "$FINAL" -eq "$ORIG" ] || fail "Deleted value=$FINAL, orig=$ORIG"
cleanup || error "cleanup failed with rc $?"
}
-run_test 30a "Big config llog and conf_param deletion"
+run_test 30a "Big config llog and permanent parameter deletion"
test_30b() {
setup
local TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
- set_conf_param_and_check client "$TEST" \
- "$FSNAME-OST0000.failover.node" $NEW ||
- error "didn't add failover nid $NEW"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ PARAM="osc.$FSNAME-OST0000-osc-[^M]*.import"
+ echo "Setting $PARAM from $TEST to $NEW"
+ do_facet mgs "$PERM_CMD $PARAM='connection=$NEW'" ||
+ error "$PERM_CMD $PARAM failed"
+ else
+ PARAM="$FSNAME-OST0000.failover.node"
+ echo "Setting $PARAM from $TEST to $NEW"
+ do_facet mgs "$PERM_CMD $PARAM='$NEW'" ||
+			error "$PERM_CMD $PARAM failed"
+ fi
+ wait_update_facet client "$TEST" "$NEW" ||
+ error "check $PARAM failed!"
+
local NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
grep failover_nids)
- echo $NIDS
local NIDCOUNT=$(echo "$NIDS" | wc -w)
echo "should have $((orignidcount + 1)) entries \
in failover nids string, have $NIDCOUNT"
[ $NIDCOUNT -eq $((orignidcount + 1)) ] ||
error "Failover nid not added"
- do_facet mgs "$LCTL conf_param -d $FSNAME-OST0000.failover.node" ||
- error "conf_param delete failed"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs "$PERM_CMD -d osc.$FSNAME-OST0000-osc-*.import"
+ else
+ do_facet mgs "$PERM_CMD -d $FSNAME-OST0000.failover.node" ||
+ error "$PERM_CMD delete failed"
+ fi
umount_client $MOUNT
mount_client $MOUNT || error "mount_client $MOUNT failed"
NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
grep failover_nids)
- echo $NIDS
NIDCOUNT=$(echo "$NIDS" | wc -w)
echo "only $orignidcount final entries should remain \
in failover nids string, have $NIDCOUNT"
local node=$1
local all_removed=false
local i=0
+ local fstype=$(facet_fstype $SINGLEMDS)
+
+ [ $fstype == "zfs" ] && do_rpc_nodes $node "service zed stop"
while ((i < 20)); do
echo "Unloading modules on $node: Attempt $i"
- do_rpc_nodes $node $LUSTRE_RMMOD $(facet_fstype $SINGLEMDS) &&
+ do_rpc_nodes $node $LUSTRE_RMMOD $fstype &&
all_removed=true
do_rpc_nodes $node check_mem_leak || return 1
if $all_removed; then
do_rpc_nodes $node load_modules
return 0
fi
+ if [ $fstype == "zfs" ]; then
+ do_rpc_nodes $node "$ZPOOL status -v"
+ fi
sleep 5
i=$((i + 1))
done
}
t32_verify_quota() {
- local node=$1
+ local facet=$1
local fsname=$2
local mnt=$3
local fstype=$(facet_fstype $SINGLEMDS)
# verification in 32b. The object quota usage should be accurate after
# zfs-0.7.0 is released.
[ $fstype == "zfs" ] && {
- local zfs_version=$(do_node $node cat /sys/module/zfs/version)
+ local zfs_version=$(do_facet $facet cat /sys/module/zfs/version)
[ $(version_code $zfs_version) -lt $(version_code 0.7.0) ] && {
echo "Skip quota verify for zfs: $zfs_version"
return 1
}
- do_node $node $LCTL conf_param $fsname.quota.mdt=ug
- cmd="$LCTL get_param -n osd-$fstype.$fsname-MDT0000"
- cmd=$cmd.quota_slave.enabled
- wait_update $node "$cmd" "ug" || {
- echo "Enable mdt quota failed"
- return 1
- }
+ set_persistent_param_and_check $facet \
+ "osd-$fstype.$fsname-MDT0000.quota_slave.enabled" \
+ "$fsname.quota.mdt" ug
- do_node $node $LCTL conf_param $fsname.quota.ost=ug
- cmd="$LCTL get_param -n osd-$fstype.$fsname-OST0000"
- cmd=$cmd.quota_slave.enabled
- wait_update $node "$cmd" "ug" || {
- echo "Enable ost quota failed"
- return 1
- }
+ set_persistent_param_and_check $facet \
+ "osd-$fstype.$fsname-OST0000.quota_slave.enabled" \
+ "$fsname.quota.ost" ug
chmod 0777 $mnt
runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \
return 1
fi
- $r $LCTL conf_param $fsname-OST0000.osc.max_dirty_mb=15 || {
- error_noexit "Setting \"max_dirty_mb\""
- return 1
- }
- $r $LCTL conf_param $fsname-OST0000.failover.node=$nid || {
- error_noexit "Setting OST \"failover.node\""
- return 1
- }
- $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=9 || {
- error_noexit "Setting \"max_rpcs_in_flight\""
- return 1
- }
- $r $LCTL conf_param $fsname-MDT0000.failover.node=$nid || {
- error_noexit "Setting MDT \"failover.node\""
- return 1
- }
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ $r $PERM_CMD osc.$fsname-OST0000*.import=connection=$nid || {
+ error_noexit "Setting OST \"failover.node\""
+ return 1
+ }
+ $r $PERM_CMD mdc.$fsname-MDT0000*.import=connection=$nid || {
+ error_noexit "Setting MDT \"failover.node\""
+ return 1
+ }
+ $r $PERM_CMD osc.$fsname-OST0000-*.max_dirty_mb=15 || {
+ error_noexit "Setting \"max_dirty_mb\""
+ return 1
+ }
+ $r $PERM_CMD mdc.$fsname-MDT0000-*.max_rpcs_in_flight=9 || {
+ error_noexit "Setting \"max_rpcs_in_flight\""
+ return 1
+ }
+ $r $PERM_CMD lov.$fsname-MDT0000-*.stripesize=4M || {
+ error_noexit "Setting \"lov.stripesize\""
+ return 1
+ }
+ $r $PERM_CMD mdd.$fsname-MDT0000-*.atime_diff=70 || {
+ error_noexit "Setting \"mdd.atime_diff\""
+ return 1
+ }
+ else
+ $r $PERM_CMD $fsname-OST0000.failover.node=$nid || {
+ error_noexit "Setting OST \"failover.node\""
+ return 1
+ }
+
+ $r $PERM_CMD $fsname-MDT0000.failover.node=$nid || {
+ error_noexit "Setting MDT \"failover.node\""
+ return 1
+ }
+
+ $r $PERM_CMD $fsname-OST0000.osc.max_dirty_mb=15 || {
+ error_noexit "Setting \"max_dirty_mb\""
+ return 1
+ }
+ $r $PERM_CMD $fsname-MDT0000.mdc.max_rpcs_in_flight=9 || {
+ error_noexit "Setting \"max_rpcs_in_flight\""
+ return 1
+ }
+ $r $PERM_CMD $fsname-MDT0000.lov.stripesize=4M || {
+ error_noexit "Setting \"lov.stripesize\""
+ return 1
+ }
+ $r $PERM_CMD $fsname-MDT0000.mdd.atime_diff=70 || {
+ error_noexit "Setting \"mdd.atime_diff\""
+ return 1
+ }
+ fi
+
$r $LCTL pool_new $fsname.interop || {
error_noexit "Setting \"interop\""
return 1
}
- $r $LCTL conf_param $fsname-MDT0000.lov.stripesize=4M || {
- error_noexit "Setting \"lov.stripesize\""
- return 1
- }
- $r $LCTL conf_param $fsname-MDT0000.mdd.atime_diff=70 || {
- error_noexit "Setting \"mdd.atime_diff\""
- return 1
- }
if [ "$ff_convert" != "no" -a $(facet_fstype ost1) == "ldiskfs" ]; then
$r $LCTL lfsck_start -M $fsname-OST0000 || {
fi
if [ "$dne_upgrade" != "no" ]; then
- $r $LCTL conf_param \
- $fsname-MDT0001.mdc.max_rpcs_in_flight=9 || {
- error_noexit "Setting MDT1 \"max_rpcs_in_flight\""
- return 1
- }
- $r $LCTL conf_param $fsname-MDT0001.failover.node=$nid || {
- error_noexit "Setting MDT1 \"failover.node\""
- return 1
- }
- $r $LCTL conf_param $fsname-MDT0001.lov.stripesize=4M || {
- error_noexit "Setting MDT1 \"lov.stripesize\""
- return 1
- }
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ $r $PERM_CMD mdc.$fsname-MDT0001*.import=connection=$nid || {
+ error_noexit "Setting MDT1 \"failover.node\""
+ return 1
+ }
+
+ $r $PERM_CMD mdc.$fsname-MDT0001-*.max_rpcs_in_flight=9 || {
+ error_noexit "Setting MDT1 \"max_rpcs_in_flight\""
+ return 1
+ }
+ $r $PERM_CMD lov.$fsname-MDT0001-*.stripesize=4M || {
+ error_noexit "Setting MDT1 \"lov.stripesize\""
+ return 1
+ }
+ else
+ $r $PERM_CMD $fsname-MDT0001.failover.node=$nid || {
+ error_noexit "Setting MDT1 \"failover.node\""
+ return 1
+ }
+ $r $PERM_CMD $fsname-MDT0001.mdc.max_rpcs_in_flight=9 || {
+ error_noexit "Setting MDT1 \"max_rpcs_in_flight\""
+ return 1
+ }
+ $r $PERM_CMD $fsname-MDT0001.lov.stripesize=4M || {
+ error_noexit "Setting MDT1 \"lov.stripesize\""
+ return 1
+ }
+ fi
fi
if [ "$writeconf" ]; then
shall_cleanup_lustre=true
$r $LCTL set_param debug="$PTLDEBUG"
+ # Leave re-enabling this to a separate patch for LU-11558
+ # t32_verify_quota $SINGLEMDS $fsname $tmp/mnt/lustre || {
+ # error_noexit "verify quota failed"
+ # return 1
+ #}
+
if $r test -f $tmp/list; then
#
# There is not a Test Framework API to copy files to or
}
rm $tmp/mnt/lustre/dom
- $r $LCTL get_param -n lod.*MDT0000*.dom_stripesize || {
- error_noexit "Getting \"dom_stripesize\""
- return 1
- }
- $r $LCTL conf_param \
- $fsname-MDT0000.lod.dom_stripesize=0 || {
+ set_persistent_param_and_check mds \
+ "lod.*MDT0000*.dom_stripesize" \
+ "$fsname-MDT0000.lod.dom_stripesize" 0 || {
error_noexit "Changing \"dom_stripesize\""
return 1
}
- wait_update $(facet_host mds) "$LCTL get_param \
- -n lod.*MDT0000*.dom_stripesize" 0 || {
- error_noexit "Verifying \"dom_stripesize\""
- return 1
- }
fi
if [ "$dne_upgrade" != "no" ]; then
mdt_index=$($LFS getdirstripe -i $dir)
stripe_cnt=$($LFS getdirstripe -c $dir)
if [ $mdt_index = 0 -a $stripe_cnt -le 1 ]; then
- $LFS mv -M 1 $dir || {
+ $LFS migrate -m 1 $dir || {
popd
error_noexit "migrate MDT1 failed"
return 1
mdt_index=$($LFS getdirstripe -i $dir)
stripe_cnt=$($LFS getdirstripe -c $dir)
if [ $mdt_index = 1 -a $stripe_cnt -le 1 ]; then
- $LFS mv -M 0 $dir || {
+ $LFS migrate -m 0 $dir || {
popd
error_noexit "migrate MDT0 failed"
return 1
return 1
}
nrpcs=$((nrpcs_orig + 5))
- $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=$nrpcs || {
+
+ set_persistent_param_and_check client \
+ "mdc.$fsname-MDT0000*.max_rpcs_in_flight" \
+ "$fsname-MDT0000.mdc.max_rpcs_in_flight" $nrpcs || {
error_noexit "Changing \"max_rpcs_in_flight\""
return 1
}
- wait_update $HOSTNAME "$LCTL get_param \
- -n mdc.*MDT0000*.max_rpcs_in_flight" $nrpcs || {
- error_noexit "Verifying \"max_rpcs_in_flight\""
- return 1
- }
umount $tmp/mnt/lustre || {
error_noexit "Unmounting the client"
error_noexit "Unmounting the MDT2"
return 1
}
+ if [[ $fstype == zfs ]]; then
+ $r "$ZPOOL export t32fs-mdt2"
+ fi
shall_cleanup_mdt1=false
fi
error_noexit "Unmounting the MDT"
return 1
}
+ if [[ $fstype == zfs ]]; then
+ $r "$ZPOOL export t32fs-mdt1"
+ fi
shall_cleanup_mdt=false
$r $UMOUNT $tmp/mnt/ost || {
error_noexit "Unmounting the OST"
return 1
}
+ if [[ $fstype == zfs ]]; then
+ $r "$ZPOOL export t32fs-ost1"
+ fi
shall_cleanup_ost=false
t32_reload_modules $node || {
run_test 32d "convert ff test"
test_32e() {
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.56) ]] ||
+ { skip "Need MDS version at least 2.10.56"; return 0; }
+
local tarballs
local tarball
local rc=0
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
start fs2ost $fs2ostdev $OST_MOUNT_OPTS
- do_facet mgs "$LCTL conf_param $FSNAME2.sys.timeout=200" ||
- error "$LCTL conf_param $FSNAME2.sys.timeout=200 failed"
+
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs "$PERM_CMD timeout=200" ||
+ error "$PERM_CMD timeout=200 failed"
+ else
+ do_facet mgs "$PERM_CMD $FSNAME2.sys.timeout=200" ||
+ error "$PERM_CMD $FSNAME2.sys.timeout=200 failed"
+ fi
mkdir -p $MOUNT2 || error "mkdir $MOUNT2 failed"
$MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || error "$MOUNT_CMD failed"
echo "ok."
FAKENID="127.0.0.2"
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
- do_facet mgs "$LCTL conf_param \
- ${device}.failover.node=$(h2nettype $FAKENID)" ||
- error "Setting ${device}.failover.node=\
- $(h2nettype $FAKENID) failed."
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs "$PERM_CMD \
+ mdc.*${device}*.import=connection=$(h2nettype $FAKENID)" ||
+ error "Setting mdc.*${device}*.import=connection=\
+ $(h2nettype $FAKENID) failed."
+ else
+ do_facet mgs "$PERM_CMD \
+ ${device}.failover.node=$(h2nettype $FAKENID)" ||
+ error "Setting ${device}.failover.node=\
+ $(h2nettype $FAKENID) failed."
+ fi
log "Wait for RECONNECT_INTERVAL seconds (10s)"
sleep 10
FAKENID="127.0.0.2"
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
- do_facet mgs "$LCTL conf_param \
- ${device}.failover.node=$(h2nettype $FAKENID)" ||
- error "Set ${device}.failover.node=\
- $(h2nettype $FAKENID) failed"
+
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs "$PERM_CMD \
+ mdc.*${device}*.import=connection=$(h2nettype $FAKENID)" ||
+ error "Set mdc.*${device}*.import=connection=\
+ $(h2nettype $FAKENID) failed"
+ else
+ do_facet mgs "$PERM_CMD \
+ ${device}.failover.node=$(h2nettype $FAKENID)" ||
+ error "Set ${device}.failover.node=\
+ $(h2nettype $FAKENID) failed"
+ fi
local at_max_saved=0
# adaptive timeouts may prevent seeing the issue
run_test 41c "concurrent mounts of MDT/OST should all fail but one"
test_42() { #bug 14693
+ local PARAM
+
setup
check_mount || error "client was not mounted"
- do_facet mgs $LCTL conf_param $FSNAME.llite.some_wrong_param=10
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ PARAM="llite.$FSNAME-*.some_wrong_param"
+ else
+ PARAM="$FSNAME.llite.some_wrong_param"
+ fi
+
+ do_facet mgs $PERM_CMD $PARAM=10
umount_client $MOUNT ||
error "unmounting client failed with invalid llite param"
mount_client $MOUNT ||
error "mounting client failed with invalid llite param"
- do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
+ do_facet mgs $PERM_CMD $PARAM=20
cleanup || error "stopping $FSNAME failed with invalid sys param"
setup
check_mount || error "client was not mounted with invalid sys param"
setup
chmod ugo+x $DIR || error "chmod 0 failed"
- set_conf_param_and_check mds1 \
- "$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+ set_persistent_param_and_check mds1 \
+ "mdt.$FSNAME-MDT0000.root_squash" \
"$FSNAME.mdt.root_squash" \
"0:0"
wait_update $HOSTNAME \
"$LCTL get_param -n llite.${FSNAME}*.root_squash" \
"0:0" ||
error "check llite root_squash failed!"
- set_conf_param_and_check mds1 \
- "$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+ set_persistent_param_and_check mds1 \
+ "mdt.$FSNAME-MDT0000.nosquash_nids" \
"$FSNAME.mdt.nosquash_nids" \
"NONE"
wait_update $HOSTNAME \
"NONE" ||
error "check llite nosquash_nids failed!"
- #
- # create set of test files
- #
- echo "111" > $DIR/$tfile-userfile || error "write 1 failed"
- chmod go-rw $DIR/$tfile-userfile || error "chmod 1 failed"
- chown $RUNAS_ID.$RUNAS_ID $DIR/$tfile-userfile || error "chown failed"
+ #
+ # create set of test files
+ #
+ echo "111" > $DIR/$tfile-userfile || error "write 1 failed"
+ chmod go-rw $DIR/$tfile-userfile || error "chmod 1 failed"
+ chown $RUNAS_ID.$RUNAS_ID $DIR/$tfile-userfile || error "chown failed"
- echo "222" > $DIR/$tfile-rootfile || error "write 2 failed"
- chmod go-rw $DIR/$tfile-rootfile || error "chmod 2 faield"
+ echo "222" > $DIR/$tfile-rootfile || error "write 2 failed"
+	chmod go-rw $DIR/$tfile-rootfile || error "chmod 2 failed"
mkdir $DIR/$tdir-rootdir || error "mkdir failed"
chmod go-rwx $DIR/$tdir-rootdir || error "chmod 3 failed"
# set root squash UID:GID to RUNAS_ID
# root should be able to access only files owned by RUNAS_ID
#
- set_conf_param_and_check mds1 \
- "$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+ set_persistent_param_and_check mds1 \
+ "mdt.$FSNAME-MDT0000.root_squash" \
"$FSNAME.mdt.root_squash" \
"$RUNAS_ID:$RUNAS_ID"
wait_update $HOSTNAME \
local NIDLIST=$($LCTL list_nids all | tr '\n' ' ')
NIDLIST="2@gni $NIDLIST 192.168.0.[2,10]@tcp"
NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
- set_conf_param_and_check mds1 \
- "$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+ set_persistent_param_and_check mds1 \
+ "mdt.$FSNAME-MDT0000.nosquash_nids" \
"$FSNAME-MDTall.mdt.nosquash_nids" \
"$NIDLIST"
wait_update $HOSTNAME \
--reformat $fs2mgsdev $fs2mgsvdev || error "add fs2mgs failed"
start $fs2mgs $fs2mgsdev $MGS_MOUNT_OPTS || error "start fs2mgs failed"
stop $fs2mgs -f || error "stop fs2mgs failed"
+ cleanup || error "cleanup failed with $?"
}
run_test 43b "parse nosquash_nids with commas in expr_list"
-umount_client $MOUNT
-cleanup_nocli
-
test_44() { # 16317
setup
check_mount || error "check_mount"
# wait until osts in sync
for (( i=2; i<=$OSTCOUNT; i++ )); do
wait_osc_import_state mds ost$i FULL
- wait_osc_import_state client ost$i FULL
+ wait_osc_import_ready client ost$i
done
#second client see all ost's
run_test 49b "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre"
lazystatfs() {
+ # wait long enough to exceed OBD_STATFS_CACHE_SECONDS = 1
+ sleep 2
# Test both statfs and lfs df and fail if either one fails
multiop_bg_pause $1 f_
- RC1=$?
+ RC=$?
PID=$!
killall -USR1 multiop
- [ $RC1 -ne 0 ] && log "lazystatfs multiop failed"
- wait $PID || { RC1=$?; log "multiop return error "; }
+ [ $RC -ne 0 ] && log "lazystatfs multiop failed"
+	wait $PID || { RC=$?; log "multiop returned error"; }
- $LFS df &
+ # wait long enough to exceed OBD_STATFS_CACHE_SECONDS = 1
+ sleep 2
+ $LFS df -l &
PID=$!
sleep 5
- kill -s 0 $PID
- RC2=$?
- if [ $RC2 -eq 0 ]; then
- kill -s 9 $PID
- log "lazystatfs df failed"
+ if kill -s 0 $PID; then
+ RC=1
+ kill -s 9 $PID
+ log "lazystatfs lfs df failed to complete in 5s"
fi
- RC=0
- [[ $RC1 -ne 0 || $RC2 -eq 0 ]] && RC=1
return $RC
}
# Wait for client to detect down OST
stop_ost || error "Unable to stop OST1"
- wait_osc_import_state mds ost DISCONN
+ wait_osc_import_state client ost DISCONN
+ $LCTL dl
+ log "OSCs should all be DISCONN"
lazystatfs $MOUNT || error "lazystatfs should not return EIO"
setup
start_ost2 || error "Unable to start OST2"
wait_osc_import_state mds ost2 FULL
- wait_osc_import_state client ost2 FULL
+ wait_osc_import_ready client ost2
- local PARAM="${FSNAME}-OST0001.osc.active"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ local PARAM="osc.${FSNAME}-OST0001*.active"
+ else
+ local PARAM="${FSNAME}-OST0001.osc.active"
+ fi
$SETSTRIPE -c -1 $DIR/$tfile || error "$SETSTRIPE failed"
- do_facet mgs $LCTL conf_param $PARAM=0 ||
- error "Unable to deactivate OST"
+ do_facet mgs $PERM_CMD $PARAM=0 || error "Unable to deactivate OST"
umount_client $MOUNT || error "Unable to unmount client"
mount_client $MOUNT || error "Unable to mount client"
# This df should not cause a panic
df -k $MOUNT
- do_facet mgs $LCTL conf_param $PARAM=1 || error "Unable to activate OST"
+ do_facet mgs $PERM_CMD $PARAM=1 || error "Unable to activate OST"
rm -f $DIR/$tfile || error "unable to remove file $DIR/$tfile"
umount_client $MOUNT || error "Unable to unmount client"
stop_ost2 || error "Unable to stop OST2"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
# activatate OSC for OST1
- local TEST="$LCTL get_param -n osc.${FSNAME}-OST0000-osc-[!M]*.active"
- set_conf_param_and_check client \
- "$TEST" "${FSNAME}-OST0000.osc.active" 1 ||
- error "Unable to activate OST1"
+ set_persistent_param_and_check client \
+ "osc.${FSNAME}-OST0000-osc-[!M]*.active" \
+ "${FSNAME}-OST0000.osc.active" 1
mkdir $DIR/$tdir/2 || error "mkdir $DIR/$tdir/2 failed"
$SETSTRIPE -c -1 -i 0 $DIR/$tdir/2 ||
error "$SETSTRIPE $DIR/$tdir/2 failed"
sleep 1 && echo "create a file after OST1 is activated"
- # create some file
- createmany -o $DIR/$tdir/2/$tfile-%d 1
+ # doing some io, shouldn't crash
+ dd if=/dev/zero of=$DIR/$tdir/2/$tfile-io bs=1M count=10
# check OSC import is working
stat $DIR/$tdir/2/* >/dev/null 2>&1 ||
error "some OSC imports are still not connected"
# cleanup
+	rm -rf $DIR/$tdir
umount_client $MOUNT || error "Unable to umount client"
stop_ost2 || error "Unable to stop OST2"
cleanup_nocli || error "cleanup_nocli failed with $?"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
- $LCTL conf_param ${FSNAME}-MDT0000.mdc.active=0 &&
- error "deactive MDC0 succeeds"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ $PERM_CMD mdc.${FSNAME}-MDT0001-mdc-*.active=0 &&
+ error "deactive MDC0 succeeds"
+ else
+ $PERM_CMD ${FSNAME}-MDT0000.mdc.active=0 &&
+ error "deactive MDC0 succeeds"
+ fi
+
# activate MDC for MDT2
- local TEST="$LCTL get_param -n mdc.${FSNAME}-MDT0001-mdc-[!M]*.active"
- set_conf_param_and_check client \
- "$TEST" "${FSNAME}-MDT0001.mdc.active" 1 ||
- error "Unable to activate MDT2"
+ set_persistent_param_and_check client \
+ "mdc.${FSNAME}-MDT0001-mdc-*.active" \
+ "${FSNAME}-MDT0001.mdc.active" 1
wait_clients_import_state ${CLIENTS:-$HOSTNAME} mds2 FULL
if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.60) ]
rm -rf $DIR/$tdir/2 || error "unlink dir failed"
# deactivate MDC for MDT2
- local TEST="$LCTL get_param -n mdc.${FSNAME}-MDT0001-mdc-[!M]*.active"
- set_conf_param_and_check client \
- "$TEST" "${FSNAME}-MDT0001.mdc.active" 0 ||
- error "Unable to deactivate MDT2"
+ set_persistent_param_and_check client \
+ "mdc.${FSNAME}-MDT0001-mdc-*.active" \
+ "${FSNAME}-MDT0001.mdc.active" 0
wait_osp_active mds ${FSNAME}-MDT0001 1 0
done
echo
+	# sync all the data and make sure there is no pending data on the
+	# client, so that the SOM xattr will not be changed any more.
+ cancel_lru_locks osc
+
# backup files
echo backup files to $TMP/$tdir
local files=$(find $DIR/$tdir -type f -newer $TMP/modified_first)
setmodopts $modname "$oldvalue"
# Check that $opts took
- tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min")
- tmax=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max")
+ tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" ||
+ echo 0)
+ tmax=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" ||
+ echo 0)
tstarted=$(do_facet $facet \
- "$LCTL get_param -n ${paramp}.threads_started")
+ "$LCTL get_param -n ${paramp}.threads_started" || echo 0)
lassert 28 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' ||
return $?
cleanup
setup_noconfig
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
createmany -o $DIR/$tdir/$tfile-%d 100
- # make sure that OSTs do not cancel llog cookies before we unmount the MDS
-#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
- do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x601"
unlinkmany $DIR/$tdir/$tfile-%d 100
stop_mds || error "Unable to stop MDS"
lxattr=true
for num in $(seq $MDSCOUNT); do
- do_facet mds${num} $TUNE2FS -O large_xattr \
+ do_facet mds${num} $TUNE2FS -O ea_inode \
$(mdsdevname $num) ||
error "tune2fs on mds $num failed"
done
# need to delete this file to avoid problems in other tests
rm -f $file
- stopall || error "stopping systems to turn off large_xattr"
- if $lxattr; then
- for num in $(seq $MDSCOUNT); do
- do_facet mds${num} $TUNE2FS -O ^large_xattr \
- $(mdsdevname $num) ||
- error "tune2fs on mds $num failed"
- done
- fi
+ stopall || error "stopping systems failed"
}
run_test 61 "large xattr"
local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1)
local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -1)
+	# add EXCLUDE records to the config log; they are not supposed to
+	# be removed by lctl replace_nids
+ set_conf_param_and_check mds \
+ "$LCTL get_param -n osc.$FSNAME-OST0000-osc-MDT0000.active" \
+ "$FSNAME-OST0000.osc.active" \
+ "0"
+
echo "replace_nids should fail if MDS, OSTs and clients are UP"
do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
error "replace_nids fail"
echo "wrong nids list should not destroy the system"
do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "wrong nids list" &&
error "wrong parse"
+ do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "asdfasdf, asdfadf" &&
+ error "wrong parse"
echo "replace OST nid"
do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID ||
do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 "wrong nids list" &&
error "wrong parse"
+ local FAKE_NIDS="192.168.0.112@tcp1,192.168.0.112@tcp2"
+ local FAKE_FAILOVER="192.168.0.113@tcp1,192.168.0.113@tcp2"
+ local NIDS_AND_FAILOVER="$MDS_NID,$FAKE_NIDS:$FAKE_FAILOVER"
+ echo "set NIDs with failover"
+ do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $NIDS_AND_FAILOVER ||
+ error "replace nids failed"
+
echo "replace MDS nid"
do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $MDS_NID ||
error "replace nids failed"
stop_mds || error "Unable to stop MDS"
fi
- setup_noconfig
+ start_mgsmds || error "start mgsmds failed"
+ set_conf_param_and_check mds \
+ "$LCTL get_param -n osc.$FSNAME-OST0000-osc-MDT0000.active" \
+ "$FSNAME-OST0000.osc.active" \
+ "1"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "mount client failed"
+
check_mount || error "error after nid replace"
cleanup || error "cleanup failed"
reformat
lctl --net tcp4 add_route 10.3.3.4@tcp 1 3
VERIFY_LNET_CONFIG
- lustre_routes_conversion $legacy $new > /dev/null
+ $LUSTRE_ROUTES_CONVERSION $legacy $new > /dev/null
if [ -f $new ]; then
# verify the conversion output
cmp -s $new $verify > /dev/null
local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM |
head -1)
echo "max_dirty_mb: $MAX_DIRTY_MB"
- local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB + MAX_DIRTY_MB))
+ local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB - 10))
echo "new_max_dirty_mb: $NEW_MAX_DIRTY_MB"
do_facet mgs $LCTL set_param -P $MDMB_PARAM=$NEW_MAX_DIRTY_MB
wait_update $HOSTNAME "$LCTL get_param -n $MDMB_PARAM |
error "client_cache_count is not saved after remount"
stopall
}
-run_test 76a "set permanent params set_param -P"
+run_test 76a "set permanent params with lctl across mounts"
test_76b() { # LU-4783
[[ $(lustre_version_code mgs) -ge $(version_code 2.5.57) ]] ||
stopall
}
-run_test 76c "verify changelog_mask is applied with set_param -P"
+run_test 76c "verify changelog_mask is applied with lctl set_param -P"
test_76d() { #LU-9399
setupall
stopall
}
-run_test 76d "verify llite.*.xattr_cache can be set by 'set_param -P' correctly"
+run_test 76d "verify llite.*.xattr_cache can be set by 'lctl set_param -P' correctly"
test_77() { # LU-3445
local server_version=$(lustre_version_code $SINGLEMDS)
echo -e "\n$cmd"
eval $cmd && error "index $start_ost_idx should be in $ost_indices"
- # 5. Specifying OST indices for directory should fail with ENOSUPP.
+ # 5. Specifying OST indices for directory should succeed.
local dir=$DIR/$tdir/$tdir
mkdir $dir || error "mkdir $dir failed"
cmd="$SETSTRIPE -o $ost_indices $dir"
- echo -e "\n$cmd"
- eval $cmd && error "$cmd should fail, specifying OST indices" \
- "for directory is not supported"
+ if [[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.11.53) &&
+		$(lustre_version_code client) -gt $(version_code 2.11.53) ]]; then
+ echo -e "\n$cmd"
+ eval $cmd || error "unable to specify OST indices on directory"
+ else
+ echo "need MDS+client version at least 2.11.53"
+ fi
restore_ostindex
}
-run_test 82a "specify OSTs for file (succeed) or directory (fail)"
+run_test 82a "specify OSTs for file (succeed) or directory (succeed)"
cleanup_82b() {
trap 0
run_test 86 "Replacing mkfs.lustre -G option"
test_87() { #LU-6544
- [[ $(lustre_version_code $SINGLEMDS1) -ge $(version_code 2.9.51) ]] ||
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.51) ]] ||
{ skip "Need MDS version at least 2.9.51" && return; }
[[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
{ skip "ldiskfs only test" && return; }
check_mount || error "check client $MOUNT failed"
#set xattr
- $SETSTRIPE -E 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
+ $SETSTRIPE -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
error "Create file with 3 components failed"
$TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed"
i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed"
run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
test_101() {
- local createmany_oid
+ local createmany_pid
local dev=$FSNAME-OST0000-osc-MDT0000
setup
- createmany -o $DIR1/$tfile-%d 50000 &
- createmany_oid=$!
+ mkdir $DIR1/$tdir
+ createmany -o $DIR1/$tdir/$tfile-%d 50000 &
+ createmany_pid=$!
# MDT->OST reconnection causes MDT<->OST last_id synchornisation
# via osp_precreate_cleanup_orphans.
for ((i = 0; i < 100; i++)); do
done
ls -asl $MOUNT | grep '???' &&
- (kill -9 $createmany_oid &>/dev/null; \
- error "File hasn't object on OST")
+ { kill -9 $createmany_pid &>/dev/null;
+ error "File has no object on OST"; }
- kill -s 0 $createmany_oid || break
+ kill -s 0 $createmany_pid || break
done
- wait $createmany_oid
+ wait $createmany_pid
+
+ unlinkmany $DIR1/$tdir/$tfile-%d 50000
cleanup
}
run_test 101 "Race MDT->OST reconnection with create"
test_102() {
+ [[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.9.53) ]] ||
+ skip "Need server version greater than 2.9.53"
cleanup || error "cleanup failed with $?"
local mds1dev=$(mdsdevname 1)
#shows that osp code is buggy
do_facet mds1 $LCTL set_param fail_loc=0 fail_val=0
- cleanupall
+ stopall
}
run_test 106 "check osp llog processing when catalog is wrapped"
test_107() {
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.50) ]] ||
{ skip "Need MDS version > 2.10.50"; return; }
+ local cmd
start_mgsmds || error "start_mgsmds failed"
start_ost || error "unable to start OST"
# add unknown configuration parameter.
- local PARAM="$FSNAME-OST0000.ost.unknown_param=50"
- do_facet mgs "$LCTL conf_param $PARAM"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ cmd="$PERM_CMD ost.$FSNAME-OST0000*.unknown_param"
+ else
+ cmd="$PERM_CMD $FSNAME-OST0000*.ost.unknown_param"
+ fi
+ do_facet mgs "$cmd=50"
cleanup_nocli || error "cleanup_nocli failed with $?"
load_modules
}
run_test 107 "Unknown config param should not fail target mounting"
+t_108_prep() {
+ local facet
+
+ $rcmd rm -rf $tmp > /dev/null 2>&1
+ $rcmd mkdir -p $tmp/{mnt,images} || error "failed to mkdir remotely"
+
+ for facet in $facets; do
+ [ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
+			$rcmd $ZPOOL export -f lustre-$facet > /dev/null 2>&1
+ $rcmd mkdir $tmp/mnt/$facet ||
+ error "failed to mkdir $tmp/mnt/$facet"
+ $rcmd dd if=/dev/zero of=$tmp/images/$facet \
+ seek=199 bs=1M count=1 ||
+ error "failed to create $tmp/images/$facet"
+ done
+}
+
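+# usage examples (matching the calls in test_108a/test_108b below):
+#   t_108_mkfs mdt 0 zfs mgs lustre-mdt1/mdt1
+#   t_108_mkfs ost 0 ldiskfs mgsnode=$nid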
+t_108_mkfs() {
+ local role=$1
+ local idx=$2
+ local bkfs=$3
+ local mgs=$4
+ local facet=${role}$((idx + 1))
+ local pool=""
+ [ $# -eq 5 ] && pool=$5
+
+ do_facet $SINGLEMDS $MKFS --fsname=lustre --$mgs \
+ --$role --index=$idx --replace --backfstype=$bkfs \
+ --device-size=200000 --reformat $pool $tmp/images/$facet ||
+ error "failed to mkfs for $facet"
+}
+
+t_108_check() {
+ echo "mounting client..."
+ mount -t lustre ${nid}:/lustre $MOUNT ||
+ error "failed to mount lustre"
+
+ echo "check list"
+ ls -l $MOUNT/local_dir || error "failed to list"
+
+ echo "check truncate && write"
+ echo "dummmmmmmmmmmmm" > $MOUNT/remote_dir/fsx.c ||
+		error "failed to truncate & write"
+
+ echo "check create"
+ touch $MOUNT/foooo ||
+ error "failed to create"
+
+ echo "check read && write && append"
+ sha1sum $MOUNT/conf-sanity.sh |
+ awk '{ print $1 }' > $MOUNT/checksum.new ||
+ error "failed to read(1)"
+ sha1sum $MOUNT/remote_dir/unlinkmany.c |
+ awk '{ print $1 }' >> $MOUNT/checksum.new ||
+ error "failed to read(2)"
+ sha1sum $MOUNT/striped_dir/lockahead_test.o |
+ awk '{ print $1 }' >> $MOUNT/checksum.new ||
+ error "failed to read(3)"
+
+ echo "verify data"
+ diff $MOUNT/checksum.new $MOUNT/checksum.src ||
+ error "failed to verify data"
+
+ echo "done."
+}
+
+t_108_cleanup() {
+ trap 0
+ local facet
+
+ echo "cleanup..."
+ umount -f $MOUNT || error "failed to umount client"
+ for facet in $facets; do
+ $rcmd umount -f $tmp/mnt/$facet ||
+ error "failed to umount $facet"
+ if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then
+ $rcmd $ZPOOL export -f lustre-$facet ||
+ error "failed to export lustre-$facet"
+ fi
+ done
+
+ $rcmd rm -rf $tmp || error "failed to rm the dir $tmp"
+}
+
+test_108a() {
+ [ "$CLIENTONLY" ] && skip "Client-only testing" && return
+
+ [ $(facet_fstype $SINGLEMDS) != "zfs" ] &&
+ skip "zfs only test" && return
+
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] &&
+ skip "Need server version at least 2.10.58" && return
+
+ stopall
+ load_modules
+
+ local tmp=$TMP/$tdir
+ local rcmd="do_facet $SINGLEMDS"
+ local facets="mdt1 mdt2 ost1 ost2"
+ local nid=$($rcmd $LCTL list_nids | head -1)
+ local facet
+
+ trap t_108_cleanup EXIT ERR
+ t_108_prep
+
+ t_108_mkfs mdt 0 zfs mgs lustre-mdt1/mdt1
+ t_108_mkfs mdt 1 zfs mgsnode=$nid lustre-mdt2/mdt2
+ t_108_mkfs ost 0 zfs mgsnode=$nid lustre-ost1/ost1
+ t_108_mkfs ost 1 zfs mgsnode=$nid lustre-ost2/ost2
+
+ for facet in $facets; do
+ $rcmd zfs set mountpoint=$tmp/mnt/$facet canmount=on \
+ lustre-$facet/$facet ||
+ error "failed to zfs set for $facet (1)"
+ $rcmd zfs mount lustre-$facet/$facet ||
+ error "failed to local mount $facet"
+ $rcmd tar jxf $LUSTRE/tests/ldiskfs_${facet}_2_11.tar.bz2 \
+ --xattrs --xattrs-include="trusted.*" \
+ -C $tmp/mnt/$facet/ > /dev/null 2>&1 ||
+ error "failed to untar image for $facet"
+ $rcmd "cd $tmp/mnt/$facet && rm -rf oi.* OI_* lfsck_* LFSCK" ||
+ error "failed to cleanup for $facet"
+ $rcmd zfs umount lustre-$facet/$facet ||
+ error "failed to local umount $facet"
+ $rcmd zfs set canmount=off lustre-$facet/$facet ||
+ error "failed to zfs set $facet (2)"
+ done
+
+ echo "changing server nid..."
+ $rcmd mount -t lustre -o nosvc lustre-mdt1/mdt1 $tmp/mnt/mdt1
+ $rcmd lctl replace_nids lustre-MDT0000 $nid
+ $rcmd lctl replace_nids lustre-MDT0001 $nid
+ $rcmd lctl replace_nids lustre-OST0000 $nid
+ $rcmd lctl replace_nids lustre-OST0001 $nid
+ $rcmd umount $tmp/mnt/mdt1
+
+ for facet in $facets; do
+ echo "mounting $facet from backup..."
+ $rcmd mount -t lustre -o abort_recov lustre-$facet/$facet \
+ $tmp/mnt/$facet || error "failed to mount $facet"
+ done
+
+ # ZFS backend can detect migration and trigger OI scrub automatically
+ # sleep 3 seconds for scrub done
+ sleep 3
+
+ t_108_check
+ t_108_cleanup
+}
+run_test 108a "migrate from ldiskfs to ZFS"
+
+test_108b() {
+ [ "$CLIENTONLY" ] && skip "Client-only testing" && return
+
+ [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+ skip "ldiskfs only test" && return
+
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] &&
+ skip "Need server version at least 2.10.58" && return
+
+ stopall
+ load_modules
+
+ local tmp=$TMP/$tdir
+ local rcmd="do_facet $SINGLEMDS"
+ local facets="mdt1 mdt2 ost1 ost2"
+ local scrub_list="MDT0000 MDT0001 OST0000 OST0001"
+ local nid=$($rcmd $LCTL list_nids | head -1)
+ local facet
+
+ trap t_108_cleanup EXIT ERR
+ t_108_prep
+
+ t_108_mkfs mdt 0 ldiskfs mgs
+ t_108_mkfs mdt 1 ldiskfs mgsnode=$nid
+ t_108_mkfs ost 0 ldiskfs mgsnode=$nid
+ t_108_mkfs ost 1 ldiskfs mgsnode=$nid
+
+ for facet in $facets; do
+ $rcmd mount -t ldiskfs -o loop $tmp/images/$facet \
+ $tmp/mnt/$facet ||
+ error "failed to local mount $facet"
+
+ $rcmd tar jxf $LUSTRE/tests/zfs_${facet}_2_11.tar.bz2 \
+ --xattrs --xattrs-include="*.*" \
+ -C $tmp/mnt/$facet/ > /dev/null 2>&1 ||
+ error "failed to untar image for $facet"
+ $rcmd "cd $tmp/mnt/$facet && rm -rf oi.* OI_* lfsck_* LFSCK" ||
+ error "failed to cleanup for $facet"
+ $rcmd umount $tmp/mnt/$facet ||
+ error "failed to local umount $facet"
+ done
+
+ echo "changing server nid..."
+ $rcmd mount -t lustre -o nosvc,loop $tmp/images/mdt1 $tmp/mnt/mdt1
+ $rcmd lctl replace_nids lustre-MDT0000 $nid
+ $rcmd lctl replace_nids lustre-MDT0001 $nid
+ $rcmd lctl replace_nids lustre-OST0000 $nid
+ $rcmd lctl replace_nids lustre-OST0001 $nid
+ $rcmd umount $tmp/mnt/mdt1
+
+ for facet in $facets; do
+ echo "mounting $facet from backup..."
+ $rcmd mount -t lustre -o loop,abort_recov $tmp/images/$facet \
+ $tmp/mnt/$facet || error "failed to mount $facet"
+ done
+
+ for facet in $scrub_list; do
+ $rcmd $LCTL lfsck_start -M lustre-$facet -t scrub ||
+ error "failed to start OI scrub on $facet"
+ done
+
+ # sleep 3 seconds for scrub done
+ sleep 3
+
+ t_108_check
+ t_108_cleanup
+}
+run_test 108b "migrate from ZFS to ldiskfs"
+
+
+#
+# set number of permanent parameters
+#
+test_109_set_params() {
+ local fsname=$1
+
+ set_persistent_param_and_check mds \
+ "mdd.$fsname-MDT0000.atime_diff" \
+ "$fsname-MDT0000.mdd.atime_diff" \
+ "62"
+ set_persistent_param_and_check mds \
+ "mdd.$fsname-MDT0000.atime_diff" \
+ "$fsname-MDT0000.mdd.atime_diff" \
+ "63"
+ set_persistent_param_and_check client \
+ "llite.$fsname*.max_read_ahead_mb" \
+ "$fsname.llite.max_read_ahead_mb" \
+ "32"
+ set_persistent_param_and_check client \
+ "llite.$fsname*.max_read_ahead_mb" \
+ "$fsname.llite.max_read_ahead_mb" \
+ "64"
+ create_pool $fsname.pool1 || error "create pool failed"
+ do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 ||
+ error "pool_add failed"
+ do_facet mgs $LCTL pool_remove $fsname.pool1 OST0000 ||
+ error "pool_remove failed"
+ do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 ||
+ error "pool_add failed"
+}
+
+#
+# check permanent parameters
+#
+test_109_test_params() {
+ local fsname=$1
+
+ local atime_diff=$(do_facet mds $LCTL \
+ get_param -n mdd.$fsname-MDT0000.atime_diff)
+ [ $atime_diff == 63 ] || error "wrong mdd parameter after clear_conf"
+ local max_read_ahead_mb=$(do_facet client $LCTL \
+ get_param -n llite.$fsname*.max_read_ahead_mb)
+ [ $max_read_ahead_mb == 64 ] ||
+ error "wrong llite parameter after clear_conf"
+ local ost_in_pool=$(do_facet mds $LCTL pool_list $fsname.pool1 |
+ grep -v "^Pool:" | sed 's/_UUID//')
+ [ $ost_in_pool = "$fsname-OST0000" ] ||
+ error "wrong pool after clear_conf"
+}
+
+#
+# run lctl clear_conf, store CONFIGS before and after that
+#
+test_109_clear_conf()
+{
+ local clear_conf_arg=$1
+
+ local mgsdev
+ if ! combined_mgs_mds ; then
+ mgsdev=$MGSDEV
+ stop_mgs || error "stop_mgs failed"
+ start_mgs "-o nosvc" || error "start_mgs nosvc failed"
+ else
+ mgsdev=$(mdsdevname 1)
+ start_mdt 1 "-o nosvc" || error "start_mdt 1 nosvc failed"
+ fi
+
+ do_facet mgs "rm -rf $TMP/${tdir}/conf1; mkdir -p $TMP/${tdir}/conf1;" \
+ "$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf1\\\" \
+ $mgsdev"
+
+ #
+ # the command being tested
+ #
+ do_facet mgs $LCTL clear_conf $clear_conf_arg ||
+ error "clear_conf failed"
+ if ! combined_mgs_mds ; then
+ stop_mgs || error "stop_mgs failed"
+ else
+ stop_mdt 1 || error "stop_mdt 1 failed"
+ fi
+
+ do_facet mgs "rm -rf $TMP/${tdir}/conf2; mkdir -p $TMP/${tdir}/conf2;" \
+ "$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf2\\\" \
+ $mgsdev"
+}
+
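+#
+# return 0 if the CONFIGS file $1 shrank between the two dumps taken by
+# test_109_clear_conf(), i.e. the config llog was actually cleared
+#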
+test_109_file_shortened() {
+ local file=$1
+ local sizes=($(do_facet mgs "stat -c %s " \
+ "$TMP/${tdir}/conf1/CONFIGS/$file" \
+ "$TMP/${tdir}/conf2/CONFIGS/$file"))
+ [ ${sizes[1]} -lt ${sizes[0]} ] && return 0
+ return 1
+}
+
+test_109a()
+{
+ [ "$(facet_fstype mgs)" == "zfs" ] &&
+ skip "LU-8727: no implementation for ZFS" && return
+ stopall
+ reformat
+ setup_noconfig
+ client_up || error "client_up failed"
+
+ #
+ # set number of permanent parameters
+ #
+ test_109_set_params $FSNAME
+
+ umount_client $MOUNT || error "umount_client failed"
+ stop_ost || error "stop_ost failed"
+ stop_mds || error "stop_mds failed"
+
+ test_109_clear_conf $FSNAME
+ #
+ # make sure that all configs are cleared
+ #
+ test_109_file_shortened $FSNAME-MDT0000 ||
+ error "failed to clear MDT0000 config"
+ test_109_file_shortened $FSNAME-client ||
+ error "failed to clear client config"
+
+ setup_noconfig
+
+ #
+ # check that configurations are intact
+ #
+ test_109_test_params $FSNAME
+
+ #
+ # Destroy pool.
+ #
+ destroy_test_pools || error "destroy test pools failed"
+
+ cleanup
+}
+run_test 109a "test lctl clear_conf fsname"
+
+test_109b()
+{
+ [ "$(facet_fstype mgs)" == "zfs" ] &&
+ skip "LU-8727: no implementation for ZFS" && return
+ stopall
+ reformat
+ setup_noconfig
+ client_up || error "client_up failed"
+
+ #
+ # set number of permanent parameters
+ #
+ test_109_set_params $FSNAME
+
+ umount_client $MOUNT || error "umount_client failed"
+ stop_ost || error "stop_ost failed"
+ stop_mds || error "stop_mds failed"
+
+ test_109_clear_conf $FSNAME-MDT0000
+ #
+ # make sure that only one config is cleared
+ #
+ test_109_file_shortened $FSNAME-MDT0000 ||
+ error "failed to clear MDT0000 config"
+ test_109_file_shortened $FSNAME-client &&
+ error "failed to clear client config"
+
+ setup_noconfig
+
+ #
+ # check that configurations are intact
+ #
+ test_109_test_params $FSNAME
+
+ #
+ # Destroy pool.
+ #
+ destroy_test_pools || error "destroy test pools failed"
+
+ cleanup
+}
+run_test 109b "test lctl clear_conf one config"
+
+cleanup_115()
+{
+ trap 0
+ stopall
+ rm -f $TMP/$tdir/lustre-mdt
+ formatall
+}
+
+test_115() {
+ IMAGESIZE=$((3072 << 30)) # 3072 GiB
+
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
+ stopall
+	# We need an MDT size of 3072GB, because that is the smallest
+	# partition size that can store 2B inodes
+ do_facet $SINGLEMDS "mkdir -p $TMP/$tdir"
+ local mdsimgname=$TMP/$tdir/lustre-mdt
+ do_facet $SINGLEMDS "rm -f $mdsimgname"
+ do_facet $SINGLEMDS "touch $mdsimgname"
+ trap cleanup_115 RETURN EXIT
+ do_facet $SINGLEMDS "$TRUNCATE $mdsimgname $IMAGESIZE" ||
+ { skip "Backend FS doesn't support sparse files"; return 0; }
+ local mdsdev=$(do_facet $SINGLEMDS "losetup -f")
+ do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname"
+
+ local mds_opts="$(mkfs_opts mds1 ${mdsdev}) --device-size=$IMAGESIZE \
+ --mkfsoptions='-O lazy_itable_init,ea_inode,^resize_inode,meta_bg \
+ -i 1024'"
+ add mds1 $mds_opts --mgs --reformat $mdsdev ||
+ { skip_env "format large MDT failed"; return 0; }
+ add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \
+ --reformat $(ostdevname 1) $(ostvdevname 1)
+
+ start $SINGLEMDS ${mdsdev} $MDS_MOUNT_OPTS || error "start MDS failed"
+ start_ost || error "start OSS failed"
+ mount_client $MOUNT || error "mount client failed"
+
+ mkdir -p $DIR/$tdir || error "mkdir $DIR/$tdir fail"
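+	# push the ldiskfs inode allocation goal above 2^31 so the test file
+	# below lands on an inode number in the >2B range being verified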
+ for goal in $(do_facet $SINGLEMDS "ls /sys/fs/ldiskfs/*/inode_goal"); do
+ do_facet $SINGLEMDS "echo 2147483947 >> $goal; grep . $goal"
+ done
+
+ touch $DIR/$tdir/$tfile
+
+ # Add > 5k bytes to xattr
+ for i in {1..30}; do
+ ln $DIR/$tdir/$tfile $DIR/$tdir/$(printf "link%0250d" $i) ||
+ error "Can't make link"
+ done
+
+ sync; sleep 5; sync
+
+ local inode_num=$(do_facet $SINGLEMDS \
+ "$DEBUGFS -c -R 'stat ROOT/$tdir/$tfile' $mdsimgname" |
+ awk '/link =/ { print $4 }' |
+ sed -e 's/>//' -e 's/<//' -e 's/\"//')
+ echo "inode num: $inode_num"
+ [ $inode_num -ge 2147483947 ] || error "inode $inode_num too small"
+ do_facet $SINGLEMDS "losetup -d $mdsdev"
+ cleanup_115
+}
+run_test 115 "Access large xattr with inodes number over 2TB"
+
+test_116() {
+ [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+ skip "ldiskfs only test" && return
+
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.59) ] &&
+ skip "Need server version at least 2.10.59" && return
+
+ do_facet $SINGLEMDS which mkfs.xfs || {
+ skip_env "No mkfs.xfs installed"
+ return
+ }
+
+ stopall
+ load_modules
+
+ local tmpmnt=$TMP/$tdir
+ local mdtimg=$tfile-mdt0
+
+ do_facet $SINGLEMDS mkdir -p $tmpmnt
+ stack_trap "do_facet $SINGLEMDS rmdir $tmpmnt" EXIT
+
+ do_facet $SINGLEMDS touch $TMP/$mdtimg
+ stack_trap "do_facet $SINGLEMDS rm -f $TMP/$mdtimg" EXIT
+ do_facet $SINGLEMDS mkfs -t xfs -d file,size=1t,name=$TMP/$mdtimg ||
+ error "mkfs temporary xfs image"
+
+ do_facet $SINGLEMDS mount $TMP/$mdtimg $tmpmnt ||
+ error "mount temporary xfs image"
+ stack_trap "do_facet $SINGLEMDS umount $tmpmnt" EXIT
+ local old_mdssize=$MDSSIZE
+ local old_mdsisize=$MDSISIZE
+
+ MDSSIZE=$((17 * 1024 * 1024 * 1024)) # 17T MDT
+ MDSISIZE=$((16 << 20))
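+	# generate mkfs options as if for a 17TiB MDT so the large-device
+	# feature set (e.g. the extent feature checked below) is selected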
+ local opts17t="$(mkfs_opts $SINGLEMDS)"
+
+ MDSSIZE=$old_mdssize
+ MDSISIZE=$old_mdsisize
+ do_facet $SINGLEMDS $MKFS $opts17t $tmpmnt/$mdtimg ||
+ error "failed to mkfs for $tmpmnt/$mdtimg"
+
+ do_facet $SINGLEMDS $TUNE2FS -l $tmpmnt/$mdtimg |
+ grep -qw 'features.*extent' || error "extent should be enabled"
+}
+run_test 116 "big size MDT support"
+
+test_117() {
+ setup
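+	# with the fifo policy active there are no TBF rules, so reading
+	# nrs_tbf_rule is expected to fail; verify that lctl get_param
+	# actually returns that error instead of succeeding silently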
+ do_facet ost1 "$LCTL set_param ost.OSS.ost_io.nrs_policies=fifo"
+ do_facet ost1 "$LCTL get_param -n ost.OSS.ost_io.nrs_tbf_rule" &&
+ error "get_param should fail"
+ cleanup || error "cleanup failed with rc $?"
+}
+run_test 117 "lctl get_param return errors properly"
+
+test_120() { # LU-11130
+ [ "$MDSCOUNT" -lt 2 ] && skip "mdt count < 2"
+ [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+ skip "ldiskfs only test"
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.56) ] &&
+ skip "Need DNE2 capable MD target with LU-11130 fix"
+
+ setup
+
+ local mds1host=$(facet_active_host mds1)
+ local mds1dev=$(mdsdevname 1)
+
+ $LFS mkdir -i 1 $DIR/$tdir
+ $LFS mkdir -i 0 $DIR/$tdir/mds1dir
+
+ ln -s foo $DIR/$tdir/bar
+ mv $DIR/$tdir/bar $DIR/$tdir/mds1dir/bar2 ||
+ error "cross-target rename failed"
+
+ stopall
+
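+	# a read-only e2fsck on MDT0000 should come back clean; before the
+	# LU-11130 fix the cross-MDT rename above could leave a bad symlink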
+ run_e2fsck $mds1host $mds1dev "-n"
+}
+run_test 120 "cross-target rename should not create bad symlinks"
+
+test_122() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.11.53) ]] ||
+ { skip "Need OST version at least 2.11.53" && return 0; }
+
+ reformat
+ LOAD_MODULES_REMOTE=true load_modules
+#define OBD_FAIL_OFD_SET_OID 0x1e0
+ do_facet ost1 $LCTL set_param fail_loc=0x00001e0
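+	# the fail_loc is assumed to start the OST at the end of its current
+	# object sequence, so createmany below succeeds only if a new
+	# sequence is allocated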
+
+ setupall
+ $LFS mkdir -i1 -c1 $DIR/$tdir
+ $LFS setstripe -i0 -c1 $DIR/$tdir
+ do_facet ost1 $LCTL set_param fail_loc=0
+ createmany -o $DIR/$tdir/file_ 1000 ||
+ error "Fail to create a new sequence"
+
+ reformat
+}
+run_test 122 "Check OST sequence update"
+
+test_123() {
+ setupall
+ local yaml_file="$TMP/$tfile.yaml"
+ do_facet mgs rm "$yaml_file"
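+	# collect the config llog names (one per target plus the client log)
+	# from the MGS catalog so each can be dumped into the yaml file below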
+ local cfgfiles=$(do_facet mgs "lctl --device MGS llog_catlist |"\
+ " sed 's/config_log://'")
+
+ # set jobid_var to a different value for test
+ local orig_val=$(do_facet mgs $LCTL get_param jobid_var)
+ do_facet mgs $LCTL set_param -P jobid_var="testname"
+
+ for i in $cfgfiles params; do
+ do_facet mgs "lctl --device MGS llog_print ${i} >> $yaml_file"
+ done
+
+ echo "Unmounting FS"
+ stopall
+ echo "Writeconf"
+ writeconf_all
+ echo "Remounting"
+ mountmgs
+ mountmds
+ mountoss
+ mountcli
+
+ # Reapply the config from before
+ echo "Setting configuration parameters"
+ do_facet mgs "lctl set_param -F $yaml_file"
+
+ local set_val=$(do_facet mgs $LCTL get_param jobid_var)
+ do_facet mgs $LCTL set_param -P $orig_val
+
+ [ $set_val == "jobid_var=testname" ] ||
+ error "$set_val is not testname"
+
+ do_facet mgs rm "$yaml_file"
+}
+run_test 123 "clear and reset all parameters using set_param -F"
+
+test_124()
+{
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+ [ -z $mds2failover_HOST ] && skip "needs MDT failover setup" && return
+
+ setup
+ cleanup
+
+ load_modules
+ if combined_mgs_mds; then
+ start_mdt 1 "-o nosvc" ||
+ error "starting mds with nosvc option failed"
+ fi
+ local nid=$(do_facet mds2 $LCTL list_nids | head -1)
+ local failover_nid=$(do_node $mds2failover_HOST $LCTL list_nids | head -1)
+ do_facet mgs $LCTL replace_nids $FSNAME-MDT0001 $nid:$failover_nid ||
+ error "replace_nids execution error"
+
+ if combined_mgs_mds; then
+ stop_mdt 1
+ fi
+
+ setup
+ fail mds2
+ echo "lfs setdirstripe"
+ $LFS setdirstripe -i 1 $MOUNT/$tdir || error "setdirstirpe error"
+ echo ok
+}
+run_test 124 "check failover after replace_nids"
+
if ! combined_mgs_mds ; then
stop mgs
fi
reformat
complete $SECONDS
+check_and_cleanup_lustre
exit_status