# bug number for skipped test:
# a tool to create lustre filesystem images
ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
+if $SHARED_KEY; then
+# bug number for skipped tests: LU-9795 (all below)
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 0 31 32a 32d 35a"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 53a 53b 54b 76a 76b"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 76c 76d 78 103"
+fi
SRCDIR=$(dirname $0)
PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+get_lustre_env
# use small MDS + OST size to speed formatting time
# do not use too small MDSSIZE/OSTSIZE, which affect the default journal size
ALWAYS_EXCEPT="$ALWAYS_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
+[ $MDSCOUNT -ge 2 ] &&
+# bug number for skipped test: LU-11915
+ ALWAYS_EXCEPT+=" 110"
+# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
+
init_logging
#
require_dsh_mds || exit 0
require_dsh_ost || exit 0
-# 8 22 (min)"
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="45 69"
+# 8 22 40 165 (min)
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="45 69 106 111"
assert_DIR
echo "setup double mount lustre success"
}
# Print a random alphanumeric string of the requested length to stdout.
# Arguments: $1 - number of characters to generate
generate_name() {
	# read directly from /dev/urandom (no useless cat); quote $1 so an
	# empty/whitespace argument fails in fold instead of silently
	# changing the word count
	tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w "$1" | head -n 1
}
+
build_test_filter
if [ "$ONLY" == "setup" ]; then
local PARAM="$FSNAME-OST0001.osc.active"
# With lctl set_param -P the value $PROC_ACT will be sent to
- # all nodes. The [^M] filter out the ability to set active
+ # all nodes. The [!M] filter out the ability to set active
# on the MDS servers which is tested with wait_osp_* below.
# For ost_server_uuid that only exist on client so filtering
# is safe.
# test new client starts deactivated
umount_client $MOUNT || error "umount_client $MOUNT failed"
mount_client $MOUNT || error "mount_client $MOUNT failed"
- RESULT=$($LCTL get_param -n $PROC_UUID | grep DEACTIV | grep NEW)
- if [ -z "$RESULT" ]; then
- error "New client start active: $(lctl get_param -n $PROC_UUID)"
- else
- echo "New client success: got $RESULT"
- fi
+
+ # the 2nd and 3rd field of ost_server_uuid do not update at the same
+ # time when using lctl set_param -P
+ wait_update_facet client \
+ "$LCTL get_param -n $PROC_UUID | awk '{print \\\$3 }'" \
+ "DEACTIVATED" ||
+ error "New client start active: $($LCTL get_param -n $PROC_UUID)"
+
+ echo "New client success: got '$($LCTL get_param -n $PROC_UUID)'"
# make sure it reactivates
set_persistent_param_and_check client $PROC_ACT $PARAM $ACTV
pass
echo Erase parameter setting
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
do_facet mgs "$PERM_CMD -d $TEST" ||
error "Erase param $TEST failed"
else
local TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
PARAM="osc.$FSNAME-OST0000-osc-[^M]*.import"
echo "Setting $PARAM from $TEST to $NEW"
do_facet mgs "$PERM_CMD $PARAM='connection=$NEW'" ||
[ $NIDCOUNT -eq $((orignidcount + 1)) ] ||
error "Failover nid not added"
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
do_facet mgs "$PERM_CMD -d osc.$FSNAME-OST0000-osc-*.import"
else
do_facet mgs "$PERM_CMD -d $FSNAME-OST0000.failover.node" ||
}
t32_verify_quota() {
- local node=$1
+ local facet=$1
local fsname=$2
local mnt=$3
local fstype=$(facet_fstype $SINGLEMDS)
# verification in 32b. The object quota usage should be accurate after
# zfs-0.7.0 is released.
[ $fstype == "zfs" ] && {
- local zfs_version=$(do_node $node cat /sys/module/zfs/version)
+ local zfs_version=$(do_facet $facet cat /sys/module/zfs/version)
[ $(version_code $zfs_version) -lt $(version_code 0.7.0) ] && {
echo "Skip quota verify for zfs: $zfs_version"
return 1
}
- set_persistent_param_and_check $node \
+ set_persistent_param_and_check $facet \
"osd-$fstype.$fsname-MDT0000.quota_slave.enabled" \
- $fsname.quota.mdt" ug
+ "$fsname.quota.mdt" ug
- set_persistent_param_and_check $node \
+ set_persistent_param_and_check $facet \
"osd-$fstype.$fsname-OST0000.quota_slave.enabled" \
- $fsname.quota.ost" ug
+ "$fsname.quota.ost" ug
chmod 0777 $mnt
runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \
return 1
fi
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
$r $PERM_CMD osc.$fsname-OST0000*.import=connection=$nid || {
error_noexit "Setting OST \"failover.node\""
return 1
fi
if [ "$dne_upgrade" != "no" ]; then
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
$r $PERM_CMD mdc.$fsname-MDT0001*.import=connection=$nid || {
error_noexit "Setting MDT1 \"failover.node\""
return 1
shall_cleanup_lustre=true
$r $LCTL set_param debug="$PTLDEBUG"
+ # Leave re-enabling this to a separate patch for LU-11558
+ # t32_verify_quota $SINGLEMDS $fsname $tmp/mnt/lustre || {
+ # error_noexit "verify quota failed"
+ # return 1
+ #}
+
if $r test -f $tmp/list; then
#
# There is not a Test Framework API to copy files to or
}
nrpcs=$((nrpcs_orig + 5))
- set_persistent_param_and_check $HOSTNAME \
+ set_persistent_param_and_check client \
"mdc.$fsname-MDT0000*.max_rpcs_in_flight" \
"$fsname-MDT0000.mdc.max_rpcs_in_flight" $nrpcs || {
error_noexit "Changing \"max_rpcs_in_flight\""
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
start fs2ost $fs2ostdev $OST_MOUNT_OPTS
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
do_facet mgs "$PERM_CMD timeout=200" ||
error "$PERM_CMD timeout=200 failed"
else
echo "ok."
cp /etc/hosts $MOUNT2/ || error "copy /etc/hosts $MOUNT2/ failed"
- $GETSTRIPE $MOUNT2/hosts || error "$GETSTRIPE $MOUNT2/hosts failed"
+ $LFS getstripe $MOUNT2/hosts ||
+ error "$LFS getstripe $MOUNT2/hosts failed"
umount $MOUNT2
stop fs2ost -f
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
do_facet mgs "$PERM_CMD \
mdc.*${device}*.import=connection=$(h2nettype $FAKENID)" ||
error "Setting mdc.*${device}*.import=connection=\
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
do_facet mgs "$PERM_CMD \
mdc.*${device}*.import=connection=$(h2nettype $FAKENID)" ||
error "Set mdc.*${device}*.import=connection=\
setup
check_mount || error "client was not mounted"
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
PARAM="llite.$FSNAME-*.some_wrong_param"
else
PARAM="$FSNAME.llite.some_wrong_param"
#second client see all ost's
mount_client $MOUNT2 || error "mount_client failed"
- $SETSTRIPE -c -1 $MOUNT2 || error "$SETSTRIPE -c -1 $MOUNT2 failed"
- $GETSTRIPE $MOUNT2 || error "$GETSTRIPE $MOUNT2 failed"
+ $LFS setstripe -c -1 $MOUNT2 ||
+ error "$LFS setstripe -c -1 $MOUNT2 failed"
+ $LFS getstripe $MOUNT2 || error "$LFS getstripe $MOUNT2 failed"
echo "ok" > $MOUNT2/widestripe
- $GETSTRIPE $MOUNT2/widestripe ||
- error "$GETSTRIPE $MOUNT2/widestripe failed"
+ $LFS getstripe $MOUNT2/widestripe ||
+ error "$LFS getstripe $MOUNT2/widestripe failed"
# fill acl buffer for avoid expand lsm to them
awk -F : '{if (FNR < 25) { print "u:"$1":rwx" }}' /etc/passwd |
while read acl; do
setup_noconfig
check_mount || error "check_mount failed"
- $SETSTRIPE -c -1 $MOUNT || error "$SETSTRIPE -c -1 $MOUNT failed"
- $GETSTRIPE $MOUNT || error "$GETSTRIPE $MOUNT failed"
+ $LFS setstripe -c -1 $MOUNT ||
+ error "$LFS setstripe -c -1 $MOUNT failed"
+ $LFS getstripe $MOUNT || error "$LFS getstripe $MOUNT failed"
echo "ok" > $MOUNT/widestripe
- $GETSTRIPE $MOUNT/widestripe ||
- error "$GETSTRIPE $MOUNT/widestripe failed"
+ $LFS getstripe $MOUNT/widestripe ||
+ error "$LFS getstripe $MOUNT/widestripe failed"
# In the future, we may introduce more EAs, such as selinux, enlarged
# LOV EA, and so on. These EA will use some EA space that is shared by
run_test 49b "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre"
# Verify that statfs (via multiop) and "lfs df -l" both complete promptly
# even when some OSTs are down (lazystatfs behavior).
# Arguments: $1 - mount point to stat
# Returns:   0 on success, non-zero if either check failed or hung
lazystatfs() {
	# wait long enough to exceed OBD_STATFS_CACHE_SECONDS = 1
	sleep 2
	# Test both statfs and lfs df and fail if either one fails
	multiop_bg_pause $1 f_
	RC=$?
	PID=$!
	killall -USR1 multiop
	[ $RC -ne 0 ] && log "lazystatfs multiop failed"
	wait $PID || { RC=$?; log "multiop return error "; }

	# wait long enough to exceed OBD_STATFS_CACHE_SECONDS = 1
	sleep 2
	$LFS df -l &
	PID=$!
	sleep 5
	# if "lfs df" is still alive after 5s it hung on the down OST
	if kill -s 0 $PID; then
		RC=1
		kill -s 9 $PID
		log "lazystatfs lfs df failed to complete in 5s"
	fi

	return $RC
}
# Wait for client to detect down OST
stop_ost || error "Unable to stop OST1"
- wait_osc_import_state mds ost DISCONN
+ wait_osc_import_state client ost DISCONN
+ $LCTL dl
+ log "OSCs should all be DISCONN"
lazystatfs $MOUNT || error "lazystatfs should not return EIO"
wait_osc_import_state mds ost2 FULL
wait_osc_import_ready client ost2
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
local PARAM="osc.${FSNAME}-OST0001*.active"
else
local PARAM="${FSNAME}-OST0001.osc.active"
fi
- $SETSTRIPE -c -1 $DIR/$tfile || error "$SETSTRIPE failed"
+ $LFS setstripe -c -1 $DIR/$tfile || error "$LFS setstripe failed"
do_facet mgs $PERM_CMD $PARAM=0 || error "Unable to deactivate OST"
umount_client $MOUNT || error "Unable to unmount client"
"${FSNAME}-OST0000.osc.active" 1
mkdir $DIR/$tdir/2 || error "mkdir $DIR/$tdir/2 failed"
- $SETSTRIPE -c -1 -i 0 $DIR/$tdir/2 ||
- error "$SETSTRIPE $DIR/$tdir/2 failed"
+ $LFS setstripe -c -1 -i 0 $DIR/$tdir/2 ||
+ error "$LFS setstripe $DIR/$tdir/2 failed"
sleep 1 && echo "create a file after OST1 is activated"
- # create some file
- createmany -o $DIR/$tdir/2/$tfile-%d 1
+ # doing some io, shouldn't crash
+ dd if=/dev/zero of=$DIR/$tdir/2/$tfile-io bs=1M count=10
# check OSC import is working
stat $DIR/$tdir/2/* >/dev/null 2>&1 ||
error "some OSC imports are still not connected"
# cleanup
+ rm -rf $DIR/$tdir
umount_client $MOUNT || error "Unable to umount client"
stop_ost2 || error "Unable to stop OST2"
cleanup_nocli || error "cleanup_nocli failed with $?"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
$PERM_CMD mdc.${FSNAME}-MDT0001-mdc-*.active=0 &&
error "deactive MDC0 succeeds"
else
check_mount || error "check_mount failed"
mkdir $MOUNT/$tdir || error "mkdir $MOUNT/$tdir failed"
- $SETSTRIPE -c -1 $MOUNT/$tdir ||
- error "$SETSTRIPE -c -1 $MOUNT/$tdir failed"
+ $LFS setstripe -c -1 $MOUNT/$tdir ||
+ error "$LFS setstripe -c -1 $MOUNT/$tdir failed"
#define OBD_FAIL_MDS_REINT_DELAY 0x142
do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x142"
touch $MOUNT/$tdir/$tfile &
error "Unable to create temporary file"
sleep 1
- $SETSTRIPE -c -1 -S 1M $DIR/$tdir || error "$SETSTRIPE failed"
+ $LFS setstripe -c -1 -S 1M $DIR/$tdir || error "$LFS setstripe failed"
for (( i=0; i < nrfiles; i++ )); do
multiop $DIR/$tdir/$tfile-$i Ow1048576w1048576w524288c ||
setmodopts $modname "$oldvalue"
# Check that $opts took
- tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min")
- tmax=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max")
+ tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" ||
+ echo 0)
+ tmax=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" ||
+ echo 0)
tstarted=$(do_facet $facet \
- "$LCTL get_param -n ${paramp}.threads_started")
+ "$LCTL get_param -n ${paramp}.threads_started" || echo 0)
lassert 28 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' ||
return $?
cleanup
$server_version -lt $(version_code 2.5.11) ]]; then
wait_osc_import_state mds ost1 FULL
wait_osc_import_state mds ost2 FULL
- $SETSTRIPE --stripe-count=-1 $DIR/$tfile ||
+ $LFS setstripe --stripe-count=-1 $DIR/$tfile ||
error "Unable to setstripe $DIR/$tfile"
n=$($LFS getstripe --stripe-count $DIR/$tfile)
[ "$n" -eq 2 ] || error "Stripe count not two: $n"
echo "wrong nids list should not destroy the system"
do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "wrong nids list" &&
error "wrong parse"
+ do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "asdfasdf, asdfadf" &&
+ error "wrong parse"
echo "replace OST nid"
do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID ||
do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 "wrong nids list" &&
error "wrong parse"
+ local FAKE_NIDS="192.168.0.112@tcp1,192.168.0.112@tcp2"
+ local FAKE_FAILOVER="192.168.0.113@tcp1,192.168.0.113@tcp2"
+ local NIDS_AND_FAILOVER="$MDS_NID,$FAKE_NIDS:$FAKE_FAILOVER"
+ echo "set NIDs with failover"
+ do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $NIDS_AND_FAILOVER ||
+ error "replace nids failed"
+
+
echo "replace MDS nid"
do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $MDS_NID ||
error "replace nids failed"
local ifree=$($LFS df -i $MOUNT | awk '/OST0000/ { print $4 }')
log "On OST0, $ifree inodes available. Want $num_create."
- $SETSTRIPE -i 0 $DIR/$tdir ||
- error "$SETSTRIPE -i 0 $DIR/$tdir failed"
+ $LFS setstripe -i 0 $DIR/$tdir ||
+ error "$LFS setstripe -i 0 $DIR/$tdir failed"
if [ $ifree -lt 10000 ]; then
files=$(( ifree - 50 ))
else
mount_client $MOUNT || error "mount client failed"
touch $DIR/$tdir/$tfile-last || error "create file after reformat"
- local idx=$($GETSTRIPE -i $DIR/$tdir/$tfile-last)
+ local idx=$($LFS getstripe -i $DIR/$tdir/$tfile-last)
[ $idx -ne 0 ] && error "$DIR/$tdir/$tfile-last on $idx not 0" || true
local iused=$($LFS df -i $MOUNT | awk '/OST0000/ { print $3 }')
done
ost_indices=$(comma_list $ost_indices)
- trap "restore_ostindex" EXIT
+ stack_trap "restore_ostindex" EXIT
echo -e "\nFormat $OSTCOUNT OSTs with sparse indices $ost_indices"
OST_INDEX_LIST=[$ost_indices] formatall
error "start ost$i failed"
done
+ # Collect debug information - start of test
+ do_nodes $(comma_list $(mdts_nodes)) \
+ $LCTL get_param osc.*.prealloc_*_id
+
mount_client $MOUNT || error "mount client $MOUNT failed"
wait_osts_up
$LFS df $MOUNT || error "$LFS df $MOUNT failed"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+ stack_trap "do_nodes $(comma_list $(mdts_nodes)) \
+ $LCTL get_param osc.*.prealloc_*_id" EXIT
+
# 1. If the file does not exist, new file will be created
# with specified OSTs.
local file=$DIR/$tdir/$tfile-1
- local cmd="$SETSTRIPE -o $ost_indices $file"
+ local cmd="$LFS setstripe -o $ost_indices $file"
echo -e "\n$cmd"
eval $cmd || error "$cmd failed"
check_stripe_count $file $OSTCOUNT
# will be attached with specified layout.
file=$DIR/$tdir/$tfile-2
mcreate $file || error "mcreate $file failed"
- cmd="$SETSTRIPE -o $ost_indices $file"
+ cmd="$LFS setstripe -o $ost_indices $file"
echo -e "\n$cmd"
eval $cmd || error "$cmd failed"
dd if=/dev/urandom of=$file count=1 bs=1M > /dev/null 2>&1 ||
# be in the OST indices list.
local start_ost_idx=${ost_indices##*,}
file=$DIR/$tdir/$tfile-3
- cmd="$SETSTRIPE -o $ost_indices -i $start_ost_idx $file"
+ cmd="$LFS setstripe -o $ost_indices -i $start_ost_idx $file"
echo -e "\n$cmd"
eval $cmd || error "$cmd failed"
check_stripe_count $file $OSTCOUNT
check_start_ost_idx $file $start_ost_idx
file=$DIR/$tdir/$tfile-4
- cmd="$SETSTRIPE"
+ cmd="$LFS setstripe"
cmd+=" -o $(exclude_items_from_list $ost_indices $start_ost_idx)"
cmd+=" -i $start_ost_idx $file"
echo -e "\n$cmd"
# 5. Specifying OST indices for directory should succeed.
local dir=$DIR/$tdir/$tdir
mkdir $dir || error "mkdir $dir failed"
- cmd="$SETSTRIPE -o $ost_indices $dir"
+ cmd="$LFS setstripe -o $ost_indices $dir"
if [[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.11.53) &&
$(lustre_version_code client -gt $(version_code 2.11.53)) ]]; then
echo -e "\n$cmd"
# If [--pool|-p <pool_name>] is set with [--ost-list|-o <ost_indices>],
# then the OSTs must be the members of the pool.
local file=$DIR/$tdir/$tfile
- cmd="$SETSTRIPE -p $ost_pool -o $ost_idx_in_list $file"
+ cmd="$LFS setstripe -p $ost_pool -o $ost_idx_in_list $file"
echo -e "\n$cmd"
eval $cmd && error "OST with index $ost_idx_in_list should be" \
"in OST pool $ost_pool"
# Only select OST $ost_idx_in_list from $ost_pool for file.
ost_idx_in_list=${ost_idx_in_pool#*,}
- cmd="$SETSTRIPE -p $ost_pool -o $ost_idx_in_list $file"
+ cmd="$LFS setstripe -p $ost_pool -o $ost_idx_in_list $file"
echo -e "\n$cmd"
eval $cmd || error "$cmd failed"
- cmd="$GETSTRIPE $file"
+ cmd="$LFS getstripe $file"
echo -e "\n$cmd"
eval $cmd || error "$cmd failed"
check_stripe_count $file 2
check_mount || error "check client $MOUNT failed"
#set xattr
- $SETSTRIPE -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
+ $LFS setstripe -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
error "Create file with 3 components failed"
$TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed"
- i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed"
+ i=$($LFS getstripe -I3 -c $file) || error "get 3rd stripe count failed"
if [ $i -ne $OSTCOUNT ]; then
left_size=$(expr $left_size + $(expr $OSTCOUNT - $i) \* 24)
echo -n "Since only $i out $OSTCOUNT OSTs are used, "
start_ost || error "OST0 start fail"
#define OBD_FAIL_MGS_WRITE_TARGET_DELAY 0x90e
- do_facet mgs "$LCTL set_param fail_val = 10 fail_loc=0x8000090e"
+ do_facet mgs "$LCTL set_param fail_val=10 fail_loc=0x8000090e"
for num in $(seq 2 $MDSCOUNT); do
start_mdt $num &
done
mount_client $MOUNT || error "mount client fails"
wait_osc_import_state mds ost FULL
- wait_osc_import_state client ost FULL
+ wait_osc_import_ready client ost
check_mount || error "check_mount failed"
cleanup || error "cleanup failed with $?"
test_103_set_pool $FSNAME OST0000
- $SETSTRIPE -p $FSNAME $DIR/$tdir/d0 ||
+ $LFS setstripe -p $FSNAME $DIR/$tdir/d0 ||
error "(6) Fail to setstripe on $DIR/$tdir/d0"
if ! combined_mgs_mds ; then
test_103_set_pool $save_fsname OST0001
fi
- $SETSTRIPE -p $save_fsname $DIR/$tdir/f0 ||
+ $LFS setstripe -p $save_fsname $DIR/$tdir/f0 ||
error "(16) Fail to setstripe on $DIR/$tdir/f0"
if ! combined_mgs_mds ; then
umount_mgs_client
start_ost || error "unable to start OST"
# add unknown configuration parameter.
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
cmd="$PERM_CMD ost.$FSNAME-OST0000*.unknown_param"
else
cmd="$PERM_CMD $FSNAME-OST0000*.ost.unknown_param"
$rcmd mount -t ldiskfs -o loop $tmp/images/$facet \
$tmp/mnt/$facet ||
error "failed to local mount $facet"
+
$rcmd tar jxf $LUSTRE/tests/zfs_${facet}_2_11.tar.bz2 \
--xattrs --xattrs-include="*.*" \
-C $tmp/mnt/$facet/ > /dev/null 2>&1 ||
}
run_test 109b "test lctl clear_conf one config"
# Reformat a single MDT/OST pair with the ldiskfs large_dir feature and a
# 1KB block size, create enough hard links in one directory to force a
# 3-level htree, then e2fsck the MDT to verify on-disk consistency.
test_110()
{
	[[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
		skip "Only applicable to ldiskfs-based MDTs"

	do_facet $SINGLEMDS $DEBUGFS -w -R supported_features |grep large_dir ||
		skip "large_dir option is not supported on MDS"
	do_facet ost1 $DEBUGFS -w -R supported_features | grep large_dir ||
		skip "large_dir option is not supported on OSS"

	stopall # stop all targets before modifying the target counts
	# stack_trap restores the original counts when the test exits
	stack_trap "MDSCOUNT=$MDSCOUNT OSTCOUNT=$OSTCOUNT" EXIT
	MDSCOUNT=1
	OSTCOUNT=1

	# ext4_dir_entry_2 struct size:264
	# dx_root struct size:8
	# dx_node struct size:8
	# dx_entry struct size:8
	# For 1024 bytes block size.
	# First level directory entries: 126
	# Second level directory entries: 127
	# Entries in leaf: 3
	# For 2 levels limit: 48006
	# For 3 levels limit : 6096762
	# Create 80000 files to safely exceed 2-level htree limit.
	CONF_SANITY_110_LINKS=${CONF_SANITY_110_LINKS:-80000}

	# can fit at most 3 filenames per 1KB leaf block, but each
	# leaf/index block will only be 3/4 full before split at each level
	# NOTE(review): integer arithmetic here truncates (4/3 applied as
	# *4 then /3) -- assumed intentional sizing slack, confirm
	(( MDSSIZE < CONF_SANITY_110_LINKS / 3 * 4/3 * 4/3 )) &&
		CONF_SANITY_110_LINKS=$((MDSSIZE * 3 * 3/4 * 3/4))

	# inject "-O large_dir -b 1024 -i 65536" into the mkfs options,
	# merging with any existing --mkfsoptions value
	local opts="$(mkfs_opts mds1 $(mdsdevname 1)) \
		--reformat $(mdsdevname 1) $(mdsvdevname 1)"
	if [[ $opts != *mkfsoptions* ]]; then
		opts+=" --mkfsoptions=\\\"-O large_dir -b 1024 -i 65536\\\""
	else
		opts="${opts//--mkfsoptions=\\\"/ \
			--mkfsoptions=\\\"-O large_dir -b 1024 -i 65536 }"
	fi
	echo "MDT params: $opts"
	add mds1 $opts || error "add mds1 failed with new params"
	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS

	opts="$(mkfs_opts ost1 $(ostdevname 1)) \
		--reformat $(ostdevname 1) $(ostvdevname 1)"

	if [[ $opts != *mkfsoptions* ]]; then
		opts+=" --mkfsoptions=\\\"-O large_dir\\\" "
	else
		opts="${opts//--mkfsoptions=\\\"/ \
			--mkfsoptions=\\\"-O large_dir }"
	fi
	echo "OST params: $opts"
	add ost1 $opts || error "add ost1 failed with new params"
	start ost1 $(ostdevname 1) $OST_MOUNT_OPTS

	MOUNT_2=yes mountcli || error "mount clients failed"

	mkdir -v $DIR/$tdir || error "cannot create $DIR/$tdir"
	local pids count=0 group=0

	echo "creating $CONF_SANITY_110_LINKS in total"
	while (( count < CONF_SANITY_110_LINKS )); do
		# leave room for the link suffix within the 255-char name
		# limit; 253 leaves margin, wc -c includes the newline
		local len=$((253 - $(wc -c <<<"$tfile-$group-40000-")))
		# alternate between both client mounts ($DIR1/$DIR2)
		local dir=DIR$((group % 2 + 1))
		local target=${!dir}/$tdir/$tfile-$group
		local long=$target-$(generate_name $len)-
		local create=$((CONF_SANITY_110_LINKS - count))

		# cap each batch at 40000 links per target file
		(( create > 40000 )) && create=40000
		touch $target || error "creating $target failed"
		echo "creating $create hard links to $target"
		createmany -l $target $long $create &
		pids+=" $!"

		count=$((count + create))
		group=$((group + 1))
	done
	echo "waiting for PIDs$pids to complete"
	wait $pids || error "createmany failed after $group groups"

	cleanup

	# read-only fsck of the MDT; any inconsistency fails the test
	run_e2fsck $(facet_active_host mds1) $(mdsdevname 1) -n
}
run_test 110 "Adding large_dir with 3-level htree"
+
# Reformat a single MDT/OST pair with the ldiskfs large_dir feature, then
# create hard links from all clients until one directory grows past 2GiB,
# and finally e2fsck the MDT to verify on-disk consistency.  Skips (rather
# than fails) if the estimated remaining time becomes excessive.
test_111() {
	[[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
		skip "Only applicable to ldiskfs-based MDTs"

	is_dm_flakey_dev $SINGLEMDS $(mdsdevname 1) &&
		skip "This test can not be executed on flakey dev"

	do_facet $SINGLEMDS $DEBUGFS -w -R supported_features |grep large_dir ||
		skip "large_dir option is not supported on MDS"

	do_facet ost1 $DEBUGFS -w -R supported_features | grep large_dir ||
		skip "large_dir option is not supported on OSS"

	# cleanup before changing target counts
	cleanup
	# stack_trap restores the original sizes/counts when the test exits
	stack_trap "MDSSIZE=$MDSSIZE MDSCOUNT=$MDSCOUNT OSTCOUNT=$OSTCOUNT" EXIT
	MDSCOUNT=1
	OSTCOUNT=1
	(( MDSSIZE < 2400000 )) && MDSSIZE=2400000 # need at least 2.4GB

	local mdsdev=$(mdsdevname 1)

	# inject "-O large_dir -i 1048576" into the mkfs options, merging
	# with any existing --mkfsoptions value
	local opts="$(mkfs_opts mds1 $(mdsdevname 1)) \
		--reformat $(mdsdevname 1) $(mdsvdevname 1)"
	if [[ $opts != *mkfsoptions* ]]; then
		opts+=" --mkfsoptions=\\\"-O large_dir -i 1048576 \\\" "
	else
		opts="${opts//--mkfsoptions=\\\"/ \
			--mkfsoptions=\\\"-O large_dir -i 1048576 }"
	fi
	echo "MDT params: $opts"
	__touch_device mds 1
	add mds1 $opts || error "add mds1 failed with new params"
	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS

	opts="$(mkfs_opts ost1 $(ostdevname 1)) \
		--reformat $(ostdevname 1) $(ostvdevname 1)"
	if [[ $opts != *mkfsoptions* ]]; then
		opts+=" --mkfsoptions=\\\"-O large_dir \\\""
	else
		opts="${opts//--mkfsoptions=\\\"/ --mkfsoptions=\\\"-O large_dir }"
	fi
	echo "OST params: $opts"
	__touch_device ost 1
	add ost1 $opts || error "add ost1 failed with new params"
	start ost1 $(ostdevname 1) $OST_MOUNT_OPTS

	MOUNT_2=yes mountcli
	mkdir $DIR/$tdir || error "cannot create $DIR/$tdir"
	lfs df $DIR/$tdir
	lfs df -i $DIR/$tdir

	local group=0

	local start=$SECONDS
	local dirsize=0
	local dirmax=$((2 << 30))
	local needskip=0
	local taken=0
	local rate=0
	local left=0
	local num=0
	# use logical && (the previous bitwise & only worked because both
	# operands happen to be 0/1 comparison results)
	while (( !needskip && dirsize < dirmax )); do
		local pids=""

		for cli in ${CLIENTS//,/ }; do
			# leave room for the link suffix within the
			# 255-char name limit (wc -c includes the newline)
			local len=$((253 - $(wc -c <<<"$cli-$group-60000-")))
			local target=$cli-$group
			local long=$DIR/$tdir/$target-$(generate_name $len)-

			RPWD=$DIR/$tdir do_node $cli touch $target ||
				error "creating $target failed"
			echo "creating 60000 hardlinks to $target"
			RPWD=$DIR/$tdir do_node $cli createmany -l $target \
				$long 60000 &
			pids+=" $!"

			group=$((group + 1))
			target=$cli-$group
			long=$DIR2/$tdir/$target-$(generate_name $len)-

			RPWD=$DIR2/$tdir do_node $cli touch $target ||
				error "creating $target failed"
			echo "creating 60000 hardlinks to $target"
			RPWD=$DIR2/$tdir do_node $cli createmany -l $target \
				$long 60000 &
			pids+=" $!"

			group=$((group + 1))
		done
		echo "waiting for PIDs$pids to complete"
		wait $pids || error "createmany failed after $group groups"
		dirsize=$(stat -c %s $DIR/$tdir)
		taken=$((SECONDS - start))
		(( taken > 0 )) || taken=1 # avoid divide-by-zero on fast runs
		rate=$((dirsize / taken))
		left=$(((dirmax - dirsize) / rate))
		num=$((group * 60000))
		echo "estimate ${left}s left after $num files / ${taken}s"
		# if the estimated time remaining is too large (it may change
		# over time as the create rate is not constant) then exit
		# without declaring a failure.
		(( left > 1200 )) && needskip=1
	done

	cleanup

	(( $needskip )) &&
		skip "ETA ${left}s after $num files / ${taken}s is too long"

	run_e2fsck $(facet_active_host mds1) $(mdsdevname 1) -n
}
run_test 111 "Adding large_dir with over 2GB directory"
+
+
cleanup_115()
{
trap 0
}
run_test 117 "lctl get_param return errors properly"
# LU-11130: a rename across MDTs must not leave a corrupted symlink on
# disk; do a cross-target rename of a symlink and then e2fsck the MDT.
test_120() { # LU-11130
	[ "$MDSCOUNT" -lt 2 ] && skip "mdt count < 2"
	[ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
		skip "ldiskfs only test"
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.56) ] &&
		skip "Need DNE2 capable MD target with LU-11130 fix"

	setup

	local mds1host=$(facet_active_host mds1)
	local mds1dev=$(mdsdevname 1)

	# parent dir on MDT1, child dir on MDT0 so the rename crosses targets
	$LFS mkdir -i 1 $DIR/$tdir
	$LFS mkdir -i 0 $DIR/$tdir/mds1dir

	ln -s foo $DIR/$tdir/bar
	mv $DIR/$tdir/bar $DIR/$tdir/mds1dir/bar2 ||
		error "cross-target rename failed"

	stopall

	# read-only fsck; any on-disk inconsistency fails the test
	run_e2fsck $mds1host $mds1dev "-n"
}
run_test 120 "cross-target rename should not create bad symlinks"
+
test_122() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
[[ $(lustre_version_code ost1) -ge $(version_code 2.11.53) ]] ||
}
run_test 122 "Check OST sequence update"
# Verify "lctl llog_print" accepts both the legacy escaped-logid syntax
# and the plain config-log name.
test_123aa() {
	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[ -d $MOUNT/.lustre ] || setupall

	# test old logid format until removal from llog_ioctl.c::str2logid()
	if [ $MGS_VERSION -lt $(version_code 3.1.53) ]; then
		do_facet mgs $LCTL dl | grep MGS
		# NOTE(review): the heavy backslash escaping is aimed at
		# do_facet's remote-shell quoting layers -- confirm it still
		# yields "\$<fsname>-client" on the MGS
		do_facet mgs "$LCTL --device %MGS llog_print \
			\\\\\\\$$FSNAME-client 1 10" ||
			error "old llog_print failed"
	fi

	# test new logid format
	if [ $MGS_VERSION -ge $(version_code 2.9.53) ]; then
		do_facet mgs "$LCTL --device MGS llog_print $FSNAME-client" ||
			error "new llog_print failed"
	fi
}
run_test 123aa "llog_print works with FIDs and simple names"
+
# Verify that a parameter set with "lctl set_param -P" shows up with the
# right name and value in the "params" config log via "lctl llog_print".
test_123ab() {
	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[[ $MGS_VERSION -gt $(version_code 2.11.51) ]] ||
		skip "Need server with working llog_print support"

	[ -d $MOUNT/.lustre ] || setupall

	local yaml
	local orig_val

	orig_val=$(do_facet mgs $LCTL get_param jobid_name)
	do_facet mgs $LCTL set_param -P jobid_name="testname"

	# take the most recent matching record in case of repeated runs
	yaml=$(do_facet mgs $LCTL --device MGS llog_print params |
		grep jobid_name | tail -n 1)

	# fields 10/12 of the yaml record line are "param:" name and value
	local param=$(awk '{ print $10 }' <<< "$yaml")
	local val=$(awk '{ print $12 }' <<< "$yaml")
	#return to the default
	do_facet mgs $LCTL set_param -P jobid_name=$orig_val
	# operands quoted: an empty match must report a clean error instead
	# of a "[: unary operator expected" syntax failure
	[ "$val" = "testname" ] || error "bad value: $val"
	[ "$param" = "jobid_name," ] || error "Bad param: $param"
}
run_test 123ab "llog_print params output values from set_param -P"
+
# LU-11566: verify "lctl llog_print --start N --end M" only prints
# records whose index falls inside [N, M].
test_123ac() { # LU-11566
	remote_mgs_nodsh && skip "remote MGS with nodsh"
	do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start ||
		skip "Need 'lctl llog_print --start' on MGS"

	local start=10
	local end=50

	[ -d $MOUNT/.lustre ] || setupall

	# - { index: 10, event: add_uuid, nid: 192.168.20.1@tcp(0x20000c0a81401,
	# node: 192.168.20.1@tcp }
	# NOTE(review): the while loop runs in a pipeline subshell, so
	# error() here may not abort the whole test -- confirm error exits
	# propagate as intended
	do_facet mgs $LCTL --device MGS \
		llog_print --start $start --end $end $FSNAME-client | tr -d , |
	while read DASH BRACE INDEX idx EVENT BLAH BLAH BLAH; do
		(( idx >= start )) || error "llog_print index $idx < $start"
		(( idx <= end )) || error "llog_print index $idx > $end"
	done
}
run_test 123ac "llog_print with --start and --end"
+
# LU-11566: verify "lctl llog_print" shows every record by comparing the
# index of the last printed record with llog_info's last_index.
test_123ad() { # LU-11566
	remote_mgs_nodsh && skip "remote MGS with nodsh"
	# older versions of lctl may not print all records properly
	do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start ||
		skip "Need 'lctl llog_print --start' on MGS"

	[ -d $MOUNT/.lustre ] || setupall

	# append a new record, to avoid issues if last record was cancelled
	local old=$($LCTL get_param -n osc.*-OST0000-*.max_dirty_mb | head -1)
	do_facet mgs $LCTL conf_param $FSNAME-OST0000.osc.max_dirty_mb=$old

	# logid: [0x3:0xa:0x0]:0
	# flags: 4 (plain)
	# records_count: 72
	# last_index: 72
	# NOTE(review): "$2 - 1" presumably discounts the header/terminator
	# record from last_index -- confirm against llog_info output format
	local num=$(do_facet mgs $LCTL --device MGS llog_info $FSNAME-client |
		awk '/last_index:/ { print $2 - 1 }')

	# - { index: 71, event: set_timeout, num: 0x14, param: sys.timeout=20 }
	local last=$(do_facet mgs $LCTL --device MGS llog_print $FSNAME-client |
		tail -1 | awk '{ print $4 }' | tr -d , )
	(( last == num )) || error "llog_print only showed $last/$num records"
}
run_test 123ad "llog_print shows all records"
+
# Dump all config llogs to a YAML file, wipe the configuration with
# writeconf, remount, replay the saved parameters with "lctl set_param -F"
# and verify a set_param -P value survived the round trip.
test_123F() {
	setupall
	local yaml_file="$TMP/$tfile.yaml"

	# -f: do not fail when no stale file exists from a previous run
	do_facet mgs rm -f "$yaml_file"
	# $LCTL (not bare lctl) for consistency with the rest of the file
	local cfgfiles=$(do_facet mgs "$LCTL --device MGS llog_catlist" |
			 sed 's/config_log://')

	# set jobid_var to a different value for test
	local orig_val=$(do_facet mgs $LCTL get_param jobid_var)
	do_facet mgs $LCTL set_param -P jobid_var="testname"

	for i in $cfgfiles params; do
		do_facet mgs "$LCTL --device MGS llog_print ${i} >> $yaml_file"
	done

	echo "Unmounting FS"
	stopall
	echo "Writeconf"
	writeconf_all
	echo "Remounting"
	mountmgs
	mountmds
	mountoss
	mountcli

	# Reapply the config from before
	echo "Setting configuration parameters"
	do_facet mgs "$LCTL set_param -F $yaml_file"

	local set_val=$(do_facet mgs $LCTL get_param jobid_var)
	# restore the original value before asserting, so a failure does not
	# leave the modified setting behind
	do_facet mgs $LCTL set_param -P $orig_val

	# quoted so an empty result fails cleanly instead of a syntax error
	[ "$set_val" == "jobid_var=testname" ] ||
		error "$set_val is not testname"

	do_facet mgs rm "$yaml_file"
}
run_test 123F "clear and reset all parameters using set_param -F"
+
# Verify that an MDT can still fail over after its NIDs were rewritten
# with "lctl replace_nids <target> <nid>:<failover_nid>".
test_124()
{
	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
	# quoted: an unset failover host must not collapse to "[ -z ]"
	# (which is true only by accident) or break on whitespace
	[ -z "$mds2failover_HOST" ] && skip "needs MDT failover setup" && return

	setup
	cleanup

	load_modules
	if combined_mgs_mds; then
		# MGS shares the MDT device; start it without the MDT service
		# so replace_nids can run against a quiescent target
		start_mdt 1 "-o nosvc" ||
			error "starting mds with nosvc option failed"
	fi
	local nid=$(do_facet mds2 $LCTL list_nids | head -1)
	local failover_nid=$(do_node $mds2failover_HOST $LCTL list_nids |
			     head -1)

	do_facet mgs $LCTL replace_nids $FSNAME-MDT0001 $nid:$failover_nid ||
		error "replace_nids execution error"

	if combined_mgs_mds; then
		stop_mdt 1
	fi

	setup
	fail mds2
	echo "lfs setdirstripe"
	# creating a remote directory exercises the recovered MDT0001
	$LFS setdirstripe -i 1 $MOUNT/$tdir || error "setdirstripe error"
	echo ok
}
run_test 124 "check failover after replace_nids"
+
# A standalone MGS (not combined with the MDS) is still running after the
# last test; stop it before the script's final cleanup.
if ! combined_mgs_mds ; then
	stop mgs
fi