ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 32newtarball"
# bug number for skipped test: LU-11915
-ALWAYS_EXCEPT="$ALWAYS_EXCEPT 110"
+ALWAYS_EXCEPT="$ALWAYS_EXCEPT 110 115"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
if $SHARED_KEY; then
start $facet ${dev} $MDS_MOUNT_OPTS $@ || return 94
}
+stop_mdt_no_force() {
+ local num=$1
+ local facet=mds$num
+ local dev=$(mdsdevname $num)
+ shift 1
+
+ echo "stop mds service on `facet_active_host $facet`"
+ stop $facet || return 97
+}
+
stop_mdt() {
local num=$1
local facet=mds$num
}
mount_client() {
- local MOUNTPATH=$1
- echo "mount $FSNAME on ${MOUNTPATH}....."
- zconf_mount $(hostname) $MOUNTPATH || return 96
-}
+ local mountpath=$1
+ local mountopt="$2"
-remount_client() {
- local mountopt="remount,$1"
- local MOUNTPATH=$2
- echo "remount '$1' lustre on ${MOUNTPATH}....."
- zconf_mount $(hostname) $MOUNTPATH "$mountopt" || return 96
+ echo "mount $FSNAME ${mountopt:+with opts $mountopt} on $mountpath....."
+ zconf_mount $HOSTNAME $mountpath $mountopt || return 96
}
umount_client() {
mount_client $MOUNT || error "mount_client $MOUNT failed"
check_mount || error "check_mount failed"
rm -f $DIR/$tfile || error "remove $DIR/$tfile failed."
- remount_client ro $MOUNT || error "remount_client with ro failed"
+ mount_client $MOUNT remount,ro || error "remount client with ro failed"
touch $DIR/$tfile && error "$DIR/$tfile created incorrectly"
[ -e $DIR/$tfile ] && error "$DIR/$tfile exists incorrectly"
- remount_client rw $MOUNT || error "remount_client with rw failed"
+ mount_client $MOUNT remount,rw || error "remount client with rw failed"
touch $DIR/$tfile || error "touch $DIR/$tfile failed"
MCNT=$(grep -c $MOUNT' ' /etc/mtab)
[ "$MCNT" -ne 1 ] && error "$MOUNT in /etc/mtab $MCNT times"
start_mds || error "MDS start failed"
wait_osc_import_state mds ost2 FULL
+ local zkeeper=${KEEP_ZPOOL}
+ stack_trap "KEEP_ZPOOL=$zkeeper" EXIT
+ KEEP_ZPOOL="true"
+
stop_ost || error "Unable to stop OST1"
stop_ost2 || error "Unable to stop OST2"
stop_mds || error "Unable to stop MDS"
stop_mgs
#writeconf to remove all ost2 traces for subsequent tests
writeconf_or_reformat
+ KEEP_ZPOOL="${zkeeper}"
+
start_mgs || error "unable to start MGS"
}
run_test 21d "start mgs then ost and then mds"
echo $T32_BLIMIT > $tmp/img/blimit
echo $T32_ILIMIT > $tmp/img/ilimit
+ $MULTIOP /mnt/$FSNAME/orph_file Ouw_c&
+ pid=$!
+ sync
+ stop_mdt_no_force 1
+ debugfs -R "ls /PENDING" ${MDSDEV1:-$MDSDEV}
+ cp ${MDSDEV1:-$MDSDEV} $tmp/img
+ start_mdt 1
+ kill -s USR1 $pid
+ wait $pid
+
stopall
pushd $tmp/src
uname -r >$tmp/img/kernel
uname -m >$tmp/img/arch
- mv ${MDSDEV1:-$MDSDEV} $tmp/img
for num in $(seq 2 $MDSCOUNT); do
local devname=$(mdsdevname $num)
local facet=mds$num
$LFS setdirstripe -D -c2 $tmp/mnt/lustre/striped_dir
pushd $tmp/mnt/lustre
- tar -cf - . --exclude=./striped_dir \
- --exclude=./striped_dir_old \
- --exclude=./remote_dir |
+ tar -c --exclude=./striped_dir \
+ --exclude=./striped_dir_old \
+ --exclude=./remote_dir -f - .|
tar -xvf - -C striped_dir 1>/dev/null || {
error_noexit "cp to striped dir failed"
return 1
}
shall_cleanup_lustre=false
else
+ $MOUNT_CMD $nid:/$fsname $tmp/mnt/lustre || {
+ error_noexit "Mounting the client"
+ return 1
+ }
+
+ [[ $(do_facet mds1 pgrep orph_.*-MDD | wc -l) == 0 ]] ||
+ error "MDD orphan cleanup thread not quit"
+
+ umount $tmp/mnt/lustre || {
+ error_noexit "Unmounting the client"
+ return 1
+ }
+
if [[ "$dne_upgrade" != "no" ]] || $mdt2_is_available; then
$r $UMOUNT $tmp/mnt/mdt1 || {
error_noexit "Unmounting the MDT2"
t32_check
for tarball in $tarballs; do
+ banner "testing $tarball upgrade"
t32_test $tarball || let "rc += $?"
done
return $rc
t32_check
for tarball in $tarballs; do
+ banner "testing $tarball upgrade with writeconf"
t32_test $tarball writeconf || let "rc += $?"
done
return $rc
t32_check
for tarball in $tarballs; do
# Do not support 1_8 and 2_1 direct upgrade to DNE2 anymore */
- echo $tarball | grep "1_8" && continue
- echo $tarball | grep "2_1" && continue
+ [[ "$tarball" =~ "1_8" ]] && echo "skip $tarball" && continue
+ [[ "$tarball" =~ "2_1" ]] && echo "skip $tarball" && continue
+ banner "testing $tarball upgrade with DNE"
load_modules
dne_upgrade=yes t32_test $tarball writeconf || rc=$?
done
t32_check
for tarball in $tarballs; do
+ banner "testing $tarball upgrade with ff convert"
ff_convert=yes t32_test $tarball || rc=$?
done
return $rc
t32_check
for tarball in $tarballs; do
- echo $tarball | grep "2_9" || continue
+ [[ "$tarball" =~ "2_9" ]] || continue
#load_modules
+ banner "testing $tarball upgrade with DoM"
dom_upgrade=yes t32_test $tarball writeconf || let "rc += $?"
done
return $rc
# LOV EA, and so on. These EA will use some EA space that is shared by
# ACL entries. So here we only check some reasonable ACL entries count,
# instead of the max number that is calculated from the max_ea_size.
- if [ "$MDS1_VERSION" -lt $(version_code 2.8.57) ];
- then
+ if [ "$MDS1_VERSION" -lt $(version_code 2.8.57) ]; then
count=28 # hard coded of RPC protocol
- elif [ "$mds1_FSTYPE" != ldiskfs ]; then
- count=4000 # max_num 4091 max_ea_size = ~65536
- elif ! large_xattr_enabled; then
- count=450 # max_num 497 max_ea_size = 4012
- else
+ elif large_xattr_enabled; then
count=4500 # max_num 8187 max_ea_size = 65452
- # not create too much (>5000) to save test time
+ # not create too many (4500) to save test time
+ else
+ count=450 # max_num 497 max_ea_size = 4012
fi
echo "It is expected to hold at least $count ACL entries"
# Check max_easize.
local max_easize=$($LCTL get_param -n llite.*.max_easize)
- if [ $MDS1_VERSION -lt $(version_code 2.12.51) ]
- then
- [[ $max_easize -eq 128 ]] ||
- error "max_easize is $max_easize, should be 128 bytes"
+ # 65452 is XATTR_SIZE_MAX less ldiskfs ea overhead
+ if large_xattr_enabled; then
+ [[ $max_easize -ge 65452 ]] ||
+ error "max_easize is $max_easize, should be at least 65452 bytes"
else
# LU-11868
- # 4012 is 4096 - ldiskfs ea overhead
+ # 4012 is 4096 less ldiskfs ea overhead
[[ $max_easize -ge 4012 ]] ||
- error "max_easize is $max_easize, should be at least 4012 bytes"
-
- # 65452 is XATTR_SIZE_MAX - ldiskfs ea overhead
- if large_xattr_enabled;
- then
- [[ $max_easize -ge 65452 ]] ||
- error "max_easize is $max_easize, should be at least 65452 bytes"
- fi
+ error "max_easize is $max_easize, should be at least 4012 bytes"
fi
restore_ostindex
# Remove OSTs from a pool and destroy the pool.
destroy_pool $ost_pool || true
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
restore_ostindex
}
done
mount_client $MOUNT || error "mount client $MOUNT failed"
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
wait_osts_up
$LFS df $MOUNT || error "$LFS df $MOUNT failed"
error "format ost1 error"
if ! test -b $dev; then
- mnt_opts=$(csa_add "$OST_MOUNT_OPTS" -o loop)
+ mnt_opts=$(csa_add "$OST_MOUNT_FS_OPTS" -o loop)
fi
echo "mnt_opts $mnt_opts"
do_facet ost1 mount -t "$ost1_FSTYPE" $dev \
skip "ldiskfs only test"
[[ $OSTCOUNT -gt 59 ]] &&
skip "Ignore wide striping situation"
+ [ -n "$FILESET" ] && skip "Not functional for FILESET set"
local mdsdev=$(mdsdevname 1)
local mdsvdev=$(mdsvdevname 1)
more than $left_size-byte space left in inode."
echo "Verified: at most $left_size-byte space left in inode."
- umount_ldiskfs $SINGLEMDS
+ unmount_ldiskfs $SINGLEMDS
for i in $(seq $OSTCOUNT); do
stop ost$i -f || error "stop ost$i failed"
}
run_test 90b "check max_mod_rpcs_in_flight is enforced after update"
+save_params_90c() {
+ # get max_rpcs_in_flight value
+ mrif_90c=$($LCTL get_param -n \
+ mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight)
+ echo "max_rpcs_in_flight is $mrif_90c"
+
+ # get max_mod_rpcs_in_flight value
+ mmrif_90c=$($LCTL get_param -n \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight)
+ echo "max_mod_rpcs_in_flight is $mmrif_90c"
+
+ # get MDT max_mod_rpcs_per_client value
+ mmrpc_90c=$(do_facet mds1 \
+ cat /sys/module/mdt/parameters/max_mod_rpcs_per_client)
+ echo "max_mod_rpcs_per_client is $mmrpc_90c"
+}
+
+restore_params_90c() {
+ trap 0
+
+ # restore max_rpcs_in_flight value
+ do_facet mgs $LCTL set_param -P \
+ mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight=$mrif_90c
+
+ # restore max_mod_rpcs_in_flight value
+ do_facet mgs $LCTL set_param -P \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$mmrif_90c
+
+ # restore MDT max_mod_rpcs_per_client value
+ do_facet mds1 "echo $mmrpc_90c > \
+ /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+}
+
test_90c() {
local tmp
- local mrif
- local mmrpc
setup
skip "Client not able to send multiple modify RPCs in parallel"
fi
- # get max_rpcs_in_flight value
- mrif=$($LCTL get_param -n mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight)
- echo "max_rpcs_in_flight is $mrif"
-
- # get MDT max_mod_rpcs_per_client
- mmrpc=$(do_facet mds1 \
- cat /sys/module/mdt/parameters/max_mod_rpcs_per_client)
- echo "max_mod_rpcs_per_client is $mmrpc"
+ save_params_90c
+ stack_trap restore_params_90c
# testcase 1
# attempt to set max_mod_rpcs_in_flight to max_rpcs_in_flight value
# prerequisite: set max_mod_rpcs_per_client to max_rpcs_in_flight value
- umount_client $MOUNT
- do_facet mds1 \
- "echo $mrif > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
- mount_client $MOUNT
+ do_facet mds1 "echo $mrif_90c > \
+ /sys/module/mdt/parameters/max_mod_rpcs_per_client"
- $LCTL set_param \
- mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$mrif &&
- error "set max_mod_rpcs_in_flight to $mrif should fail"
+ # if max_mod_rpcs_in_flight is set to be equal to or larger than
+ # max_rpcs_in_flight, then max_rpcs_in_flight will be increased
+ if [[ "$CLIENT_VERSION" -ge $(version_code 2.13.53) ]]; then
+ $LCTL set_param \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$mrif_90c ||
+ error "set max_mod_rpcs_in_flight to $mrif_90c failed"
+
+ local new_mrif=$($LCTL get_param -n \
+ mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight)
+ ((new_mrif == mrif_90c + 1)) ||
+ error "max_rpcs_in_flight was not increased"
+ fi
umount_client $MOUNT
- do_facet mds1 \
- "echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+ do_facet mds1 "echo $mmrpc_90c > \
+ /sys/module/mdt/parameters/max_mod_rpcs_per_client"
mount_client $MOUNT
# testcase 2
# attempt to set max_mod_rpcs_in_flight to max_mod_rpcs_per_client+1
# prerequisite: set max_rpcs_in_flight to max_mod_rpcs_per_client+2
$LCTL set_param \
- mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight=$((mmrpc + 2))
+ mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight=$((mmrpc_90c + 2))
$LCTL set_param \
- mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$((mmrpc + 1)) &&
- error "set max_mod_rpcs_in_flight to $((mmrpc + 1)) should fail"
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$((mmrpc_90c + 1)) &&
+ error "set max_mod_rpcs_in_flight to $((mmrpc_90c + 1)) should fail"
+
+ # testcase 3
+ # attempt to set max_mod_rpcs_in_flight permanently
+ do_facet mgs $LCTL set_param -P \
+ mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight=$mrif_90c
+
+ do_facet mgs $LCTL set_param -P \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$mrif_90c
+
+ remount_client $MOUNT
+
+ wait_update_facet --verbose client "$LCTL get_param -n \
+ mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight" \
+ "$((mrif_90c + 1))" ||
+ error "expected '$((mrif_90c + 1))' for max_rpcs_in_flight"
+
+ wait_update_facet --verbose client "$LCTL get_param -n \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight" \
+ "$mrif_90c" ||
+ error "expected '$mrif_90c' for max_mod_rpcs_in_flight"
+ restore_params_90c
cleanup
}
run_test 90c "check max_mod_rpcs_in_flight update limits"
for ((x = 1; x <= 400; x++)); do
mountopt="$mountopt,user_xattr"
done
- remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+ mount_client $MOUNT remount,$mountopt 2>&1 | grep "too long" ||
error "Buffer overflow check failed"
cleanup || error "cleanup failed"
}
# Desired output
# MGS:
# 0@lo
- # lustre-MDT0000:
+ # $FSNAME-MDT0000:
# 0@lo
- # lustre-OST0000:
+ # $FSNAME-OST0000:
# 0@lo
do_facet mgs 'lshowmount -v' | awk 'BEGIN {NR == 0; rc=1} /MGS:/ {rc=0}
END {exit rc}' || error "lshowmount have no output MGS"
echo "rename $FSNAME to $newname"
if ! combined_mgs_mds ; then
- local facet=$(mgsdevname)
+ local dev=$(mgsdevname)
do_facet mgs \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(7) Fail to rename MGS"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(7) Fail to rename MGS"
+ if [ "$(facet_fstype mgs)" = "zfs" ]; then
reimport_zpool mgs $newname-mgs
fi
fi
for num in $(seq $MDSCOUNT); do
- local facet=$(mdsdevname $num)
+ local dev=$(mdsdevname $num)
do_facet mds${num} \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(8) Fail to rename MDT $num"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(8) Fail to rename MDT $num"
+ if [ "$(facet_fstype mds${num})" = "zfs" ]; then
reimport_zpool mds${num} $newname-mdt${num}
fi
done
for num in $(seq $OSTCOUNT); do
- local facet=$(ostdevname $num)
+ local dev=$(ostdevname $num)
do_facet ost${num} \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(9) Fail to rename OST $num"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(9) Fail to rename OST $num"
+ if [ "$(facet_fstype ost${num})" = "zfs" ]; then
reimport_zpool ost${num} $newname-ost${num}
fi
done
cp $LUSTRE/tests/test-framework.sh $DIR/$tdir ||
error "(2) Fail to copy test-framework.sh"
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
error "(3) Fail to create $FSNAME.pool1"
# name the pool name as the fsname
$LFS setstripe -p $FSNAME $DIR/$tdir/d0 ||
error "(6) Fail to setstripe on $DIR/$tdir/d0"
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
KEEP_ZPOOL=true
stopall
FSNAME="mylustre"
setupall
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
test_103_check_pool $save_fsname 7
if [ $OSTCOUNT -ge 2 ]; then
$LFS setstripe -p $save_fsname $DIR/$tdir/f0 ||
error "(16) Fail to setstripe on $DIR/$tdir/f0"
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
stopall
FSNAME="tfs"
setupall
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
test_103_check_pool $save_fsname 17
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
stopall
test_renamefs $save_fsname
}
run_test 103 "rename filesystem name"
-test_104() { # LU-6952
+test_104a() { # LU-6952
local mds_mountopts=$MDS_MOUNT_OPTS
local ost_mountopts=$OST_MOUNT_OPTS
local mds_mountfsopts=$MDS_MOUNT_FS_OPTS
OST_MOUNT_OPTS=$ost_mountopts
MDS_MOUNT_FS_OPTS=$mds_mountfsopts
}
-run_test 104 "Make sure user defined options are reflected in mount"
+run_test 104a "Make sure user defined options are reflected in mount"
+
+test_104b() { # LU-12859
+ mount_client $MOUNT3 flock,localflock
+ stack_trap "umount_client $MOUNT3" EXIT
+ mount | grep "$MOUNT3 .*,flock" && error "flock is still set"
+ mount | grep "$MOUNT3 .*,localflock" || error "localflock is not set"
+ umount_client $MOUNT3
+ mount_client $MOUNT3 localflock,flock
+ mount | grep "$MOUNT3 .*,localflock" && error "localflock is still set"
+ mount | grep "$MOUNT3 .*,flock" || error "flock is not set"
+ umount_client $MOUNT3
+ mount_client $MOUNT3 localflock,flock,noflock
+ flock_is_enabled $MOUNT3 && error "some flock is still enabled" || true
+}
+run_test 104b "Mount uses last flock argument"
error_and_umount() {
umount $TMP/$tdir
echo "changing server nid..."
$rcmd mount -t lustre -o nosvc lustre-mdt1/mdt1 $tmp/mnt/mdt1
- $rcmd lctl replace_nids lustre-MDT0000 $nid
- $rcmd lctl replace_nids lustre-MDT0001 $nid
- $rcmd lctl replace_nids lustre-OST0000 $nid
- $rcmd lctl replace_nids lustre-OST0001 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0000 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0001 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0000 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0001 $nid
$rcmd umount $tmp/mnt/mdt1
for facet in $facets; do
echo "changing server nid..."
$rcmd mount -t lustre -o nosvc,loop $tmp/images/mdt1 $tmp/mnt/mdt1
- $rcmd lctl replace_nids lustre-MDT0000 $nid
- $rcmd lctl replace_nids lustre-MDT0001 $nid
- $rcmd lctl replace_nids lustre-OST0000 $nid
- $rcmd lctl replace_nids lustre-OST0001 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0000 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0001 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0000 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0001 $nid
$rcmd umount $tmp/mnt/mdt1
for facet in $facets; do
done
for facet in $scrub_list; do
- $rcmd $LCTL lfsck_start -M lustre-$facet -t scrub ||
+ $rcmd $LCTL lfsck_start -M $FSNAME-$facet -t scrub ||
error "failed to start OI scrub on $facet"
done
reformat
setup_noconfig
client_up || error "client_up failed"
- #pool commands requires a client on MGS for procfs interfaces
- if ! combined_mgs_mds ; then
- mount_mgs_client
- stack_trap umount_mgs_client EXIT
- fi
#
# set number of permanent parameters
#
test_109_set_params $FSNAME
- combined_mgs_mds || umount_mgs_client
umount_client $MOUNT || error "umount_client failed"
stop_ost || error "stop_ost failed"
stop_mds || error "stop_mds failed"
error "failed to clear client config"
setup_noconfig
- combined_mgs_mds || mount_mgs_client
#
# check that configurations are intact
#
destroy_test_pools || error "destroy test pools failed"
- combined_mgs_mds || umount_mgs_client
cleanup
}
run_test 109a "test lctl clear_conf fsname"
reformat
setup_noconfig
client_up || error "client_up failed"
- #pool commands requires a client on MGS for procfs interfaces
- if ! combined_mgs_mds ; then
- mount_mgs_client
- stack_trap umount_mgs_client EXIT
- fi
#
# set number of permanent parameters
#
test_109_set_params $FSNAME
- combined_mgs_mds || umount_mgs_client
umount_client $MOUNT || error "umount_client failed"
stop_ost || error "stop_ost failed"
stop_mds || error "stop_mds failed"
error "failed to clear client config"
setup_noconfig
- combined_mgs_mds || mount_mgs_client
#
# check that configurations are intact
#
#
destroy_test_pools || error "destroy test pools failed"
- combined_mgs_mds || umount_mgs_client
cleanup
}
run_test 109b "test lctl clear_conf one config"
}
run_test 111 "Adding large_dir with over 2GB directory"
+test_112() {
+ start_mds || error "MDS start failed"
+ start_ost || error "OSS start failed"
+ echo "start ost2 service on $(facet_active_host ost2)"
+ start ost2 $(ostdevname 2) $(csa_add "$OST_MOUNT_OPTS" -o no_precreate) ||
+ error "start ost2 facet failed"
+ local val=$(do_facet ost2 \
+ "$LCTL get_param -n obdfilter.$FSNAME-OST0001*.no_precreate")
+ (( $val == 1 )) || error "obdfilter.$FSNAME-OST0001*.no_precreate=$val"
+
+ mount_client $MOUNT || error "mount client failed"
+ wait_osc_import_state client ost2 FULL
+
+ $LFS setstripe -i 0 $DIR/$tfile.0 ||
+ error "problem creating $tfile.0 on OST0000"
+ $LFS setstripe -i 1 $DIR/$tfile.1 && $LFS getstripe $DIR/$tfile.1 &&
+ (( $($LFS getstripe -i $DIR/$tfile.1) == 1 )) &&
+ error "allowed to create $tfile.1 on OST0001"
+ do_facet ost2 $LCTL set_param obdfilter.*.no_precreate=0
+ sleep_maxage
+ $LFS setstripe -i 1 $DIR/$tfile.2 ||
+ error "failed to create $tfile.2 on ost1 facet"
+ stop_ost2 || error "stop ost2 facet failed"
+ cleanup
+}
+run_test 112 "mount OST with nocreate option"
cleanup_115()
{
if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "Only applicable to ldiskfs-based MDTs"
fi
+ [ -n "$FILESET" ] && skip "Not functional for FILESET set"
local dbfs_ver=$(do_facet $SINGLEMDS $DEBUGFS -V 2>&1)
IMAGESIZE=$((3072 << 30)) # 3072 GiB
stopall
+
+ echo "client1: "
+ lctl dl
+ mount | grep lustre
+ echo "mds1: "
+ do_facet mds1 "hostname; ifconfig; lctl dl; mount"
+ echo "ost1: "
+ do_facet ost1 "hostname; ifconfig; lctl dl; mount"
# We need MDT size 3072GB, because it is smallest
# partition that can store 2B inodes
do_facet $SINGLEMDS "mkdir -p $TMP/$tdir"
local mdsdev=$(do_facet $SINGLEMDS "losetup -f")
do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname"
- local mds_opts="$(mkfs_opts mds1 $(mdsdevname 1)) --device-size=$IMAGESIZE \
+ local mds_opts="$(mkfs_opts mds1 $(mdsdevname 1)) \
--mkfsoptions='-O ea_inode,^resize_inode,meta_bg \
- -N 2247484000 -E lazy_itable_init'"
+ -N 2247484000 -E lazy_itable_init' --device-size=$IMAGESIZE"
add mds1 $mds_opts --mgs --reformat $mdsdev ||
skip_env "format large MDT failed"
opts="$(mkfs_opts ost1 $(ostdevname 1)) \
local orig_val
orig_val=$(do_facet mgs $LCTL get_param jobid_name)
- do_facet mgs $LCTL set_param -P jobid_name="testname"
+ do_facet mgs $LCTL set_param -P jobid_name="TESTNAME"
yaml=$(do_facet mgs $LCTL --device MGS llog_print params |
grep jobid_name | tail -n 1)
local val=$(awk '{ print $12 }' <<< "$yaml")
#return to the default
do_facet mgs $LCTL set_param -P jobid_name=$orig_val
- [ $val = "testname" ] || error "bad value: $val"
+ [ $val = "TESTNAME" ] || error "bad value: $val"
[ $param = "jobid_name," ] || error "Bad param: $param"
}
run_test 123ab "llog_print params output values from set_param -P"
}
run_test 123ae "llog_cancel can cancel requested record"
+test_123af() { #LU-13609
+ [ "$MGS_VERSION" -ge $(version_code 2.13.54) -a \
+ "$MDS1_VERSION" -ge $(version_code 2.13.54) ] ||
+ skip "Need both MGS and MDS version at least 2.13.54"
+
+ [ -d $MOUNT/.lustre ] || setupall
+ stack_trap "do_facet mds1 $LCTL set_param fail_loc=0" EXIT
+
+ local device
+ local facet
+ local cmd
+ local orig_clist
+ local orig_count
+ local new_clist
+ local new_count
+
+ for device in "MGS" "$FSNAME-MDT0000"; do
+ cmd="--device $device llog_catlist"
+ echo "lctl $cmd ..."
+ if [ "$device" = "MGS" ]; then
+ facet="mgs"
+ else
+ facet="mds1"
+ fi
+ orig_clist=($(do_facet $facet $LCTL $cmd | awk '{ print $2 }'))
+ orig_count=${#orig_clist[@]}
+ echo "orig_clist: ${orig_clist[@]}"
+
+ #define OBD_FAIL_CATLIST 0x131b
+ #fetch to llog records from the second one
+ do_facet $facet $LCTL set_param fail_loc=0x131b fail_val=2
+
+ new_clist=($(do_facet $facet $LCTL $cmd | awk '{ print $2 }'))
+ new_count=${#new_clist[@]}
+ echo "new_clist: ${new_clist[@]}"
+
+ [ $new_count -eq $((orig_count - 1)) ] ||
+ error "$new_count != $orig_count - 1"
+ for i in $(seq 0 $new_count); do
+ j=$((i + 1))
+ [ "${orig_clist[$j]}" = "${new_clist[$i]}" ] ||
+ error "${orig_clist[$j]} != ${new_clist[$i]}"
+ done
+ do_facet mds1 $LCTL set_param fail_loc=0
+ echo "done"
+ done
+}
+run_test 123af "llog_catlist can show all config files correctly"
+
test_123F() {
remote_mgs_nodsh && skip "remote MGS with nodsh"
# set jobid_var to a different value for test
local orig_val=$(do_facet mgs $LCTL get_param jobid_var)
- do_facet mgs $LCTL set_param -P jobid_var="testname"
+ do_facet mgs $LCTL set_param -P jobid_var="TESTNAME"
for i in $cfgfiles params; do
do_facet mgs "lctl --device MGS llog_print ${i} >> $yaml_file"
local set_val=$(do_facet mgs $LCTL get_param jobid_var)
do_facet mgs $LCTL set_param -P $orig_val
- [ $set_val == "jobid_var=testname" ] ||
- error "$set_val is not testname"
+ [ $set_val == "jobid_var=TESTNAME" ] ||
+ error "$set_val is not TESTNAME"
do_facet mgs rm "$yaml_file"
cleanup
}
run_test 125 "check l_tunedisk only tunes OSTs and their slave devices"
+test_126() {
+ [[ "$MDS1_VERSION" -ge $(version_code 2.13.52) ]] ||
+ skip "Need MDS version at least 2.13.52"
+
+ cleanup
+ do_rpc_nodes $(facet_active_host $SINGLEMDS) load_module ../libcfs/libcfs/libcfs
+ #define OBD_FAIL_OBD_SETUP 0x60d
+ do_facet mds1 $LCTL set_param fail_loc=0x60d
+ do_rpc_nodes $(facet_active_host $SINGLEMDS) load_modules &
+ for i in {1..40}; do
+ do_facet mds1 lsmod | grep -q osd_$mds1_FSTYPE && break
+ sleep 1
+ done
+ clear_failloc $SINGLEMDS 20 &
+ start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+}
+run_test 126 "mount in parallel shouldn't cause a crash"
+
if ! combined_mgs_mds ; then
stop mgs
fi