ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 32newtarball"
# bug number for skipped test: LU-11915 (110); TODO(review): add bug number for test 115
-ALWAYS_EXCEPT="$ALWAYS_EXCEPT 110"
+ALWAYS_EXCEPT="$ALWAYS_EXCEPT 110 115"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
if $SHARED_KEY; then
}
mount_client() {
- local MOUNTPATH=$1
- echo "mount $FSNAME on ${MOUNTPATH}....."
- zconf_mount $(hostname) $MOUNTPATH || return 96
-}
+ local mountpath=$1
+ local mountopt="$2"
-remount_client() {
- local mountopt="remount,$1"
- local MOUNTPATH=$2
- echo "remount '$1' lustre on ${MOUNTPATH}....."
- zconf_mount $(hostname) $MOUNTPATH "$mountopt" || return 96
+ echo "mount $FSNAME ${mountopt:+with opts $mountopt} on $mountpath....."
+ zconf_mount $HOSTNAME $mountpath $mountopt || return 96
}
umount_client() {
mount_client $MOUNT || error "mount_client $MOUNT failed"
check_mount || error "check_mount failed"
rm -f $DIR/$tfile || error "remove $DIR/$tfile failed."
- remount_client ro $MOUNT || error "remount_client with ro failed"
+ mount_client $MOUNT remount,ro || error "remount client with ro failed"
touch $DIR/$tfile && error "$DIR/$tfile created incorrectly"
[ -e $DIR/$tfile ] && error "$DIR/$tfile exists incorrectly"
- remount_client rw $MOUNT || error "remount_client with rw failed"
+ mount_client $MOUNT remount,rw || error "remount client with rw failed"
touch $DIR/$tfile || error "touch $DIR/$tfile failed"
MCNT=$(grep -c $MOUNT' ' /etc/mtab)
[ "$MCNT" -ne 1 ] && error "$MOUNT in /etc/mtab $MCNT times"
start_mds || error "MDS start failed"
wait_osc_import_state mds ost2 FULL
+ local zkeeper=${KEEP_ZPOOL}
+ stack_trap "KEEP_ZPOOL=$zkeeper" EXIT
+ KEEP_ZPOOL="true"
+
stop_ost || error "Unable to stop OST1"
stop_ost2 || error "Unable to stop OST2"
stop_mds || error "Unable to stop MDS"
stop_mgs
#writeconf to remove all ost2 traces for subsequent tests
writeconf_or_reformat
+ KEEP_ZPOOL="${zkeeper}"
+
start_mgs || error "unable to start MGS"
}
run_test 21d "start mgs then ost and then mds"
$LFS setdirstripe -D -c2 $tmp/mnt/lustre/striped_dir
pushd $tmp/mnt/lustre
- tar -cf - . --exclude=./striped_dir \
- --exclude=./striped_dir_old \
- --exclude=./remote_dir |
+ tar -c --exclude=./striped_dir \
+ --exclude=./striped_dir_old \
+ --exclude=./remote_dir -f - .|
tar -xvf - -C striped_dir 1>/dev/null || {
error_noexit "cp to striped dir failed"
return 1
# LOV EA, and so on. These EA will use some EA space that is shared by
# ACL entries. So here we only check some reasonable ACL entries count,
# instead of the max number that is calculated from the max_ea_size.
- if [ "$MDS1_VERSION" -lt $(version_code 2.8.57) ];
- then
+ if [ "$MDS1_VERSION" -lt $(version_code 2.8.57) ]; then
count=28 # hard coded of RPC protocol
- elif [ "$mds1_FSTYPE" != ldiskfs ]; then
- count=4000 # max_num 4091 max_ea_size = ~65536
- elif ! large_xattr_enabled; then
- count=450 # max_num 497 max_ea_size = 4012
- else
+ elif large_xattr_enabled; then
count=4500 # max_num 8187 max_ea_size = 65452
- # not create too much (>5000) to save test time
+ # don't create too many (4500) to save test time
+ else
+ count=450 # max_num 497 max_ea_size = 4012
fi
echo "It is expected to hold at least $count ACL entries"
# Check max_easize.
local max_easize=$($LCTL get_param -n llite.*.max_easize)
- if [ $MDS1_VERSION -lt $(version_code 2.12.51) ]
- then
- [[ $max_easize -eq 128 ]] ||
- error "max_easize is $max_easize, should be 128 bytes"
+ # 65452 is XATTR_SIZE_MAX less ldiskfs ea overhead
+ if large_xattr_enabled; then
+ [[ $max_easize -ge 65452 ]] ||
+ error "max_easize is $max_easize, should be at least 65452 bytes"
else
# LU-11868
- # 4012 is 4096 - ldiskfs ea overhead
+ # 4012 is 4096 less ldiskfs ea overhead
[[ $max_easize -ge 4012 ]] ||
- error "max_easize is $max_easize, should be at least 4012 bytes"
-
- # 65452 is XATTR_SIZE_MAX - ldiskfs ea overhead
- if large_xattr_enabled;
- then
- [[ $max_easize -ge 65452 ]] ||
- error "max_easize is $max_easize, should be at least 65452 bytes"
- fi
+ error "max_easize is $max_easize, should be at least 4012 bytes"
fi
restore_ostindex
error "format ost1 error"
if ! test -b $dev; then
- mnt_opts=$(csa_add "$OST_MOUNT_OPTS" -o loop)
+ mnt_opts=$(csa_add "$OST_MOUNT_FS_OPTS" -o loop)
fi
echo "mnt_opts $mnt_opts"
do_facet ost1 mount -t "$ost1_FSTYPE" $dev \
more than $left_size-byte space left in inode."
echo "Verified: at most $left_size-byte space left in inode."
- umount_ldiskfs $SINGLEMDS
+ unmount_ldiskfs $SINGLEMDS
for i in $(seq $OSTCOUNT); do
stop ost$i -f || error "stop ost$i failed"
for ((x = 1; x <= 400; x++)); do
mountopt="$mountopt,user_xattr"
done
- remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+ mount_client $MOUNT remount,$mountopt 2>&1 | grep "too long" ||
error "Buffer overflow check failed"
cleanup || error "cleanup failed"
}
# Desired output
# MGS:
# 0@lo
- # lustre-MDT0000:
+ # $FSNAME-MDT0000:
# 0@lo
- # lustre-OST0000:
+ # $FSNAME-OST0000:
# 0@lo
do_facet mgs 'lshowmount -v' | awk 'BEGIN {NR == 0; rc=1} /MGS:/ {rc=0}
END {exit rc}' || error "lshowmount have no output MGS"
echo "rename $FSNAME to $newname"
if ! combined_mgs_mds ; then
- local facet=$(mgsdevname)
+ local dev=$(mgsdevname)
do_facet mgs \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(7) Fail to rename MGS"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(7) Fail to rename MGS"
+ if [ "$(facet_fstype mgs)" = "zfs" ]; then
reimport_zpool mgs $newname-mgs
fi
fi
for num in $(seq $MDSCOUNT); do
- local facet=$(mdsdevname $num)
+ local dev=$(mdsdevname $num)
do_facet mds${num} \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(8) Fail to rename MDT $num"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(8) Fail to rename MDT $num"
+ if [ "$(facet_fstype mds${num})" = "zfs" ]; then
reimport_zpool mds${num} $newname-mdt${num}
fi
done
for num in $(seq $OSTCOUNT); do
- local facet=$(ostdevname $num)
+ local dev=$(ostdevname $num)
do_facet ost${num} \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(9) Fail to rename OST $num"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(9) Fail to rename OST $num"
+ if [ "$(facet_fstype ost${num})" = "zfs" ]; then
reimport_zpool ost${num} $newname-ost${num}
fi
done
}
run_test 103 "rename filesystem name"
-test_104() { # LU-6952
+test_104a() { # LU-6952
local mds_mountopts=$MDS_MOUNT_OPTS
local ost_mountopts=$OST_MOUNT_OPTS
local mds_mountfsopts=$MDS_MOUNT_FS_OPTS
OST_MOUNT_OPTS=$ost_mountopts
MDS_MOUNT_FS_OPTS=$mds_mountfsopts
}
-run_test 104 "Make sure user defined options are reflected in mount"
+run_test 104a "Make sure user defined options are reflected in mount"
+
+test_104b() { # LU-12859
+	# When several flock modes are given on one mount command line, the
+	# last one must win (NOTE(review): behavior per LU-12859 — confirm).
+	mount_client $MOUNT3 flock,localflock
+	stack_trap "umount_client $MOUNT3" EXIT
+	# "localflock" contains no ",flock" substring, so this grep only
+	# matches a real standalone flock option.
+	mount | grep "$MOUNT3 .*,flock" && error "flock is still set"
+	mount | grep "$MOUNT3 .*,localflock" || error "localflock is not set"
+	umount_client $MOUNT3
+	# Reversed order: flock is last, so flock must be in effect.
+	mount_client $MOUNT3 localflock,flock
+	mount | grep "$MOUNT3 .*,localflock" && error "localflock is still set"
+	mount | grep "$MOUNT3 .*,flock" || error "flock is not set"
+	umount_client $MOUNT3
+	# noflock last must disable any flock mode requested earlier.
+	mount_client $MOUNT3 localflock,flock,noflock
+	flock_is_enabled $MOUNT3 && error "some flock is still enabled" || true
+}
+run_test 104b "Mount uses last flock argument"
error_and_umount() {
umount $TMP/$tdir
echo "changing server nid..."
$rcmd mount -t lustre -o nosvc lustre-mdt1/mdt1 $tmp/mnt/mdt1
- $rcmd lctl replace_nids lustre-MDT0000 $nid
- $rcmd lctl replace_nids lustre-MDT0001 $nid
- $rcmd lctl replace_nids lustre-OST0000 $nid
- $rcmd lctl replace_nids lustre-OST0001 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0000 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0001 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0000 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0001 $nid
$rcmd umount $tmp/mnt/mdt1
for facet in $facets; do
echo "changing server nid..."
$rcmd mount -t lustre -o nosvc,loop $tmp/images/mdt1 $tmp/mnt/mdt1
- $rcmd lctl replace_nids lustre-MDT0000 $nid
- $rcmd lctl replace_nids lustre-MDT0001 $nid
- $rcmd lctl replace_nids lustre-OST0000 $nid
- $rcmd lctl replace_nids lustre-OST0001 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0000 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0001 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0000 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0001 $nid
$rcmd umount $tmp/mnt/mdt1
for facet in $facets; do
done
for facet in $scrub_list; do
- $rcmd $LCTL lfsck_start -M lustre-$facet -t scrub ||
+ $rcmd $LCTL lfsck_start -M $FSNAME-$facet -t scrub ||
error "failed to start OI scrub on $facet"
done
}
run_test 111 "Adding large_dir with over 2GB directory"
+test_112() {
+	start_mds || error "MDS start failed"
+	start_ost || error "OSS start failed"
+	echo "start ost2 service on $(facet_active_host ost2)"
+	# Mount OST2 with "no_precreate" so the MDS cannot precreate objects
+	# on OST0001, which must make that target unusable for new files.
+	start ost2 $(ostdevname 2) $(csa_add "$OST_MOUNT_OPTS" -o no_precreate) ||
+		error "start ost2 facet failed"
+	local val=$(do_facet ost2 \
+		   "$LCTL get_param -n obdfilter.$FSNAME-OST0001*.no_precreate")
+	(( $val == 1 )) || error "obdfilter.$FSNAME-OST0001*.no_precreate=$val"
+
+	mount_client $MOUNT || error "mount client failed"
+	wait_osc_import_state client ost2 FULL
+
+	# OST0000 precreates normally, so striping there must succeed.
+	$LFS setstripe -i 0 $DIR/$tfile.0 ||
+		error "problem creating $tfile.0 on OST0000"
+	# Creating on OST0001 must fail while no_precreate is active.
+	$LFS setstripe -i 1 $DIR/$tfile.1 && $LFS getstripe $DIR/$tfile.1 &&
+		(( $($LFS getstripe -i $DIR/$tfile.1) == 1 )) &&
+		error "allowed to create $tfile.1 on OST0001"
+	# Re-enable precreation and give the client time to notice.
+	do_facet ost2 $LCTL set_param obdfilter.*.no_precreate=0
+	sleep_maxage
+	# Stripe index 1 is OST0001, served by the ost2 facet (not ost1).
+	$LFS setstripe -i 1 $DIR/$tfile.2 ||
+		error "failed to create $tfile.2 on OST0001 (ost2 facet)"
+	stop_ost2 || error "stop ost2 facet failed"
+	cleanup
+}
+run_test 112 "mount OST with no_precreate option"
cleanup_115()
{
IMAGESIZE=$((3072 << 30)) # 3072 GiB
stopall
+
+ echo "client1: "
+ lctl dl
+ mount | grep lustre
+ echo "mds1: "
+ do_facet mds1 "hostname; ifconfig; lctl dl; mount"
+ echo "ost1: "
+ do_facet ost1 "hostname; ifconfig; lctl dl; mount"
# We need MDT size 3072GB, because it is smallest
# partition that can store 2B inodes
do_facet $SINGLEMDS "mkdir -p $TMP/$tdir"
local mdsdev=$(do_facet $SINGLEMDS "losetup -f")
do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname"
- local mds_opts="$(mkfs_opts mds1 $(mdsdevname 1)) --device-size=$IMAGESIZE \
+ local mds_opts="$(mkfs_opts mds1 $(mdsdevname 1)) \
--mkfsoptions='-O ea_inode,^resize_inode,meta_bg \
- -N 2247484000 -E lazy_itable_init'"
+ -N 2247484000 -E lazy_itable_init' --device-size=$IMAGESIZE"
add mds1 $mds_opts --mgs --reformat $mdsdev ||
skip_env "format large MDT failed"
opts="$(mkfs_opts ost1 $(ostdevname 1)) \
local orig_val
orig_val=$(do_facet mgs $LCTL get_param jobid_name)
- do_facet mgs $LCTL set_param -P jobid_name="testname"
+ do_facet mgs $LCTL set_param -P jobid_name="TESTNAME"
yaml=$(do_facet mgs $LCTL --device MGS llog_print params |
grep jobid_name | tail -n 1)
local val=$(awk '{ print $12 }' <<< "$yaml")
#return to the default
do_facet mgs $LCTL set_param -P jobid_name=$orig_val
- [ $val = "testname" ] || error "bad value: $val"
+ [ $val = "TESTNAME" ] || error "bad value: $val"
[ $param = "jobid_name," ] || error "Bad param: $param"
}
run_test 123ab "llog_print params output values from set_param -P"
# set jobid_var to a different value for test
local orig_val=$(do_facet mgs $LCTL get_param jobid_var)
- do_facet mgs $LCTL set_param -P jobid_var="testname"
+ do_facet mgs $LCTL set_param -P jobid_var="TESTNAME"
for i in $cfgfiles params; do
do_facet mgs "lctl --device MGS llog_print ${i} >> $yaml_file"
local set_val=$(do_facet mgs $LCTL get_param jobid_var)
do_facet mgs $LCTL set_param -P $orig_val
- [ $set_val == "jobid_var=testname" ] ||
- error "$set_val is not testname"
+ [ $set_val == "jobid_var=TESTNAME" ] ||
+ error "$set_val is not TESTNAME"
do_facet mgs rm "$yaml_file"
cleanup