}
mount_client() {
+	# Mount this node's Lustre client on $1 (mountpath), optionally with
+	# mount options in $2 (e.g. "remount,ro"); subsumes the old
+	# remount_client() helper.  Returns 96 if zconf_mount fails.
- local MOUNTPATH=$1
- echo "mount $FSNAME on ${MOUNTPATH}....."
- zconf_mount $(hostname) $MOUNTPATH || return 96
-}
+ local mountpath=$1
+ local mountopt="$2"
-remount_client() {
- local mountopt="remount,$1"
- local MOUNTPATH=$2
- echo "remount '$1' lustre on ${MOUNTPATH}....."
- zconf_mount $(hostname) $MOUNTPATH "$mountopt" || return 96
+ echo "mount $FSNAME ${mountopt:+with opts $mountopt} on $mountpath....."
+ zconf_mount $HOSTNAME $mountpath $mountopt || return 96
}
umount_client() {
mount_client $MOUNT || error "mount_client $MOUNT failed"
check_mount || error "check_mount failed"
rm -f $DIR/$tfile || error "remove $DIR/$tfile failed."
- remount_client ro $MOUNT || error "remount_client with ro failed"
+ mount_client $MOUNT remount,ro || error "remount client with ro failed"
touch $DIR/$tfile && error "$DIR/$tfile created incorrectly"
[ -e $DIR/$tfile ] && error "$DIR/$tfile exists incorrectly"
- remount_client rw $MOUNT || error "remount_client with rw failed"
+ mount_client $MOUNT remount,rw || error "remount client with rw failed"
touch $DIR/$tfile || error "touch $DIR/$tfile failed"
MCNT=$(grep -c $MOUNT' ' /etc/mtab)
[ "$MCNT" -ne 1 ] && error "$MOUNT in /etc/mtab $MCNT times"
# LOV EA, and so on. These EA will use some EA space that is shared by
# ACL entries. So here we only check some reasonable ACL entries count,
# instead of the max number that is calculated from the max_ea_size.
- if [ "$MDS1_VERSION" -lt $(version_code 2.8.57) ];
- then
+ if [ "$MDS1_VERSION" -lt $(version_code 2.8.57) ]; then
count=28 # hard coded of RPC protocol
- elif [ "$mds1_FSTYPE" != ldiskfs ]; then
- count=4000 # max_num 4091 max_ea_size = ~65536
- elif ! large_xattr_enabled; then
- count=450 # max_num 497 max_ea_size = 4012
- else
+ elif large_xattr_enabled; then
count=4500 # max_num 8187 max_ea_size = 65452
- # not create too much (>5000) to save test time
+ # not create too many (4500) to save test time
+ else
+ count=450 # max_num 497 max_ea_size = 4012
fi
echo "It is expected to hold at least $count ACL entries"
# Check max_easize.
local max_easize=$($LCTL get_param -n llite.*.max_easize)
- if [ $MDS1_VERSION -lt $(version_code 2.12.51) ]
- then
- [[ $max_easize -eq 128 ]] ||
- error "max_easize is $max_easize, should be 128 bytes"
+ # 65452 is XATTR_SIZE_MAX less ldiskfs ea overhead
+ if large_xattr_enabled; then
+ [[ $max_easize -ge 65452 ]] ||
+ error "max_easize is $max_easize, should be at least 65452 bytes"
else
# LU-11868
- # 4012 is 4096 - ldiskfs ea overhead
+ # 4012 is 4096 less ldiskfs ea overhead
[[ $max_easize -ge 4012 ]] ||
- error "max_easize is $max_easize, should be at least 4012 bytes"
-
- # 65452 is XATTR_SIZE_MAX - ldiskfs ea overhead
- if large_xattr_enabled;
- then
- [[ $max_easize -ge 65452 ]] ||
- error "max_easize is $max_easize, should be at least 65452 bytes"
- fi
+ error "max_easize is $max_easize, should be at least 4012 bytes"
fi
restore_ostindex
# Remove OSTs from a pool and destroy the pool.
destroy_pool $ost_pool || true
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
restore_ostindex
}
done
mount_client $MOUNT || error "mount client $MOUNT failed"
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
wait_osts_up
$LFS df $MOUNT || error "$LFS df $MOUNT failed"
for ((x = 1; x <= 400; x++)); do
mountopt="$mountopt,user_xattr"
done
- remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+ mount_client $MOUNT remount,$mountopt 2>&1 | grep "too long" ||
error "Buffer overflow check failed"
cleanup || error "cleanup failed"
}
echo "rename $FSNAME to $newname"
if ! combined_mgs_mds ; then
- local facet=$(mgsdevname)
+ local dev=$(mgsdevname)
do_facet mgs \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(7) Fail to rename MGS"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(7) Fail to rename MGS"
+ if [ "$(facet_fstype mgs)" = "zfs" ]; then
reimport_zpool mgs $newname-mgs
fi
fi
for num in $(seq $MDSCOUNT); do
- local facet=$(mdsdevname $num)
+ local dev=$(mdsdevname $num)
do_facet mds${num} \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(8) Fail to rename MDT $num"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(8) Fail to rename MDT $num"
+ if [ "$(facet_fstype mds${num})" = "zfs" ]; then
reimport_zpool mds${num} $newname-mdt${num}
fi
done
for num in $(seq $OSTCOUNT); do
- local facet=$(ostdevname $num)
+ local dev=$(ostdevname $num)
do_facet ost${num} \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(9) Fail to rename OST $num"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(9) Fail to rename OST $num"
+ if [ "$(facet_fstype ost${num})" = "zfs" ]; then
reimport_zpool ost${num} $newname-ost${num}
fi
done
cp $LUSTRE/tests/test-framework.sh $DIR/$tdir ||
error "(2) Fail to copy test-framework.sh"
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
error "(3) Fail to create $FSNAME.pool1"
# name the pool name as the fsname
$LFS setstripe -p $FSNAME $DIR/$tdir/d0 ||
error "(6) Fail to setstripe on $DIR/$tdir/d0"
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
KEEP_ZPOOL=true
stopall
FSNAME="mylustre"
setupall
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
test_103_check_pool $save_fsname 7
if [ $OSTCOUNT -ge 2 ]; then
$LFS setstripe -p $save_fsname $DIR/$tdir/f0 ||
error "(16) Fail to setstripe on $DIR/$tdir/f0"
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
stopall
FSNAME="tfs"
setupall
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
test_103_check_pool $save_fsname 17
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
stopall
test_renamefs $save_fsname
}
run_test 103 "rename filesystem name"
-test_104() { # LU-6952
+test_104a() { # LU-6952
local mds_mountopts=$MDS_MOUNT_OPTS
local ost_mountopts=$OST_MOUNT_OPTS
local mds_mountfsopts=$MDS_MOUNT_FS_OPTS
OST_MOUNT_OPTS=$ost_mountopts
MDS_MOUNT_FS_OPTS=$mds_mountfsopts
}
-run_test 104 "Make sure user defined options are reflected in mount"
+run_test 104a "Make sure user defined options are reflected in mount"
+
+test_104b() { # LU-12859
+	# When conflicting flock options appear in the mount option string,
+	# only the last one listed should take effect.
+ mount_client $MOUNT3 flock,localflock
+ stack_trap "umount_client $MOUNT3" EXIT
+ mount | grep "$MOUNT3 .*,flock" && error "flock is still set"
+ mount | grep "$MOUNT3 .*,localflock" || error "localflock is not set"
+ umount_client $MOUNT3
+	# Reversed order: flock (last) must win over localflock.
+ mount_client $MOUNT3 localflock,flock
+ mount | grep "$MOUNT3 .*,localflock" && error "localflock is still set"
+ mount | grep "$MOUNT3 .*,flock" || error "flock is not set"
+ umount_client $MOUNT3
+	# noflock is last, so no flock mode at all should be enabled.
+ mount_client $MOUNT3 localflock,flock,noflock
+ flock_is_enabled $MOUNT3 && error "some flock is still enabled" || true
+}
+run_test 104b "Mount uses last flock argument"
error_and_umount() {
umount $TMP/$tdir
reformat
setup_noconfig
client_up || error "client_up failed"
- #pool commands requires a client on MGS for procfs interfaces
- if ! combined_mgs_mds ; then
- mount_mgs_client
- stack_trap umount_mgs_client EXIT
- fi
#
# set number of permanent parameters
#
test_109_set_params $FSNAME
- combined_mgs_mds || umount_mgs_client
umount_client $MOUNT || error "umount_client failed"
stop_ost || error "stop_ost failed"
stop_mds || error "stop_mds failed"
error "failed to clear client config"
setup_noconfig
- combined_mgs_mds || mount_mgs_client
#
# check that configurations are intact
#
destroy_test_pools || error "destroy test pools failed"
- combined_mgs_mds || umount_mgs_client
cleanup
}
run_test 109a "test lctl clear_conf fsname"
reformat
setup_noconfig
client_up || error "client_up failed"
- #pool commands requires a client on MGS for procfs interfaces
- if ! combined_mgs_mds ; then
- mount_mgs_client
- stack_trap umount_mgs_client EXIT
- fi
#
# set number of permanent parameters
#
test_109_set_params $FSNAME
- combined_mgs_mds || umount_mgs_client
umount_client $MOUNT || error "umount_client failed"
stop_ost || error "stop_ost failed"
stop_mds || error "stop_mds failed"
error "failed to clear client config"
setup_noconfig
- combined_mgs_mds || mount_mgs_client
#
# check that configurations are intact
#
#
destroy_test_pools || error "destroy test pools failed"
- combined_mgs_mds || umount_mgs_client
cleanup
}
run_test 109b "test lctl clear_conf one config"
trap 0
stopall
rm -f $TMP/$tdir/lustre-mdt
- reformat_and_config
}
test_115() {
skip "Only applicable to ldiskfs-based MDTs"
fi
+ local dbfs_ver=$(do_facet $SINGLEMDS $DEBUGFS -V 2>&1)
+
+ echo "debugfs version: $dbfs_ver"
+ echo "$dbfs_ver" | egrep -w "1.44.3.wc1|1.44.5.wc1|1.45.2.wc1" &&
+ skip_env "This version of debugfs doesn't show inode number"
+
+ is_dm_flakey_dev $SINGLEMDS $(mdsdevname 1) &&
+ skip "This test can not be executed on flakey dev"
+
IMAGESIZE=$((3072 << 30)) # 3072 GiB
stopall
local mdsdev=$(do_facet $SINGLEMDS "losetup -f")
do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname"
- local mds_opts="$(mkfs_opts mds1 ${mdsdev}) --device-size=$IMAGESIZE \
- --mkfsoptions='-O lazy_itable_init,ea_inode,^resize_inode,meta_bg \
- -i 1024'"
+ local mds_opts="$(mkfs_opts mds1 $(mdsdevname 1)) --device-size=$IMAGESIZE \
+ --mkfsoptions='-O ea_inode,^resize_inode,meta_bg \
+ -N 2247484000 -E lazy_itable_init'"
add mds1 $mds_opts --mgs --reformat $mdsdev ||
skip_env "format large MDT failed"
- add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \
- --reformat $(ostdevname 1) $(ostvdevname 1)
-
- start $SINGLEMDS ${mdsdev} $MDS_MOUNT_OPTS || error "start MDS failed"
+ opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+ $replace --reformat $(ostdevname 1) $(ostvdevname 1)"
+ add ost1 $opts || error "add ost1 failed with new params"
+ start $SINGLEMDS $mdsdev $MDS_MOUNT_OPTS || error "start MDS failed"
start_ost || error "start OSS failed"
mount_client $MOUNT || error "mount client failed"
mkdir -p $DIR/$tdir || error "mkdir $DIR/$tdir fail"
- for goal in $(do_facet $SINGLEMDS "ls /sys/fs/ldiskfs/*/inode_goal"); do
- do_facet $SINGLEMDS "echo 2147483947 >> $goal; grep . $goal"
- done
-
+ goal="/sys/fs/ldiskfs/$(basename $mdsdev)/inode_goal"
+echo goal: $goal
+ # 2147483648 is 0x80000000
+ do_facet $SINGLEMDS "echo 2147483648 >> $goal; grep . $goal"
touch $DIR/$tdir/$tfile
- # Add > 5k bytes to xattr
- for i in {1..30}; do
- ln $DIR/$tdir/$tfile $DIR/$tdir/$(printf "link%0250d" $i) ||
- error "Can't make link"
+ # attrs from 1 to 15 go to block, 16th - to inode
+ for i in {1..16}; do
+ local nm="trusted.ea$i"
+ setfattr -n $nm -v $(printf "xattr%0250d" $i) $DIR/$tdir/$tfile
done
- sync; sleep 5; sync
-
+ # inode <2147483649> trusted.ea16 (255)
local inode_num=$(do_facet $SINGLEMDS \
- "$DEBUGFS -c -R 'stat ROOT/$tdir/$tfile' $mdsimgname" |
- awk '/link =/ { print $4 }' |
+ "$DEBUGFS -c -R 'stat ROOT/$tdir/$tfile' $mdsdev" |
+ awk '/ea16/ { print $2 }' |
sed -e 's/>//' -e 's/<//' -e 's/\"//')
echo "inode num: $inode_num"
- [ $inode_num -ge 2147483947 ] || error "inode $inode_num too small"
+ [ $inode_num -ge 2147483648 ] || error "inode $inode_num too small"
do_facet $SINGLEMDS "losetup -d $mdsdev"
cleanup_115
}