ONLY=${ONLY:-"$*"}
-# bug number for skipped test: LU-7005
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 50i"
+# bug number for skipped test: LU-7428
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 84"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
is_sles11() # LU-2181
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
# use small MDS + OST size to speed formatting time
-# do not use too small MDSSIZE/OSTSIZE, which affect the default jouranl size
+# do not use too small MDSSIZE/OSTSIZE, which affect the default journal size
# STORED_MDSSIZE is used in test_18
STORED_MDSSIZE=$MDSSIZE
STORED_OSTSIZE=$OSTSIZE
#
require_dsh_mds || exit 0
require_dsh_ost || exit 0
-#
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45 69"
+
+# 8 22 (min)"
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="45 69"
assert_DIR
}
umount_client() {
- local MOUNTPATH=$1
- echo "umount lustre on ${MOUNTPATH}....."
- zconf_umount $(hostname) $MOUNTPATH || return 97
+ local mountpath=$1
+ shift
+ echo "umount lustre on $mountpath....."
+ zconf_umount $HOSTNAME $mountpath $@ || return 97
}
manual_umount_client(){
local rc
local FORCE=$1
echo "manual umount lustre on ${MOUNT}...."
- do_facet client "umount -d ${FORCE} $MOUNT"
+ do_facet client "umount ${FORCE} $MOUNT"
rc=$?
return $rc
}
setup
touch $DIR/$tfile || error "touch $DIR/$tfile failed"
stop_ost || error "Unable to stop OST1"
- cleanup
+ umount_client $MOUNT -f || error "unmount $MOUNT failed"
+ cleanup_nocli
eno=$?
# ok for ost to fail shutdown
if [ 202 -ne $eno ] && [ 0 -ne $eno ]; then
# cleanup may return an error from the failed
# disconnects; for now I'll consider this successful
# if all the modules have unloaded.
- umount -d $MOUNT &
+ $UMOUNT -f $MOUNT &
UMOUNT_PID=$!
sleep 6
echo "killing umount"
start_mds || error "MDS start failed"
stop_ost || error "Unable to stop OST1"
mount_client $MOUNT || error "mount_client $MOUNT failed"
- cleanup || error "cleanup_nocli failed with $?"
+ umount_client $MOUNT -f || error "umount_client $MOUNT failed"
+ cleanup_nocli || error "cleanup_nocli failed with $?"
grep " $MOUNT " /etc/mtab &&
error "$MOUNT entry in mtab after unmount"
pass
# check_mount will block trying to contact ost
mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
rm -f $DIR/$tfile || error "remove $DIR/$tfile failed"
- umount_client $MOUNT
+ umount_client $MOUNT -f
pass
echo "Client mount with a running ost"
T32_QID=60000
-T32_BLIMIT=20480 # Kbytes
-T32_ILIMIT=2
+T32_BLIMIT=40960 # Kbytes
+T32_ILIMIT=4
#
# This is not really a test but a tool to create new disk
local dst=.
local src=/etc/rc.d
local tmp=$TMP/t32_image_create
+ local server_version=$(lustre_version_code $SINGLEMDS)
+ local remote_dir
+ local striped_dir
+ local pushd_dir
if [ $FSNAME != t32fs -o \( -z "$MDSDEV" -a -z "$MDSDEV1" \) -o \
$OSTCOUNT -ne 1 -o -z "$OSTDEV1" ]; then
mkdir $tmp/src || return 1
tar cf - -C $src . | tar xf - -C $tmp/src
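+ # size the quota-owned test file to a quarter of the block quota
+ # (T32_BLIMIT of 40960 KB / 1024 / 4 = 10 MB at bs=1M)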
dd if=/dev/zero of=$tmp/src/t32_qf_old bs=1M \
- count=$(($T32_BLIMIT / 1024 / 2))
+ count=$(($T32_BLIMIT / 1024 / 4))
chown $T32_QID.$T32_QID $tmp/src/t32_qf_old
# format ost with comma-separated NIDs to verify LU-4460
setupall
- [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] &&
+ [[ $server_version -ge $(version_code 2.3.50) ]] ||
$LFS quotacheck -ug /mnt/$FSNAME
$LFS setquota -u $T32_QID -b 0 -B $T32_BLIMIT -i 0 -I $T32_ILIMIT \
/mnt/$FSNAME
tar cf - -C $tmp/src . | tar xf - -C /mnt/$FSNAME
+
+ if [[ $MDSCOUNT -ge 2 ]]; then
+ remote_dir=/mnt/$FSNAME/remote_dir
+ $LFS mkdir -i 1 $remote_dir
+ tar cf - -C $tmp/src . | tar xf - -C $remote_dir
+
+ if [[ $server_version -ge $(version_code 2.7.0) ]]; then
+ striped_dir=/mnt/$FSNAME/striped_dir_old
+ $LFS mkdir -i 1 -c 2 $striped_dir
+ tar cf - -C $tmp/src . | tar xf - -C $striped_dir
+ fi
+ fi
+
stopall
mkdir $tmp/img || return 1
setupall
- pushd /mnt/$FSNAME
+
+ pushd_dir=/mnt/$FSNAME
+ if [[ $MDSCOUNT -ge 2 ]]; then
+ pushd_dir=$remote_dir
+ if [[ $server_version -ge $(version_code 2.7.0) ]]; then
+ pushd $striped_dir
+ ls -Rni --time-style=+%s >$tmp/img/list2
+ popd
+ fi
+ fi
+
+ pushd $pushd_dir
ls -Rni --time-style=+%s >$tmp/img/list
find ! -name .lustre -type f -exec sha1sum {} \; |
sort -k 2 >$tmp/img/sha1sums
$LCTL get_param -n version | head -n 1 |
sed -e 's/^lustre: *//' >$tmp/img/commit
- [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] &&
+ [[ $server_version -ge $(version_code 2.3.50) ]] ||
$LFS quotaon -ug /mnt/$FSNAME
$LFS quota -u $T32_QID -v /mnt/$FSNAME
$LFS quota -v -u $T32_QID /mnt/$FSNAME |
awk 'BEGIN { num='5' } { if ($1 == "'/mnt/$FSNAME'") \
{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
| tr -d "*" > $tmp/img/ispace
-
- if [ $MDSCOUNT -ge 2 ]; then
- $LFS mkdir -i 1 /mnt/$FSNAME/remote_dir
- tar cf - -C $tmp/src . | tar xf - -C /mnt/$FSNAME/remote_dir
- fi
+ echo $T32_BLIMIT > $tmp/img/blimit
+ echo $T32_ILIMIT > $tmp/img/ilimit
stopall
mv ${MDSDEV1:-$MDSDEV} $tmp/img
for num in $(seq 2 $MDSCOUNT); do
local devname=$(mdsdevname $num)
+ local facet=mds$num
+ [[ $(facet_fstype $facet) != zfs ]] ||
+ devname=$(mdsvdevname $num)
mv $devname $tmp/img
done
mv $OSTDEV1 $tmp/img
t32_test_cleanup() {
local tmp=$TMP/t32
- local fstype=$(facet_fstype $SINGLEMDS)
+ local facet=$SINGLEMDS
+ local fstype=$(facet_fstype $facet)
local rc=$?
if $shall_cleanup_lustre; then
umount $tmp/mnt/lustre || rc=$?
fi
if $shall_cleanup_mdt; then
- $r umount -d $tmp/mnt/mdt || rc=$?
+ $r $UMOUNT $tmp/mnt/mdt || rc=$?
fi
if $shall_cleanup_mdt1; then
- $r umount -d $tmp/mnt/mdt1 || rc=$?
+ $r $UMOUNT $tmp/mnt/mdt1 || rc=$?
fi
if $shall_cleanup_ost; then
- $r umount -d $tmp/mnt/ost || rc=$?
+ $r $UMOUNT $tmp/mnt/ost || rc=$?
fi
$r rm -rf $tmp
rm -rf $tmp
- if [ $fstype == "zfs" ]; then
- $r $ZPOOL destroy t32fs-mdt1 || rc=$?
- $r $ZPOOL destroy t32fs-ost1 || rc=$?
+ if [[ $fstype == zfs ]]; then
+ local poolname
+ local poolname_list="t32fs-mdt1 t32fs-ost1"
+
+ ! $mdt2_is_available || poolname_list+=" t32fs-mdt2"
+
+ for poolname in $poolname_list; do
+ destroy_zpool $facet $poolname
+ done
fi
return $rc
}
awk 'BEGIN { num='3' } { if ($1 == "'$mnt'") \
{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
| tr -d "*")
- [ $qval -eq $T32_BLIMIT ] || {
- echo "blimit, act:$qval, exp:$T32_BLIMIT"
+ [ $qval -eq $img_blimit ] || {
+ echo "blimit, act:$qval, exp:$img_blimit"
return 1
}
awk 'BEGIN { num='7' } { if ($1 == "'$mnt'") \
{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
| tr -d "*")
- [ $qval -eq $T32_ILIMIT ] || {
- echo "ilimit, act:$qval, exp:$T32_ILIMIT"
+ [ $qval -eq $img_ilimit ] || {
+ echo "ilimit, act:$qval, exp:$img_ilimit"
return 1
}
chmod 0777 $mnt
runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \
- bs=1M count=$(($T32_BLIMIT / 1024)) oflag=sync && {
+ bs=1M count=$((img_blimit / 1024)) oflag=sync && {
echo "Write succeed, but expect -EDQUOT"
return 1
}
rm -f $mnt/t32_qf_new
runas -u $T32_QID -g $T32_QID createmany -m $mnt/t32_qf_ \
- $T32_ILIMIT && {
+ $img_ilimit && {
echo "Create succeed, but expect -EDQUOT"
return 1
}
- unlinkmany $mnt/t32_qf_ $T32_ILIMIT
+ unlinkmany $mnt/t32_qf_ $img_ilimit
return 0
}
local shall_cleanup_mdt1=false
local shall_cleanup_ost=false
local shall_cleanup_lustre=false
+ local mdt2_is_available=false
local node=$(facet_active_host $SINGLEMDS)
local r="do_node $node"
local node2=$(facet_active_host mds2)
local img_arch
local img_bspace
local img_ispace
+ local img_blimit
+ local img_ilimit
local fsname=t32fs
local nid=$($r $LCTL list_nids | head -1)
local mopts
local mdt2_dev=$tmp/mdt2
local ost_dev=$tmp/ost
local stripe_index
+ local stripe_count
local dir
trap 'trap - RETURN; t32_test_cleanup' RETURN
img_arch=$($r cat $tmp/arch)
img_bspace=$($r cat $tmp/bspace)
img_ispace=$($r cat $tmp/ispace)
+
+ # older images did not have "blimit" and "ilimit" files
+ # use old values for T32_BLIMIT and T32_ILIMIT
+ $r test -f $tmp/blimit && img_blimit=$($r cat $tmp/blimit) ||
+ img_blimit=20480
+ $r test -f $tmp/ilimit && img_ilimit=$($r cat $tmp/ilimit) ||
+ img_ilimit=2
+
echo "Upgrading from $(basename $tarball), created with:"
echo " Commit: $img_commit"
echo " Kernel: $img_kernel"
$(lustre_version_code ost1) -lt $(version_code 2.5.0) ] &&
ff_convert="no"
- if [ $fstype == "zfs" ]; then
+ ! $r test -f $mdt2_dev || mdt2_is_available=true
+
+ if [[ $fstype == zfs ]]; then
# import pool first
- $r $ZPOOL import -f -d $tmp t32fs-mdt1
- $r $ZPOOL import -f -d $tmp t32fs-ost1
+ local poolname
+ local poolname_list="t32fs-mdt1 t32fs-ost1"
+
+ ! $mdt2_is_available || poolname_list+=" t32fs-mdt2"
+
+ for poolname in $poolname_list; do
+ $r "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ $ZPOOL import -f -d $tmp $poolname"
+ done
+
mdt_dev=t32fs-mdt1/mdt1
ost_dev=t32fs-ost1/ost1
+ ! $mdt2_is_available || mdt2_dev=t32fs-mdt2/mdt2
wait_update_facet $SINGLEMDS "$ZPOOL list |
awk '/^t32fs-mdt1/ { print \\\$1 }'" "t32fs-mdt1" || {
error_noexit "import zfs pool failed"
return 1
}
- if $r test -f $mdt2_dev; then
+ if $mdt2_is_available; then
$r $TUNEFS --dryrun $mdt2_dev || {
$r losetup -a
error_noexit "tunefs.lustre before mounting the MDT"
error_noexit "Enable mdt quota feature"
return 1
}
- if $r test -f $mdt2_dev; then
+ if $mdt2_is_available; then
$r $TUNEFS --quota $mdt2_dev || {
$r losetup -a
error_noexit "Enable mdt quota feature"
$r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt
$r $LCTL replace_nids $fsname-OST0000 $ostnid
$r $LCTL replace_nids $fsname-MDT0000 $nid
- $r umount -d $tmp/mnt/mdt
+ $r $UMOUNT $tmp/mnt/mdt
fi
mopts=exclude=$fsname-OST0000
}
shall_cleanup_mdt=true
- if $r test -f $mdt2_dev; then
+ if $mdt2_is_available; then
mopts=mgsnode=$nid,$mopts
$r $MOUNT_CMD -o $mopts $mdt2_dev $tmp/mnt/mdt1 || {
$r losetup -a
return 1
}
+ [[ $(facet_fstype mds1) != zfs ]] || import_zpool fs2mds
+
$r $TUNEFS --dryrun $fs2mdsdev || {
error_noexit "tunefs.lustre before mounting the MDT"
return 1
return 1
}
+ if $r test -f $tmp/list; then
+ #
+ # There is no Test Framework API to copy files to or
+ # from a remote node.
+ #
+ # LU-2393 - do both sorts on same node to ensure locale
+ # is identical
+ local list_file=$tmp/list
+
+ if $mdt2_is_available; then
+ if [[ -d $tmp/mnt/lustre/striped_dir_old ]] &&
+ $r test -f $tmp/list2; then
+ list_file=$tmp/list2
+ pushd $tmp/mnt/lustre/striped_dir_old
+ else
+ pushd $tmp/mnt/lustre/remote_dir
+ fi
+ else
+ pushd $tmp/mnt/lustre
+ fi
+ $r cat $list_file | sort -k 6 >$tmp/list.orig
+ ls -Rni --time-style=+%s | sort -k 6 |
+ sed 's/\. / /' >$tmp/list || {
+ error_noexit "ls"
+ return 1
+ }
+ popd
+ #
+ # 32-bit and 64-bit clients use different algorithms to
+ # convert FIDs into inode numbers. Hence, remove the
+ # inode numbers from the lists, if the original list was
+ # created on an architecture with different number of
+ # bits per "long".
+ #
+ if [ $(t32_bits_per_long $(uname -m)) != \
+ $(t32_bits_per_long $img_arch) ]; then
+ echo "Different number of bits per \"long\"" \
+ "from the disk image"
+ for list in list.orig list; do
+ sed -i -e 's/^[0-9]\+[ \t]\+//' \
+ $tmp/$list
+ done
+ fi
+ if ! diff -ub $tmp/list.orig $tmp/list; then
+ error_noexit "list verification failed"
+ return 1
+ fi
+ else
+ echo "list verification skipped"
+ fi
+
if [ "$dne_upgrade" != "no" ]; then
$LFS mkdir -i 1 -c2 $tmp/mnt/lustre/striped_dir || {
- error_noexit "set remote dir failed"
+ error_noexit "set striped dir failed"
return 1
}
pushd $tmp/mnt/lustre
tar -cf - . --exclude=./striped_dir \
+ --exclude=./striped_dir_old \
--exclude=./remote_dir |
tar -xvf - -C striped_dir 1>/dev/null || {
- error_noexit "cp to remote dir failed"
+ error_noexit "cp to striped dir failed"
return 1
}
popd
# If upgrading from DNE (2.5), rename the remote dir created
# in 2.5 to the striped dir.
- if $r test -f $mdt2_dev; then
- stripe_index=$(LFS getdirstripe -i \
+ if $mdt2_is_available && [[ "$dne_upgrade" != "no" ]]; then
+ stripe_index=$($LFS getdirstripe -i \
$tmp/mnt/lustre/remote_dir)
- [ $stripe_index -eq 1 ] || {
- error_noexit "get index $striped_index failed"
+
+ [[ $stripe_index -eq 1 ]] || {
+ error_noexit "get index \"$stripe_index\"" \
+ "from remote dir failed"
return 1
}
mv $tmp/mnt/lustre/remote_dir \
$tmp/mnt/lustre/striped_dir/ || {
- error_noexit "mv failed"
+ error_noexit "mv remote dir failed"
return 1
}
fi
+ # If upgrading from DNE (2.7), move the striped dir created
+ # in 2.7 into the new striped dir.
+ if $mdt2_is_available && [[ "$dne_upgrade" != "no" ]] &&
+ [[ -d $tmp/mnt/lustre/striped_dir_old ]]; then
+ stripe_count=$($LFS getdirstripe -c \
+ $tmp/mnt/lustre/striped_dir_old)
+ [[ $stripe_count -eq 2 ]] || {
+ error_noexit "get count $stripe_count" \
+ "from striped dir failed"
+ return 1
+ }
+ mv $tmp/mnt/lustre/striped_dir_old \
+ $tmp/mnt/lustre/striped_dir/ || {
+ error_noexit "mv striped dir failed"
+ return 1
+ }
+ fi
+
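+ # flush client caches and force the server OSDs to commit all
+ # outstanding changes before writing more data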
+ sync; sleep 5; sync
+ $r $LCTL set_param -n osd*.*.force_sync=1
dd if=/dev/zero of=$tmp/mnt/lustre/tmp_file bs=10k count=10 || {
error_noexit "dd failed"
return 1
pushd $tmp/mnt/lustre
fi
- find ! -path "*remote_dir*" ! -name .lustre -type f \
- -exec sha1sum {} \; |
+ find ! -path "*remote_dir*" ! -path "*striped_dir*" \
+ ! -name .lustre -type f -exec sha1sum {} \; |
sort -k 2 >$tmp/sha1sums || {
popd
error_noexit "sha1sum"
return 1
fi
- # if upgrade from DNE (2.5), then check remote directory
- if $r test -f $mdt2_dev; then
- pushd $tmp/mnt/lustre/striped_dir/remote_dir
- find ! -name .lustre -type f \
- -exec sha1sum {} \; |
- sort -k 2 >$tmp/sha1sums || {
+ # if upgrading from DNE (2.5), check the remote directory
+ # if upgrading from DNE (2.7), check the striped directory
+ if $mdt2_is_available &&
+ [[ "$dne_upgrade" != "no" ]]; then
+ local new_dir="$tmp/mnt/lustre/striped_dir"
+ local striped_dir_old="$new_dir/striped_dir_old"
+
+ local dir_list="$new_dir/remote_dir"
+ [[ ! -d $striped_dir_old ]] ||
+ dir_list+=" $striped_dir_old"
+
+ for dir in $dir_list; do
+ pushd $dir
+ find ! -name .lustre -type f \
+ -exec sha1sum {} \; |
+ sort -k 2 >$tmp/sha1sums || {
+ popd
+ error_noexit "sha1sum"
+ return 1
+ }
popd
- error_noexit "sha1sum"
- return 1
- }
- popd
- if ! diff -ub $tmp/sha1sums.orig \
- $tmp/sha1sums; then
- error_noexit "sha1sum dne failed"
- return 1
- fi
+ if ! diff -ub $tmp/sha1sums.orig \
+ $tmp/sha1sums; then
+ error_noexit "sha1sum $dir" \
+ "failed"
+ return 1
+ fi
+ done
fi
else
echo "sha1sum verification skipped"
}
fi
- if $r test -f $tmp/list; then
- #
- # There is not a Test Framework API to copy files to or
- # from a remote node.
- #
- # LU-2393 - do both sorts on same node to ensure locale
- # is identical
- $r cat $tmp/list | sort -k 6 >$tmp/list.orig
- pushd $tmp/mnt/lustre
- ls -Rni --time-style=+%s | sort -k 6 >$tmp/list || {
- error_noexit "ls"
- return 1
- }
- popd
- #
- # 32-bit and 64-bit clients use different algorithms to
- # convert FIDs into inode numbers. Hence, remove the inode
- # numbers from the lists, if the original list was created
- # on an architecture with different number of bits per
- # "long".
- #
- if [ $(t32_bits_per_long $(uname -m)) != \
- $(t32_bits_per_long $img_arch) ]; then
- echo "Different number of bits per \"long\" from the disk image"
- for list in list.orig list; do
- sed -i -e 's/^[0-9]\+[ \t]\+//' $tmp/$list
- done
- fi
- if ! diff -ub $tmp/list.orig $tmp/list; then
- error_noexit "list verification failed"
- return 1
- fi
- else
- echo "list verification skipped"
- fi
-
# migrate files/dirs to remote MDT, then move them back
if [ $(lustre_version_code mds1) -ge $(version_code 2.7.50) -a \
$dne_upgrade != "no" ]; then
}
shall_cleanup_lustre=false
else
- if [ "$dne_upgrade" != "no" ]; then
- $r umount -d $tmp/mnt/mdt1 || {
+ if [[ "$dne_upgrade" != "no" ]] || $mdt2_is_available; then
+ $r $UMOUNT $tmp/mnt/mdt1 || {
error_noexit "Unmounting the MDT2"
return 1
}
shall_cleanup_mdt1=false
fi
- $r umount -d $tmp/mnt/mdt || {
+ $r $UMOUNT $tmp/mnt/mdt || {
error_noexit "Unmounting the MDT"
return 1
}
shall_cleanup_mdt=false
- $r umount -d $tmp/mnt/ost || {
+ $r $UMOUNT $tmp/mnt/ost || {
error_noexit "Unmounting the OST"
return 1
}
return 1
}
+ if [[ $fstype == zfs ]]; then
+ local poolname=t32fs-mdt1
+ $r "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ $ZPOOL import -f -d $tmp $poolname"
+ fi
+
# mount a second time to make sure we didn't leave the upgrade flag on
$r $TUNEFS --dryrun $mdt_dev || {
$r losetup -a
cp /etc/hosts $MOUNT2/ || error "copy /etc/hosts $MOUNT2/ failed"
$GETSTRIPE $MOUNT2/hosts || error "$GETSTRIPE $MOUNT2/hosts failed"
- umount -d $MOUNT2
+ umount $MOUNT2
stop fs2ost -f
stop fs2mds -f
cleanup_nocli || error "cleanup_nocli failed with $?"
rc=3
fi
- umount -d $MOUNT2
+ $UMOUNT $MOUNT2
stop fs3ost -f || error "unable to stop OST3"
stop fs2ost -f || error "unable to stop OST2"
stop fs2mds -f || error "unable to stop second MDS"
echo mount_op=$mount_op
- do_facet $SINGLEMDS "umount -d $mntpt && rm -f $mdsdev_sym"
+ do_facet $SINGLEMDS "$UMOUNT $mntpt && rm -f $mdsdev_sym"
if $(echo $mount_op | grep -q "unable to set tunable"); then
error "set tunables failed for symlink device"
stop ost1 -f || error "unable to stop OST1"
stop_mds || error "Unable to stop MDS"
stop_mds || error "Unable to stop MDS on second try"
- unload_modules_conf || error "unload_modules_conf failed"
}
run_test 41a "mount mds with --nosvc and --nomgs"
cleanup
# MDT concurrent start
- #define OBD_FAIL_TGT_DELAY_CONNECT 0x703
- do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x703"
+ #define OBD_FAIL_TGT_MOUNT_RACE 0x716
+ do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x716"
start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
local pid=$!
- sleep 2
- do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x0"
start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
+ do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x0"
local pid2=$!
wait $pid2
local rc2=$?
wait $pid
local rc=$?
- if [ $rc == 0 ] && [ $rc2 == 114 ]; then
+ if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then
echo "1st MDT start succeed"
- echo "2nd MDT start failed with EALREADY"
- elif [ $rc2 == 0 ] && [ $rc == 114 ]; then
- echo "1st MDT start failed with EALREADY"
+ echo "2nd MDT start failed with $rc2"
+ elif [ $rc2 -eq 0 ] && [ $rc -ne 0 ]; then
+ echo "1st MDT start failed with $rc"
echo "2nd MDT start succeed"
else
stop mds1 -f
# OST concurrent start
- #define OBD_FAIL_TGT_DELAY_CONNECT 0x703
- do_facet ost1 "$LCTL set_param fail_loc=0x703"
+ #define OBD_FAIL_TGT_MOUNT_RACE 0x716
+ do_facet ost1 "$LCTL set_param fail_loc=0x716"
start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
pid=$!
- sleep 2
- do_facet ost1 "$LCTL set_param fail_loc=0x0"
start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
+ do_facet ost1 "$LCTL set_param fail_loc=0x0"
pid2=$!
wait $pid2
rc2=$?
wait $pid
rc=$?
- if [ $rc == 0 ] && [ $rc2 == 114 ]; then
+ if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then
echo "1st OST start succeed"
- echo "2nd OST start failed with EALREADY"
- elif [ $rc2 == 0 ] && [ $rc == 114 ]; then
- echo "1st OST start failed with EALREADY"
+ echo "2nd OST start failed with $rc2"
+ elif [ $rc2 -eq 0 ] && [ $rc -ne 0 ]; then
+ echo "1st OST start failed with $rc"
echo "2nd OST start succeed"
else
stop_mds -f
do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
cleanup || error "stopping $FSNAME failed with invalid sys param"
- load_modules
setup
check_mount || error "client was not mounted with invalid sys param"
cleanup || error "stopping $FSNAME failed with invalid sys param"
}
run_test 42 "allow client/server mount/unmount with invalid config param"
-test_43() {
+test_43a() {
[[ $(lustre_version_code mgs) -ge $(version_code 2.5.58) ]] ||
{ skip "Need MDS version at least 2.5.58" && return 0; }
[ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
ID1=${ID1:-501}
- USER1=$(cat /etc/passwd | grep :$ID1:$ID1: | cut -d: -f1)
+ USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
[ -z "$USER1" ] && skip_env "missing user with uid=$ID1 gid=$ID1" &&
return
touch $DIR/$tdir-rootdir/tfile-2 ||
error "$ST: root create permission is denied"
echo "$ST: root create permission is granted - ok"
+ cleanup || error "cleanup failed with $?"
}
-run_test 43 "check root_squash and nosquash_nids"
+run_test 43a "check root_squash and nosquash_nids"
+
+test_43b() { # LU-5690
+ [[ $(lustre_version_code mgs) -ge $(version_code 2.7.62) ]] ||
+ { skip "Need MGS version 2.7.62+"; return; }
+
+ if [[ -z "$fs2mds_DEV" ]]; then
+ is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) &&
+ skip_env "mixed loopback and real device not working" && return
+ fi
+
+ local fs2mdsdev=$(mdsdevname 1_2)
+ local fs2mdsvdev=$(mdsvdevname 1_2)
+
+ # temporarily use fs2mds as fs2mgs
+ local fs2mgs=fs2mds
+ local fs2mgsdev=$fs2mdsdev
+ local fs2mgsvdev=$fs2mdsvdev
+
+ local fsname=test1234
+
+ load_module llite/lustre
+ local client_ip=$(host_nids_address $HOSTNAME $NETTYPE)
+ local host=${client_ip//*./}
+ local net=${client_ip/%$host/}
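+ # build a nosquash_nids value whose expr_list contains commas
+ # (e.g. a hypothetical 10.2.3.[4,4,4]@tcp) to exercise the
+ # LU-5690 parser fix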
+ local nosquash_nids=$(h2$NETTYPE $net[$host,$host,$host])
+
+ add $fs2mgs $(mkfs_opts mgs $fs2mgsdev) --fsname=$fsname \
+ --param mdt.root_squash=$RUNAS_ID:$RUNAS_ID \
+ --param mdt.nosquash_nids=$nosquash_nids \
+ --reformat $fs2mgsdev $fs2mgsvdev || error "add fs2mgs failed"
+ start $fs2mgs $fs2mgsdev $MGS_MOUNT_OPTS || error "start fs2mgs failed"
+ stop $fs2mgs -f || error "stop fs2mgs failed"
+}
+run_test 43b "parse nosquash_nids with commas in expr_list"
umount_client $MOUNT
cleanup_nocli
df -h $MOUNT &
log "sleep 60 sec"
sleep 60
- #define OBD_FAIL_PTLRPC_LONG_UNLINK 0x50f
- do_facet client "$LCTL set_param fail_loc=0x50f"
+#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f
+ do_facet client "$LCTL set_param fail_loc=0x50f fail_val=0"
log "sleep 10 sec"
sleep 10
manual_umount_client --force || error "manual_umount_client failed"
stop_ost2 || error "Unable to stop OST2"
fi
- umount_client $MOUNT || error "Unable to unmount client"
+ umount_client $MOUNT -f || error "Unable to unmount client"
stop_ost || error "Unable to stop OST1"
stop_mds || error "Unable to stop MDS"
#writeconf to remove all ost2 traces for subsequent tests
[ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return
[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
- load_modules
do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" ||
error "tunefs MDT2 failed"
start_mds || error "Unable to start MDT"
"$TEST" "${FSNAME}-MDT0001.mdc.active" 1 ||
error "Unable to activate MDT2"
+ wait_clients_import_state ${CLIENTS:-$HOSTNAME} mds2 FULL
+ if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.60) ]
+ then
+ wait_dne_interconnect
+ fi
$LFS mkdir -i1 $DIR/$tdir/2 || error "mkdir $DIR/$tdir/2 failed"
# create some file
createmany -o $DIR/$tdir/2/$tfile-%d 1 || error "create files failed"
start_ost2 || error "Unable to start OST1"
wait $pid
stop_ost2 || error "Unable to stop OST1"
- cleanup || error "cleanup failed with $?"
+ umount_client $MOUNT -f || error "unmount $MOUNT failed"
+ cleanup_nocli || error "stop server failed"
#writeconf to remove all ost2 traces for subsequent tests
writeconf_or_reformat
}
echo
# backup files
- echo backup files to $TMP/files
+ echo backup files to $TMP/$tdir
local files=$(find $DIR/$tdir -type f -newer $TMP/modified_first)
- copy_files_xattrs $(hostname) $TMP/files $TMP/file_xattrs $files ||
+ copy_files_xattrs $(hostname) $TMP/$tdir $TMP/file_xattrs $files ||
error "Unable to copy files"
umount_client $MOUNT || error "Unable to umount client"
do_node $ost1node 'mv '$objects' '${ost1mnt}'/lost+found'
[ $? -eq 0 ] || { error "Unable to move objects"; return 14; }
- # recover objects dry-run
- if [ $(lustre_version_code ost1) -ge $(version_code 2.5.56) ]; then
- echo "ll_recover_lost_found_objs dry_run"
- do_node $ost1node \
- "ll_recover_lost_found_objs -n -d $ost1mnt/O" ||
- error "ll_recover_lost_found_objs failed"
- fi
-
- # recover objects
- echo "ll_recover_lost_found_objs fix run"
- do_node $ost1node "ll_recover_lost_found_objs -d $ost1mnt/lost+found" ||
- error "ll_recover_lost_found_objs failed"
-
- # compare restored objects against saved ones
- diff_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs $objects
- [ $? -eq 0 ] || error "Unable to diff objects"
-
do_node $ost1node "umount $ost1mnt" ||
error "Unable to umount ost1 as ldiskfs"
start_ost || error "Unable to start OST1"
mount_client $MOUNT || error "Unable to mount client"
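+ # lf_repa[ri]*ed matches the repaired-entries counter from the
+ # OI scrub stats regardless of how it is spelled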
+ local REPAIRED=$(do_node $ost1node "$LCTL get_param \
+ -n osd-ldiskfs.$FSNAME-OST0000.oi_scrub" |
+ awk '/^lf_repa[ri]*ed/ { print $2 }')
+ [ $REPAIRED -gt 0 ] ||
+ error "Some entry under /lost+found should be repaired"
+
# compare files
- diff_files_xattrs $(hostname) $TMP/files $TMP/file_xattrs $files ||
+ diff_files_xattrs $(hostname) $TMP/$tdir $TMP/file_xattrs $files ||
error "Unable to diff files"
- rm -rf $TMP/files $TMP/file_xattrs ||
+ rm -rf $TMP/$tdir $TMP/file_xattrs ||
error "Unable to delete temporary files"
do_node $ost1node "rm -rf $ost1tmp" ||
error "Unable to delete temporary files"
local newvalue="${opts}=$(expr $basethr \* $ncpts)"
setmodopts -a $modname "$newvalue" oldvalue
- load_modules
setup
check_mount || return 41
return $?
cleanup
- load_modules
setup
}
local large_value="$(generate_string $(max_xattr_size))"
local small_value="bar"
- local name="trusted.big"
- log "save large xattr $name on $file"
- setfattr -n $name -v $large_value $file ||
- error "saving $name on $file failed"
+ local name="trusted.big"
+ log "save large xattr $name on $file"
+ setfattr -n $name -v $large_value $file ||
+ error "saving $name on $file failed"
+
+ local new_value=$(get_xattr_value $name $file)
+ [[ "$new_value" != "$large_value" ]] &&
+ error "$name different after saving"
- local new_value=$(get_xattr_value $name $file)
- [[ "$new_value" != "$large_value" ]] &&
- error "$name different after saving"
+ log "shrink value of $name on $file"
+ setfattr -n $name -v $small_value $file ||
+ error "shrinking value of $name on $file failed"
- log "shrink value of $name on $file"
- setfattr -n $name -v $small_value $file ||
- error "shrinking value of $name on $file failed"
+ new_value=$(get_xattr_value $name $file)
+ [[ "$new_value" != "$small_value" ]] &&
+ error "$name different after shrinking"
- new_value=$(get_xattr_value $name $file)
- [[ "$new_value" != "$small_value" ]] &&
- error "$name different after shrinking"
+ log "grow value of $name on $file"
+ setfattr -n $name -v $large_value $file ||
+ error "growing value of $name on $file failed"
- log "grow value of $name on $file"
- setfattr -n $name -v $large_value $file ||
- error "growing value of $name on $file failed"
+ new_value=$(get_xattr_value $name $file)
+ [[ "$new_value" != "$large_value" ]] &&
+ error "$name different after growing"
- new_value=$(get_xattr_value $name $file)
- [[ "$new_value" != "$large_value" ]] &&
- error "$name different after growing"
+ log "check value of $name on $file after remounting MDS"
+ fail $SINGLEMDS
+ new_value=$(get_xattr_value $name $file)
+ [[ "$new_value" != "$large_value" ]] &&
+ error "$name different after remounting MDS"
- log "check value of $name on $file after remounting MDS"
- fail $SINGLEMDS
- new_value=$(get_xattr_value $name $file)
- [[ "$new_value" != "$large_value" ]] &&
- error "$name different after remounting MDS"
+ log "remove large xattr $name from $file"
+ setfattr -x $name $file || error "removing $name from $file failed"
- log "remove large xattr $name from $file"
- setfattr -x $name $file || error "removing $name from $file failed"
+ if $lxattr; then
+ stopall || error "stopping for e2fsck run"
+ for num in $(seq $MDSCOUNT); do
+ run_e2fsck $(facet_active_host mds$num) \
+ $(mdsdevname $num) "-y" ||
+ error "e2fsck MDT$num failed"
+ done
+ setup_noconfig || error "remounting the filesystem failed"
+ fi
- rm -f $file
- stopall
+ # need to delete this file to avoid problems in other tests
+ rm -f $file
+ stopall || error "stopping systems to turn off large_xattr"
if $lxattr; then
for num in $(seq $MDSCOUNT); do
do_facet mds${num} $TUNE2FS -O ^large_xattr \
stop_ost2 || error "Unable to stop second ost"
echo "$LFS df"
$LFS df --lazy || error "lfs df failed"
- cleanup || error "cleanup failed with $?"
+ umount_client $MOUNT -f || error "unmount $MOUNT failed"
+ cleanup_nocli || error "cleanup_nocli failed with $?"
#writeconf to remove all ost2 traces for subsequent tests
writeconf_or_reformat
}
do_facet $SINGLEMDS \
"mount -t $(facet_fstype $SINGLEMDS) $opts $devname $brpt"
do_facet $SINGLEMDS "rm -f ${brpt}/last_rcvd"
- do_facet $SINGLEMDS "umount -d $brpt"
+ do_facet $SINGLEMDS "$UMOUNT $brpt"
# restart MDS, the "last_rcvd" file should be recreated.
start_mds || error "fail to restart the MDS"
}
run_test 70d "stop MDT1, mkdir succeed, create remote dir fail"
+test_70e() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.62) ] ||
+ { skip "Need MDS version at least 2.7.62"; return 0; }
+
+ cleanup || error "cleanup failed with $?"
+
+ local mdsdev=$(mdsdevname 1)
+ local ostdev=$(ostdevname 1)
+ local mdsvdev=$(mdsvdevname 1)
+ local ostvdev=$(ostvdevname 1)
+ local opts_mds="$(mkfs_opts mds1 $mdsdev) --reformat $mdsdev $mdsvdev"
+ local opts_ost="$(mkfs_opts ost1 $ostdev) --reformat $ostdev $ostvdev"
+
+ add mds1 $opts_mds || error "add mds1 failed"
+ start_mdt 1 || error "start mdt1 failed"
+ add ost1 $opts_ost || error "add ost1 failed"
+ start_ost || error "start ost failed"
+ mount_client $MOUNT > /dev/null || error "mount client $MOUNT failed"
+
+ local soc=$(do_facet mds1 "$LCTL get_param -n \
+ mdt.*MDT0000.sync_lock_cancel")
+ [ $soc == "never" ] || error "SoC enabled on single MDS"
+
+ for i in $(seq 2 $MDSCOUNT); do
+ mdsdev=$(mdsdevname $i)
+ mdsvdev=$(mdsvdevname $i)
+ opts_mds="$(mkfs_opts mds$i $mdsdev) --reformat $mdsdev \
+ $mdsvdev"
+ add mds$i $opts_mds || error "add mds$i failed"
+ start_mdt $i || error "start mdt$i fail"
+ done
+
+ wait_dne_interconnect
+
+ for i in $(seq $MDSCOUNT); do
+ soc=$(do_facet mds$i "$LCTL get_param -n \
+ mdt.*MDT000$((i - 1)).sync_lock_cancel")
+ [ $soc == "blocking" ] || error "SoC not enabled on DNE"
+ done
+
+ for i in $(seq 2 $MDSCOUNT); do
+ stop_mdt $i || error "stop mdt$i fail"
+ done
+ soc=$(do_facet mds1 "$LCTL get_param -n \
+ mdt.*MDT0000.sync_lock_cancel")
+ [ $soc == "never" ] || error "SoC enabled on single MDS"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 70e "Sync-on-Cancel will be enabled by default on DNE"
+
test_71a() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
if combined_mgs_mds; then
for num in $(seq $MDSCOUNT); do
add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
- --reformat $(mdsdevname $num) $(mdsvdevname $num) ||
- error "add mds $num failed"
+ --reformat $(mdsdevname $num) $(mdsvdevname $num) ||
+ error "add mds $num failed"
do_facet mds${num} "$TUNE2FS -O extents $(mdsdevname $num)" ||
error "$TUNE2FS failed on mds${num}"
done
run_test 72 "test fast symlink with extents flag enabled"
test_73() { #LU-3006
- load_modules
[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
do_facet ost1 "$TUNEFS --failnode=1.2.3.4@$NETTYPE $(ostdevname 1)" ||
error "1st tunefs failed"
}
run_test 76b "verify params log setup correctly"
+test_76c() {
+ [[ $(lustre_version_code mgs) -ge $(version_code 2.8.54) ]] ||
+ { skip "Need MDS version at least 2.4.52" && return 0; }
+ setupall
+ local MASK_PARAM="mdd.*.changelog_mask"
+ echo "Change changelog_mask"
+ do_facet mgs $LCTL set_param -P $MASK_PARAM=-CLOSE ||
+ error "Can't change changlog_mask"
+ wait_update $(facet_host mds) "$LCTL get_param -n $MASK_PARAM |
+ grep 'CLOSE'" ""
+
+ echo "Check the value is stored after mds remount"
+ stop_mds || error "Failed to stop MDS"
+ start_mds || error "Failed to start MDS"
+ local CHANGELOG_MASK=$(do_facet mgs $LCTL get_param -n $MASK_PARAM)
+ echo $CHANGELOG_MASK | grep CLOSE > /dev/null &&
+ error "changelog_mask is not changed"
+
+ stopall
+}
+run_test 76c "verify changelog_mask is applied with set_param -P"
+
test_77() { # LU-3445
local server_version=$(lustre_version_code $SINGLEMDS)
skip "only applicable to ldiskfs-based MDTs and OSTs" && return
# reformat the Lustre filesystem with a smaller size
+ local saved_MDSCOUNT=$MDSCOUNT
local saved_MDSSIZE=$MDSSIZE
+ local saved_OSTCOUNT=$OSTCOUNT
local saved_OSTSIZE=$OSTSIZE
+ MDSCOUNT=1
+ OSTCOUNT=1
MDSSIZE=$((MDSSIZE - 20000))
OSTSIZE=$((OSTSIZE - 20000))
reformat || error "(1) reformat Lustre filesystem failed"
local i
local file
local num_files=100
+
mkdir $MOUNT/$tdir || error "(3) mkdir $MOUNT/$tdir failed"
+ $LFS df; $LFS df -i
for i in $(seq $num_files); do
file=$MOUNT/$tdir/$tfile-$i
- dd if=/dev/urandom of=$file count=1 bs=1M ||
+ dd if=/dev/urandom of=$file count=1 bs=1M || {
+ $LCTL get_param osc.*.cur*grant*
+ $LFS df; $LFS df -i;
+ # stop creating files if there is no more space
+ if [ ! -e $file ]; then
+ num_files=$((i - 1))
+ break
+ fi
+
+ $LFS getstripe -v $file
+ local ost_idx=$($LFS getstripe -i $file)
+ do_facet ost$((ost_idx + 1)) \
+ $LCTL get_param obdfilter.*.*grant*
error "(4) create $file failed"
+ }
done
# unmount the Lustre filesystem
# unmount and reformat the Lustre filesystem
cleanup || error "(12) cleanup Lustre filesystem failed"
combined_mgs_mds || stop_mgs || error "(13) stop mgs failed"
+
+ MDSCOUNT=$saved_MDSCOUNT
+ OSTCOUNT=$saved_OSTCOUNT
reformat || error "(14) reformat Lustre filesystem failed"
}
run_test 78 "run resize2fs on MDT and OST filesystems"
local i
local index
local ost_indices
+ local LOV_V1_INSANE_STRIPE_COUNT=65532
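+ # RANDOM tops out at 32767, so RANDOM * 2 can reach 65534; the
+ # modulo below keeps the (even) index under
+ # LOV_V1_INSANE_STRIPE_COUNT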
for i in $(seq $OSTCOUNT); do
- index=$((RANDOM * 2))
+ index=$(((RANDOM * 2) % LOV_V1_INSANE_STRIPE_COUNT))
ost_indices+=" $index"
done
ost_indices=$(comma_list $ost_indices)
local i
local index
local ost_indices
+ local LOV_V1_INSANE_STRIPE_COUNT=65532
for i in $(seq $OSTCOUNT); do
- index=$((RANDOM * 2))
+ index=$(((RANDOM * 2) % LOV_V1_INSANE_STRIPE_COUNT))
ost_indices+=" $index"
done
ost_indices=$(comma_list $ost_indices)
run_test 83 "ENOSPACE on OST doesn't cause message VFS: \
Busy inodes after unmount ..."
-recovery_time_min() {
- local CONNECTION_SWITCH_MIN=5
- local CONNECTION_SWITCH_INC=5
- local CONNECTION_SWITCH_MAX
- local RECONNECT_DELAY_MAX
- local INITIAL_CONNECT_TIMEOUT
- local max
- local TO_20
-
- #CONNECTION_SWITCH_MAX=min(50, max($CONNECTION_SWITCH_MIN,$TIMEOUT)
- (($CONNECTION_SWITCH_MIN>$TIMEOUT)) && \
- max=$CONNECTION_SWITCH_MIN || max=$TIMEOUT
- (($max<50)) && CONNECTION_SWITCH_MAX=$max || CONNECTION_SWITCH_MAX=50
-
- #INITIAL_CONNECT_TIMEOUT = max(CONNECTION_SWITCH_MIN, \
- #obd_timeout/20)
- TO_20=$(($TIMEOUT/20))
- (($CONNECTION_SWITCH_MIN>$TO_20)) && \
- INITIAL_CONNECT_TIMEOUT=$CONNECTION_SWITCH_MIN || \
- INITIAL_CONNECT_TIMEOUT=$TO_20
-
- RECONNECT_DELAY_MAX=$(($CONNECTION_SWITCH_MAX+$CONNECTION_SWITCH_INC+ \
- $INITIAL_CONNECT_TIMEOUT))
- echo $((2*$RECONNECT_DELAY_MAX))
-}
-
test_84() {
local facet=$SINGLEMDS
local num=$(echo $facet | tr -d "mds")
local time_min=$(recovery_time_min)
local recovery_duration
local completed_clients
+ local correct_clients
local wrap_up=5
echo "start mds service on $(facet_active_host $facet)"
- start $facet ${dev} $MDS_MOUNT_OPTS \
- "-o recovery_time_hard=$time_min,recovery_time_soft=$time_min" $@ ||
+ start_mds \
+ "-o recovery_time_hard=$time_min,recovery_time_soft=$time_min" $@ ||
error "start MDS failed"
- start_ost
- start_ost2
+ start_ost || error "start OST0000 failed"
+ start_ost2 || error "start OST0001 failed"
echo "recovery_time=$time_min, timeout=$TIMEOUT, wrap_up=$wrap_up"
- mount_client $MOUNT1 || error "mount failed"
- mount_client $MOUNT2 || error "mount failed"
+ mount_client $MOUNT1 || error "mount $MOUNT1 failed"
+ mount_client $MOUNT2 || error "mount $MOUNT2 failed"
+ # make sure new superblock labels are sync'd before disabling writes
+ sync_all_data
+ sleep 5
replay_barrier $SINGLEMDS
createmany -o $DIR1/$tfile-%d 1000
#define OBD_FAIL_TGT_REPLAY_DELAY 0x709 | FAIL_SKIP
do_facet $SINGLEMDS "lctl set_param fail_loc=0x20000709 fail_val=5"
- facet_failover $SINGLEMDS || error "failover: $?"
+ facet_failover --fsck $SINGLEMDS || error "failover: $?"
client_up
echo "recovery status"
completed_clients=$(do_facet $SINGLEMDS \
"$LCTL get_param -n mdt.$FSNAME-MDT0000.recovery_status" |
awk '/completed_clients/ { print $2 }')
- [ "$completed_clients" = "1/2" ] ||
- error "completed_clients != 1/2: $completed_clients"
+
+ correct_clients="$MDSCOUNT/$((MDSCOUNT+1))"
+ [ "$completed_clients" = "${correct_clients}" ] ||
+ error "$completed_clients != $correct_clients"
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
umount_client $MOUNT1
}
run_test 87 "check if MDT inode can hold EAs with N stripes properly"
+test_88() {
+ [ "$(facet_fstype mds1)" == "zfs" ] &&
+ skip "LU-6662: no implementation for ZFS" && return
+
+ load_modules
+
+ add mds1 $(mkfs_opts mds1 $(mdsdevname 1)) \
+ --reformat $(mdsdevname 1) || error "add mds1 failed"
+
+ do_facet mds1 "$TUNEFS $(mdsdevname 1) |
+ grep -e \".*opts:.*errors=remount-ro.*\"" ||
+ error "default mount options is missing"
+
+ add mds1 $(mkfs_opts mds1 $(mdsdevname 1)) \
+ --mountfsoptions="user_xattr,errors=panic" \
+ --reformat $(mdsdevname 1) || error "add mds1 failed"
+
+ do_facet mds1 "$TUNEFS $(mdsdevname 1) |
+ grep -e \".*opts:.*errors=panic.*\"" ||
+ error "user can't override default mount options"
+}
+run_test 88 "check the default mount options can be overridden"
+
# $1 test directory
# $2 (optional) value of max_mod_rpcs_in_flight to set
check_max_mod_rpcs_in_flight() {
for i in $(seq $((mmr - 1))); do
chmod 0600 $dir/file-$i &
done
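+ # give the background chmods a moment to get their modify RPCs
+ # in flight before probing with one more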
+ sleep 1
# send one additional modify RPC
do_facet $facet "$LCTL set_param fail_loc=0"
for i in $(seq $mmr); do
chmod 0666 $dir/file-$i &
done
+ sleep 1
# send one additional modify RPC
do_facet $facet "$LCTL set_param fail_loc=0"
# check this additional modify RPC blocked getting a modify RPC slot
checkstat -vp 0644 $dir/file-$((mmr + 1)) ||
- error "Unexpectedly send $mmr modify RPCs in parallel"
+ error "Unexpectedly send $(($mmr + 1)) modify RPCs in parallel"
wait
}
}
run_test 90d "check one close RPC is allowed above max_mod_rpcs_in_flight"
+check_uuid_on_ost() {
+ local nid=$1
+ do_facet ost1 "$LCTL get_param obdfilter.${FSNAME}*.exports.'$nid'.uuid"
+}
+
+check_uuid_on_mdt() {
+ local nid=$1
+ do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.'$nid'.uuid"
+}
+
+test_91() {
+ local uuid
+ local nid
+ local found
+
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.7.63) ]] ||
+ { skip "Need OST version at least 2.7.63" && return 0; }
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.63) ]] ||
+ { skip "Need MDT version at least 2.7.63" && return 0; }
+
+ start_mds || error "MDS start failed"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "client start failed"
+ check_mount || error "check_mount failed"
+
+ if remote_mds; then
+ nid=$($LCTL list_nids | head -1 | sed "s/\./\\\./g")
+ else
+ nid="0@lo"
+ fi
+ uuid=$(get_client_uuid $MOUNT)
+
+ echo "list nids on mdt:"
+ do_facet $SINGLEMDS "$LCTL list_param mdt.${FSNAME}*.exports.*"
+ echo "uuid from $nid:"
+ do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.'$nid'.uuid"
+
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid on OST"
+
+ # umount the client so it won't reconnect
+ manual_umount_client --force || error "failed to umount $?"
+ # shouldn't disappear on MDS after forced umount
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid"
+
+ echo "evict $nid"
+ do_facet $SINGLEMDS \
+ "$LCTL set_param -n mdt.${mds1_svc}.evict_client nid:$nid"
+
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on OST"
+
+ # check it didn't reconnect (being umounted)
+ sleep $((TIMEOUT+1))
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on OST"
+
+ cleanup
+}
+run_test 91 "evict-by-nid support"
+
+generate_ldev_conf() {
+ # generate an ldev.conf file
+ local ldevconfpath=$1
+ touch $ldevconfpath
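+ # each entry below is: host <tab> failover-host (or "-") <tab>
+ # label <tab> device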
+ printf "%s\t-\t%s-MGS0000\t%s\n" \
+ $mgs_HOST \
+ $FSNAME \
+ $(mgsdevname) >> $ldevconfpath
+
+ local mdsfo_host=$mdsfailover_HOST;
+ if [ -z "$mdsfo_host" ]; then
+ mdsfo_host="-"
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ printf "%s\t%s\t%s-MDT%04d\t%s\n" \
+ $mds_HOST \
+ $mdsfo_host \
+ $FSNAME \
+ $num \
+ $(mdsdevname $num) >> $ldevconfpath
+ done
+
+ local ostfo_host=$ostfailover_HOST;
+ if [ -z "$ostfo_host" ]; then
+ ostfo_host="-"
+ fi
+
+ for num in $(seq $OSTCOUNT); do
+ printf "%s\t%s\t%s-OST%04d\t%s\n" \
+ $ost_HOST \
+ $ostfo_host \
+ $FSNAME \
+ $num \
+ $(ostdevname $num) >> $ldevconfpath
+ done
+}
+
+generate_nids() {
+ # generate a nids file (mapping between hostname to nid)
+ # looks like we only have the MGS nid available to us
+ # so just echo that to a file
+ local nidspath=$1
+ touch $nidspath
+ echo -e "${mgs_HOST}\t${MGSNID}" >> $nidspath
+}
+
+test_92() {
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ echo "Host is $(hostname)"
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ echo "----- ldev.conf -----"
+ cat $LDEVCONFPATH
+ echo "--- END ldev.conf ---"
+
+ echo "----- /etc/nids -----"
+ cat $NIDSPATH
+ echo "--- END /etc/nids ---"
+
+ # ldev may be in our build tree; if we aren't in a build
+ # tree, use 'which' to try to find it
+ local LDEV=$LUSTRE/scripts/ldev
+ [ ! -f "$LDEV" ] && local LDEV=$(which ldev 2> /dev/null)
+
+ echo "ldev path is $LDEV"
+
+ if [ ! -f "$LDEV" ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "failed to find ldev!"
+ fi
+
+ # echo the mgs nid and compare it to environment variable MGSNID
+ # also, ldev.conf and nids are server-side files, so use the
+ # OSS hostname
+ local output
+ local rc
+ output=$(perl $LDEV -c $LDEVCONFPATH -H \
+ $ost_HOST -n $NIDSPATH echo %m)
+ rc=$?
+
+ echo "-- START OF LDEV OUTPUT --"
+ echo -e "$output"
+ echo "--- END OF LDEV OUTPUT ---"
+
+ # ldev failed, error
+ if [ $rc -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev failed to execute!"
+ fi
+
+ # need to process multiple lines because of combined MGS and MDS
+ echo -e $output | awk '{ print $2 }' | while read -r line ; do
+ if [ "$line" != "$MGSNID" ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev failed mgs nid '$line', expected '$MGSNID'"
+ fi
+ done
+
+ rm $LDEVCONFPATH $NIDSPATH
+}
+run_test 92 "ldev returns MGS NID correctly in command substitution"
+
+test_93() {
+ [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return
+
+ reformat
+ #start mgs or mgs/mdt0
+ if ! combined_mgs_mds ; then
+ start_mgs
+ start_mdt 1
+ else
+ start_mdt 1
+ fi
+
+ start_ost || error "OST0 start fail"
+
+ #define OBD_FAIL_MGS_WRITE_TARGET_DELAY 0x90e
+ do_facet mgs "$LCTL set_param fail_val = 10 fail_loc=0x8000090e"
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num &
+ done
+
+ mount_client $MOUNT || error "mount client fails"
+ wait_osc_import_state mds ost FULL
+ wait_osc_import_state client ost FULL
+ check_mount || error "check_mount failed"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 93 "register mulitple MDT at the same time"
+
if ! combined_mgs_mds ; then
stop mgs
fi