The SLES12 umount command has an issue with the '-d' option:
it reports an error when the absolute pathname of a mountpoint
is given together with '-d'. So we export a UMOUNT variable to
avoid this problem. In fact, the loop device is freed
automatically, so '-d' need not be given explicitly, but we
keep it for compatibility.
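The workaround boils down to selecting the umount flags once in
test-framework.sh and having callers use the $UMOUNT variable
instead of hard-coding "umount -d"; roughly (this mirrors the
hunks below):

    export UMOUNT=${UMOUNT:-"umount -d"}
    # sles12 umount has an issue with the -d option
    [ -e /etc/SuSE-release ] && grep -w VERSION /etc/SuSE-release | grep -wq 12 && {
            export UMOUNT="umount"
    }

    # callers then use the variable, e.g.:
    do_facet $facet $UMOUNT $mnt    # instead of: do_facet $facet umount -d $mnt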
Also include the patch from http://review.whamcloud.com/#/c/14799/
ported to master.
On SLES12, the umount command runs statfs() on the filesystem,
which causes unmounting a Lustre client to hang when an OST is
unavailable. This patch adds the "-f" option to zconf_umount
calls in conf-sanity.sh to avoid the issue.
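The flag is threaded through umount_client(), which now forwards
any extra arguments to zconf_umount, so a test can force the
unmount after stopping an OST; for example (again mirroring the
hunks below):

    umount_client() {
            local mountpath=$1
            shift
            echo "umount lustre on $mountpath....."
            zconf_umount $HOSTNAME $mountpath $@ || return 97
    }

    # in a test where OST1 has already been stopped:
    stop_ost || error "Unable to stop OST1"
    umount_client $MOUNT -f || error "unmount $MOUNT failed"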
Test-Parameters: alwaysuploadlogs envdefinitions=ONLY=32 clientdistro=sles12 mdtcount=1 testlist=sanity
Test-Parameters: alwaysuploadlogs envdefinitions=SLOW=yes clientdistro=sles12 mdtcount=1 testlist=conf-sanity
Signed-off-by: Yang Sheng <yang.sheng@intel.com>
Change-Id: If466c2101e0db52b5ec1f7273a846dc2497cfb84
Reviewed-on: http://review.whamcloud.com/16445
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Tested-by: Jenkins
Reviewed-by: Jian Yu <jian.yu@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
- local MOUNTPATH=$1
- echo "umount lustre on ${MOUNTPATH}....."
- zconf_umount $(hostname) $MOUNTPATH || return 97
+ local mountpath=$1
+ shift
+ echo "umount lustre on $mountpath....."
+ zconf_umount $HOSTNAME $mountpath $@ || return 97
}
manual_umount_client(){
local rc
local FORCE=$1
echo "manual umount lustre on ${MOUNT}...."
- do_facet client "umount -d ${FORCE} $MOUNT"
+ do_facet client "umount ${FORCE} $MOUNT"
setup
touch $DIR/$tfile || error "touch $DIR/$tfile failed"
stop_ost || error "Unable to stop OST1"
+ umount_client $MOUNT -f || error "unmount $MOUNT failed"
+ cleanup_nocli
eno=$?
# ok for ost to fail shutdown
if [ 202 -ne $eno ] && [ 0 -ne $eno ]; then
# cleanup may return an error from the failed
# disconnects; for now I'll consider this successful
# if all the modules have unloaded.
UMOUNT_PID=$!
sleep 6
echo "killing umount"
start_mds || error "MDS start failed"
stop_ost || error "Unable to stop OST1"
mount_client $MOUNT || error "mount_client $MOUNT failed"
- cleanup || error "cleanup_nocli failed with $?"
+ umount_client $MOUNT -f || error "umount_client $MOUNT failed"
+ cleanup_nocli || error "cleanup_nocli failed with $?"
grep " $MOUNT " /etc/mtab &&
error "$MOUNT entry in mtab after unmount"
pass
grep " $MOUNT " /etc/mtab &&
error "$MOUNT entry in mtab after unmount"
pass
# check_mount will block trying to contact ost
mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
rm -f $DIR/$tfile || error "remove $DIR/$tfile failed"
# check_mount will block trying to contact ost
mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
rm -f $DIR/$tfile || error "remove $DIR/$tfile failed"
+ umount_client $MOUNT -f
pass
echo "Client mount with a running ost"
umount $tmp/mnt/lustre || rc=$?
fi
if $shall_cleanup_mdt; then
- $r umount -d $tmp/mnt/mdt || rc=$?
+ $r $UMOUNT $tmp/mnt/mdt || rc=$?
fi
if $shall_cleanup_mdt1; then
- $r umount -d $tmp/mnt/mdt1 || rc=$?
+ $r $UMOUNT $tmp/mnt/mdt1 || rc=$?
fi
if $shall_cleanup_ost; then
- $r umount -d $tmp/mnt/ost || rc=$?
+ $r $UMOUNT $tmp/mnt/ost || rc=$?
$r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt
$r $LCTL replace_nids $fsname-OST0000 $ostnid
$r $LCTL replace_nids $fsname-MDT0000 $nid
- $r umount -d $tmp/mnt/mdt
+ $r $UMOUNT $tmp/mnt/mdt
fi
mopts=exclude=$fsname-OST0000
shall_cleanup_lustre=false
else
if [ "$dne_upgrade" != "no" ]; then
- $r umount -d $tmp/mnt/mdt1 || {
+ $r $UMOUNT $tmp/mnt/mdt1 || {
error_noexit "Unmounting the MDT2"
return 1
}
shall_cleanup_mdt1=false
fi
error_noexit "Unmounting the MDT2"
return 1
}
shall_cleanup_mdt1=false
fi
- $r umount -d $tmp/mnt/mdt || {
+ $r $UMOUNT $tmp/mnt/mdt || {
error_noexit "Unmounting the MDT"
return 1
}
shall_cleanup_mdt=false
error_noexit "Unmounting the MDT"
return 1
}
shall_cleanup_mdt=false
- $r umount -d $tmp/mnt/ost || {
+ $r $UMOUNT $tmp/mnt/ost || {
error_noexit "Unmounting the OST"
return 1
}
error_noexit "Unmounting the OST"
return 1
}
cp /etc/hosts $MOUNT2/ || error "copy /etc/hosts $MOUNT2/ failed"
$GETSTRIPE $MOUNT2/hosts || error "$GETSTRIPE $MOUNT2/hosts failed"
stop fs2ost -f
stop fs2mds -f
cleanup_nocli || error "cleanup_nocli failed with $?"
stop fs3ost -f || error "unable to stop OST3"
stop fs2ost -f || error "unable to stop OST2"
stop fs2mds -f || error "unable to stop second MDS"
- do_facet $SINGLEMDS "umount -d $mntpt && rm -f $mdsdev_sym"
+ do_facet $SINGLEMDS "$UMOUNT $mntpt && rm -f $mdsdev_sym"
if $(echo $mount_op | grep -q "unable to set tunable"); then
error "set tunables failed for symlink device"
stop_ost2 || error "Unable to stop OST2"
fi
- umount_client $MOUNT || error "Unable to unmount client"
+ umount_client $MOUNT -f || error "Unable to unmount client"
stop_ost || error "Unable to stop OST1"
stop_mds || error "Unable to stop MDS"
#writeconf to remove all ost2 traces for subsequent tests
start_ost2 || error "Unable to start OST1"
wait $pid
stop_ost2 || error "Unable to stop OST1"
- cleanup || error "cleanup failed with $?"
+ umount_client $MOUNT -f || error "unmount $MOUNT failed"
+ cleanup_nocli || error "stop server failed"
#writeconf to remove all ost2 traces for subsequent tests
writeconf_or_reformat
}
stop_ost2 || error "Unable to stop second ost"
echo "$LFS df"
$LFS df --lazy || error "lfs df failed"
- cleanup || error "cleanup failed with $?"
+ umount_client $MOUNT -f || error "unmount $MOUNT failed"
+ cleanup_nocli || error "cleanup_nocli failed with $?"
#writeconf to remove all ost2 traces for subsequent tests
writeconf_or_reformat
}
do_facet $SINGLEMDS \
"mount -t $(facet_fstype $SINGLEMDS) $opts $devname $brpt"
do_facet $SINGLEMDS "rm -f ${brpt}/last_rcvd"
- do_facet $SINGLEMDS "umount -d $brpt"
+ do_facet $SINGLEMDS "$UMOUNT $brpt"
# restart MDS, the "last_rcvd" file should be recreated.
start_mds || error "fail to restart the MDS"
MEMHOG=${MEMHOG:-memhog}
DIRECTIO=${DIRECTIO:-directio}
ACCEPTOR_PORT=${ACCEPTOR_PORT:-988}
-UMOUNT=${UMOUNT:-"umount -d"}
STRIPES_PER_OBJ=-1
CHECK_GRANT=${CHECK_GRANT:-"yes"}
GRANT_CHECK_LIST=${GRANT_CHECK_LIST:-""}
cleanup_test32_mount() {
trap 0
- $UMOUNT -d $DIR/$tdir/ext2-mountpoint
+ $UMOUNT $DIR/$tdir/ext2-mountpoint
cleanup_testdir_mount() {
trap 0
loopdev="$DIR/loop54c"
trap 0
- $UMOUNT -d $DIR/$tdir || rc=$?
+ $UMOUNT $DIR/$tdir || rc=$?
losetup -d $loopdev || true
losetup -d $LOOPDEV || true
rm -rf $loopdev $DIR/$tfile $DIR/$tdir
#export PDSH="pdsh -S -Rssh -w"
export MOUNT_CMD=${MOUNT_CMD:-"mount -t lustre"}
+export UMOUNT=${UMOUNT:-"umount -d"}
+# sles12 umount has an issue with -d option
+[ -e /etc/SuSE-release ] && grep -w VERSION /etc/SuSE-release | grep -wq 12 && {
+ export UMOUNT="umount"
+}
# function used by scripts run on remote nodes
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts") || true
if [ ${running} -ne 0 ]; then
echo "Stopping $mntpt (opts:$@) on $HOST"
- do_facet ${facet} umount -d $@ $mntpt
+ do_facet ${facet} $UMOUNT $@ $mntpt
fi
# umount should block, but we should wait for unrelated obd's
local dev=$(facet_device $facet)
local mnt=$(facet_mntpt $facet)
- do_facet $facet umount -d $mnt
+ do_facet $facet $UMOUNT $mnt
echo "backup data"
${rcmd} tar zcf $metadata -C $mntpt/ . > /dev/null 2>&1 || return 3
# step 6: umount
echo "backup data"
${rcmd} tar zcf $metadata -C $mntpt/ . > /dev/null 2>&1 || return 3
# step 6: umount
- ${rcmd} umount -d $mntpt || return 4
+ ${rcmd} $UMOUNT $mntpt || return 4
# step 8: reformat dev
echo "reformat new device"
format_mdt $(facet_number $facet)
echo "remove recovery logs"
${rcmd} rm -fv $mntpt/OBJECTS/* $mntpt/CATALOGS
# step 13: umount dev
- ${rcmd} umount -d $mntpt || return 10
+ ${rcmd} $UMOUNT $mntpt || return 10
# step 14: cleanup tmp backup
${rcmd} rm -f $metaea $metadata
# step 15: reset device label - it's not virgin on
- ${rcmd} umount -d $mntpt || return 2
+ ${rcmd} $UMOUNT $mntpt || return 2
# OI files will be recreated when mounted as lustre next time.
}