}
umount_client() {
- local MOUNTPATH=$1
- echo "umount lustre on ${MOUNTPATH}....."
- zconf_umount $(hostname) $MOUNTPATH || return 97
+ local mountpath=$1
+ shift
+ echo "umount lustre on $mountpath....."
+ zconf_umount $HOSTNAME $mountpath $@ || return 97
}
manual_umount_client(){
local rc
local FORCE=$1
echo "manual umount lustre on ${MOUNT}...."
- do_facet client "umount -d ${FORCE} $MOUNT"
+ do_facet client "umount ${FORCE} $MOUNT"
rc=$?
return $rc
}
setup
touch $DIR/$tfile || error "touch $DIR/$tfile failed"
stop_ost || error "Unable to stop OST1"
- cleanup
+ umount_client $MOUNT -f || error "unmount $MOUNT failed"
+ cleanup_nocli
eno=$?
# ok for ost to fail shutdown
if [ 202 -ne $eno ] && [ 0 -ne $eno ]; then
# cleanup may return an error from the failed
# disconnects; for now I'll consider this successful
# if all the modules have unloaded.
- umount -d $MOUNT &
+ $UMOUNT -f $MOUNT &
UMOUNT_PID=$!
sleep 6
echo "killing umount"
start_mds || error "MDS start failed"
stop_ost || error "Unable to stop OST1"
mount_client $MOUNT || error "mount_client $MOUNT failed"
- cleanup || error "cleanup_nocli failed with $?"
+ umount_client $MOUNT -f || error "umount_client $MOUNT failed"
+ cleanup_nocli || error "cleanup_nocli failed with $?"
grep " $MOUNT " /etc/mtab &&
error "$MOUNT entry in mtab after unmount"
pass
# check_mount will block trying to contact ost
mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
rm -f $DIR/$tfile || error "remove $DIR/$tfile failed"
- umount_client $MOUNT
+ umount_client $MOUNT -f
pass
echo "Client mount with a running ost"
umount $tmp/mnt/lustre || rc=$?
fi
if $shall_cleanup_mdt; then
- $r umount -d $tmp/mnt/mdt || rc=$?
+ $r $UMOUNT $tmp/mnt/mdt || rc=$?
fi
if $shall_cleanup_mdt1; then
- $r umount -d $tmp/mnt/mdt1 || rc=$?
+ $r $UMOUNT $tmp/mnt/mdt1 || rc=$?
fi
if $shall_cleanup_ost; then
- $r umount -d $tmp/mnt/ost || rc=$?
+ $r $UMOUNT $tmp/mnt/ost || rc=$?
fi
$r rm -rf $tmp
$r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt
$r $LCTL replace_nids $fsname-OST0000 $ostnid
$r $LCTL replace_nids $fsname-MDT0000 $nid
- $r umount -d $tmp/mnt/mdt
+ $r $UMOUNT $tmp/mnt/mdt
fi
mopts=exclude=$fsname-OST0000
shall_cleanup_lustre=false
else
if [ "$dne_upgrade" != "no" ]; then
- $r umount -d $tmp/mnt/mdt1 || {
+ $r $UMOUNT $tmp/mnt/mdt1 || {
error_noexit "Unmounting the MDT2"
return 1
}
shall_cleanup_mdt1=false
fi
- $r umount -d $tmp/mnt/mdt || {
+ $r $UMOUNT $tmp/mnt/mdt || {
error_noexit "Unmounting the MDT"
return 1
}
shall_cleanup_mdt=false
- $r umount -d $tmp/mnt/ost || {
+ $r $UMOUNT $tmp/mnt/ost || {
error_noexit "Unmounting the OST"
return 1
}
cp /etc/hosts $MOUNT2/ || error "copy /etc/hosts $MOUNT2/ failed"
$GETSTRIPE $MOUNT2/hosts || error "$GETSTRIPE $MOUNT2/hosts failed"
- umount -d $MOUNT2
+ umount $MOUNT2
stop fs2ost -f
stop fs2mds -f
cleanup_nocli || error "cleanup_nocli failed with $?"
rc=3
fi
- umount -d $MOUNT2
+ $UMOUNT $MOUNT2
stop fs3ost -f || error "unable to stop OST3"
stop fs2ost -f || error "unable to stop OST2"
stop fs2mds -f || error "unable to stop second MDS"
echo mount_op=$mount_op
- do_facet $SINGLEMDS "umount -d $mntpt && rm -f $mdsdev_sym"
+ do_facet $SINGLEMDS "$UMOUNT $mntpt && rm -f $mdsdev_sym"
if $(echo $mount_op | grep -q "unable to set tunable"); then
error "set tunables failed for symlink device"
stop_ost2 || error "Unable to stop OST2"
fi
- umount_client $MOUNT || error "Unable to unmount client"
+ umount_client $MOUNT -f || error "Unable to unmount client"
stop_ost || error "Unable to stop OST1"
stop_mds || error "Unable to stop MDS"
#writeconf to remove all ost2 traces for subsequent tests
start_ost2 || error "Unable to start OST1"
wait $pid
stop_ost2 || error "Unable to stop OST1"
- cleanup || error "cleanup failed with $?"
+ umount_client $MOUNT -f || error "unmount $MOUNT failed"
+ cleanup_nocli || error "stop server failed"
#writeconf to remove all ost2 traces for subsequent tests
writeconf_or_reformat
}
stop_ost2 || error "Unable to stop second ost"
echo "$LFS df"
$LFS df --lazy || error "lfs df failed"
- cleanup || error "cleanup failed with $?"
+ umount_client $MOUNT -f || error "unmount $MOUNT failed"
+ cleanup_nocli || error "cleanup_nocli failed with $?"
#writeconf to remove all ost2 traces for subsequent tests
writeconf_or_reformat
}
do_facet $SINGLEMDS \
"mount -t $(facet_fstype $SINGLEMDS) $opts $devname $brpt"
do_facet $SINGLEMDS "rm -f ${brpt}/last_rcvd"
- do_facet $SINGLEMDS "umount -d $brpt"
+ do_facet $SINGLEMDS "$UMOUNT $brpt"
# restart MDS, the "last_rcvd" file should be recreated.
start_mds || error "fail to restart the MDS"
#export PDSH="pdsh -S -Rssh -w"
export MOUNT_CMD=${MOUNT_CMD:-"mount -t lustre"}
+export UMOUNT=${UMOUNT:-"umount -d"}
+# sles12 umount has an issue with -d option
+[ -e /etc/SuSE-release ] && grep -w VERSION /etc/SuSE-release | grep -wq 12 && {
+ export UMOUNT="umount"
+}
# function used by scripts run on remote nodes
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts") || true
if [ ${running} -ne 0 ]; then
echo "Stopping $mntpt (opts:$@) on $HOST"
- do_facet ${facet} umount -d $@ $mntpt
+ do_facet ${facet} $UMOUNT $@ $mntpt
fi
# umount should block, but we should wait for unrelated obd's
local dev=$(facet_device $facet)
local mnt=$(facet_mntpt $facet)
- do_facet $facet umount -d $mnt
+ do_facet $facet $UMOUNT $mnt
}
var_name() {
echo "backup data"
${rcmd} tar zcf $metadata -C $mntpt/ . > /dev/null 2>&1 || return 3
# step 6: umount
- ${rcmd} umount -d $mntpt || return 4
+ ${rcmd} $UMOUNT $mntpt || return 4
# step 8: reformat dev
echo "reformat new device"
format_mdt $(facet_number $facet)
echo "remove recovery logs"
${rcmd} rm -fv $mntpt/OBJECTS/* $mntpt/CATALOGS
# step 13: umount dev
- ${rcmd} umount -d $mntpt || return 10
+ ${rcmd} $UMOUNT $mntpt || return 10
# step 14: cleanup tmp backup
${rcmd} rm -f $metaea $metadata
# step 15: reset device label - it's not virgin on
done
fi
# step 4: umount
- ${rcmd} umount -d $mntpt || return 2
+ ${rcmd} $UMOUNT $mntpt || return 2
# OI files will be recreated when mounted as lustre next time.
}