Whamcloud - gitweb
LU-3738 tests: improve posix.sh to support BASELINE_FS=zfs 80/8180/7
authorJian Yu <jian.yu@intel.com>
Mon, 25 Nov 2013 03:55:58 +0000 (11:55 +0800)
committerOleg Drokin <oleg.drokin@intel.com>
Fri, 29 Nov 2013 01:33:31 +0000 (01:33 +0000)
This patch improves posix.sh to support BASELINE_FS=zfs. It also
adds create_zpool(), create_zfs() and destroy_zpool() common
functions into test-framework.sh.

Test-Parameters: envdefinitions=SLOW=yes \
mdtfilesystemtype=zfs mdsfilesystemtype=zfs ostfilesystemtype=zfs \
ostcount=2 testlist=posix

Signed-off-by: Jian Yu <jian.yu@intel.com>
Change-Id: I7e59181ef5f3926c6a8799ac4f6c775d04350a99
Reviewed-on: http://review.whamcloud.com/8180
Tested-by: Jenkins
Reviewed-by: Minh Diep <minh.diep@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Nathaniel Clark <nathaniel.l.clark@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lustre/tests/posix.sh
lustre/tests/test-framework.sh

index 396e930..46e9637 100755 (executable)
@@ -8,9 +8,6 @@ init_test_env $@
 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
 init_logging
 
-build_test_filter
-check_and_setup_lustre
-
 POSIX_DIR=${POSIX_DIR:-"$LUSTRE/tests/posix"}
 POSIX_SRC=${POSIX_SRC:-"/usr/src/posix"}
 BASELINE_FS=${BASELINE_FS:-"ext4"}
@@ -18,6 +15,18 @@ BASELINE_FS=${BASELINE_FS:-"ext4"}
 # SLES does not support read-write access to an ext4 file system by default
 [[ -e /etc/SuSE-release ]] && BASELINE_FS=ext3
 
+if [[ $(facet_fstype $SINGLEMDS) = zfs ]]; then
+       BASELINE_FS=zfs
+       ! which $ZFS $ZPOOL >/dev/null 2>&1 &&
+               skip_env "need $ZFS and $ZPOOL commands" && exit 0
+
+       POSIX_ZPOOL=$FSNAME-posix
+       POSIX_ZFS=$POSIX_ZPOOL/${POSIX_ZPOOL##$FSNAME-}
+fi
+
+check_and_setup_lustre
+build_test_filter
+
 cleanup_loop_dev() {
     local mnt=$1
     local dev=$2
@@ -34,6 +43,8 @@ cleanup_loop_dev() {
         losetup -d $dev && rm -rf $mnt
         rm -f $file
     fi
+
+       [[ $BASELINE_FS != zfs ]] || destroy_zpool client $POSIX_ZPOOL
 }
 
 setup_loop_dev() {
@@ -49,7 +60,13 @@ setup_loop_dev() {
                echo "can't set up $dev for $file"
                return $rc
        fi
-       if ! eval mkfs.$BASELINE_FS $dev; then
+
+       if [[ $BASELINE_FS = zfs ]]; then
+               create_zpool client $POSIX_ZPOOL $dev || return ${PIPESTATUS[0]}
+               create_zfs client $POSIX_ZFS || return ${PIPESTATUS[0]}
+               dev=$POSIX_ZFS
+
+       elif ! eval mkfs.$BASELINE_FS $dev; then
                rc=$?
                echo "mkfs.$BASELINE_FS on $dev failed"
                return $rc
index f046b87..2a62f83 100644 (file)
@@ -900,6 +900,33 @@ zpool_name() {
 }
 
 #
+# Create ZFS storage pool.
+#
+create_zpool() {
+       local facet=$1
+       local poolname=$2
+       local vdev=$3
+       shift 3
+       local opts=${@:-"-o cachefile=none"}
+
+       do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+               $ZPOOL create -f $opts $poolname $vdev"
+}
+
+#
+# Create ZFS file system.
+#
+create_zfs() {
+       local facet=$1
+       local dataset=$2
+       shift 2
+       local opts=${@:-"-o mountpoint=legacy"}
+
+       do_facet $facet "$ZFS list -H $dataset >/dev/null 2>&1 ||
+               $ZFS create $opts $dataset"
+}
+
+#
 # Export ZFS storage pool.
 # Before exporting the pool, all datasets within the pool should be unmounted.
 #
@@ -919,6 +946,22 @@ export_zpool() {
 }
 
 #
+# Destroy ZFS storage pool.
+# Destroy the given pool and free up any devices for other use. This command
+# tries to unmount any active datasets before destroying the pool.
+# -f    Force any active datasets contained within the pool to be unmounted.
+#
+destroy_zpool() {
+       local facet=$1
+       local poolname=${2:-$(zpool_name $facet)}
+
+       if [[ -n "$poolname" ]]; then
+               do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
+                       $ZPOOL destroy -f $poolname"
+       fi
+}
+
+#
 # Import ZFS storage pool.
 # Force importing, even if the pool appears to be potentially active.
 #