LU-1134 test: cannot assume lustre setup before nfs test
author Minh Diep <mdiep@whamcloud.com>
Mon, 12 Mar 2012 20:42:15 +0000 (13:42 -0700)
committer Oleg Drokin <green@whamcloud.com>
Sat, 7 Apr 2012 02:00:52 +0000 (22:00 -0400)
During autotest, Lustre can be unmounted. The parallel-scale-nfs
test should not assume that Lustre is already mounted and skip its setup.
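
The fix moves check_and_setup_lustre to the front of the shared script, so Lustre is (re)mounted if autotest left it unmounted, before the regular clients are detached and the mount point is re-exported over NFS. A minimal sketch of the pattern, using only function names that appear in the new script below:

    check_and_setup_lustre    # mount Lustre first rather than assuming it is up
    cleanup_mount $MOUNT      # then detach the regular Lustre clients
    lustre_client=$(facet_active_host $SINGLEMDS)
    zconf_mount_clients $lustre_client $MOUNT || error "mount failed"
    setup_nfs "$NFSVERSION" "$MOUNT" "$lustre_client" "$CLIENTS"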

Signed-off-by: Minh Diep <mdiep@whamcloud.com>
Change-Id: I79bd995efd9f08b27ec8c08ac7087be78d578a36
Reviewed-on: http://review.whamcloud.com/2218
Tested-by: Hudson
Reviewed-by: Wei Liu <sarah@whamcloud.com>
Reviewed-by: Yu Jian <yujian@whamcloud.com>
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/tests/Makefile.am
lustre/tests/parallel-scale-nfs.sh [new file with mode: 0755]
lustre/tests/parallel-scale-nfsv3.sh
lustre/tests/parallel-scale-nfsv4.sh

diff --git a/lustre/tests/Makefile.am b/lustre/tests/Makefile.am
index 19d557d..e1ad6b3 100644
--- a/lustre/tests/Makefile.am
+++ b/lustre/tests/Makefile.am
@@ -26,7 +26,7 @@ noinst_SCRIPTS += recovery-random-scale.sh parallel-scale.sh metadata-updates.sh
 noinst_SCRIPTS += lustre-rsync-test.sh ost-pools.sh rpc.sh yaml.sh liblustre.sh
 noinst_SCRIPTS += lnet-selftest.sh obdfilter-survey.sh mmp.sh mmp_mark.sh
 noinst_SCRIPTS += sgpdd-survey.sh maloo_upload.sh auster setup-nfs.sh
-noinst_SCRIPTS += mds-survey.sh
+noinst_SCRIPTS += mds-survey.sh parallel-scale-nfs.sh
 noinst_SCRIPTS += parallel-scale-nfsv3.sh parallel-scale-nfsv4.sh
 nobase_noinst_SCRIPTS = cfg/local.sh
 nobase_noinst_SCRIPTS += test-groups/regression test-groups/regression-mpi
diff --git a/lustre/tests/parallel-scale-nfs.sh b/lustre/tests/parallel-scale-nfs.sh
new file mode 100755
index 0000000..9955dc1
--- /dev/null
+++ b/lustre/tests/parallel-scale-nfs.sh
@@ -0,0 +1,138 @@
+#!/bin/bash
+#
+#set -vx
+
+NFSVERSION=${1:-"3"}
+LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
+. $LUSTRE/tests/test-framework.sh
+# only call init_test_env if this script is called directly
+if [[ -z "$TESTSUITE" || "$TESTSUITE" = "$(basename $0 .sh)" ]]; then
+    init_test_env $@
+fi
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
+
+. $LUSTRE/tests/setup-nfs.sh
+
+check_and_setup_lustre
+
+# first unmount all the lustre clients
+cleanup_mount $MOUNT
+# mount lustre on mds
+lustre_client=$(facet_active_host $SINGLEMDS)
+[ "$NFSVERSION" = "4" ] && cl_mnt_opt="$MOUNTOPT,32bitapi" || cl_mnt_opt=""
+zconf_mount_clients $lustre_client $MOUNT "$cl_mnt_opt" || \
+    error "mount lustre on $lustre_client failed"
+
+# setup the nfs
+if ! setup_nfs "$NFSVERSION" "$MOUNT" "$lustre_client" "$CLIENTS"; then
+    error_noexit false "setup nfs failed!"
+    cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
+        error_noexit false "failed to cleanup nfs"
+    if ! zconf_umount_clients $lustre_client $MOUNT force; then
+        error_noexit false "failed to umount lustre on $lustre_client"
+    elif ! zconf_mount_clients $CLIENTS $MOUNT; then
+        error_noexit false "failed to mount lustre"
+    fi
+    check_and_cleanup_lustre
+    exit
+fi
+
+NFSCLIENT=true
+FAIL_ON_ERROR=false
+
+# common setup
+#
+MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
+clients=${CLIENTS:-$HOSTNAME}
+generate_machine_file $clients $MACHINEFILE || \
+    error "Failed to generate machine file"
+num_clients=$(get_node_count ${clients//,/ })
+
+# compilebench
+#
+cbench_DIR=${cbench_DIR:-"/usr/bin"}
+cbench_IDIRS=${cbench_IDIRS:-4}
+# FIXME: the wiki page requirement is 30; do we really need 30?
+cbench_RUNS=${cbench_RUNS:-4}
+
+if [ "$SLOW" = "no" ]; then
+    cbench_IDIRS=2
+    cbench_RUNS=2
+fi
+
+#
+# metabench
+#
+METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
+mbench_NFILES=${mbench_NFILES:-30400}
+[ "$SLOW" = "no" ] && mbench_NFILES=10000
+# threads per client
+mbench_THREADS=${mbench_THREADS:-4}
+
+#
+# connectathon
+#
+cnt_DIR=${cnt_DIR:-""}
+cnt_NRUN=${cnt_NRUN:-10}
+[ "$SLOW" = "no" ] && cnt_NRUN=2
+
+#
+# IOR
+#
+IOR=${IOR:-$(which IOR 2> /dev/null || true)}
+# threads per client
+ior_THREADS=${ior_THREADS:-2}
+ior_iteration=${ior_iteration:-1}
+ior_blockSize=${ior_blockSize:-6} # Gb
+ior_xferSize=${ior_xferSize:-2m}
+ior_type=${ior_type:-POSIX}
+ior_DURATION=${ior_DURATION:-60} # minutes
+[ "$SLOW" = "no" ] && ior_DURATION=30
+
+# source the common file after all parameters are set to take effect
+. $LUSTRE/tests/functions.sh
+
+build_test_filter
+
+get_mpiuser_id $MPI_USER
+MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
+$GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS
+
+test_compilebench() {
+    run_compilebench
+}
+run_test compilebench "compilebench"
+
+test_metabench() {
+    run_metabench
+}
+run_test metabench "metabench"
+
+test_connectathon() {
+    run_connectathon
+}
+run_test connectathon "connectathon"
+
+test_iorssf() {
+    run_ior "ssf"
+}
+run_test iorssf "iorssf"
+
+test_iorfpp() {
+    run_ior "fpp"
+}
+run_test iorfpp "iorfpp"
+
+# cleanup nfs
+cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
+    error_noexit false "cleanup_nfs failed"
+if ! zconf_umount_clients $lustre_client $MOUNT force; then
+    error_noexit false "failed to umount lustre on $lustre_client"
+elif ! zconf_mount_clients $CLIENTS $MOUNT; then
+    error_noexit false "failed to mount lustre after nfs test"
+fi
+
+complete $(basename $0) $SECONDS
+check_and_cleanup_lustre
+exit_status
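
Because the shared script takes the NFS version as its first positional argument (defaulting to 3) and only calls init_test_env when run directly, it can also be invoked by hand; a hypothetical example, where client1,client2 and NAME=local are placeholder values for an already-configured test environment:

    CLIENTS=client1,client2 NAME=local sh lustre/tests/parallel-scale-nfs.sh 4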
diff --git a/lustre/tests/parallel-scale-nfsv3.sh b/lustre/tests/parallel-scale-nfsv3.sh
index 4e067c2..f75327c 100755
--- a/lustre/tests/parallel-scale-nfsv3.sh
+++ b/lustre/tests/parallel-scale-nfsv3.sh
@@ -5,128 +5,5 @@
 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
 . $LUSTRE/tests/test-framework.sh
 init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-init_logging
 
-. $LUSTRE/tests/setup-nfs.sh
-
-# first unmount all the lustre client
-cleanup_mount $MOUNT
-# mount lustre on mds
-lustre_client=$(facet_active_host $SINGLEMDS)
-zconf_mount_clients $lustre_client $MOUNT || \
-    error "mount lustre on $lustre_client failed"
-
-# setup the nfs
-if ! setup_nfs "3" "$MOUNT" "$lustre_client" "$CLIENTS"; then
-    error_noexit false "setup nfs failed!"
-    cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
-        error_noexit false "failed to cleanup nfs"
-    if ! zconf_umount_clients $lustre_client $MOUNT force; then
-        error_noexit false "failed to umount lustre on $lustre_client"
-    elif ! zconf_mount_clients $CLIENTS $MOUNT; then
-        error_noexit false "failed to mount lustre after nfs test"
-    fi
-    check_and_cleanup_lustre
-    exit
-fi
-
-NFSCLIENT=yes
-FAIL_ON_ERROR=false
-
-# common setup
-#
-MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
-clients=${CLIENTS:-$HOSTNAME}
-generate_machine_file $clients $MACHINEFILE || \
-    error "Failed to generate machine file"
-num_clients=$(get_node_count ${clients//,/ })
-
-# compilbench
-#
-cbench_DIR=${cbench_DIR:-"/usr/bin"}
-cbench_IDIRS=${cbench_IDIRS:-4}
-# FIXME: wiki page requirements is 30, do we really need 30 ?
-cbench_RUNS=${cbench_RUNS:-4}
-
-if [ "$SLOW" = "no" ]; then
-    cbench_IDIRS=2
-    cbench_RUNS=2
-fi
-
-#
-# metabench
-#
-METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
-mbench_NFILES=${mbench_NFILES:-30400}
-[ "$SLOW" = "no" ] && mbench_NFILES=10000
-# threads per client
-mbench_THREADS=${mbench_THREADS:-4}
-
-#
-# connectathon
-#
-cnt_DIR=${cnt_DIR:-""}
-cnt_NRUN=${cnt_NRUN:-10}
-[ "$SLOW" = "no" ] && cnt_NRUN=2
-
-#
-# IOR
-#
-IOR=${IOR:-$(which IOR 2> /dev/null || true)}
-# threads per client
-ior_THREADS=${ior_THREADS:-2}
-ior_iteration=${ior_iteration:-1}
-ior_blockSize=${ior_blockSize:-6} # Gb
-ior_xferSize=${ior_xferSize:-2m}
-ior_type=${ior_type:-POSIX}
-ior_DURATION=${ior_DURATION:-60} # minutes
-[ "$SLOW" = "no" ] && ior_DURATION=30
-
-# source the common file after all parameters are set to take affect
-. $LUSTRE/tests/functions.sh
-
-build_test_filter
-check_and_setup_lustre
-
-get_mpiuser_id $MPI_USER
-MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
-$GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS
-
-test_compilebench() {
-    run_compilebench
-}
-run_test compilebench "compilebench"
-
-test_metabench() {
-    run_metabench
-}
-run_test metabench "metabench"
-
-test_connectathon() {
-    run_connectathon
-}
-run_test connectathon "connectathon"
-
-test_iorssf() {
-    run_ior "ssf"
-}
-run_test iorssf "iorssf"
-
-test_iorfpp() {
-    run_ior "fpp"
-}
-run_test iorfpp "iorfpp"
-
-# cleanup nfs
-cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
-    error_noexit false "cleanup_nfs failed"
-if ! zconf_umount_clients $lustre_client $MOUNT force; then
-    error_noexit false "failed to umount lustre on $lustre_client"
-elif ! zconf_mount_clients $CLIENTS $MOUNT; then
-    error_noexit false "failed to mount lustre after nfs test"
-fi
-
-complete $(basename $0) $SECONDS
-check_and_cleanup_lustre
-exit_status
+sh $LUSTRE/tests/parallel-scale-nfs.sh 3
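
After this patch the NFSv3 wrapper reduces to a thin shim. Reconstructed from the hunk above (the elided first four lines of the file, the shebang and header comments, are unchanged), the surviving script is roughly:

    LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
    . $LUSTRE/tests/test-framework.sh
    init_test_env $@

    sh $LUSTRE/tests/parallel-scale-nfs.sh 3

The NFSv4 wrapper below is identical except that it passes 4.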
diff --git a/lustre/tests/parallel-scale-nfsv4.sh b/lustre/tests/parallel-scale-nfsv4.sh
index ec71a41..e0a5fc4 100755
--- a/lustre/tests/parallel-scale-nfsv4.sh
+++ b/lustre/tests/parallel-scale-nfsv4.sh
@@ -5,129 +5,5 @@
 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
 . $LUSTRE/tests/test-framework.sh
 init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-init_logging
 
-. $LUSTRE/tests/setup-nfs.sh
-
-# first unmount all the lustre client
-cleanup_mount $MOUNT
-# mount lustre on mds
-lustre_client=$(facet_active_host $SINGLEMDS)
-zconf_mount_clients $lustre_client $MOUNT \
-    "-o user_xattr,flock,32bitapi" || \
-    error "mount lustre on $lustre_client failed"
-
-# setup the nfs
-if ! setup_nfs "4" "$MOUNT" "$lustre_client" "$CLIENTS"; then
-    error_noexit false "setup nfs failed!"
-    cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
-        error_noexit false "failed to cleanup nfs"
-    if ! zconf_umount_clients $lustre_client $MOUNT force; then
-        error_noexit false "failed to umount lustre on $lustre_client"
-    elif ! zconf_mount_clients $CLIENTS $MOUNT; then
-        error_noexit false "failed to mount lustre after nfs test"
-    fi
-    check_and_cleanup_lustre
-    exit
-fi
-
-NFSCLIENT=yes
-FAIL_ON_ERROR=false
-
-# common setup
-#
-MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
-clients=${CLIENTS:-$HOSTNAME}
-generate_machine_file $clients $MACHINEFILE || \
-    error "Failed to generate machine file"
-num_clients=$(get_node_count ${clients//,/ })
-
-# compilbench
-#
-cbench_DIR=${cbench_DIR:-"/usr/bin"}
-cbench_IDIRS=${cbench_IDIRS:-4}
-# FIXME: wiki page requirements is 30, do we really need 30 ?
-cbench_RUNS=${cbench_RUNS:-4}
-
-if [ "$SLOW" = "no" ]; then
-    cbench_IDIRS=2
-    cbench_RUNS=2
-fi
-
-#
-# metabench
-#
-METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
-mbench_NFILES=${mbench_NFILES:-30400}
-[ "$SLOW" = "no" ] && mbench_NFILES=10000
-# threads per client
-mbench_THREADS=${mbench_THREADS:-4}
-
-#
-# connectathon
-#
-cnt_DIR=${cnt_DIR:-""}
-cnt_NRUN=${cnt_NRUN:-10}
-[ "$SLOW" = "no" ] && cnt_NRUN=2
-
-#
-# IOR
-#
-IOR=${IOR:-$(which IOR 2> /dev/null || true)}
-# threads per client
-ior_THREADS=${ior_THREADS:-2}
-ior_iteration=${ior_iteration:-1}
-ior_blockSize=${ior_blockSize:-6} # Gb
-ior_xferSize=${ior_xferSize:-2m}
-ior_type=${ior_type:-POSIX}
-ior_DURATION=${ior_DURATION:-60} # minutes
-[ "$SLOW" = "no" ] && ior_DURATION=30
-
-# source the common file after all parameters are set to take affect
-. $LUSTRE/tests/functions.sh
-
-build_test_filter
-check_and_setup_lustre
-
-get_mpiuser_id $MPI_USER
-MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
-$GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS
-
-test_compilebench() {
-    run_compilebench
-}
-run_test compilebench "compilebench"
-
-test_metabench() {
-    run_metabench
-}
-run_test metabench "metabench"
-
-test_connectathon() {
-    run_connectathon
-}
-run_test connectathon "connectathon"
-
-test_iorssf() {
-    run_ior "ssf"
-}
-run_test iorssf "iorssf"
-
-test_iorfpp() {
-    run_ior "fpp"
-}
-run_test iorfpp "iorfpp"
-
-# cleanup nfs
-cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
-    error_noexit false "cleanup_nfs failed"
-if ! zconf_umount_clients $lustre_client $MOUNT force; then
-    error_noexit false "failed to umount lustre on $lustre_client"
-elif ! zconf_mount_clients $CLIENTS $MOUNT; then
-    error_noexit false "failed to mount lustre after nfs test"
-fi
-
-complete $(basename $0) $SECONDS
-check_and_cleanup_lustre
-exit_status
+sh $LUSTRE/tests/parallel-scale-nfs.sh 4