noinst_SCRIPTS += lnet-selftest.sh obdfilter-survey.sh mmp.sh mmp_mark.sh
noinst_SCRIPTS += sgpdd-survey.sh maloo_upload.sh auster setup-nfs.sh
noinst_SCRIPTS += parallel-scale-nfsv3.sh parallel-scale-nfsv4.sh
+noinst_SCRIPTS += parallel-scale-nfs.sh
nobase_noinst_SCRIPTS = cfg/local.sh
nobase_noinst_SCRIPTS += test-groups/regression test-groups/regression-mpi
nobase_noinst_SCRIPTS += acl/make-tree acl/run cfg/ncli.sh
# This is used when testing in a SLURM environment.
# Tests will use srun when SRUN_PARTITION is set
-SRUN=${SRUN:-$(which srun 2>/dev/null)}
+SRUN=${SRUN:-$(which srun 2>/dev/null || true)}
SRUN_PARTITION=${SRUN_PARTITION:-""}
SRUN_OPTIONS=${SRUN_OPTIONS:-"-W 1800 -l -O"}
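+# Illustrative sketch only (the partition name and walltime below are
+# assumptions, not part of this change): on a SLURM cluster the MPI tests
+# can be steered through srun by setting the partition before a suite runs:
+#   SRUN_PARTITION=testpart SRUN_OPTIONS="-W 3600 -l -O" \
+#       sh $LUSTRE/tests/parallel-scale.sh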
run_write_disjoint() {
- WRITE_DISJOINT=${WRITE_DISJOINT:-\
- $(which write_disjoint 2> /dev/null || true)}
+ WRITE_DISJOINT=${WRITE_DISJOINT:-$(which write_disjoint \
+ 2> /dev/null || true)}
# threads per client
wdisjoint_THREADS=${wdisjoint_THREADS:-4}
wdisjoint_REP=${wdisjoint_REP:-10000}
run_parallel_grouplock() {
- PARALLEL_GROUPLOCK=${PARALLEL_GROUPLOCK:-\
- $(which parallel_grouplock 2> /dev/null || true)}
+ PARALLEL_GROUPLOCK=${PARALLEL_GROUPLOCK:-$(which parallel_grouplock \
+ 2> /dev/null || true)}
parallel_grouplock_MINTASKS=${parallel_grouplock_MINTASKS:-5}
if [ "$NFSCLIENT" ]; then
--- /dev/null
+#!/bin/bash
+#
+#set -vx
+
+NFSVERSION=${1:-"3"}
+LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
+. $LUSTRE/tests/test-framework.sh
+# only call init_test_env if this script is called directly
+if [[ -z "$TESTSUITE" || "$TESTSUITE" = "$(basename $0 .sh)" ]]; then
+ init_test_env $@
+fi
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
+
+. $LUSTRE/tests/setup-nfs.sh
+
+check_and_setup_lustre
+
+# first unmount lustre from all the clients
+cleanup_mount $MOUNT
+# mount lustre on the MDS node, which will act as the NFS server
+lustre_client=$(facet_active_host $SINGLEMDS)
+[ "$NFSVERSION" = "4" ] && cl_mnt_opt="$MOUNTOPT,32bitapi" || cl_mnt_opt=""
+zconf_mount_clients $lustre_client $MOUNT "$cl_mnt_opt" || \
+ error "mount lustre on $lustre_client failed"
+
+# set up the NFS export
+if ! setup_nfs "$NFSVERSION" "$MOUNT" "$lustre_client" "$CLIENTS"; then
+ error_noexit false "setup nfs failed!"
+ cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
+ error_noexit false "failed to cleanup nfs"
+ if ! zconf_umount_clients $lustre_client $MOUNT force; then
+ error_noexit false "failed to umount lustre on $lustre_client"
+ elif ! zconf_mount_clients $CLIENTS $MOUNT; then
+ error_noexit false "failed to mount lustre"
+ fi
+ check_and_cleanup_lustre
+ exit
+fi
+
+NFSCLIENT=true
+FAIL_ON_ERROR=false
+
+# common setup
+MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
+clients=${CLIENTS:-$HOSTNAME}
+generate_machine_file $clients $MACHINEFILE || \
+ error "Failed to generate machine file"
+num_clients=$(get_node_count ${clients//,/ })
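+# Illustrative only (hostnames are assumptions): to drive several NFS
+# clients, list them in CLIENTS before invoking one of the wrappers, e.g.:
+#   CLIENTS=client1,client2 sh $LUSTRE/tests/parallel-scale-nfsv3.sh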
+
+# compilebench
+if [ "$SLOW" = "no" ]; then
+ cbench_IDIRS=2
+ cbench_RUNS=2
+fi
+
+# metabench
+[ "$SLOW" = "no" ] && mbench_NFILES=10000
+
+# connectathon
+[ "$SLOW" = "no" ] && cnt_NRUN=2
+
+# IOR
+[ "$SLOW" = "no" ] && ior_DURATION=30
+
+# source the common file after all parameters are set to take effect
+. $LUSTRE/tests/functions.sh
+
+build_test_filter
+
+get_mpiuser_id $MPI_USER
+MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
+$GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS
+
+test_compilebench() {
+ run_compilebench
+}
+run_test compilebench "compilebench"
+
+test_metabench() {
+ run_metabench
+}
+run_test metabench "metabench"
+
+test_connectathon() {
+ run_connectathon
+}
+run_test connectathon "connectathon"
+
+test_iorssf() {
+ run_ior "ssf"
+}
+run_test iorssf "iorssf"
+
+test_iorfpp() {
+ run_ior "fpp"
+}
+run_test iorfpp "iorfpp"
+
+# cleanup nfs
+cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
+ error_noexit false "cleanup_nfs failed"
+if ! zconf_umount_clients $lustre_client $MOUNT force; then
+ error_noexit false "failed to umount lustre on $lustre_client"
+elif ! zconf_mount_clients $CLIENTS $MOUNT; then
+ error_noexit false "failed to mount lustre after nfs test"
+fi
+
+complete $(basename $0) $SECONDS
+check_and_cleanup_lustre
+exit_status
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
. $LUSTRE/tests/test-framework.sh
init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-init_logging
-. $LUSTRE/tests/setup-nfs.sh
-
-# first unmount all the lustre client
-cleanup_mount $MOUNT
-# mount lustre on mds
-lustre_client=$(facet_active_host $SINGLEMDS)
-zconf_mount_clients $lustre_client $MOUNT || \
- error "mount lustre on $lustre_client failed"
-
-# setup the nfs
-if ! setup_nfs "3" "$MOUNT" "$lustre_client" "$CLIENTS"; then
- error_noexit false "setup nfs failed!"
- cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
- error_noexit false "failed to cleanup nfs"
- if ! zconf_umount_clients $lustre_client $MOUNT force; then
- error_noexit false "failed to umount lustre on $lustre_client"
- elif ! zconf_mount_clients $CLIENTS $MOUNT; then
- error_noexit false "failed to mount lustre after nfs test"
- fi
- check_and_cleanup_lustre
- exit
-fi
-
-NFSCLIENT=yes
-FAIL_ON_ERROR=false
-
-# common setup
-#
-MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
-clients=${CLIENTS:-$HOSTNAME}
-generate_machine_file $clients $MACHINEFILE || \
- error "Failed to generate machine file"
-num_clients=$(get_node_count ${clients//,/ })
-
-# compilbench
-#
-cbench_DIR=${cbench_DIR:-"/usr/bin"}
-cbench_IDIRS=${cbench_IDIRS:-4}
-# FIXME: wiki page requirements is 30, do we really need 30 ?
-cbench_RUNS=${cbench_RUNS:-4}
-
-if [ "$SLOW" = "no" ]; then
- cbench_IDIRS=2
- cbench_RUNS=2
-fi
-
-#
-# metabench
-#
-METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
-mbench_NFILES=${mbench_NFILES:-30400}
-[ "$SLOW" = "no" ] && mbench_NFILES=10000
-# threads per client
-mbench_THREADS=${mbench_THREADS:-4}
-
-#
-# connectathon
-#
-cnt_DIR=${cnt_DIR:-""}
-cnt_NRUN=${cnt_NRUN:-10}
-[ "$SLOW" = "no" ] && cnt_NRUN=2
-
-#
-# IOR
-#
-IOR=${IOR:-$(which IOR 2> /dev/null || true)}
-# threads per client
-ior_THREADS=${ior_THREADS:-2}
-ior_iteration=${ior_iteration:-1}
-ior_blockSize=${ior_blockSize:-6} # Gb
-ior_xferSize=${ior_xferSize:-2m}
-ior_type=${ior_type:-POSIX}
-ior_DURATION=${ior_DURATION:-60} # minutes
-[ "$SLOW" = "no" ] && ior_DURATION=30
-
-# source the common file after all parameters are set to take affect
-. $LUSTRE/tests/functions.sh
-
-build_test_filter
-check_and_setup_lustre
-
-get_mpiuser_id $MPI_USER
-MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
-$GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS
-
-test_compilebench() {
- run_compilebench
-}
-run_test compilebench "compilebench"
-
-test_metabench() {
- run_metabench
-}
-run_test metabench "metabench"
-
-test_connectathon() {
- run_connectathon
-}
-run_test connectathon "connectathon"
-
-test_iorssf() {
- run_ior "ssf"
-}
-run_test iorssf "iorssf"
-
-test_iorfpp() {
- run_ior "fpp"
-}
-run_test iorfpp "iorfpp"
-
-# cleanup nfs
-cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
- error_noexit false "cleanup_nfs failed"
-if ! zconf_umount_clients $lustre_client $MOUNT force; then
- error_noexit false "failed to umount lustre on $lustre_client"
-elif ! zconf_mount_clients $CLIENTS $MOUNT; then
- error_noexit false "failed to mount lustre after nfs test"
-fi
-
-complete $(basename $0) $SECONDS
-check_and_cleanup_lustre
-exit_status
+sh $LUSTRE/tests/parallel-scale-nfs.sh 3
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
. $LUSTRE/tests/test-framework.sh
init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-init_logging
-. $LUSTRE/tests/setup-nfs.sh
-
-# first unmount all the lustre client
-cleanup_mount $MOUNT
-# mount lustre on mds
-lustre_client=$(facet_active_host $SINGLEMDS)
-zconf_mount_clients $lustre_client $MOUNT \
- "-o user_xattr,acl,flock,32bitapi" || \
- error "mount lustre on $lustre_client failed"
-
-# setup the nfs
-if ! setup_nfs "4" "$MOUNT" "$lustre_client" "$CLIENTS"; then
- error_noexit false "setup nfs failed!"
- cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
- error_noexit false "failed to cleanup nfs"
- if ! zconf_umount_clients $lustre_client $MOUNT force; then
- error_noexit false "failed to umount lustre on $lustre_client"
- elif ! zconf_mount_clients $CLIENTS $MOUNT; then
- error_noexit false "failed to mount lustre after nfs test"
- fi
- check_and_cleanup_lustre
- exit
-fi
-
-NFSCLIENT=yes
-FAIL_ON_ERROR=false
-
-# common setup
-#
-MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
-clients=${CLIENTS:-$HOSTNAME}
-generate_machine_file $clients $MACHINEFILE || \
- error "Failed to generate machine file"
-num_clients=$(get_node_count ${clients//,/ })
-
-# compilbench
-#
-cbench_DIR=${cbench_DIR:-"/usr/bin"}
-cbench_IDIRS=${cbench_IDIRS:-4}
-# FIXME: wiki page requirements is 30, do we really need 30 ?
-cbench_RUNS=${cbench_RUNS:-4}
-
-if [ "$SLOW" = "no" ]; then
- cbench_IDIRS=2
- cbench_RUNS=2
-fi
-
-#
-# metabench
-#
-METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
-mbench_NFILES=${mbench_NFILES:-30400}
-[ "$SLOW" = "no" ] && mbench_NFILES=10000
-# threads per client
-mbench_THREADS=${mbench_THREADS:-4}
-
-#
-# connectathon
-#
-cnt_DIR=${cnt_DIR:-""}
-cnt_NRUN=${cnt_NRUN:-10}
-[ "$SLOW" = "no" ] && cnt_NRUN=2
-
-#
-# IOR
-#
-IOR=${IOR:-$(which IOR 2> /dev/null || true)}
-# threads per client
-ior_THREADS=${ior_THREADS:-2}
-ior_iteration=${ior_iteration:-1}
-ior_blockSize=${ior_blockSize:-6} # Gb
-ior_xferSize=${ior_xferSize:-2m}
-ior_type=${ior_type:-POSIX}
-ior_DURATION=${ior_DURATION:-60} # minutes
-[ "$SLOW" = "no" ] && ior_DURATION=30
-
-# source the common file after all parameters are set to take affect
-. $LUSTRE/tests/functions.sh
-
-build_test_filter
-check_and_setup_lustre
-
-get_mpiuser_id $MPI_USER
-MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
-$GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS
-
-test_compilebench() {
- run_compilebench
-}
-run_test compilebench "compilebench"
-
-test_metabench() {
- run_metabench
-}
-run_test metabench "metabench"
-
-test_connectathon() {
- run_connectathon
-}
-run_test connectathon "connectathon"
-
-test_iorssf() {
- run_ior "ssf"
-}
-run_test iorssf "iorssf"
-
-test_iorfpp() {
- run_ior "fpp"
-}
-run_test iorfpp "iorfpp"
-
-# cleanup nfs
-cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
- error_noexit false "cleanup_nfs failed"
-if ! zconf_umount_clients $lustre_client $MOUNT force; then
- error_noexit false "failed to umount lustre on $lustre_client"
-elif ! zconf_mount_clients $CLIENTS $MOUNT; then
- error_noexit false "failed to mount lustre after nfs test"
-fi
-
-complete $(basename $0) $SECONDS
-check_and_cleanup_lustre
-exit_status
+sh $LUSTRE/tests/parallel-scale-nfs.sh 4
ALWAYS_EXCEPT="parallel_grouplock $PARALLEL_SCALE_EXCEPT"
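+# Illustrative only (the test name is just an example from this suite):
+# additional tests can be skipped at run time via the environment, e.g.:
+#   PARALLEL_SCALE_EXCEPT="statahead" sh $LUSTRE/tests/parallel-scale.sh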
# common setup
-#
MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
clients=${CLIENTS:-$HOSTNAME}
generate_machine_file $clients $MACHINEFILE || \
error "Failed to generate machine file"
num_clients=$(get_node_count ${clients//,/ })
-
# compilebench
-#
-cbench_DIR=${cbench_DIR:-""}
-cbench_IDIRS=${cbench_IDIRS:-4}
-# FIXME: wiki page requirements is 30, do we really need 30 ?
-cbench_RUNS=${cbench_RUNS:-4}
-
if [ "$SLOW" = "no" ]; then
cbench_IDIRS=2
cbench_RUNS=2
fi
-#
# metabench
-#
-METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
-mbench_NFILES=${mbench_NFILES:-30400}
[ "$SLOW" = "no" ] && mbench_NFILES=10000
-# threads per client
-mbench_THREADS=${mbench_THREADS:-4}
-#
# simul
-#
-SIMUL=${SIMUL:=$(which simul 2> /dev/null || true)}
-# threads per client
-simul_THREADS=${simul_THREADS:-2}
-simul_REP=${simul_REP:-20}
[ "$SLOW" = "no" ] && simul_REP=2
-#
-# mib
-#
-MIB=${MIB:=$(which mib 2> /dev/null || true)}
-# threads per client
-mib_THREADS=${mib_THREADS:-2}
-mib_xferSize=${mib_xferSize:-1m}
-mib_xferLimit=${mib_xferLimit:-5000}
-mib_timeLimit=${mib_timeLimit:-300}
-
-#
-# MDTEST
-#
-MDTEST=${MDTEST:=$(which mdtest 2> /dev/null || true)}
-# threads per client
-mdtest_THREADS=${mdtest_THREADS:-2}
-mdtest_nFiles=${mdtest_nFiles:-"100000"}
-# We devide the files by number of core
-mdtest_nFiles=$((mdtest_nFiles/mdtest_THREADS/num_clients))
-mdtest_iteration=${mdtest_iteration:-1}
-
-#
# connectathon
-#
-cnt_DIR=${cnt_DIR:-""}
-cnt_NRUN=${cnt_NRUN:-10}
[ "$SLOW" = "no" ] && cnt_NRUN=2
-#
# cascading rw
-#
-CASC_RW=${CASC_RW:-$(which cascading_rw 2> /dev/null || true)}
-# threads per client
-casc_THREADS=${casc_THREADS:-2}
-casc_REP=${casc_REP:-300}
[ "$SLOW" = "no" ] && casc_REP=10
-#
# IOR
-#
-IOR=${IOR:-$(which IOR 2> /dev/null || true)}
-# threads per client
-ior_THREADS=${ior_THREADS:-2}
-ior_iteration=${ior_iteration:-1}
-ior_blockSize=${ior_blockSize:-6} # Gb
-ior_xferSize=${ior_xferSize:-2m}
-ior_type=${ior_type:-POSIX}
-ior_DURATION=${ior_DURATION:-30} # minutes
[ "$SLOW" = "no" ] && ior_DURATION=5
-#
# write_append_truncate
-#
-# threads per client
-write_THREADS=${write_THREADS:-8}
-write_REP=${write_REP:-10000}
[ "$SLOW" = "no" ] && write_REP=100
-#
# write_disjoint
-#
-WRITE_DISJOINT=${WRITE_DISJOINT:-$(which write_disjoint 2> /dev/null || true)}
-# threads per client
-wdisjoint_THREADS=${wdisjoint_THREADS:-4}
-wdisjoint_REP=${wdisjoint_REP:-10000}
[ "$SLOW" = "no" ] && wdisjoint_REP=100
-#
-# parallel_grouplock
-#
-#
-PARALLEL_GROUPLOCK=${PARALLEL_GROUPLOCK:-\
- $(which parallel_grouplock 2> /dev/null || true)}
-parallel_grouplock_MINTASKS=${parallel_grouplock_MINTASKS:-5}
-
. $LUSTRE/tests/functions.sh
build_test_filter
}
run_test parallel_grouplock "parallel_grouplock"
-statahead_NUMMNTPTS=${statahead_NUMMNTPTS:-5}
-statahead_NUMFILES=${statahead_NUMFILES:-500000}
-
test_statahead () {
run_statahead
}