. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
+
+# bug 20670 21255
+ALWAYS_EXCEPT="parallel_grouplock statahead $PARALLEL_SCALE_EXCEPT"
#
# compilebench
wdisjoint_REP=${wdisjoint_REP:-10000}
[ "$SLOW" = "no" ] && wdisjoint_REP=100
+#
+# parallel_grouplock
+#
+#
+PARALLEL_GROUPLOCK=${PARALLEL_GROUPLOCK:-$(which parallel_grouplock 2> /dev/null || true)}
+parallel_grouplock_MINTASKS=${parallel_grouplock_MINTASKS:-5}
+
build_test_filter
check_and_setup_lustre
run_test metabench "metabench"
test_simul() {
+ if [ "$NFSCLIENT" ]; then
+ skip "skipped for NFSCLIENT mode"
+ return
+ fi
+
[ x$SIMUL = x ] &&
{ skip_env "simul not found" && return; }
mkdir -p $testdir
# mpi_run uses mpiuser
chmod 0777 $testdir
-
+ if [ "$NFSCLIENT" ]; then
+ setstripe_nfsserver $testdir -c -1 ||
+ { error "setstripe on nfsserver failed" && return 1; }
+ else
+ $LFS setstripe $testdir -c -1 ||
+ { error "setstripe failed" && return 2; }
+ fi
#
# -b N blockSize -- contiguous bytes to write per task (e.g.: 8, 4k, 2m, 1g)"
# -o S testFileName
run_test ior "ior"
test_cascading_rw() {
+ if [ "$NFSCLIENT" ]; then
+ skip "skipped for NFSCLIENT mode"
+ return
+ fi
+
[ x$CASC_RW = x ] &&
{ skip_env "cascading_rw not found" && return; }
run_test cascading_rw "cascading_rw"
test_write_append_truncate() {
+ if [ "$NFSCLIENT" ]; then
+ skip "skipped for NFSCLIENT mode"
+ return
+ fi
+
# location is lustre/tests dir
if ! which write_append_truncate > /dev/null 2>&1 ; then
skip_env "write_append_truncate not found"
run_test write_append_truncate "write_append_truncate"
test_write_disjoint() {
+ if [ "$NFSCLIENT" ]; then
+ skip "skipped for NFSCLIENT mode"
+ return
+ fi
+
[ x$WRITE_DISJOINT = x ] &&
{ skip_env "write_disjoint not found" && return; }
}
run_test write_disjoint "write_disjoint"
+# bug 20670: parallel_grouplock MPI test.  Runs grouplock subtests 1..12,
+# launching each via mpi_run over the configured client list and accumulating
+# non-zero exit codes in $status; any accumulated failure aborts at the end.
+test_parallel_grouplock() {
+ [ x$PARALLEL_GROUPLOCK = x ] &&
+ { skip "PARALLEL_GROUPLOCK not found" && return; }
+
+ # default to the local host when no client list is configured
+ local clients=$CLIENTS
+ [ -z $clients ] && clients=$(hostname)
+
+ local num_clients=$(get_node_count ${clients//,/ })
+
+ generate_machine_file $clients $MACHINEFILE || \
+ error "can not generate machinefile $MACHINEFILE"
+
+ print_opts clients parallel_grouplock_MINTASKS MACHINEFILE
+
+ local testdir=$DIR/d0.parallel_grouplock
+ mkdir -p $testdir
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
+
+ # NOTE(review): presumably disables the client max_rw_chunk optimization so
+ # group locks are exercised directly -- confirm against llite tunable docs
+ do_nodes $clients "lctl set_param llite.*.max_rw_chunk=0" ||
+ error "set_param max_rw_chunk=0 failed "
+
+ local cmd
+ local status=0
+ local subtest
+ for i in $(seq 12); do
+ subtest="-t $i"
+ local cmd="$PARALLEL_GROUPLOCK -g -v -d $testdir $subtest"
+ echo "+ $cmd"
+
+ # a failing subtest is reported (error_noexit) but the loop continues,
+ # so every subtest runs before the final pass/fail verdict
+ mpi_run -np $parallel_grouplock_MINTASKS -machinefile ${MACHINEFILE} $cmd
+ local rc=$?
+ if [ $rc != 0 ] ; then
+ error_noexit "parallel_grouplock subtests $subtest failed! $rc"
+ else
+ echo "parallel_grouplock subtests $subtest PASS"
+ fi
+ let status=$((status + rc))
+ # clear debug to collect one log per one test
+ do_nodes $(comma_list $(nodes_list)) lctl clear
+ done
+ [ $status -eq 0 ] || error "parallel_grouplock status: $status"
+ rm -rf $testdir
+}
+run_test parallel_grouplock "parallel_grouplock"
+
+statahead_NUMMNTPTS=${statahead_NUMMNTPTS:-5}
+statahead_NUMFILES=${statahead_NUMFILES:-500000}
+
+# Unmount the extra client mountpoints created by test_statahead.
+# Arguments: $1 client list, $2 mountpoint root path, $3 mountpoint count.
+# Unmounts ${mntpt_root}0 .. ${mntpt_root}$num_mntpts -- note seq 0 N yields
+# N+1 mountpoints, mirroring the mount loop in test_statahead.
+cleanup_statahead () {
+ trap 0 # clear the EXIT/ERR trap so cleanup does not run twice
+
+ local clients=$1
+ local mntpt_root=$2
+ local num_mntpts=$3
+
+ for i in $(seq 0 $num_mntpts);do
+ zconf_umount_clients $clients ${mntpt_root}$i ||
+ error_exit "Failed to umount lustre on ${mntpt_root}$i"
+ done
+}
+
+test_statahead () {
+
+ # create large dir
+
+ local dir=d0.statahead
+ # FIXME has to use DIR
+ local testdir=$DIR/$dir
+
+ mkdir -p $testdir
+
+ local num_files=$statahead_NUMFILES
+
+ local IFree=$(inodes_available)
+ if [ $IFree -lt $num_files ]; then
+ num_files=$IFree
+ fi
+
+ cancel_lru_locks mdc
+
+ log "createmany -o $testdir/f-%d $num_files"
+ createmany -o $testdir/$f-%d $num_files
+
+ local rc=$?
+ if [ $rc != 0 ] ; then
+ error "createmany failed to create $rc"
+ return $rc
+ fi
+
+ local num_mntpts=$statahead_NUMMNTPTS
+ local mntpt_root=$TMP/mntpt/lustre
+ mntopts=${MNTOPTSTATAHEAD:-$MOUNTOPT}
+
+ local clients=$CLIENTS
+ [ -z $clients ] && clients=$(hostname)
+
+ echo "Mounting $num_mntpts lustre clients starts on $clients"
+ trap "cleanup_statahead $clients $mntpt_root $num_mntpts" EXIT ERR
+ for i in $(seq 0 $num_mntpts);do
+ zconf_mount_clients $clients ${mntpt_root}$i $mntopts ||
+ error_exit "Failed to mount lustre on ${mntpt_root}$i on $clients"
+ done
+
+ do_rpc_nodes $clients cancel_lru_locks mdc
+
+ do_rpc_nodes $clients do_ls $mntpt_root $num_mntpts $dir
+
+ cleanup_statahead $clients $mntpt_root $num_mntpts
+}
+
+run_test statahead "statahead test, multiple clients"
+
equals_msg `basename $0`: test complete, cleaning up
check_and_cleanup_lustre
[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true