+test_parallel_grouplock() {
+	# Group-lock correctness test: run every PARALLEL_GROUPLOCK subtest
+	# (1..12) via MPI across the client nodes, accumulating failures so
+	# all subtests get a chance to run before the test is failed.
+	if [ "$NFSCLIENT" ]; then
+		skip "skipped for NFSCLIENT mode"
+		return
+	fi
+
+	[ -z "$PARALLEL_GROUPLOCK" ] &&
+		{ skip "PARALLEL_GROUPLOCK not found" && return; }
+
+	local clients=$CLIENTS
+	[ -z "$clients" ] && clients=$(hostname)
+
+	generate_machine_file $clients $MACHINEFILE || return $?
+
+	print_opts clients parallel_grouplock_MINTASKS MACHINEFILE
+
+	local testdir=$DIR/d0.parallel_grouplock
+	mkdir -p $testdir
+	# mpi_run uses mpiuser, which needs write access to the test dir
+	chmod 0777 $testdir
+
+	do_nodes $clients "lctl set_param llite.*.max_rw_chunk=0" ||
+		error "set_param max_rw_chunk=0 failed "
+
+	local cmd
+	local status=0
+	local subtest
+	local i
+	for i in $(seq 12); do
+		subtest="-t $i"
+		cmd="$PARALLEL_GROUPLOCK -g -v -d $testdir $subtest"
+		echo "+ $cmd"
+
+		mpi_run -np $parallel_grouplock_MINTASKS \
+			-machinefile ${MACHINEFILE} $cmd
+		local rc=$?
+		if [ $rc != 0 ] ; then
+			error_noexit "parallel_grouplock subtests $subtest failed! $rc"
+		else
+			echo "parallel_grouplock subtests $subtest PASS"
+		fi
+		# keep running remaining subtests, but remember the failure
+		status=$((status + rc))
+		# clear debug to collect one log per one test
+		do_nodes $(comma_list $(nodes_list)) lctl clear
+	done
+	[ $status -eq 0 ] || error "parallel_grouplock status: $status"
+	rm -rf $testdir
+}
+run_test parallel_grouplock "parallel_grouplock"
+
+statahead_NUMMNTPTS=${statahead_NUMMNTPTS:-5}
+statahead_NUMFILES=${statahead_NUMFILES:-500000}
+
+cleanup_statahead () {
+	# Unmount the extra lustre mount points created by test_statahead.
+	# Installed as the EXIT/ERR trap handler; clear the trap first so
+	# we do not recurse when invoked explicitly at the end of the test.
+	trap 0
+
+	local clients=$1
+	local mntpt_root=$2
+	local num_mntpts=$3
+	local i
+
+	# "seq 0 N" matches the mount loop: num_mntpts + 1 mount points
+	for i in $(seq 0 $num_mntpts); do
+		zconf_umount_clients $clients ${mntpt_root}$i ||
+			error_exit "Failed to umount lustre on ${mntpt_root}$i"
+	done
+}
+
+test_statahead () {
+	# Stress the client statahead path: create a large directory with
+	# mdsrate, then list it concurrently from several extra lustre
+	# mount points on every client.
+	if [[ -n $NFSCLIENT ]]; then
+		skip "Statahead testing is not supported on NFS clients."
+		return 0
+	fi
+
+	[ -z "$MDSRATE" ] &&
+		{ skip_env "mdsrate not found" && return; }
+
+	local clients=$CLIENTS
+	[ -z "$clients" ] && clients=$(hostname)
+
+	local num_clients=$(get_node_count ${clients//,/ })
+
+	generate_machine_file $clients $MACHINEFILE || return $?
+
+	print_opts MDSRATE clients statahead_NUMMNTPTS statahead_NUMFILES
+
+	# create large dir
+
+	# do not use default "d[0-9]*" dir name
+	# to avoid of rm $statahead_NUMFILES (500k) files in t-f cleanup
+	local dir=dstatahead
+	local testdir=$DIR/$dir
+
+	# cleanup only if dir exists
+	# cleanup only $statahead_NUMFILES number of files
+	# ignore the other files created by someone else
+	[ -d $testdir ] &&
+		mdsrate_cleanup $((num_clients * 32)) $MACHINEFILE \
+			$statahead_NUMFILES $testdir 'f%%d' --ignore
+
+	mkdir -p $testdir
+	# mpi_run uses mpiuser
+	chmod 0777 $testdir
+
+	local num_files=$statahead_NUMFILES
+
+	# do not attempt to create more files than there are free inodes
+	local IFree=$(inodes_available)
+	if [ $IFree -lt $num_files ]; then
+		num_files=$IFree
+	fi
+
+	cancel_lru_locks mdc
+
+	local cmd="${MDSRATE} ${MDSRATE_DEBUG} --mknod --dir $testdir --nfiles $num_files --filefmt 'f%%d'"
+	echo "+ $cmd"
+
+	mpi_run -np $((num_clients * 32)) -machinefile ${MACHINEFILE} $cmd
+
+	local rc=$?
+	if [ $rc != 0 ] ; then
+		error "mdsrate failed to create $rc"
+		return $rc
+	fi
+
+	local num_mntpts=$statahead_NUMMNTPTS
+	local mntpt_root=$TMP/mntpt/lustre
+	local mntopts=${MNTOPTSTATAHEAD:-$MOUNTOPT}
+
+	echo "Mounting $num_mntpts lustre clients starts on $clients"
+	# cleanup_statahead clears this trap before unmounting
+	trap "cleanup_statahead $clients $mntpt_root $num_mntpts" EXIT ERR
+	local i
+	for i in $(seq 0 $num_mntpts); do
+		zconf_mount_clients $clients ${mntpt_root}$i $mntopts ||
+			error_exit "Failed to mount lustre on ${mntpt_root}$i on $clients"
+	done
+
+	do_rpc_nodes $clients cancel_lru_locks mdc
+
+	do_rpc_nodes $clients do_ls $mntpt_root $num_mntpts $dir
+
+	mdsrate_cleanup $((num_clients * 32)) $MACHINEFILE $num_files \
+		$testdir 'f%%d' --ignore
+
+	# use rm instead of rmdir because of
+	# testdir could contain the files created by someone else,
+	# or by previous run where is num_files prev > num_files current
+	rm -rf $testdir
+	cleanup_statahead $clients $mntpt_root $num_mntpts
+}
+
+run_test statahead "statahead test, multiple clients"
+