Add Larry's changes that allow using mpirun or yod for running IOR.
diff --git a/lustre-iokit/ior-survey/ior-survey b/lustre-iokit/ior-survey/ior-survey
index 28e91ef..03595a9 100644
--- a/lustre-iokit/ior-survey/ior-survey
+++ b/lustre-iokit/ior-survey/ior-survey
@@ -1,67 +1,79 @@
 #!/bin/bash
 
-# cluster name (expect all node names to be this followed by a number)
-cluster=mdev
+# cluster name (all node names are this followed by the node number)
+cluster=nid000
 
 # client node numbers (individual numbers or inclusive ranges)
-clients=(7-8)
+clients=(1-64)
 
 # numbers of clients to survey
 clients_lo=1
-clients_hi=2
-clients_iterator="+=1"
+clients_hi=64
+clients_iterator="*=2"
 
 # numbers of tasks per client to survey
 tasks_per_client_lo=1
-tasks_per_client_hi=16
-tasks_per_client_iterator="*=4"
+tasks_per_client_hi=1
+tasks_per_client_iterator="*=2"
 
 # record sizes to survey
 rsize_lo=1M
 rsize_hi=1M
 rsize_iterator="*=2"
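# A minimal sketch (not part of the script) of how the *_lo/*_hi/*_iterator
# triples above drive the survey: each dimension is an arithmetic for-loop
# whose step is the iterator string, e.g. with clients_lo=1, clients_hi=64
# and clients_iterator="*=2":
#   for ((nclnt=clients_lo; nclnt<=clients_hi; nclnt$clients_iterator)); do
#       echo $nclnt    # visits 1 2 4 8 16 32 64
#   done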
 
-## which tests to run (first must be write)
-# remount)   not really a test; just remount to uncache everything
-# *write*)   write
-# *)         read
-#tests=(write rewrite read reread rewrite_again)
-tests=(write rewrite remount read)
+# Which tests to run (the first must be write); the example below lists all
+# of the possible tests.
+# IMPORTANT: "remount" is not really a test; it just remounts the F/S to
+# clear the cache.  Put it in the tests array at the point where you want
+# the cache cleared.  For example:
+#   tests=(write rewrite remount read reread rewrite_again)
+# remounts between the rewrite and read tests.
+tests=(write read)
 
 # total # bytes written/read by any client node
-min_per_client_size=75M
-min_total_size=100M
+min_per_client_size=1G
+min_total_size=1G
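# A worked example (illustrative) of how these two minima interact in the
# main loop below: each client writes min_total_size/nclnt bytes, raised to
# min_per_client_size if that quotient falls below it.
#   per_client_size=$((min_total_size/nclnt))
#   if ((per_client_size < min_per_client_size)); then
#       per_client_size=$min_per_client_size
#   fi
# e.g. with min_total_size=100M and min_per_client_size=75M, one client
# writes 100M while two clients write 75M each.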
 
 # should each task do I/O to its own file?
 file_per_task=1
 
-# the IOR binary
-IOR="/home/ericb/ior/src/C/IOR"
+# the binaries
+IOR="/spin/home/henken/IOR-2.8.4/src/C/IOR"
+llmount=llmount
 
-# the pdsh binary
-pdsh=pdsh
+# Command used to run IOR (pdsh, mpirun or yod)
+runior="yod"
+# Path to the binary for the launcher specified in $runior
+pathtobin="$(which yod)"
+# Location of the machines file for mpirun; this file will be built from
+# the cluster name and the client ranges above
+machines=machines
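# A sketch, with the settings above (cluster=nid000, clients=(1-64)), of the
# machines file that mpi_client_file below generates and of how mpirun
# consumes it:
#   $ cat machines
#   nid0001
#   nid0002
#   ...
#   $ mpirun -np $((ntask*nclnt)) -machinefile machines <IOR command line>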
 
-# the llmount binary
-llmount=/home/ericb/lustre/utils/llmount
+# the result file prefix (date/time + hostname makes unique)
+rslt=/spin/home/henken/ior_survey/ior_survey_`date +%F@%R`_`uname -n`
+#rslt=/home/larry/ior_survey
 
 # where lustre is mounted on the clients
-lustre=/mnt/lustre
+lustre=/lustre/fs1/nic/
 
 # basename of the test file(s)
 testfile=${lustre}/ior_survey_testfile
 
+
 # how to unmount and remount the F/S on a client (to clear the cache)
-remount="umount $lustre && $llmount -o nettype=elan mdev6:/ll_mds/client $lustre"
+# change this depending on lustre config (network type, MDS etc)
+unmount="umount $lustre"
+remount="llmount pegasus:/mds1/client $lustre"
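# For example (hostname and MDS name are illustrative), on a Quadrics Elan
# network the remount command might instead look like:
#   remount="$llmount -o nettype=elan mds-host:/ll_mds/client $lustre"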
 
-# the result file prefix (date/time + hostname makes unique)
-#rslt=/home/ericb/ior_survey_`date +%F@%R`_`uname -n`
-rslt=/home/ericb/ior_survey
+# pdsh args required to instantiate all instances of IOR in parallel
+# the chosen module must support '-n <procs-per-node>'
+# -R<module>, -f<fanout> etc
+pdsh_mpiargs="-Rmqsh"
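# A sketch of the resulting pdsh invocation when runior="pdsh" (hostlist is
# illustrative; see parse_cmdline below):
#   pdsh -S -b -Rmqsh -w "nid000[1-2]" -n $ntask <IOR command line>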
 
 #don't spin for MPI completions
 export LIBELAN_WAITTYPE=0
 
 ################################################################################
-# dont change stuff below here
+# don't change stuff below here unless you know what you're doing...
 
 count_range() {
     echo $1 | awk '{ nvals=split($1, vals, "-");\
@@ -74,9 +86,9 @@ base_range() {
 }
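# Usage sketch for the two range helpers above (inferred from how they are
# called in n2noderange below), e.g. for the range "7-8":
#   base_range 7-8    ->  7   (lowest node number in the range)
#   count_range 7-8   ->  2   (how many nodes the range covers)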
 
 idx2nodenum() {
-    n=$1; shift
+    local n=$1; shift
     while ((1)); do
-       range=$1; shift
+       local range=$1; shift
        if [ -z "$range" ]; then
            return
        fi
@@ -91,18 +103,18 @@ idx2nodenum() {
 }
 
 n2noderange() {
-    n=$1; shift
+    local n=$1; shift
     sep=""
     nodes="["
     while ((n > 0)); do
-       range=$1; shift
+       local range=$1; shift
        if [ -z "$range" ]; then
             return
        fi
-       base=`base_range $range`
-       chunk=`count_range $range`
+       local base=`base_range $range`
+       local chunk=`count_range $range`
        if ((chunk > n)); then chunk=n; fi
-       nodes="${nodes}${sep}${base}"; sep=","
+       local nodes="${nodes}${sep}${base}"; sep=","
        if ((chunk > 1)); then nodes="${nodes}-$((base+chunk-1))"; fi
        n=$((n-chunk))
     done
@@ -110,10 +122,10 @@ n2noderange() {
 }
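# Usage sketch: n2noderange <n> <ranges>... emits the first <n> client node
# numbers as a bracketed suffix, which the main loop prefixes with $cluster
# to build a pdsh-style hostlist, e.g. (illustrative)
#   n2noderange 4 1-64                ->  [1-4]
#   test_clients="${cluster}[1-4]"        # i.e. nid000[1-4]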
 
 countnodes() {
-    radix=16384
-    n=0
+    local radix=16384
+    local n=0
     while ((radix > 0)); do
-       nodes=`n2noderange $((n+radix)) $@`
+       local nodes=`n2noderange $((n+radix)) $@`
        if [ -n "$nodes" ]; then
            n=$((n+radix))
         fi
@@ -123,7 +135,7 @@ countnodes() {
 }
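# Usage sketch: countnodes counts every node in the given ranges by probing
# increments with n2noderange and keeping those that still yield a node
# list, e.g.
#   countnodes 1-64   ->  64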
 
 parse_number() {
-    str=$1
+    local str=$1
     case $str in
        *G|*g) n=`echo $str | sed 's/[gG]//'`; echo $((n*1024*1024*1024));;
        *M|*m) n=`echo $str | sed 's/[Mm]//'`; echo $((n*1024*1024));;
@@ -133,10 +145,10 @@ parse_number() {
 }
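# Usage sketch: parse_number converts a K/M/G-suffixed size into bytes, and
# pp_number (below) pretty-prints bytes back into that form, e.g.
#   parse_number 1M           ->  1048576
#   pp_number 1073741824      ->  1G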
 
 pp_number() {
-    n=$1
-    G=$((1024*1024*1024))
-    M=$((1024*1024))
-    K=$((1024))
+    local n=$1
+    local G=$((1024*1024*1024))
+    local M=$((1024*1024))
+    local K=$((1024))
     if ((n%G == 0 && n >= G)); then
        echo "$((n/G))G"
     elif ((n%M == 0 && n >= M)); then
@@ -154,10 +166,7 @@ if [ ${#tests[@]} -eq 0 -o "${tests[0]}" != "write" ]; then
 fi
 
 rsltf="${rslt}.summary"
-iorcf="${rslt}.script"
 workf="${rslt}.detail"
-tmpf="${workf}_tmp"
-
 echo -n > $rsltf
 echo -n > $workf
 
@@ -171,14 +180,46 @@ print_summary () {
     echo $minusn "$*"
 }
 
+mpi_client_file() {
+       echo -n > $machines
+       local base=`base_range $1`
+       echo $base
+       local chunk=`count_range $1`
+       echo $chunk
+       local high=$((base+chunk-1))
+       echo $high
+       for ((nmpi=$base; nmpi<=$high; nmpi++)); do
+               echo $cluster$nmpi >> $machines
+       done
+}
+
+parse_cmdline() {
+    case $runior in
+               'mpirun') 
+               #echo "this"
+               $pathtobin -np $((ntask*nclnt)) -machinefile $machines >> $tmpf 2>&1 \
+                       "${cmdline[@]}";;
+               'pdsh')
+               $pathtobin -S -b $pdsh_mpiargs -w "$test_clients" -n $ntask \
+                       >> $tmpf 2>&1 "${cmdline[@]}";;
+               'yod')
+                       $pathtobin -np $((ntask*nclnt)) >> $tmpf 2>&1 "${cmdline[@]}";;
+       esac    
+}
+
+if [ "$runior" = "mpirun" ]; then
+       mpi_client_file ${clients[@]}
+fi
+
+# convert params to actual numbers
 min_per_client_size=`parse_number $min_per_client_size`
 min_total_size=`parse_number $min_total_size`
 
 rsize_lo=`parse_number $rsize_lo`
 rsize_hi=`parse_number $rsize_hi`
 
+# count the actual client nodes and clamp clients_hi to that number
 nclients=`countnodes ${clients[@]}`
-
 if ((clients_hi > nclients)); then clients_hi=$nclients; fi
 
 for ((rsize=rsize_lo; rsize<=rsize_hi; rsize$rsize_iterator)); do
@@ -186,17 +227,21 @@ for ((rsize=rsize_lo; rsize<=rsize_hi; rsize$rsize_iterator)); do
 
     for ((nclnt=clients_lo; nclnt<=clients_hi; nclnt$clients_iterator)); do
        test_clients="${cluster}`n2noderange $nclnt ${clients[@]}`"
-
+        
        per_client_size=$((min_total_size/nclnt))
        if ((per_client_size < min_per_client_size)); then
            per_client_size=$min_per_client_size
        fi
-       total_size=`pp_number $((per_client_size * nclnt))`
 
        for ((ntask=tasks_per_client_lo; ntask <= tasks_per_client_hi; ntask$tasks_per_client_iterator)); do
            per_task_size=$((per_client_size/ntask))
+           if ((per_task_size%rsize != 0)); then
+               per_task_size=$(((per_task_size/rsize + 1)*rsize))
+           fi
+           total_size=`pp_number $((per_task_size*nclnt*ntask))`
            
-           hdrstr=`printf "Total: %5sB rsize: %4s clients: %4d tasks: %3d: " $total_size $rsize $nclnt $ntask`
+           hdrstr=`printf "Total: %5sB rsize: %4sB clients: %4d tasks: %3d: " \
+               $total_size $pp_rsize $nclnt $ntask`
            print_summary -n "$hdrstr"
 
            for ((test_idx=0; test_idx < ${#tests[@]}; test_idx++)); do
@@ -204,12 +249,20 @@ for ((rsize=rsize_lo; rsize<=rsize_hi; rsize$rsize_iterator)); do
                
                print_summary -n "$test "
                echo "===========> ${hdrstr} on $test_clients doing $test" >> $workf
+               tmpf=${workf}_tmp
                echo -n > $tmpf
 
                if [ "$test" = "remount" ]; then
                    echo "=> $remount" >> $tmpf
-                   $pdsh -S -b -w "$test_clients" >> $tmpf 2>&1 \
-                       "$remount"
+                   if [ "$runior" = "pdsh" ]; then
+                               $pdsh -S -b -w "$test_clients" >> $tmpf 2>&1 \
+                                       "$unmount"
+                               $pdsh -S -b -w "$test_clients" >> $tmpf 2>&1 \
+                                       "$remount"
+                   else
+                               $unmount
+                               $remount
+                   fi
                    status=$?
                    echo "Completion Status: $status" >> $tmpf
 
@@ -219,13 +272,15 @@ for ((rsize=rsize_lo; rsize<=rsize_hi; rsize$rsize_iterator)); do
                        result="OK"
                    fi
                else
+                   # check lustre is mounted everywhere it's needed
                    cmd="(mount -t lustre; mount -t lustre_lite) | grep $lustre"
-                   echo "=> $cmd" >> $tmpf
-                   $pdsh -S -b -w "$test_clients" >> $tmpf 2>&1 \
-                       "$cmd"
+                   echo "=> Mount Check: $cmd" >> $tmpf
+                   if [ "$runior" = "pdsh" ]; then
+                               $pdsh -S -b -w "$test_clients" >> $tmpf 2>&1 \
+                                       "$cmd"
+                   fi
                    status=$?
                    echo "Completion Status: $status" >> $tmpf
-
                    if ((status)); then
                        cat $tmpf >> $workf
                        rm $tmpf
@@ -261,9 +316,8 @@ for ((rsize=rsize_lo; rsize<=rsize_hi; rsize$rsize_iterator)); do
                     esac
 
                    echo "=> ${cmdline[@]}" >> $tmpf
-       
-                   $pdsh -S -b -Rmqsh -w "$test_clients" -n $ntask >> $tmpf 2>&1 \
-                       "${cmdline[@]}"
+
+                   parse_cmdline       
                    status=$?
 
                    echo "Completion Status: $status" >> $tmpf
@@ -287,4 +341,3 @@ for ((rsize=rsize_lo; rsize<=rsize_hi; rsize$rsize_iterator)); do
     done
 done
 
-# rm $iorcf