+# return the client-side mdc obd name pattern for a given MDT label
+get_clientmdc_proc_path() {
+ echo "${1}-mdc-*"
+}
+
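+# do_rpc_nodes <nodelist> <function> [args]: run a test-framework function on
+# the given nodes through rpc.sh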
+do_rpc_nodes () {
+ local list=$1
+ shift
+
+ # Add paths to lustre tests for 32 and 64 bit systems.
+ local RPATH="$RLUSTRE/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:$PATH"
+ do_nodesv $list "PATH=$RPATH sh rpc.sh $@ "
+}
+
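+# wait_clients_import_state <nodelist> <facet> <expected>:
+# wait on the given client nodes until their import of <facet> (ost*/mds*)
+# reaches the <expected> state (e.g. FULL)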
+wait_clients_import_state () {
+ local list=$1
+ local facet=$2
+ local expected=$3
+ shift
+
+ local label=$(convert_facet2label $facet)
+ local proc_path
+ case $facet in
+ ost* ) proc_path="osc.$(get_clientosc_proc_path $label).ost_server_uuid" ;;
+ mds* ) proc_path="mdc.$(get_clientmdc_proc_path $label).mds_server_uuid" ;;
+ *) error "unknown facet!" ;;
+ esac
+
+ if ! do_rpc_nodes $list wait_import_state $expected $proc_path; then
+ error "import is not in ${expected} state"
+ return 1
+ fi
+}
+
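+# oos_full: return 0 (i.e. "full") if any OST has less than 400KB of space
+# available beyond what it has already granted, non-zero otherwise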
+oos_full() {
+ local -a AVAILA
+ local -a GRANTA
+ local OSCFULL=1
+ AVAILA=($(do_nodes $(comma_list $(osts_nodes)) \
+ $LCTL get_param obdfilter.*.kbytesavail))
+ GRANTA=($(do_nodes $(comma_list $(osts_nodes)) \
+ $LCTL get_param -n obdfilter.*.tot_granted))
+ for ((i=0; i<${#AVAILA[@]}; i++)); do
+ local -a AVAIL1=(${AVAILA[$i]//=/ })
+ GRANT=$((${GRANTA[$i]}/1024))
+ echo -n $(echo ${AVAIL1[0]} | cut -d"." -f2) avl=${AVAIL1[1]} grnt=$GRANT diff=$((AVAIL1[1] - GRANT))
+ [ $((AVAIL1[1] - GRANT)) -lt 400 ] && OSCFULL=0 && echo " FULL" || echo
+ done
+ return $OSCFULL
+}
+
+pool_list () {
+ do_facet mgs lctl pool_list $1
+}
+
+create_pool() {
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
+
+ do_facet mgs lctl pool_new $1
+ local RC=$?
+ [[ $RC -ne 0 ]] && return $RC
+
+ # get_param returns an error (and we echo "foo") until the pool exists,
+ # so wait for it to succeed with empty output
+ wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
+ 2>/dev/null || echo foo" "" || RC=1
+ if [[ $RC -eq 0 ]]; then
+ add_pool_to_list $1
+ else
+ error "pool_new failed $1"
+ fi
+ return $RC
+}
+
+add_pool_to_list () {
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
+
+ local listvar=${fsname}_CREATED_POOLS
+ eval export ${listvar}=$(expand_list ${!listvar} $poolname)
+}
+
+remove_pool_from_list () {
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
+
+ local listvar=${fsname}_CREATED_POOLS
+ eval export ${listvar}=$(exclude_items_from_list ${!listvar} $poolname)
+}
+
+destroy_pool_int() {
+ local ost
+ local OSTS=$(do_facet $SINGLEMDS lctl pool_list $1 | \
+ awk '$1 !~ /^Pool:/ {print $1}')
+ for ost in $OSTS; do
+ do_facet mgs lctl pool_remove $1 $ost
+ done
+ do_facet mgs lctl pool_destroy $1
+}
+
+# <fsname>.<poolname> or <poolname>
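+# e.g. destroy_pool $FSNAME.testpool  or  destroy_pool testpool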
+destroy_pool() {
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
+
+ [[ x$fsname = x$poolname ]] && fsname=$FSNAME
+
+ local RC
+
+ pool_list $fsname.$poolname || return $?
+
+ destroy_pool_int $fsname.$poolname
+ RC=$?
+ [[ $RC -ne 0 ]] && return $RC
+
+ wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
+ 2>/dev/null || echo foo" "foo" || RC=1
+
+ if [[ $RC -eq 0 ]]; then
+ remove_pool_from_list $fsname.$poolname
+ else
+ error "destroy pool failed $1"
+ fi
+ return $RC
+}
+
+destroy_pools () {
+ local fsname=${1:-$FSNAME}
+ local poolname
+ local listvar=${fsname}_CREATED_POOLS
+
+ pool_list $fsname
+
+ [ -z "${!listvar}" ] && return 0
+
+ echo "destroy the created pools: ${!listvar}"
+ for poolname in ${!listvar//,/ }; do
+ destroy_pool $fsname.$poolname
+ done
+}
+
+cleanup_pools () {
+ local fsname=${1:-$FSNAME}
+ trap 0
+ destroy_pools $fsname
+}
+
+gather_logs () {
+ local list=$1
+
+ local ts=$(date +%s)
+
+ # bug 20237, comment 11
+ # It would also be useful to provide the option
+ # of writing the file to an NFS directory so it doesn't need to be copied.
+ local docp=true
+ [ -f $LOGDIR/shared ] && docp=false
+
+ # dump lustre logs, dmesg
+
+ prefix="$LOGDIR/${TESTSUITE}.${TESTNAME}"
+ suffix="$ts.log"
+ echo "Dumping lctl log to ${prefix}.*.${suffix}"
+
+ if [ "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
+ echo "Dumping logs only on local client."
+ $LCTL dk > ${prefix}.debug_log.$(hostname).${suffix}
+ dmesg > ${prefix}.dmesg.$(hostname).${suffix}
+ return
+ fi
+
+ do_nodesv $list \
+ "$LCTL dk > ${prefix}.debug_log.\\\$(hostname).${suffix};
+ dmesg > ${prefix}.dmesg.\\\$(hostname).${suffix}"
+ if $docp; then
+ do_nodes $list rsync -az "${prefix}.*.${suffix}" $HOSTNAME:$LOGDIR
+ fi
+
+ local archive=$LOGDIR/${TESTSUITE}-$ts.tar.bz2
+ tar -jcf $archive $LOGDIR/*$ts* $LOGDIR/*${TESTSUITE}*
+
+ echo $archive
+}
+
+cleanup_logs () {
+ local list=${1:-$(comma_list $(nodes_list))}
+
+ [ -n "${TESTSUITE}" ] && do_nodes $list "rm -f $TMP/*${TESTSUITE}*" || true
+}
+
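+# do_ls <mntpt_root> <num_mntpts> <dir>: run "ls -laf" on <dir> under each of
+# the mount points in parallel and return non-zero if any of them fails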
+do_ls () {
+ local mntpt_root=$1
+ local num_mntpts=$2
+ local dir=$3
+ local i
+ local cmd
+ local pids
+ local rc=0
+
+ for i in $(seq 0 $num_mntpts); do
+ cmd="ls -laf ${mntpt_root}$i/$dir"
+ echo + $cmd;
+ $cmd > /dev/null &
+ pids="$pids $!"
+ done
+ echo pids=$pids
+ for pid in $pids; do
+ wait $pid || rc=$?
+ done
+
+ return $rc
+}
+
+# target_start_and_reset_recovery_timer()
+# service_time = at_est2timeout(service_time);
+# service_time += 2 * (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC +
+# INITIAL_CONNECT_TIMEOUT);
+# CONNECTION_SWITCH_MAX : min(25U, max(CONNECTION_SWITCH_MIN,obd_timeout))
+#define CONNECTION_SWITCH_INC 1
+#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN,obd_timeout/20)
+#define CONNECTION_SWITCH_MIN 5U
+
+max_recovery_time () {
+ local init_connect_timeout=$(( TIMEOUT / 20 ))
+ [[ $init_connect_timeout -ge 5 ]] || init_connect_timeout=5
+
+ # at_max + 2 * (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + INITIAL_CONNECT_TIMEOUT)
+ local service_time=$(( $(at_max_get client) + 2 * (25 + 1 + init_connect_timeout) ))
+
+ echo $service_time
+}
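+# Worked example (hypothetical values): with TIMEOUT=100 the initial connect
+# timeout is max(5, 100/20) = 5, so with a client at_max of 600 the result is
+# 600 + 2 * (25 + 1 + 5) = 662 seconds.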
+
+get_clients_mount_count () {
+ local clients=${CLIENTS:-`hostname`}
+
+ # count only the lustre mounts of $MOUNT on the clients and
+ # exclude any mds/ost mounts that may exist on the same nodes
+ do_nodes $clients cat /proc/mounts | grep lustre | grep "$MOUNT" | wc -l
+}
+
+# gss functions
+PROC_CLI="srpc_info"
+
+combination()
+{
+ local M=$1
+ local N=$2
+ local R=1
+ local i
+
+ # C(M, N) = M! / (N! * (M - N)!), computed incrementally so that each
+ # intermediate division is exact
+ if [ $M -lt $N ]; then
+ R=0
+ else
+ for ((i = 0; i < N; i++)); do
+ R=$((R * (M - i) / (i + 1)))
+ done
+ fi
+
+ echo $R
+ return 0
+}
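+# e.g. combination 3 2 -> 3, combination 1 2 -> 0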
+
+calc_connection_cnt() {
+ local dir=$1
+
+ # MDT->MDT = 2 * C(M, 2)
+ # MDT->OST = M * O
+ # CLI->OST = C * O
+ # CLI->MDT = C * M
+ comb_m2=$(combination $MDSCOUNT 2)
+
+ local num_clients=$(get_clients_mount_count)
+
+ local cnt_mdt2mdt=$((comb_m2 * 2))
+ local cnt_mdt2ost=$((MDSCOUNT * OSTCOUNT))
+ local cnt_cli2ost=$((num_clients * OSTCOUNT))
+ local cnt_cli2mdt=$((num_clients * MDSCOUNT))
+ local cnt_all2ost=$((cnt_mdt2ost + cnt_cli2ost))
+ local cnt_all2mdt=$((cnt_mdt2mdt + cnt_cli2mdt))
+ local cnt_all2all=$((cnt_mdt2ost + cnt_mdt2mdt + cnt_cli2ost + cnt_cli2mdt))
+
+ local var=cnt_$dir
+ local res=${!var}
+
+ echo $res
+}
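+# Example with a hypothetical config of 1 MDS, 2 OSTs and 1 client mount:
+# mdt2mdt=0, mdt2ost=2, cli2ost=2, cli2mdt=1, all2ost=4, all2mdt=1, all2all=5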
+
+set_rule()
+{
+ local tgt=$1
+ local net=$2
+ local dir=$3
+ local flavor=$4
+ local cmd="$tgt.srpc.flavor"
+
+ if [ $net == "any" ]; then
+ net="default"
+ fi
+ cmd="$cmd.$net"
+
+ if [ $dir != "any" ]; then
+ cmd="$cmd.$dir"
+ fi
+
+ cmd="$cmd=$flavor"
+ log "Setting sptlrpc rule: $cmd"
+ do_facet mgs "$LCTL conf_param $cmd"
+}
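+# e.g. "set_rule $FSNAME any cli2ost krb5i" (example flavor) results in
+# "lctl conf_param $FSNAME.srpc.flavor.default.cli2ost=krb5i" on the MGS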
+
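+# count_flvr <output> <flavor>: count the connections in the given srpc_info
+# output whose rpc flavor matches <flavor>; if the spec carries a bulk part
+# (of the form <rpc_flavor>-bulk{n,i,p}[:algs]), the bulk flavor must match too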
+count_flvr()
+{
+ local output=$1
+ local flavor=$2
+ local count=0
+
+ rpc_flvr=`echo $flavor | awk -F - '{ print $1 }'`
+ bulkspec=`echo $flavor | awk -F - '{ print $2 }'`
+
+ count=`echo "$output" | grep "rpc flavor" | grep $rpc_flvr | wc -l`
+
+ if [ "x$bulkspec" != "x" ]; then
+ algs=`echo $bulkspec | awk -F : '{ print $2 }'`
+
+ if [ "x$algs" != "x" ]; then
+ bulk_count=`echo "$output" | grep "bulk flavor" | grep $algs | wc -l`
+ else
+ bulk=`echo $bulkspec | awk -F : '{ print $1 }'`
+ if [ $bulk == "bulkn" ]; then
+ bulk_count=`echo "$output" | grep "bulk flavor" \
+ | grep "null/null" | wc -l`
+ elif [ $bulk == "bulki" ]; then
+ bulk_count=`echo "$output" | grep "bulk flavor" \
+ | grep "/null" | grep -v "null/" | wc -l`
+ else
+ bulk_count=`echo "$output" | grep "bulk flavor" \
+ | grep -v "/null" | grep -v "null/" | wc -l`
+ fi
+ fi
+
+ [ $bulk_count -lt $count ] && count=$bulk_count
+ fi
+
+ echo $count
+}
+
+flvr_cnt_cli2mdt()
+{
+ local flavor=$1
+ local cnt=0
+
+ local clients=${CLIENTS:-`hostname`}
+
+ for c in ${clients//,/ }; do
+ output=`do_node $c lctl get_param -n mdc.*-MDT*-mdc-*.$PROC_CLI 2>/dev/null`
+ tmpcnt=`count_flvr "$output" $flavor`
+ cnt=$((cnt + tmpcnt))
+ done
+ echo $cnt
+}
+
+flvr_cnt_cli2ost()
+{
+ local flavor=$1
+ local cnt=0
+
+ local clients=${CLIENTS:-`hostname`}
+
+ for c in ${clients//,/ }; do
+ output=`do_node $c lctl get_param -n osc.*OST*-osc-[^M][^D][^T]*.$PROC_CLI 2>/dev/null`
+ tmpcnt=`count_flvr "$output" $flavor`
+ cnt=$((cnt + tmpcnt))
+ done
+ echo $cnt
+}
+
+flvr_cnt_mdt2mdt()
+{
+ local flavor=$1
+ local cnt=0
+
+ if [ $MDSCOUNT -le 1 ]; then
+ echo 0
+ return
+ fi
+
+ for num in `seq $MDSCOUNT`; do
+ output=`do_facet mds$num lctl get_param -n mdc.*-MDT*-mdc[0-9]*.$PROC_CLI 2>/dev/null`
+ tmpcnt=`count_flvr "$output" $flavor`
+ cnt=$((cnt + tmpcnt))
+ done
+ echo $cnt;
+}
+
+flvr_cnt_mdt2ost()
+{
+ local flavor=$1
+ local cnt=0
+
+ for num in `seq $MDSCOUNT`; do
+ output=`do_facet mds$num lctl get_param -n osc.*OST*-osc-MDT*.$PROC_CLI 2>/dev/null`
+ tmpcnt=`count_flvr "$output" $flavor`
+ cnt=$((cnt + tmpcnt))
+ done
+ echo $cnt;
+}
+
+flvr_cnt_mgc2mgs()
+{
+ local flavor=$1
+
+ output=`do_facet client lctl get_param -n mgc.*.$PROC_CLI 2>/dev/null`
+ count_flvr "$output" $flavor
+}
+
+do_check_flavor()
+{
+ local dir=$1 # from to
+ local flavor=$2 # flavor expected
+ local res=0
+
+ if [ $dir == "cli2mdt" ]; then
+ res=`flvr_cnt_cli2mdt $flavor`
+ elif [ $dir == "cli2ost" ]; then
+ res=`flvr_cnt_cli2ost $flavor`
+ elif [ $dir == "mdt2mdt" ]; then
+ res=`flvr_cnt_mdt2mdt $flavor`
+ elif [ $dir == "mdt2ost" ]; then
+ res=`flvr_cnt_mdt2ost $flavor`
+ elif [ $dir == "all2ost" ]; then
+ res1=`flvr_cnt_mdt2ost $flavor`
+ res2=`flvr_cnt_cli2ost $flavor`
+ res=$((res1 + res2))
+ elif [ $dir == "all2mdt" ]; then
+ res1=`flvr_cnt_mdt2mdt $flavor`
+ res2=`flvr_cnt_cli2mdt $flavor`
+ res=$((res1 + res2))
+ elif [ $dir == "all2all" ]; then
+ res1=`flvr_cnt_mdt2ost $flavor`
+ res2=`flvr_cnt_cli2ost $flavor`
+ res3=`flvr_cnt_mdt2mdt $flavor`
+ res4=`flvr_cnt_cli2mdt $flavor`
+ res=$((res1 + res2 + res3 + res4))
+ fi
+
+ echo $res
+}
+
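+# wait_flavor <dir> <flavor> [count]: poll up to 20 times, 4 seconds apart,
+# until the expected number of <flavor> connections is seen for <dir>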
+wait_flavor()
+{
+ local dir=$1 # from to
+ local flavor=$2 # flavor expected
+ local expect=${3:-$(calc_connection_cnt $dir)} # number expected
+
+ local res=0
+
+ for ((i=0;i<20;i++)); do
+ echo -n "checking..."
+ res=$(do_check_flavor $dir $flavor)
+ if [ $res -eq $expect ]; then
+ echo "found $res $flavor connections of $dir, OK"
+ return 0
+ else
+ echo "found $res $flavor connections of $dir, not ready ($expect)"
+ sleep 4
+ fi
+ done
+
+ echo "Error checking $flavor of $dir: expect $expect, actual $res"
+ return 1
+}
+
+restore_to_default_flavor()
+{
+ local proc="mgs.MGS.live.$FSNAME"
+
+ echo "restoring to default flavor..."
+
+ nrule=`do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor." | wc -l`
+
+ # remove all existing rules if any
+ if [ $nrule -ne 0 ]; then
+ echo "$nrule existing rules"
+ for rule in `do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor."`; do
+ echo "remove rule: $rule"
+ spec=`echo $rule | awk -F = '{print $1}'`
+ do_facet mgs "$LCTL conf_param $spec="
+ done
+ fi
+
+ # verify no rules left
+ nrule=`do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor." | wc -l`
+ [ $nrule -ne 0 ] && error "still $nrule rules left"
+
+ # wait for the default flavor to be applied
+ # (currently the default flavor for all connections is 'null')
+ wait_flavor all2all null
+ echo "now at default flavor settings"
+}
+
+set_flavor_all()
+{
+ local flavor=${1:-null}
+
+ echo "setting all flavor to $flavor"
+
+ # FIXME need parameter to this fn
+ # and remove global vars
+ local cnt_all2all=$(calc_connection_cnt all2all)
+
+ local res=$(do_check_flavor all2all $flavor)
+ if [ $res -eq $cnt_all2all ]; then
+ echo "already have total $res $flavor connections"
+ return
+ fi
+
+ echo "found $res $flavor out of total $cnt_all2all connections"
+ restore_to_default_flavor
+
+ [[ $flavor = null ]] && return 0
+
+ set_rule $FSNAME any any $flavor
+ wait_flavor all2all $flavor
+}
+
+
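+# check_logdir: run via do_rpc_nodes on every node; create $LOGDIR when it is
+# missing, otherwise drop a per-node marker file that check_write_access() uses
+# to detect a log directory shared (and writable) across all nodes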
+check_logdir() {
+ local dir=$1
+ # Checking for shared logdir
+ if [ ! -d $dir ]; then
+ # Not found. Create local logdir
+ mkdir -p $dir
+ else
+ touch $dir/node.$(hostname).yml
+ fi
+ return 0
+}
+
+check_write_access() {
+ local dir=$1
+ for node in $(nodes_list); do
+ if [ ! -f "$dir/node.${node}.yml" ]; then
+ # Logdir not accessible/writable from this node.
+ return 1
+ fi
+ done
+ return 0
+}
+
+init_logging() {
+ if [[ -n $YAML_LOG ]]; then
+ return
+ fi
+ export YAML_LOG=${LOGDIR}/results.yml
+ mkdir -p $LOGDIR
+ init_clients_lists
+
+ do_rpc_nodes $(comma_list $(nodes_list)) check_logdir $LOGDIR
+ if check_write_access $LOGDIR; then
+ touch $LOGDIR/shared
+ echo "Logging to shared log directory: $LOGDIR"
+ else
+ echo "Logging to local directory: $LOGDIR"
+ fi
+
+ yml_nodes_file $LOGDIR
+ yml_results_file >> $YAML_LOG
+}
+
+log_test() {
+ yml_log_test $1 >> $YAML_LOG
+}
+
+log_sub_test_begin() {
+ yml_log_sub_test_begin $@ >> $YAML_LOG
+}
+
+log_sub_test_end() {
+ yml_log_sub_test_end $@ >> $YAML_LOG
+}
+
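+# run_llverdev <dev>: run a full llverdev pass on <dev>, switching to partial
+# (fast) mode when the device is larger than 10GB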
+run_llverdev()
+{
+ local dev=$1
+ local devname=$(basename $1)
+ local size=$(grep "$devname"$ /proc/partitions | awk '{print $3}')
+ # loop devices aren't in /proc/partitions; fall back to the size of the
+ # backing file in bytes, converted to 1K blocks to match /proc/partitions
+ [ "x$size" == "x" ] && local size=$(( $(ls -l $dev | awk '{print $5}') / 1024 ))
+
+ size=$(($size / 1024 / 1024)) # GB
+
+ local partial_arg=""
+ # Run in partial (fast) mode if the size
+ # of a partition > 10 GB
+ [ $size -gt 10 ] && partial_arg="-p"
+
+ llverdev --force $partial_arg $dev
+}