X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=afc2488d339a2a46d956e6ec880958486125e909;hp=41bdd078739f399549051c404d5c9b9702598fda;hb=890f223c5f8f92b086d757ff545f45ea165b7a91;hpb=b0f08a4e902411a8fbdb660e5b32c5821ca7159f;ds=sidebyside diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh index 41bdd07..afc2488 100644 --- a/lustre/tests/test-framework.sh +++ b/lustre/tests/test-framework.sh @@ -15,8 +15,8 @@ export GSS_KRB5=false export GSS_PIPEFS=false export IDENTITY_UPCALL=default export QUOTA_AUTO=1 -export JOBSTATS_AUTO=${JOBSTATS_AUTO:-1} -export JOBID_VAR=${JOBID_VAR:-"procname_uid"} +# specify environment variable containing batch job name for server statistics +export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable" # LOAD_LLOOP: LU-409: only load llite_lloop module if kernel < 2.6.32 or # LOAD_LLOOP is true. LOAD_LLOOP is false by default. @@ -118,10 +118,11 @@ print_summary () { } init_test_env() { - export LUSTRE=`absolute_path $LUSTRE` - export TESTSUITE=`basename $0 .sh` - export TEST_FAILED=false - export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false} + export LUSTRE=$(absolute_path $LUSTRE) + export TESTSUITE=$(basename $0 .sh) + export TEST_FAILED=false + export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false} + export RPC_MODE=${RPC_MODE:-false} export MKE2FS=$MKE2FS if [ -z "$MKE2FS" ]; then @@ -312,10 +313,12 @@ init_test_env() { shift $((OPTIND - 1)) ONLY=${ONLY:-$*} - # print the durations of each test if "true" - DDETAILS=${DDETAILS:-false} - [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true - rm -f $TMP/*active + # print the durations of each test if "true" + DDETAILS=${DDETAILS:-false} + [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true + if ! $RPC_MODE; then + rm -f $TMP/*active + fi } check_cpt_number() { @@ -582,6 +585,18 @@ unload_modules() { return 0 } +fs_log_size() { + local facet=${1:-$SINGLEMDS} + local fstype=$(facet_fstype $facet) + local size=0 + case $fstype in + ldiskfs) size=50;; # largest seen is 44, leave some headroom + zfs) size=256;; + esac + + echo -n $size +} + check_gss_daemon_nodes() { local list=$1 dname=$2 @@ -732,6 +747,11 @@ facet_fstype() { return fi + if [[ $facet == mgs ]] && combined_mgs_mds; then + facet_fstype mds1 + return + fi + return 1 } @@ -788,7 +808,7 @@ ostdevlabel() { } # -# This and set_obdfilter_param() shall be used to access OSD parameters +# This and set_osd_param() shall be used to access OSD parameters # once existed under "obdfilter": # # mntdev @@ -796,7 +816,7 @@ ostdevlabel() { # read_cache_enable # writethrough_cache_enable # -get_obdfilter_param() { +get_osd_param() { local nodes=$1 local device=${2:-$FSNAME-OST*} local name=$3 @@ -805,7 +825,7 @@ get_obdfilter_param() { osd-*.$device.$name 2>&1" | grep -v 'Found no match' } -set_obdfilter_param() { +set_osd_param() { local nodes=$1 local device=${2:-$FSNAME-OST*} local name=$3 @@ -1773,7 +1793,10 @@ wait_update_facet() { } sync_all_data() { - do_node $(osts_nodes) "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 | + do_nodes $(comma_list $(mdts_nodes)) \ + "lctl set_param -n osd*.*MDT*.force_sync 1" + do_nodes $(comma_list $(osts_nodes)) \ + "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 | grep -v 'Found no match' } @@ -1889,30 +1912,44 @@ wait_recovery_complete () { } wait_mds_ost_sync () { - # just because recovery is done doesn't mean we've finished - # orphan cleanup. Wait for llogs to get synchronized. - echo "Waiting for orphan cleanup..." 
- # MAX value includes time needed for MDS-OST reconnection - local MAX=$(( TIMEOUT * 2 )) - local WAIT=0 - while [ $WAIT -lt $MAX ]; do - local -a sync=($(do_nodes $(comma_list $(osts_nodes)) \ - "$LCTL get_param -n obdfilter.*.mds_sync")) - local con=1 - local i - for ((i=0; i<${#sync[@]}; i++)); do - [ ${sync[$i]} -eq 0 ] && continue - # there is a not finished MDS-OST synchronization - con=0 - break; - done - sleep 2 # increase waiting time and cover statfs cache - [ ${con} -eq 1 ] && return 0 - echo "Waiting $WAIT secs for $facet mds-ost sync done." - WAIT=$((WAIT + 2)) - done - echo "$facet recovery not done in $MAX sec. $STATUS" - return 1 + # just because recovery is done doesn't mean we've finished + # orphan cleanup. Wait for llogs to get synchronized. + echo "Waiting for orphan cleanup..." + # MAX value includes time needed for MDS-OST reconnection + local MAX=$(( TIMEOUT * 2 )) + local WAIT=0 + local new_wait=true + local list=$(comma_list $(mdts_nodes)) + local cmd="$LCTL get_param -n osp.*osc*.old_sync_processed" + if ! do_facet $SINGLEMDS \ + "$LCTL list_param osp.*osc*.old_sync_processed 2> /dev/null" + then + # old way, use mds_sync + new_wait=false + list=$(comma_list $(osts_nodes)) + cmd="$LCTL get_param -n obdfilter.*.mds_sync" + fi + while [ $WAIT -lt $MAX ]; do + local -a sync=($(do_nodes $list "$cmd")) + local con=1 + local i + for ((i=0; i<${#sync[@]}; i++)); do + if $new_wait; then + [ ${sync[$i]} -eq 1 ] && continue + else + [ ${sync[$i]} -eq 0 ] && continue + fi + # there is a not finished MDS-OST synchronization + con=0 + break; + done + sleep 2 # increase waiting time and cover statfs cache + [ ${con} -eq 1 ] && return 0 + echo "Waiting $WAIT secs for $facet mds-ost sync done." + WAIT=$((WAIT + 2)) + done + echo "$facet recovery not done in $MAX sec. $STATUS" + return 1 } wait_destroy_complete () { @@ -2159,8 +2196,12 @@ ost_evict_client() { } fail() { - facet_failover $* || error "failover: $?" - clients_up || error "post-failover df: $?" + local facets=$1 + local clients=${CLIENTS:-$HOSTNAME} + + facet_failover $* || error "failover: $?" + wait_clients_import_state "$clients" "$facets" FULL + clients_up || error "post-failover df: $?" } fail_nodf() { @@ -2394,6 +2435,24 @@ facet_active_host() { fi } +# Get the passive failover partner host of facet. 
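+# e.g. with mds1_HOST=nodeA and mds1failover_HOST=nodeB (example hostnames),
+# if mds1 is currently active on nodeA then "facet_passive_host mds1" prints
+# nodeB; nothing is printed if no distinct failover partner is configured.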
+facet_passive_host() { + local facet=$1 + [[ $facet = client ]] && return + + local host=${facet}_HOST + local failover_host=${facet}failover_HOST + local active_host=$(facet_active_host $facet) + + [[ -z ${!failover_host} || ${!failover_host} = ${!host} ]] && return + + if [[ $active_host = ${!host} ]]; then + echo -n ${!failover_host} + else + echo -n ${!host} + fi +} + change_active() { local facetlist=$1 local facet @@ -2599,9 +2658,8 @@ ostdevname() { #if $OSTDEVn isn't defined, default is $OSTDEVBASE + num eval DEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};; zfs ) - #try $OSTDEVn then $OSTDEVBASE + num then zfs default - local foo=${OSTDEVBASE:-${FSNAME}-ost${num}/ost}${num} - eval DEVPTR=${!DEVNAME:=$foo};; + #dataset name is independent of vdev device names + eval DEVPTR=${FSNAME}-ost${num}/ost${num};; * ) error "unknown fstype!";; esac @@ -2640,9 +2698,8 @@ mdsdevname() { #if $MDSDEVn isn't defined, default is $MDSDEVBASE + num eval DEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};; zfs ) - # try $MDSDEVn then $MDSDEVBASE + num then zfs default - local foo=${MDSDEVBASE:-${FSNAME}-mdt${num}/mdt}${num} - eval DEVPTR=${!DEVNAME:=$foo};; + #dataset name is independent of vdev device names + eval DEVPTR=${FSNAME}-mdt${num}/mdt${num};; * ) error "unknown fstype!";; esac @@ -2671,37 +2728,50 @@ mdsvdevname() { } mgsdevname() { - local DEVNAME=MGSDEV - local MDSDEV1=$(mdsdevname 1) - - local fstype=$(facet_fstype mds1) + local DEVPTR + local fstype=$(facet_fstype mgs) case $fstype in - ldiskfs|zfs ) - #if $MGSDEV isn't defined, default is $MDSDEV1 - #ZFS independent mgsdev should be ${FSNAME}-mgs/mgs - eval DEVPTR=${!DEVNAME:=${MDSDEV1}};; - * ) - error "unknown fstype!";; + ldiskfs ) + if [ $(facet_host mgs) = $(facet_host mds1) ] && + ( [ -z "$MGSDEV" ] || [ $MGSDEV = $(mdsdevname 1) ] ); then + DEVPTR=$(mdsdevname 1) + else + DEVPTR=$MGSDEV + fi;; + zfs ) + if [ $(facet_host mgs) = $(facet_host mds1) ] && + ( [ -z "$MGSDEV" ] || [ $MGSDEV = $(mdsvdevname 1) ] ); then + DEVPTR=$(mdsdevname 1) + else + DEVPTR=${FSNAME}-mgs/mgs + fi;; + * ) + error "unknown fstype!";; esac - echo -n $DEVPTR + echo -n $DEVPTR } mgsvdevname() { + local VDEVPTR DEVNAME=MGSDEV - local fstype=$(facet_fstype mds1) + local fstype=$(facet_fstype mgs) case $fstype in - ldiskfs ) - # vdevs are not supported by ldiskfs - eval VDEVPTR="";; - zfs ) - #if $MGSDEV isn't defined, default is $MGSDEV1 - eval VDEVPTR=${!DEVNAME:=${MDSDEV1}};; - * ) - error "unknown fstype!";; + ldiskfs ) + # vdevs are not supported by ldiskfs + ;; + zfs ) + if [ $(facet_host mgs) = $(facet_host mds1) ] && + ( [ -z "$MGSDEV" ] || [ $MGSDEV = $(mdsvdevname 1) ] ); then + VDEVPTR=$(mdsvdevname 1) + else + VDEVPTR=$MGSDEV + fi;; + * ) + error "unknown fstype!";; esac echo -n $VDEVPTR @@ -2834,6 +2904,9 @@ mkfs_opts() { if [ $fstype == ldiskfs ]; then fs_mkfs_opts+=${MDSJOURNALSIZE:+" -J size=$MDSJOURNALSIZE"} + if [ ! -z $EJOURNAL ]; then + fs_mkfs_opts+=${MDSJOURNALSIZE:+" device=$EJOURNAL"} + fi fs_mkfs_opts+=${MDSISIZE:+" -i $MDSISIZE"} fi fi @@ -2954,7 +3027,7 @@ writeconf_facet() { local dev=$2 stop ${facet} -f - rm -f ${facet}active + rm -f $TMP/${facet}active do_facet ${facet} "$TUNEFS --quiet --writeconf $dev" || return 1 return 0 } @@ -3136,18 +3209,15 @@ init_facets_vars () { if ! 
remote_mds_nodsh; then for num in $(seq $MDSCOUNT); do DEVNAME=`mdsdevname $num` - eval export MDSDEV${num}=$DEVNAME init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS done fi - eval export MGSDEV=$(mgsdevname) combined_mgs_mds || init_facet_vars mgs $(mgsdevname) $MGS_MOUNT_OPTS if ! remote_ost_nodsh; then for num in $(seq $OSTCOUNT); do DEVNAME=$(ostdevname $num) - eval export OSTDEV${num}=$DEVNAME init_facet_vars ost$num $DEVNAME $OST_MOUNT_OPTS done fi @@ -3200,26 +3270,20 @@ init_param_vars () { osc_ensure_active $SINGLEMDS $TIMEOUT osc_ensure_active client $TIMEOUT - local jobid_var - if [ -z "$(lctl get_param -n mdc.*.connect_flags | grep jobstats)" ]; then - jobid_var="none" - elif [ $JOBSTATS_AUTO -ne 0 ]; then - echo "enable jobstats, set job scheduler as $JOBID_VAR" - jobid_var=$JOBID_VAR - else - jobid_var=`$LCTL get_param -n jobid_var` - if [ $jobid_var != "disable" ]; then - echo "disable jobstats as required" - jobid_var="disable" - else - jobid_var="none" - fi - fi + if [ -n "$(lctl get_param -n mdc.*.connect_flags|grep jobstats)" ]; then + local current_jobid_var=$($LCTL get_param -n jobid_var) + + if [ $JOBID_VAR = "existing" ]; then + echo "keeping jobstats as $current_jobid_var" + elif [ $current_jobid_var != $JOBID_VAR ]; then + echo "seting jobstats to $JOBID_VAR" - if [ $jobid_var == $JOBID_VAR -o $jobid_var == "disable" ]; then - do_facet mgs $LCTL conf_param $FSNAME.sys.jobid_var=$jobid_var - wait_update $HOSTNAME "$LCTL get_param -n jobid_var" \ - $jobid_var || return 1 + set_conf_param_and_check client \ + "$LCTL get_param -n jobid_var" \ + "$FSNAME.sys.jobid_var" $JOBID_VAR + fi + else + echo "jobstats not supported by server" fi if [ $QUOTA_AUTO -ne 0 ]; then @@ -3323,17 +3387,17 @@ is_mounted () { } is_empty_dir() { - [ $(find $1 -maxdepth 1 -print | wc -l) = 1 ] && return 0 - return 1 + [ $(find $1 -maxdepth 1 -print | wc -l) = 1 ] && return 0 + return 1 } # empty lustre filesystem may have empty directories lost+found and .lustre is_empty_fs() { - [ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \ - -print | wc -l) = 1 ] || return 1 - [ ! -d $1/lost+found ] || is_empty_dir $1/lost+found && return 0 - [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre && return 0 - return 1 + [ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \ + -print | wc -l) = 1 ] || return 1 + [ ! -d $1/lost+found ] || is_empty_dir $1/lost+found || return 1 + [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre || return 1 + return 0 } check_and_setup_lustre() { @@ -3394,6 +3458,12 @@ check_and_setup_lustre() { set_default_debug_nodes $(comma_list $(nodes_list)) fi + if [ -n "$OSD_TRACK_DECLARES_LBUG" ] ; then + do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \ + "$LCTL set_param osd-*.track_declares_assert=1" \ + > /dev/null + fi + init_gss if $GSS; then set_flavor_all $SEC @@ -3437,7 +3507,7 @@ get_mnt_devs() { local dev if [ "$type" == ost ]; then - devs=$(get_obdfilter_param $node "" mntdev) + devs=$(get_osd_param $node "" mntdev) else devs=$(do_node $node \ "lctl get_param -n osd-*.$FSNAME-M*.mntdev") @@ -3524,17 +3594,55 @@ generate_db() { done } +# Run lfsck on server node if lfsck can't be found on client (LU-2571) +run_lfsck_remote() { + local cmd="$LFSCK_BIN -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT" + local client=$1 + local mounted=true + local rc=0 + + #Check if lustre is already mounted + do_rpc_nodes $client is_mounted $MOUNT || mounted=false + if ! 
$mounted; then + zconf_mount $client $MOUNT || + error "failed to mount Lustre on $client" + fi + #Run lfsck + echo $cmd + do_node $node $cmd || rc=$? + #Umount if necessary + if ! $mounted; then + zconf_umount $client $MOUNT || + error "failed to unmount Lustre on $client" + fi + + [ $rc -le $FSCK_MAX_ERR ] || + error "$cmd returned $rc, should be <= $FSCK_MAX_ERR" + echo "lfsck finished with rc=$rc" + + return $rc +} + run_lfsck() { - local cmd="$LFSCK_BIN -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT" - echo $cmd - local rc=0 - eval $cmd || rc=$? - [ $rc -le $FSCK_MAX_ERR ] || \ - error "$cmd returned $rc, should be <= $FSCK_MAX_ERR" - echo "lfsck finished with rc=$rc" + local facets="client $SINGLEMDS" + local found=false + local facet + local node + local rc=0 - rm -rvf $MDSDB* $OSTDB* || true - return 0 + for facet in $facets; do + node=$(facet_active_host $facet) + if check_progs_installed $node $LFSCK_BIN; then + found=true + break + fi + done + ! $found && error "None of \"$facets\" supports lfsck" + + run_lfsck_remote $node || rc=$? + + rm -rvf $MDSDB* $OSTDB* || true + return $rc } check_and_cleanup_lustre() { @@ -3776,11 +3884,11 @@ drop_reint_reply() { } drop_update_reply() { -# OBD_FAIL_MDS_OBJ_UPDATE_NET +# OBD_FAIL_UPDATE_OBJ_NET_REP local index=$1 shift 1 RC=0 - do_facet mds${index} lctl set_param fail_loc=0x188 + do_facet mds${index} lctl set_param fail_loc=0x1701 do_facet client "$@" || RC=$? do_facet mds${index} lctl set_param fail_loc=0 return $RC @@ -4065,7 +4173,6 @@ run_test() { return $? fi LAST_SKIPPED="y" - echo -n "." return 0 fi @@ -4206,7 +4313,10 @@ run_one() { reset_fail_loc check_grant ${testnum} || error "check_grant $testnum failed with $?" check_catastrophe || error "LBUG/LASSERT detected" - ps auxww | grep -v grep | grep -q multiop && error "multiop still running" + if [ "$PARALLEL" != "yes" ]; then + ps auxww | grep -v grep | grep -q multiop && + error "multiop still running" + fi unset TESTNAME unset tdir umask $SAVE_UMASK @@ -4439,64 +4549,121 @@ local_mode () $(single_local_node $(comma_list $(nodes_list))) } -mdts_nodes () { - local MDSNODES - local NODES_sort - for num in `seq $MDSCOUNT`; do - MDSNODES="$MDSNODES $(facet_host mds$num)" - done - NODES_sort=$(for i in $MDSNODES; do echo $i; done | sort -u) - - echo $NODES_sort -} - remote_servers () { remote_ost && remote_mds } +# Get the active nodes for facets. facets_nodes () { - local facets=$1 - local nodes - local NODES_sort + local facets=$1 + local facet + local nodes + local nodes_sort + local i - for facet in ${facets//,/ }; do - if [ "$FAILURE_MODE" = HARD ]; then - nodes="$nodes $(facet_active_host $facet)" - else - nodes="$nodes $(facet_host $facet)" - fi - done - NODES_sort=$(for i in $nodes; do echo $i; done | sort -u) + for facet in ${facets//,/ }; do + nodes="$nodes $(facet_active_host $facet)" + done - echo $NODES_sort + nodes_sort=$(for i in $nodes; do echo $i; done | sort -u) + echo -n $nodes_sort } -osts_nodes () { - local facets=$(get_facets OST) - local nodes=$(facets_nodes $facets) +# Get all of the active MDS nodes. +mdts_nodes () { + echo -n $(facets_nodes $(get_facets MDS)) +} - echo $nodes +# Get all of the active OSS nodes. +osts_nodes () { + echo -n $(facets_nodes $(get_facets OST)) } +# Get all of the client nodes and active server nodes. nodes_list () { - # FIXME. 
We need a list of clients - local myNODES=$HOSTNAME - local myNODES_sort - - # CLIENTS (if specified) contains the local client - [ -n "$CLIENTS" ] && myNODES=${CLIENTS//,/ } + local nodes=$HOSTNAME + local nodes_sort + local i - if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then - myNODES="$myNODES $(facets_nodes $(get_facets))" - fi + # CLIENTS (if specified) contains the local client + [ -n "$CLIENTS" ] && nodes=${CLIENTS//,/ } - myNODES_sort=$(for i in $myNODES; do echo $i; done | sort -u) + if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then + nodes="$nodes $(facets_nodes $(get_facets))" + fi - echo $myNODES_sort + nodes_sort=$(for i in $nodes; do echo $i; done | sort -u) + echo -n $nodes_sort } +# Get all of the remote client nodes and remote active server nodes. remote_nodes_list () { - echo $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g" + echo -n $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g" +} + +# Get all of the MDS nodes, including active and passive nodes. +all_mdts_nodes () { + local host + local failover_host + local nodes + local nodes_sort + local i + + for i in $(seq $MDSCOUNT); do + host=mds${i}_HOST + failover_host=mds${i}failover_HOST + nodes="$nodes ${!host} ${!failover_host}" + done + + nodes_sort=$(for i in $nodes; do echo $i; done | sort -u) + echo -n $nodes_sort +} + +# Get all of the OSS nodes, including active and passive nodes. +all_osts_nodes () { + local host + local failover_host + local nodes + local nodes_sort + local i + + for i in $(seq $OSTCOUNT); do + host=ost${i}_HOST + failover_host=ost${i}failover_HOST + nodes="$nodes ${!host} ${!failover_host}" + done + + nodes_sort=$(for i in $nodes; do echo $i; done | sort -u) + echo -n $nodes_sort +} + +# Get all of the server nodes, including active and passive nodes. +all_server_nodes () { + local nodes + local nodes_sort + local i + + nodes="$mgs_HOST $mgsfailover_HOST $(all_mdts_nodes) $(all_osts_nodes)" + + nodes_sort=$(for i in $nodes; do echo $i; done | sort -u) + echo -n $nodes_sort +} + +# Get all of the client and server nodes, including active and passive nodes. 
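+# Unlike nodes_list(), which only returns the currently active server nodes,
+# this list also covers the configured failover partners of every facet.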
+all_nodes () { + local nodes=$HOSTNAME + local nodes_sort + local i + + # CLIENTS (if specified) contains the local client + [ -n "$CLIENTS" ] && nodes=${CLIENTS//,/ } + + if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then + nodes="$nodes $(all_server_nodes)" + fi + + nodes_sort=$(for i in $nodes; do echo $i; done | sort -u) + echo -n $nodes_sort } init_clients_lists () { @@ -4797,21 +4964,33 @@ calc_osc_kbytes () { $LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum } -# save_lustre_params(node, parameter_mask) -# generate a stream of formatted strings ( =) +# save_lustre_params(comma separated facet list, parameter_mask) +# generate a stream of formatted strings ( =) save_lustre_params() { - local s - do_nodesv $1 "lctl get_param $2 | while read s; do echo \\\$s; done" + local facets=$1 + local facet + local nodes + local node + + for facet in ${facets//,/ }; do + node=$(facet_active_host $facet) + [[ *\ $node\ * = " $nodes " ]] && continue + nodes="$nodes $node" + + do_node $node "$LCTL get_param $2 | + while read s; do echo $facet \\\$s; done" + done } # restore lustre parameters from input stream, produces by save_lustre_params restore_lustre_params() { - local node - local name - local val - while IFS=" =" read node name val; do - do_node ${node//:/} "lctl set_param -n $name $val" - done + local facet + local name + local val + + while IFS=" =" read facet name val; do + do_facet $facet "$LCTL set_param -n $name $val" + done } check_catastrophe() { @@ -4821,9 +5000,17 @@ check_catastrophe() { [ -z "$rnodes" ] && return 0 - do_nodes "$rnodes" "rc=\\\$([ -f $C ] && echo \\\$(< $C) || echo 0); + local data + data=$(do_nodes "$rnodes" "rc=\\\$([ -f $C ] && + echo \\\$(< $C) || echo 0); if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi - exit \\\$rc;" + exit \\\$rc") + local rc=$? + if [ -n "$data" ]; then + echo $data + return $rc + fi + return 0 } # CMD: determine mds index where directory inode presents @@ -4854,10 +5041,11 @@ get_mds_dir () { } mdsrate_cleanup () { - if [ -d $4 ]; then - mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6 - rmdir $4 - fi + if [ -d $4 ]; then + mpi_run -np $1 ${MACHINEFILE_OPTION} $2 ${MDSRATE} --unlink \ + --nfiles $3 --dir $4 --filefmt $5 $6 + rmdir $4 + fi } delayed_recovery_enabled () { @@ -4884,7 +5072,7 @@ convert_facet2label() { } get_clientosc_proc_path() { - echo "${1}-osc-[^M]*" + echo "${1}-osc-*" } get_lustre_version () { @@ -4948,7 +5136,7 @@ _wait_import_state () { local CONN_STATE local i=0 - CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2) + CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2 | uniq) while [ "${CONN_STATE}" != "${expected}" ]; do if [ "${expected}" == "DISCONN" ]; then # for disconn we can check after proc entry is removed @@ -4961,7 +5149,8 @@ _wait_import_state () { error "can't put import for $CONN_PROC into ${expected} state after $i sec, have ${CONN_STATE}" && \ return 1 sleep 1 - CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2) + # Add uniq for multi-mount case + CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2 | uniq) i=$(($i + 1)) done @@ -4980,6 +5169,14 @@ wait_import_state() { done } +wait_import_state_mount() { + if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then + return 0 + fi + + wait_import_state $* +} + # One client request could be timed out because server was not ready # when request was sent by client. 
# The request timeout calculation details : @@ -5020,20 +5217,42 @@ request_timeout () { echo $(( init_connect_timeout + at_min )) } -wait_osc_import_state() { +_wait_osc_import_state() { local facet=$1 local ost_facet=$2 local expected=$3 local ost=$(get_osc_import_name $facet $ost_facet) - local param="osc.${ost}.ost_server_uuid" + local param="osc.${ost}.ost_server_uuid" + local i=0 # 1. wait the deadline of client 1st request (it could be skipped) # 2. wait the deadline of client 2nd request local maxtime=$(( 2 * $(request_timeout $facet))) - if ! do_rpc_nodes "$(facet_host $facet)" \ - _wait_import_state $expected $param $maxtime; then + #During setup time, the osc might not be setup, it need wait + #until list_param can return valid value. And also if there + #are mulitple osc entries we should list all of them before + #go to wait. + local params=$($LCTL list_param $param 2>/dev/null || true) + while [ -z "$params" ]; do + if [ $i -ge $maxtime ]; then + echo "can't get $param by list_param in $maxtime secs" + if [[ $facet != client* ]]; then + echo "Go with $param directly" + params=$param + break + else + return 1 + fi + fi + sleep 1 + i=$((i + 1)) + params=$($LCTL list_param $param 2>/dev/null || true) + done + + if ! do_rpc_nodes "$(facet_active_host $facet)" \ + wait_import_state $expected "$params" $maxtime; then error "import is not in ${expected} state" return 1 fi @@ -5041,6 +5260,21 @@ wait_osc_import_state() { return 0 } +wait_osc_import_state() { + local facet=$1 + local ost_facet=$2 + local expected=$3 + local num + + if [[ $facet = mds ]]; then + for num in $(seq $MDSCOUNT); do + _wait_osc_import_state mds$num "$ost_facet" "$expected" + done + else + _wait_osc_import_state "$facet" "$ost_facet" "$expected" + fi +} + get_clientmdc_proc_path() { echo "${1}-mdc-*" } @@ -5052,7 +5286,9 @@ do_rpc_nodes () { [ -z "$list" ] && return 0 # Add paths to lustre tests for 32 and 64 bit systems. - local RPATH="PATH=$RLUSTRE/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:$PATH" + local LIBPATH="/usr/lib/lustre/tests:/usr/lib64/lustre/tests:" + local TESTPATH="$RLUSTRE/tests:" + local RPATH="PATH=${TESTPATH}${LIBPATH}${PATH}:/sbin:/bin:/usr/sbin:" do_nodesv $list "${RPATH} NAME=${NAME} sh rpc.sh $@ " } @@ -5078,7 +5314,7 @@ wait_clients_import_state () { local params=$(expand_list $params $proc_path) done - if ! do_rpc_nodes "$list" wait_import_state $expected $params; then + if ! do_rpc_nodes "$list" wait_import_state_mount $expected $params; then error "import is not in ${expected} state" return 1 fi @@ -5681,6 +5917,34 @@ run_llverfs() llverfs $partial_arg $llverfs_opts $dir } +#Remove objects from OST +remove_ost_objects() { + shift + local ostdev=$1 + local group=$2 + shift 2 + local objids="$@" + local facet=ost$((OSTIDX + 1)) + local mntpt=$(facet_mntpt $facet) + local opts=$OST_MOUNT_OPTS + local i + local rc + + echo "removing objects from $ostdev on $facet: $objids" + if ! do_facet $facet test -b $ostdev; then + opts=$(csa_add "$opts" -o loop) + fi + mount -t $(facet_fstype $facet) $opts $ostdev $mntpt || + return $? + rc=0; + for i in $objids; do + rm $mntpt/O/$group/d$((i % 32))/$i || { rc=$?; break; } + done + umount -f $mntpt || return $? + return $rc +} + +#Remove files from MDT remove_mdt_files() { local facet=$1 local mdtdev=$2 @@ -5691,7 +5955,7 @@ remove_mdt_files() { echo "removing files from $mdtdev on $facet: $files" if [ $(facet_fstype $facet) == ldiskfs ] && - ! do_facet $facet test -b ${!dev}; then + ! 
do_facet $facet test -b $mdtdev; then opts=$(csa_add "$opts" -o loop) fi mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt || @@ -5715,7 +5979,7 @@ duplicate_mdt_files() { echo "duplicating files on $mdtdev on $facet: $files" mkdir -p $mntpt || return $? if [ $(facet_fstype $facet) == ldiskfs ] && - ! do_facet $facet test -b ${!dev}; then + ! do_facet $facet test -b $mdtdev; then opts=$(csa_add "$opts" -o loop) fi mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt || @@ -5857,6 +6121,7 @@ mds_backup_restore() { local metadata=${TMP}/backup_restore.tgz local opts=${MDS_MOUNT_OPTS} local svc=${SINGLEMDS}_svc + local igif=$1 if ! ${rcmd} test -b ${devname}; then opts=$(csa_add "$opts" -o loop) @@ -5870,6 +6135,10 @@ mds_backup_restore() { ${rcmd} rm -f $metaea $metadata # step 3: mount dev ${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 1 + if [ ! -z $igif ]; then + # step 3.5: rm .lustre + ${rcmd} rm -rf $mntpt/ROOT/.lustre || return 1 + fi # step 4: backup metaea echo "backup EA" ${rcmd} "cd $mntpt && getfattr -R -d -m '.*' -P . > $metaea && cd -" || @@ -5883,8 +6152,9 @@ mds_backup_restore() { reformat_external_journal || return 5 # step 8: reformat dev echo "reformat new device" - add ${SINGLEMDS} $(mkfs_opts ${SINGLEMDS}) --backfstype ldiskfs \ - --reformat $devname > /dev/null || return 6 + add ${SINGLEMDS} $(mkfs_opts ${SINGLEMDS} ${devname}) --backfstype \ + ldiskfs --reformat ${devname} $(mdsvdevname 1) > /dev/null || + exit 6 # step 9: mount dev ${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 7 # step 10: restore metadata