export GSS_PIPEFS=false
export IDENTITY_UPCALL=default
export QUOTA_AUTO=1
-export JOBSTATS_AUTO=${JOBSTATS_AUTO:-1}
-export JOBID_VAR=${JOBID_VAR:-"procname_uid"}
+# specify environment variable containing batch job name for server statistics
+export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable"
# LOAD_LLOOP: LU-409: only load llite_lloop module if kernel < 2.6.32 or
# LOAD_LLOOP is true. LOAD_LLOOP is false by default.
return 0
}
+# Echo (no trailing newline) a log-size heuristic for the facet's backing
+# filesystem type; 0 for unknown fstypes.  Defaults to $SINGLEMDS.
+# (Units presumably MB — TODO confirm against callers.)
+fs_log_size() {
+	local facet=${1:-$SINGLEMDS}
+	local fstype=$(facet_fstype $facet)
+	local size=0
+	case $fstype in
+		ldiskfs) size=50;; # largest seen is 44, leave some headroom
+		zfs) size=256;;
+	esac
+
+	echo -n $size
+}
+
check_gss_daemon_nodes() {
local list=$1
dname=$2
}
sync_all_data() {
-	do_node $(osts_nodes) "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 |
+	# force a sync on all MDTs, then on all OSTs; the 'Found no match'
+	# output from nodes lacking a matching force_sync param is filtered
+	do_nodes $(comma_list $(mdts_nodes)) \
+		"lctl set_param -n osd*.*MDT*.force_sync 1"
+	do_nodes $(comma_list $(osts_nodes)) \
+		"lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 |
		grep -v 'Found no match'
}
}
wait_mds_ost_sync () {
-	# just because recovery is done doesn't mean we've finished
-	# orphan cleanup. Wait for llogs to get synchronized.
-	echo "Waiting for orphan cleanup..."
-	# MAX value includes time needed for MDS-OST reconnection
-	local MAX=$(( TIMEOUT * 2 ))
-	local WAIT=0
-	while [ $WAIT -lt $MAX ]; do
-		local -a sync=($(do_nodes $(comma_list $(mdts_nodes)) \
-			"$LCTL get_param -n osp.*osc*.old_sync_processed"))
-		local con=1
-		local i
-		for ((i=0; i<${#sync[@]}; i++)); do
-			[ ${sync[$i]} -eq 1 ] && continue
-			# there is a not finished MDS-OST synchronization
-			con=0
-			break;
-		done
-		sleep 2 # increase waiting time and cover statfs cache
-		[ ${con} -eq 1 ] && return 0
-		echo "Waiting $WAIT secs for $facet mds-ost sync done."
-		WAIT=$((WAIT + 2))
-	done
-	echo "$facet recovery not done in $MAX sec. $STATUS"
-	return 1
+	# just because recovery is done doesn't mean we've finished
+	# orphan cleanup. Wait for llogs to get synchronized.
+	echo "Waiting for orphan cleanup..."
+	# MAX value includes time needed for MDS-OST reconnection
+	local MAX=$(( TIMEOUT * 2 ))
+	local WAIT=0
+	local new_wait=true
+	local list=$(comma_list $(mdts_nodes))
+	local cmd="$LCTL get_param -n osp.*osc*.old_sync_processed"
+	# osp.*.old_sync_processed only exists on newer servers; fall back
+	# to polling obdfilter.*.mds_sync on the OSTs when it is missing
+	if ! do_facet $SINGLEMDS \
+		"$LCTL list_param osp.*osc*.old_sync_processed 2> /dev/null"
+	then
+		# old way, use mds_sync
+		new_wait=false
+		list=$(comma_list $(osts_nodes))
+		cmd="$LCTL get_param -n obdfilter.*.mds_sync"
+	fi
+	while [ $WAIT -lt $MAX ]; do
+		local -a sync=($(do_nodes $list "$cmd"))
+		local con=1
+		local i
+		for ((i=0; i<${#sync[@]}; i++)); do
+			# new interface: 1 means processed; old: 0 means idle
+			if $new_wait; then
+				[ ${sync[$i]} -eq 1 ] && continue
+			else
+				[ ${sync[$i]} -eq 0 ] && continue
+			fi
+			# there is a not finished MDS-OST synchronization
+			con=0
+			break;
+		done
+		sleep 2 # increase waiting time and cover statfs cache
+		[ ${con} -eq 1 ] && return 0
+		echo "Waiting $WAIT secs for mds-ost sync done."
+		WAIT=$((WAIT + 2))
+	done
+	echo "mds-ost sync not done in $MAX sec"
+	return 1
}
wait_destroy_complete () {
}
fail() {
-	facet_failover $* || error "failover: $?"
-	clients_up || error "post-failover df: $?"
+	local facets=$1
+	local clients=${CLIENTS:-$HOSTNAME}
+
+	facet_failover $* || error "failover: $?"
+	# wait until the clients' imports to the failed-over facets are
+	# FULL again before running the post-failover client check
+	wait_clients_import_state "$clients" "$facets" FULL
+	clients_up || error "post-failover df: $?"
}
fail_nodf() {
fi
}
+# Get the passive failover partner host of facet.
+# Echoes nothing for "client" and for facets with no (or an identical)
+# failover host configured; otherwise echoes (no newline) whichever of
+# ${facet}_HOST / ${facet}failover_HOST is not the currently active host.
+facet_passive_host() {
+	local facet=$1
+	[[ $facet = client ]] && return
+
+	# names of the per-facet host variables, read via ${!var} indirection
+	local host=${facet}_HOST
+	local failover_host=${facet}failover_HOST
+	local active_host=$(facet_active_host $facet)
+
+	[[ -z ${!failover_host} || ${!failover_host} = ${!host} ]] && return
+
+	if [[ $active_host = ${!host} ]]; then
+		echo -n ${!failover_host}
+	else
+		echo -n ${!host}
+	fi
+}
+
change_active() {
local facetlist=$1
local facet
if [ $fstype == ldiskfs ]; then
fs_mkfs_opts+=${MDSJOURNALSIZE:+" -J size=$MDSJOURNALSIZE"}
+ if [ ! -z $EJOURNAL ]; then
+ fs_mkfs_opts+=${MDSJOURNALSIZE:+" device=$EJOURNAL"}
+ fi
fs_mkfs_opts+=${MDSISIZE:+" -i $MDSISIZE"}
fi
fi
local dev=$2
stop ${facet} -f
- rm -f ${facet}active
+ rm -f $TMP/${facet}active
do_facet ${facet} "$TUNEFS --quiet --writeconf $dev" || return 1
return 0
}
osc_ensure_active $SINGLEMDS $TIMEOUT
osc_ensure_active client $TIMEOUT
- local jobid_var
- if [ -z "$(lctl get_param -n mdc.*.connect_flags | grep jobstats)" ]; then
- jobid_var="none"
- elif [ $JOBSTATS_AUTO -ne 0 ]; then
- echo "enable jobstats, set job scheduler as $JOBID_VAR"
- jobid_var=$JOBID_VAR
- else
- jobid_var=`$LCTL get_param -n jobid_var`
- if [ $jobid_var != "disable" ]; then
- echo "disable jobstats as required"
- jobid_var="disable"
- else
- jobid_var="none"
- fi
- fi
+	if [ -n "$(lctl get_param -n mdc.*.connect_flags|grep jobstats)" ]; then
+		local current_jobid_var=$($LCTL get_param -n jobid_var)
-	if [ $jobid_var == $JOBID_VAR -o $jobid_var == "disable" ]; then
-		do_facet mgs $LCTL conf_param $FSNAME.sys.jobid_var=$jobid_var
-		wait_update $HOSTNAME "$LCTL get_param -n jobid_var" \
-			$jobid_var || return 1
+		# JOBID_VAR=existing keeps whatever the server already uses
+		if [ $JOBID_VAR = "existing" ]; then
+			echo "keeping jobstats as $current_jobid_var"
+		elif [ $current_jobid_var != $JOBID_VAR ]; then
+			echo "setting jobstats to $JOBID_VAR"
+
+			set_conf_param_and_check client \
+				"$LCTL get_param -n jobid_var" \
+				"$FSNAME.sys.jobid_var" $JOBID_VAR
+		fi
+	else
+		echo "jobstats not supported by server"
	fi
if [ $QUOTA_AUTO -ne 0 ]; then
}
is_empty_dir() {
- [ $(find $1 -maxdepth 1 -print | wc -l) = 1 ] && return 0
- return 1
+ [ $(find $1 -maxdepth 1 -print | wc -l) = 1 ] && return 0
+ return 1
}
# empty lustre filesystem may have empty directories lost+found and .lustre
is_empty_fs() {
- [ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \
- -print | wc -l) = 1 ] || return 1
- [ ! -d $1/lost+found ] || is_empty_dir $1/lost+found && return 0
- [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre && return 0
- return 1
+ [ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \
+ -print | wc -l) = 1 ] || return 1
+ [ ! -d $1/lost+found ] || is_empty_dir $1/lost+found || return 1
+ [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre || return 1
+ return 0
}
check_and_setup_lustre() {
set_default_debug_nodes $(comma_list $(nodes_list))
fi
+ if [ -n "$OSD_TRACK_DECLARES_LBUG" ] ; then
+ do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
+ "$LCTL set_param osd-*.track_declares_assert=1" \
+ > /dev/null
+ fi
+
init_gss
if $GSS; then
set_flavor_all $SEC
done
}
+# Run lfsck on server node if lfsck can't be found on client (LU-2571)
+# $1 - node to run lfsck on; mounts/unmounts $MOUNT there if needed.
+# Returns lfsck's exit status; errors out if it exceeds $FSCK_MAX_ERR.
+run_lfsck_remote() {
+	local cmd="$LFSCK_BIN -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT"
+	local client=$1
+	local mounted=true
+	local rc=0
+
+	#Check if lustre is already mounted
+	do_rpc_nodes $client is_mounted $MOUNT || mounted=false
+	if ! $mounted; then
+		zconf_mount $client $MOUNT ||
+			error "failed to mount Lustre on $client"
+	fi
+	#Run lfsck
+	echo $cmd
+	# run on $client, the parameter of this function; previously this
+	# used $node, which only worked via the caller's dynamic scope
+	do_node $client $cmd || rc=$?
+	#Umount if necessary
+	if ! $mounted; then
+		zconf_umount $client $MOUNT ||
+			error "failed to unmount Lustre on $client"
+	fi
+
+	[ $rc -le $FSCK_MAX_ERR ] ||
+		error "$cmd returned $rc, should be <= $FSCK_MAX_ERR"
+	echo "lfsck finished with rc=$rc"
+
+	return $rc
+}
+
run_lfsck() {
-	local cmd="$LFSCK_BIN -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT"
-	echo $cmd
-	local rc=0
-	eval $cmd || rc=$?
-	[ $rc -le $FSCK_MAX_ERR ] || \
-		error "$cmd returned $rc, should be <= $FSCK_MAX_ERR"
-	echo "lfsck finished with rc=$rc"
+	# find the first of client/$SINGLEMDS with $LFSCK_BIN installed and
+	# run lfsck there (LU-2571: the client may not have lfsck)
+	local facets="client $SINGLEMDS"
+	local found=false
+	local facet
+	local node
+	local rc=0
-	rm -rvf $MDSDB* $OSTDB* || true
-	return 0
+	for facet in $facets; do
+		node=$(facet_active_host $facet)
+		if check_progs_installed $node $LFSCK_BIN; then
+			found=true
+			break
+		fi
+	done
+	! $found && error "None of \"$facets\" supports lfsck"
+
+	run_lfsck_remote $node || rc=$?
+
+	# always remove the databases, even when lfsck failed
+	rm -rvf $MDSDB* $OSTDB* || true
+	return $rc
}
check_and_cleanup_lustre() {
}
drop_update_reply() {
-# OBD_FAIL_UPDATE_OBJ_NET
+# OBD_FAIL_UPDATE_OBJ_NET_REP
local index=$1
shift 1
RC=0
- do_facet mds${index} lctl set_param fail_loc=0x1500
+ do_facet mds${index} lctl set_param fail_loc=0x1701
do_facet client "$@" || RC=$?
do_facet mds${index} lctl set_param fail_loc=0
return $RC
return $?
fi
LAST_SKIPPED="y"
- echo -n "."
return 0
fi
$(single_local_node $(comma_list $(nodes_list)))
}
-mdts_nodes () {
- local MDSNODES
- local NODES_sort
- for num in `seq $MDSCOUNT`; do
- MDSNODES="$MDSNODES $(facet_host mds$num)"
- done
- NODES_sort=$(for i in $MDSNODES; do echo $i; done | sort -u)
-
- echo $NODES_sort
-}
-
remote_servers () {
remote_ost && remote_mds
}
+# Get the active nodes for facets.
facets_nodes () {
-	local facets=$1
-	local nodes
-	local NODES_sort
+	local facets=$1
+	local facet
+	local nodes
+	local nodes_sort
+	local i
-	for facet in ${facets//,/ }; do
-		if [ "$FAILURE_MODE" = HARD ]; then
-			nodes="$nodes $(facet_active_host $facet)"
-		else
-			nodes="$nodes $(facet_host $facet)"
-		fi
-	done
-	NODES_sort=$(for i in $nodes; do echo $i; done | sort -u)
+	# always resolve the currently active host, independent of
+	# FAILURE_MODE (the old code only did so for HARD)
+	for facet in ${facets//,/ }; do
+		nodes="$nodes $(facet_active_host $facet)"
+	done
-	echo $NODES_sort
+	# emit the unique, sorted node list without a trailing newline
+	nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+	echo -n $nodes_sort
}
-osts_nodes () {
- local facets=$(get_facets OST)
- local nodes=$(facets_nodes $facets)
+# Get all of the active MDS nodes.
+mdts_nodes () {
+ echo -n $(facets_nodes $(get_facets MDS))
+}
- echo $nodes
+# Get all of the active OSS nodes.
+osts_nodes () {
+ echo -n $(facets_nodes $(get_facets OST))
}
+# Get all of the client nodes and active server nodes.
nodes_list () {
- # FIXME. We need a list of clients
- local myNODES=$HOSTNAME
- local myNODES_sort
-
- # CLIENTS (if specified) contains the local client
- [ -n "$CLIENTS" ] && myNODES=${CLIENTS//,/ }
+ local nodes=$HOSTNAME
+ local nodes_sort
+ local i
- if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
- myNODES="$myNODES $(facets_nodes $(get_facets))"
- fi
+ # CLIENTS (if specified) contains the local client
+ [ -n "$CLIENTS" ] && nodes=${CLIENTS//,/ }
- myNODES_sort=$(for i in $myNODES; do echo $i; done | sort -u)
+ if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
+ nodes="$nodes $(facets_nodes $(get_facets))"
+ fi
- echo $myNODES_sort
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
}
+# Get all of the remote client nodes and remote active server nodes.
remote_nodes_list () {
- echo $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g"
+ echo -n $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g"
+}
+
+# Get all of the MDS nodes, including active and passive nodes.
+all_mdts_nodes () {
+	local host
+	local failover_host
+	local nodes
+	local nodes_sort
+	local i
+
+	for i in $(seq $MDSCOUNT); do
+		# ${!var} indirection; an unset failover var expands empty
+		host=mds${i}_HOST
+		failover_host=mds${i}failover_HOST
+		nodes="$nodes ${!host} ${!failover_host}"
+	done
+
+	nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+	echo -n $nodes_sort
+}
+
+# Get all of the OSS nodes, including active and passive nodes.
+all_osts_nodes () {
+	local host
+	local failover_host
+	local nodes
+	local nodes_sort
+	local i
+
+	for i in $(seq $OSTCOUNT); do
+		# ${!var} indirection; an unset failover var expands empty
+		host=ost${i}_HOST
+		failover_host=ost${i}failover_HOST
+		nodes="$nodes ${!host} ${!failover_host}"
+	done
+
+	nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+	echo -n $nodes_sort
+}
+
+# Get all of the server nodes, including active and passive nodes.
+all_server_nodes () {
+ local nodes
+ local nodes_sort
+ local i
+
+ nodes="$mgs_HOST $mgsfailover_HOST $(all_mdts_nodes) $(all_osts_nodes)"
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
+}
+
+# Get all of the client and server nodes, including active and passive nodes.
+all_nodes () {
+ local nodes=$HOSTNAME
+ local nodes_sort
+ local i
+
+ # CLIENTS (if specified) contains the local client
+ [ -n "$CLIENTS" ] && nodes=${CLIENTS//,/ }
+
+ if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
+ nodes="$nodes $(all_server_nodes)"
+ fi
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
}
init_clients_lists () {
$LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum
}
-# save_lustre_params(node, parameter_mask)
-# generate a stream of formatted strings (<node> <param name>=<param value>)
+# save_lustre_params(comma separated facet list, parameter_mask)
+# generate a stream of formatted strings (<facet> <param name>=<param value>)
save_lustre_params() {
-	local s
-	do_nodesv $1 "lctl get_param $2 | while read s; do echo \\\$s; done"
+	local facets=$1
+	local facet
+	local nodes
+	local node
+
+	for facet in ${facets//,/ }; do
+		node=$(facet_active_host $facet)
+		# skip facets whose active node was already dumped;
+		# [[ string = pattern ]] — the string must be on the left
+		[[ " $nodes " = *\ $node\ * ]] && continue
+		nodes="$nodes $node"
+
+		do_node $node "$LCTL get_param $2 |
+			while read s; do echo $facet \\\$s; done"
+	done
}
# restore lustre parameters from input stream, produces by save_lustre_params
restore_lustre_params() {
-	local node
-	local name
-	local val
-	while IFS=" =" read node name val; do
-		do_node ${node//:/} "lctl set_param -n $name $val"
-	done
+	local facet
+	local name
+	local val
+
+	# input lines look like "<facet> <param>=<value>",
+	# the stream produced by save_lustre_params
+	while IFS=" =" read facet name val; do
+		do_facet $facet "$LCTL set_param -n $name $val"
+	done
}
check_catastrophe() {
	[ -z "$rnodes" ] && return 0
-	do_nodes "$rnodes" "rc=\\\$([ -f $C ] && echo \\\$(< $C) || echo 0);
+	local data
+	# collect "<hostname>: <count>" lines from nodes whose $C file
+	# holds a non-zero value; do_nodes exits non-zero in that case
+	data=$(do_nodes "$rnodes" "rc=\\\$([ -f $C ] &&
+		echo \\\$(< $C) || echo 0);
		if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi
-		exit \\\$rc;"
+		exit \\\$rc")
+	local rc=$?
+	if [ -n "$data" ]; then
+		echo $data
+		return $rc
+	fi
+	return 0
}
# CMD: determine mds index where directory inode presents
}
get_clientosc_proc_path() {
- echo "${1}-osc-[^M]*"
+ echo "${1}-osc-*"
}
get_lustre_version () {
local CONN_STATE
local i=0
- CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
+ CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2 | uniq)
while [ "${CONN_STATE}" != "${expected}" ]; do
if [ "${expected}" == "DISCONN" ]; then
# for disconn we can check after proc entry is removed
error "can't put import for $CONN_PROC into ${expected} state after $i sec, have ${CONN_STATE}" && \
return 1
sleep 1
- CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
+ # Add uniq for multi-mount case
+ CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2 | uniq)
i=$(($i + 1))
done
done
}
+# wait_import_state, but a no-op when neither $MOUNT nor $MOUNT2 is
+# mounted (nothing to wait for).  All arguments are passed through.
+wait_import_state_mount() {
+	if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then
+		return 0
+	fi
+
+	wait_import_state $*
+}
+
# One client request could be timed out because server was not ready
# when request was sent by client.
# The request timeout calculation details :
echo $(( init_connect_timeout + at_min ))
}
-wait_osc_import_state() {
+_wait_osc_import_state() {
local facet=$1
local ost_facet=$2
local expected=$3
local ost=$(get_osc_import_name $facet $ost_facet)
- local param="osc.${ost}.ost_server_uuid"
+ local param="osc.${ost}.ost_server_uuid"
+ local i=0
# 1. wait the deadline of client 1st request (it could be skipped)
# 2. wait the deadline of client 2nd request
local maxtime=$(( 2 * $(request_timeout $facet)))
- if ! do_rpc_nodes "$(facet_host $facet)" \
- _wait_import_state $expected $param $maxtime; then
+ #During setup time, the osc might not be setup, it need wait
+ #until list_param can return valid value. And also if there
+ #are mulitple osc entries we should list all of them before
+ #go to wait.
+ local params=$($LCTL list_param $param 2>/dev/null || true)
+ while [ -z "$params" ]; do
+ if [ $i -ge $maxtime ]; then
+ echo "can't get $param by list_param in $maxtime secs"
+ if [[ $facet != client* ]]; then
+ echo "Go with $param directly"
+ params=$param
+ break
+ else
+ return 1
+ fi
+ fi
+ sleep 1
+ i=$((i + 1))
+ params=$($LCTL list_param $param 2>/dev/null || true)
+ done
+
+ if ! do_rpc_nodes "$(facet_active_host $facet)" \
+ wait_import_state $expected "$params" $maxtime; then
error "import is not in ${expected} state"
return 1
fi
return 0
}
+# Wait for the osc import of facet ($1) to ost_facet ($2) to reach the
+# expected state ($3).  The pseudo-facet "mds" fans out to mds1..mdsN.
+wait_osc_import_state() {
+	local facet=$1
+	local ost_facet=$2
+	local expected=$3
+	local num
+
+	if [[ $facet = mds ]]; then
+		for num in $(seq $MDSCOUNT); do
+			_wait_osc_import_state mds$num "$ost_facet" "$expected"
+		done
+	else
+		_wait_osc_import_state "$facet" "$ost_facet" "$expected"
+	fi
+}
+
get_clientmdc_proc_path() {
echo "${1}-mdc-*"
}
[ -z "$list" ] && return 0
# Add paths to lustre tests for 32 and 64 bit systems.
- local RPATH="PATH=$RLUSTRE/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:$PATH"
+ local LIBPATH="/usr/lib/lustre/tests:/usr/lib64/lustre/tests:"
+ local TESTPATH="$RLUSTRE/tests:"
+ local RPATH="PATH=${TESTPATH}${LIBPATH}${PATH}:/sbin:/bin:/usr/sbin:"
do_nodesv $list "${RPATH} NAME=${NAME} sh rpc.sh $@ "
}
local params=$(expand_list $params $proc_path)
done
- if ! do_rpc_nodes "$list" wait_import_state $expected $params; then
+ if ! do_rpc_nodes "$list" wait_import_state_mount $expected $params; then
error "import is not in ${expected} state"
return 1
fi
llverfs $partial_arg $llverfs_opts $dir
}
+#Remove objects from OST
+# NOTE(review): $1 is discarded by the shift below and the facet is
+# derived from the global OSTIDX — confirm callers set OSTIDX.
+remove_ost_objects() {
+	shift
+	local ostdev=$1
+	local group=$2
+	shift 2
+	local objids="$@"
+	local facet=ost$((OSTIDX + 1))
+	local mntpt=$(facet_mntpt $facet)
+	local opts=$OST_MOUNT_OPTS
+	local i
+	local rc
+
+	echo "removing objects from $ostdev on $facet: $objids"
+	if ! do_facet $facet test -b $ostdev; then
+		opts=$(csa_add "$opts" -o loop)
+	fi
+	# NOTE(review): mount/rm/umount below run locally, while the
+	# block-device test above runs via do_facet — confirm $facet is
+	# always the local host when this is called.
+	mount -t $(facet_fstype $facet) $opts $ostdev $mntpt ||
+		return $?
+	rc=0;
+	for i in $objids; do
+		# objects are laid out as O/<group>/d<objid % 32>/<objid>
+		rm $mntpt/O/$group/d$((i % 32))/$i || { rc=$?; break; }
+	done
+	umount -f $mntpt || return $?
+	return $rc
+}
+
+#Remove files from MDT
remove_mdt_files() {
local facet=$1
local mdtdev=$2
reformat_external_journal || return 5
# step 8: reformat dev
echo "reformat new device"
- add ${SINGLEMDS} $(mkfs_opts ${SINGLEMDS}) --backfstype ldiskfs \
- --reformat $devname > /dev/null || return 6
+ add ${SINGLEMDS} $(mkfs_opts ${SINGLEMDS} ${devname}) --backfstype \
+ ldiskfs --reformat ${devname} $(mdsvdevname 1) > /dev/null ||
+ exit 6
# step 9: mount dev
${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 7
# step 10: restore metadata