export ZFS=${ZFS:-zfs}
export ZPOOL=${ZPOOL:-zpool}
export ZDB=${ZDB:-zdb}
+ export PARTPROBE=${PARTPROBE:-partprobe}
#[ -d /r ] && export ROOT=${ROOT:-/r}
export TMP=${TMP:-$ROOT/tmp}
fi
export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3}
+ export OSD_TRACK_DECLARES_LBUG=${OSD_TRACK_DECLARES_LBUG:-"yes"}
# command line
grep -q crc16 $SYMLIST || { modprobe crc16 2>/dev/null || true; }
grep -q -w jbd $SYMLIST || { modprobe jbd 2>/dev/null || true; }
grep -q -w jbd2 $SYMLIST || { modprobe jbd2 2>/dev/null || true; }
+ load_module lfsck/lfsck
[ "$LQUOTA" != "no" ] && load_module quota/lquota $LQUOTAOPTS
if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
modprobe zfs
local size=0
case $fstype in
ldiskfs) size=50;; # largest seen is 44, leave some headroom
- zfs) size=256;;
+ zfs) size=400;; # largest seen is 384
esac
echo -n $size
}
#
+# Get the device of a facet.
+#
+facet_device() {
+ local facet=$1
+ local device
+
+ # Map the facet name to its formatted device via the per-type
+ # *devname() helpers; fs2*/fs3* are the extra mixed-setup targets.
+ case $facet in
+ mgs) device=$(mgsdevname) ;;
+ mds*) device=$(mdsdevname $(facet_number $facet)) ;;
+ ost*) device=$(ostdevname $(facet_number $facet)) ;;
+ fs2mds) device=$(mdsdevname 1_2) ;;
+ fs2ost) device=$(ostdevname 1_2) ;;
+ fs3ost) device=$(ostdevname 2_2) ;;
+ *) ;;
+ esac
+
+ # empty output for unrecognized facets
+ echo -n $device
+}
+
+#
+# Get the virtual device of a facet.
+#
+facet_vdevice() {
+ local facet=$1
+ local device
+
+ # Same mapping as facet_device(), but through the *vdevname()
+ # helpers, which name the backing virtual device (e.g. ZFS vdev).
+ case $facet in
+ mgs) device=$(mgsvdevname) ;;
+ mds*) device=$(mdsvdevname $(facet_number $facet)) ;;
+ ost*) device=$(ostvdevname $(facet_number $facet)) ;;
+ fs2mds) device=$(mdsvdevname 1_2) ;;
+ fs2ost) device=$(ostvdevname 1_2) ;;
+ fs3ost) device=$(ostvdevname 2_2) ;;
+ *) ;;
+ esac
+
+ # empty output for unrecognized facets
+ echo -n $device
+}
+
+#
+# Re-read the partition table on failover partner host.
+# After a ZFS storage pool is created on a shared device, the partition table
+# on the device may change. However, the operating system on the failover
+# host may not notice the change automatically. Without the up-to-date partition
+# block devices, 'zpool import ..' cannot find the labels, whose positions are
+# relative to partition rather than disk beginnings.
+#
+# This function performs partprobe on the failover host to make it re-read the
+# partition table.
+#
+refresh_partition_table() {
+ local facet=$1
+ local device=$2
+ local host
+
+ # Nothing to do when the facet has no distinct failover partner
+ # (facet_passive_host prints nothing in that case).
+ host=$(facet_passive_host $facet)
+ if [[ -n "$host" ]]; then
+ do_node $host "$PARTPROBE $device"
+ fi
+}
+
+#
+# Get ZFS storage pool name.
+#
+zpool_name() {
+ local facet=$1
+ local device
+ local poolname
+
+ device=$(facet_device $facet)
+ # poolname is string before "/"
+ poolname="${device%%/*}"
+
+ # ZFS datasets are named <pool>/<dataset>; for an absolute device
+ # path (non-ZFS facet) the expansion above yields an empty string,
+ # which callers use to detect "not a ZFS facet".
+ echo -n $poolname
+}
+
+#
+# Export ZFS storage pool.
+# Before exporting the pool, all datasets within the pool should be unmounted.
+#
+export_zpool() {
+ local facet=$1
+ shift
+ local opts="$@" # extra options passed through to "zpool export"
+ local poolname
+
+ poolname=$(zpool_name $facet)
+
+ if [[ -n "$poolname" ]]; then
+ # Export only if the pool exists and none of its datasets are
+ # still mounted (checked via /proc/mounts on the facet host).
+ do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ grep -q ^$poolname/ /proc/mounts ||
+ $ZPOOL export $opts $poolname"
+ fi
+}
+
+#
+# Import ZFS storage pool.
+# Force importing, even if the pool appears to be potentially active.
+#
+import_zpool() {
+ local facet=$1
+ shift
+ local opts=${@:-"-o cachefile=none"}
+ local poolname
+
+ poolname=$(zpool_name $facet)
+
+ if [[ -n "$poolname" ]]; then
+ # search the directory holding the vdev for pool labels
+ opts+=" -d $(dirname $(facet_vdevice $facet))"
+ # Import only if not already imported; -f overrides the
+ # "pool may be in use from other system" safety check.
+ do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ $ZPOOL import -f $opts $poolname"
+ fi
+}
+
+#
+# Set the "cachefile=none" property on ZFS storage pool so that the pool
+# is not automatically imported on system startup.
+#
+# In a failover environment, this will provide resource level fencing which
+# will ensure that the same ZFS storage pool will not be imported concurrently
+# on different nodes.
+#
+disable_zpool_cache() {
+ local facet=$1
+ local poolname
+
+ poolname=$(zpool_name $facet)
+
+ # no-op for non-ZFS facets (poolname is empty)
+ if [[ -n "$poolname" ]]; then
+ do_facet $facet "$ZPOOL set cachefile=none $poolname"
+ fi
+}
+
+#
# This and set_osd_param() shall be used to access OSD parameters
# once existed under "obdfilter":
#
opts=$(csa_add "$opts" -o loop)
fi
+ if [[ $(facet_fstype $facet) == zfs ]]; then
+ # import ZFS storage pool
+ import_zpool $facet || return ${PIPESTATUS[0]}
+ fi
+
echo "Starting ${facet}: $opts ${!dev} $mntpt"
# for testing LU-482 error handling in mount_facets() and test_0a()
if [ -f $TMP/test-lu482-trigger ]; then
return $RC
}
-#
-# When a ZFS OSD is made read-only by replay_barrier(), its pool is "freezed".
-# Because stopping corresponding target may not clear this in-memory state, we
-# need to zap the pool from memory by exporting and reimporting the pool.
-#
-# Although the uberblocks are not updated when a pool is freezed, transactions
-# are still written to the disks. Modified blocks may be cached in memory when
-# tests try reading them back. The export-and-reimport process also evicts any
-# cached pool data from memory to provide the correct "data loss" semantics.
-#
-refresh_disk() {
- local facet=$1
- local fstype=$(facet_fstype $facet)
- local _dev
- local dev
- local poolname
-
- if [ "${fstype}" == "zfs" ]; then
- _dev=$(facet_active $facet)_dev
- dev=${!_dev} # expand _dev to its value, e.g. ${mds1_dev}
- poolname="${dev%%/*}" # poolname is string before "/"
-
- if [ "${poolname}" == "" ]; then
- echo "invalid dataset name: $dev"
- return
- fi
- do_facet $facet "cp /etc/zfs/zpool.cache /tmp/zpool.cache.back"
- do_facet $facet "$ZPOOL export ${poolname}"
- do_facet $facet "$ZPOOL import -f -c /tmp/zpool.cache.back \
- ${poolname}"
- fi
-}
-
stop() {
local running
local facet=$1
do_facet ${facet} umount -d $@ $mntpt
fi
- # umount should block, but we should wait for unrelated obd's
- # like the MGS or MGC to also stop.
- wait_exit_ST ${facet}
+ # umount should block, but we should wait for unrelated obd's
+ # like the MGS or MGC to also stop.
+ wait_exit_ST ${facet} || return ${PIPESTATUS[0]}
+
+ if [[ $(facet_fstype $facet) == zfs ]]; then
+ # export ZFS storage pool
+ export_zpool $facet
+ fi
}
# save quota version (both administrative and operational quotas)
if [ "$FAILURE_MODE" = HARD ]; then
reboot_node $(facet_active_host $facet)
else
- refresh_disk ${facet}
sleep 10
fi
}
TESTNAME=$TESTNAME \
DBENCH_LIB=$DBENCH_LIB \
DBENCH_SRC=$DBENCH_SRC \
+CLIENT_COUNT=$((CLIENTCOUNT - 1)) \
LFS=$LFS \
run_${load}.sh" &
local ppid=$!
}
wait_update () {
- local node=$1
- local TEST=$2
- local FINAL=$3
- local MAX=${4:-90}
-
- local RESULT
- local WAIT=0
- local sleep=1
- local print=10
- while [ true ]; do
- RESULT=$(do_node $node "$TEST")
- if [ "$RESULT" == "$FINAL" ]; then
- [ -z "$RESULT" -o $WAIT -le $sleep ] ||
- echo "Updated after ${WAIT}s: wanted '$FINAL' got '$RESULT'"
- return 0
- fi
- [ $WAIT -ge $MAX ] && break
- [ $((WAIT % print)) -eq 0 ] &&
- echo "Waiting $((MAX - WAIT)) secs for update"
- WAIT=$((WAIT + sleep))
- sleep $sleep
- done
- echo "Update not seen after ${MAX}s: wanted '$FINAL' got '$RESULT'"
- return 3
+ # Usage: wait_update [--verbose] <node> <check-cmd> <expected> [max-wait]
+ # Poll <check-cmd> on <node> once per second until its output equals
+ # <expected> or [max-wait] seconds (default 90) have elapsed.
+ # With --verbose, every change of the command output is reported.
+ local verbose=false
+ if [[ "$1" == "--verbose" ]]; then
+ shift
+ verbose=true
+ fi
+
+ local node=$1
+ local TEST=$2
+ local FINAL=$3
+ local MAX=${4:-90}
+ local RESULT
+ local PREV_RESULT
+ local WAIT=0
+ local sleep=1
+ local print=10
+
+ while [ true ]; do
+ RESULT=$(do_node $node "$TEST")
+ if [[ "$RESULT" == "$FINAL" ]]; then
+ [[ -z "$RESULT" || $WAIT -le $sleep ]] ||
+ echo "Updated after ${WAIT}s: wanted '$FINAL'"\
+ "got '$RESULT'"
+ return 0
+ fi
+ # Run the command stored in $verbose ("true"/"false") rather than
+ # testing it with [[ ]]: "[[ $verbose ]]" is a non-empty-string
+ # test, so the literal string "false" would count as true and the
+ # change messages would print even without --verbose.
+ if $verbose && [[ "$RESULT" != "$PREV_RESULT" ]]; then
+ echo "Changed after ${WAIT}s: from '$PREV_RESULT'"\
+ "to '$RESULT'"
+ PREV_RESULT=$RESULT
+ fi
+ [[ $WAIT -ge $MAX ]] && break
+ [[ $((WAIT % print)) -eq 0 ]] &&
+ echo "Waiting $((MAX - WAIT)) secs for update"
+ WAIT=$((WAIT + sleep))
+ sleep $sleep
+ done
+ echo "Update not seen after ${MAX}s: wanted '$FINAL' got '$RESULT'"
+ return 3
}
wait_update_facet() {
}
sync_all_data() {
- do_node $(osts_nodes) "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 |
+ # force-commit on all MDTs first, then on all OSTs; suppress the
+ # harmless "Found no match" from set_param on absent targets
+ do_nodes $(comma_list $(mdts_nodes)) \
+ "lctl set_param -n osd*.*MDT*.force_sync 1"
+ do_nodes $(comma_list $(osts_nodes)) \
+ "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 |
grep -v 'Found no match'
}
do_facet $facet "sync; sync; sync"
df $MOUNT
- # make sure there will be no seq change
+ # make sure there will be no seq change
local clients=${CLIENTS:-$HOSTNAME}
local f=fsa-\\\$\(hostname\)
do_nodes $clients "mcreate $MOUNT/$f; rm $MOUNT/$f"
local svc=${facet}_svc
do_facet $facet $LCTL --device ${!svc} notransno
+ #
+ # If a ZFS OSD is made read-only here, its pool is "frozen". This
+ # in-memory state has to be cleared by either rebooting the host or
+ # exporting and reimporting the pool.
+ #
+ # Although the uberblocks are not updated when a pool is frozen,
+ # transactions are still written to the disks. Modified blocks may be
+ # cached in memory when tests try reading them back. The
+ # export-and-reimport process also evicts any cached pool data from
+ # memory to provide the correct "data loss" semantics.
+ #
+ # In the test framework, the exporting and importing operations are
+ # handled by stop() and mount_facet() separately, which are used
+ # inside fail() and fail_abort().
+ #
do_facet $facet $LCTL --device ${!svc} readonly
do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
$LCTL mark "local REPLAY BARRIER on ${!svc}"
fail_abort() {
local facet=$1
stop $facet
- refresh_disk ${facet}
change_active $facet
wait_for_facet $facet
mount_facet $facet -o abort_recovery
fi
}
+# Get the passive failover partner host of facet.
+facet_passive_host() {
+ local facet=$1
+ # clients have no failover partner
+ [[ $facet = client ]] && return
+
+ local host=${facet}_HOST
+ local failover_host=${facet}failover_HOST
+ local active_host=$(facet_active_host $facet)
+
+ # print nothing when no failover host is configured, or when the
+ # failover host is the same node as the primary
+ [[ -z ${!failover_host} || ${!failover_host} = ${!host} ]] && return
+
+ # return whichever of the two hosts is NOT currently active
+ if [[ $active_host = ${!host} ]]; then
+ echo -n ${!failover_host}
+ else
+ echo -n ${!host}
+ fi
+}
+
change_active() {
local facetlist=$1
local facet
}
add() {
- local facet=$1
- shift
- # make sure its not already running
- stop ${facet} -f
- rm -f $TMP/${facet}active
- [[ $facet = mds1 ]] && combined_mgs_mds && rm -f $TMP/mgsactive
- do_facet ${facet} $MKFS $*
+ local facet=$1
+ shift
+ # make sure it's not already running
+ stop ${facet} -f
+ rm -f $TMP/${facet}active
+ [[ $facet = mds1 ]] && combined_mgs_mds && rm -f $TMP/mgsactive
+ # propagate the mkfs failure instead of silently continuing
+ do_facet ${facet} $MKFS $* || return ${PIPESTATUS[0]}
+
+ if [[ $(facet_fstype $facet) == zfs ]]; then
+ #
+ # After formatting a ZFS target, "cachefile=none" property will
+ # be set on the ZFS storage pool so that the pool is not
+ # automatically imported on system startup. And then the pool
+ # will be exported so as to leave the importing and exporting
+ # operations handled by mount_facet() and stop() separately.
+ #
+ # The failover partner re-reads its partition table first so it
+ # can see the labels of the newly created pool.
+ refresh_partition_table $facet $(facet_vdevice $facet)
+ disable_zpool_cache $facet
+ export_zpool $facet
+ fi
}
ostdevname() {
local dev=$2
stop ${facet} -f
- rm -f ${facet}active
+ rm -f $TMP/${facet}active
do_facet ${facet} "$TUNEFS --quiet --writeconf $dev" || return 1
return 0
}
# empty lustre filesystem may have empty directories lost+found and .lustre
is_empty_fs() {
+ # exclude .lustre & lost+found
[ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \
-print | wc -l) = 1 ] || return 1
[ ! -d $1/lost+found ] || is_empty_dir $1/lost+found || return 1
- [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre || return 1
+ # pre-2.4 servers: additionally ignore .lustre/fid (see LU-2780)
+ if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.0) ]; then
+ # exclude .lustre/fid (LU-2780)
+ [ $(find $1/.lustre -maxdepth 1 -name fid -prune -o \
+ -print | wc -l) = 1 ] || return 1
+ else
+ [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre || return 1
+ fi
return 0
}
set_default_debug_nodes $(comma_list $(nodes_list))
fi
+ if [ $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ] ; then
+ local facets="$(get_facets OST),$(get_facets MDS),mgs"
+ local nodes="$(facets_hosts ${facets})"
+ if [ -n "$nodes" ] ; then
+ do_nodes $nodes "$LCTL set_param \
+ osd-ldiskfs.track_declares_assert=1 || true"
+ fi
+ fi
+
init_gss
if $GSS; then
set_flavor_all $SEC
##################################
error_noexit() {
- local TYPE=${TYPE:-"FAIL"}
+ # TYPE may be pre-set by callers (e.g. error_ignore) via dynamic scoping
+ local TYPE=${TYPE:-"FAIL"}
- local dump=true
- # do not dump logs if $1=false
- if [ "x$1" = "xfalse" ]; then
- shift
- dump=false
- fi
+ local dump=true
+ # do not dump logs if $1=false
+ if [ "x$1" = "xfalse" ]; then
+ shift
+ dump=false
+ fi
- log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ "
- log_trace_dump
- mkdir -p $LOGDIR
- # We need to dump the logs on all nodes
- if $dump; then
- gather_logs $(comma_list $(nodes_list))
- fi
+ log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ "
+ log_trace_dump
+
+ mkdir -p $LOGDIR
+ # We need to dump the logs on all nodes
+ if $dump; then
+ gather_logs $(comma_list $(nodes_list))
+ fi
debugrestore
[ "$TESTSUITELOG" ] &&
echo "$TESTSUITE: $TYPE: $TESTNAME $@" >> $TESTSUITELOG
- echo "$@" > $LOGDIR/err
+ # record the failure message in $LOGDIR/err for later reporting;
+ # complain when a caller passed no message at all
+ if [ -z "$*" ]; then
+ echo "error() without useful message, please fix" > $LOGDIR/err
+ else
+ echo "$@" > $LOGDIR/err
+ fi
}
exit_status () {
}
error() {
- error_noexit "$@"
- exit 1
+ # report the failure, then terminate the test script
+ error_noexit "$@"
+ exit 1
}
error_exit() {
- error "$@"
+ # thin wrapper around error()
+ error "$@"
}
# use only if we are ignoring failures for this test, bugno required.
# (like ALWAYS_EXCEPT, but run the test and ignore the results.)
# e.g. error_ignore 5494 "your message"
error_ignore() {
- local TYPE="IGNORE (bz$1)"
- shift
- error_noexit "$@"
+ # $1 - bug number; remaining args - message. Logs but does not exit;
+ # TYPE is picked up by error_noexit() via dynamic scoping.
+ local TYPE="IGNORE (bz$1)"
+ shift
+ error_noexit "$@"
}
error_and_remount() {
}
skip_env () {
- $FAIL_ON_SKIP_ENV && error false $@ || skip $@
+ # environment problems are fatal when FAIL_ON_SKIP_ENV is set;
+ # "false" suppresses the log dump in error_noexit()
+ $FAIL_ON_SKIP_ENV && error false $@ || skip $@
}
skip() {
$(single_local_node $(comma_list $(nodes_list)))
}
-mdts_nodes () {
- local MDSNODES
- local NODES_sort
- for num in `seq $MDSCOUNT`; do
- MDSNODES="$MDSNODES $(facet_host mds$num)"
- done
- NODES_sort=$(for i in $MDSNODES; do echo $i; done | sort -u)
-
- echo $NODES_sort
-}
-
remote_servers () {
remote_ost && remote_mds
}
+# Get the active nodes for facets.
facets_nodes () {
- local facets=$1
- local nodes
- local NODES_sort
+ local facets=$1
+ local facet
+ local nodes
+ local nodes_sort
+ local i
- for facet in ${facets//,/ }; do
- if [ "$FAILURE_MODE" = HARD ]; then
- nodes="$nodes $(facet_active_host $facet)"
- else
- nodes="$nodes $(facet_host $facet)"
- fi
- done
- NODES_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ # always use the currently active host, regardless of FAILURE_MODE
+ for facet in ${facets//,/ }; do
+ nodes="$nodes $(facet_active_host $facet)"
+ done
- echo $NODES_sort
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
}
-osts_nodes () {
- local facets=$(get_facets OST)
- local nodes=$(facets_nodes $facets)
+# Get all of the active MDS nodes.
+mdts_nodes () {
+ echo -n $(facets_nodes $(get_facets MDS))
+}
+# (thin wrappers over facets_nodes; passive failover hosts excluded)
- echo $nodes
+# Get all of the active OSS nodes.
+osts_nodes () {
+ echo -n $(facets_nodes $(get_facets OST))
}
+# Get all of the client nodes and active server nodes.
nodes_list () {
- # FIXME. We need a list of clients
- local myNODES=$HOSTNAME
- local myNODES_sort
-
- # CLIENTS (if specified) contains the local client
- [ -n "$CLIENTS" ] && myNODES=${CLIENTS//,/ }
+ local nodes=$HOSTNAME
+ local nodes_sort
+ local i
- if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
- myNODES="$myNODES $(facets_nodes $(get_facets))"
- fi
+ # CLIENTS (if specified) contains the local client
+ [ -n "$CLIENTS" ] && nodes=${CLIENTS//,/ }
- myNODES_sort=$(for i in $myNODES; do echo $i; done | sort -u)
+ # server nodes are only included when a remote shell is configured
+ if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
+ nodes="$nodes $(facets_nodes $(get_facets))"
+ fi
- echo $myNODES_sort
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
}
+# Get all of the remote client nodes and remote active server nodes.
remote_nodes_list () {
- echo $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g"
+ # filter the local hostname out of the active-node list
+ echo -n $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g"
+}
+
+# Get all of the MDS nodes, including active and passive nodes.
+all_mdts_nodes () {
+ local host
+ local failover_host
+ local nodes
+ local nodes_sort
+ local i
+
+ for i in $(seq $MDSCOUNT); do
+ host=mds${i}_HOST
+ failover_host=mds${i}failover_HOST
+ # indirect expansion; unset failover hosts expand to nothing
+ nodes="$nodes ${!host} ${!failover_host}"
+ done
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
+}
+
+# Get all of the OSS nodes, including active and passive nodes.
+all_osts_nodes () {
+ local host
+ local failover_host
+ local nodes
+ local nodes_sort
+ local i
+
+ for i in $(seq $OSTCOUNT); do
+ host=ost${i}_HOST
+ failover_host=ost${i}failover_HOST
+ # indirect expansion; unset failover hosts expand to nothing
+ nodes="$nodes ${!host} ${!failover_host}"
+ done
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
+}
+
+# Get all of the server nodes, including active and passive nodes.
+all_server_nodes () {
+ local nodes
+ local nodes_sort
+ local i
+
+ # MGS (and its failover partner) plus every MDS and OSS node
+ nodes="$mgs_HOST $mgsfailover_HOST $(all_mdts_nodes) $(all_osts_nodes)"
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
+}
+
+# Get all of the client and server nodes, including active and passive nodes.
+all_nodes () {
+ local nodes=$HOSTNAME
+ local nodes_sort
+ local i
+
+ # CLIENTS (if specified) contains the local client
+ [ -n "$CLIENTS" ] && nodes=${CLIENTS//,/ }
+
+ # server nodes are only included when a remote shell is configured
+ if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
+ nodes="$nodes $(all_server_nodes)"
+ fi
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
+}
init_clients_lists () {
}
get_stripe () {
- local file=$1/stripe
- touch $file
- $LFS getstripe -v $file || error
- rm -f $file
+ local file=$1/stripe
+
+ # create a scratch file just to report its stripe layout
+ touch $file
+ $LFS getstripe -v $file || error "getstripe $file failed"
+ rm -f $file
}
setstripe_nfsserver () {
llverfs $partial_arg $llverfs_opts $dir
}
+# Remove objects from OST
+remove_ost_objects() {
+ # Usage: remove_ost_objects <facet> <ostdev> <group> <objid>...
+ # Mount the OST device locally and unlink the given object ids
+ # from object group <group>.
+ local facet=$1
+ local ostdev=$2
+ local group=$3
+ shift 3
+ local objids="$@"
+ local mntpt=$(facet_mntpt $facet)
+ local opts=$OST_MOUNT_OPTS
+ local i
+ local rc
+
+ echo "removing objects from $ostdev on $facet: $objids"
+ # file-backed (non-block) devices need a loop mount
+ if ! test -b $ostdev; then
+ opts=$(csa_add "$opts" -o loop)
+ fi
+ mount -t $(facet_fstype $facet) $opts $ostdev $mntpt ||
+ return $?
+ rc=0
+ for i in $objids; do
+ # objects are hashed into 32 subdirectories d0..d31
+ rm $mntpt/O/$group/d$((i % 32))/$i || { rc=$?; break; }
+ done
+ umount -f $mntpt || return $?
+ return $rc
+}
+
+# Remove files from MDT
remove_mdt_files() {
local facet=$1
local mdtdev=$2
fi
mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt ||
return $?
- rc=0;
+ rc=0
for f in $files; do
rm $mntpt/ROOT/$f || { rc=$?; break; }
done
local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
- do_facet $SINGLEMDS "$DUMPE2FS -h $mds_dev 2>&1 | grep -q large_xattr"
+ do_facet $SINGLEMDS "$DUMPE2FS -h $mds_dev 2>&1 |
+ grep -E -q '(ea_inode|large_xattr)'"
return ${PIPESTATUS[0]}
}
echo "$TESTLOG_PREFIX.$TESTNAME.$logname.$(hostname -s).log"
}
-# mkdir directory on different MDTs
+# make directory on different MDTs
test_mkdir() {
local option
local parent
local child
local path
- local dir
local rc=0
+ # accepts at most: [option] <path>; only one directory may be created
+ case $# in
+ 1) path=$1;;
+ 2) option=$1
+ path=$2;;
+ *) error "Only creating single directory is supported";;
+ esac
- if [ $# -eq 2 ]; then
- option=$1
- path=$2
- else
- path=$1
- fi
-
- child=${path##*/}
- parent=${path%/*}
- if [ "$parent" == "$child" ]; then
- parent=$(pwd)
- fi
+ child=$(basename $path)
+ parent=$(dirname $path)
- if [ "$option" == "-p" -a -d ${parent}/${child} ]; then
+ if [ "$option" == "-p" -a -d $parent/$child ]; then
return $rc
fi
- # it needs to check whether there is further / in child
- dir=$(echo $child | awk -F '/' '{print $2}')
- if [ ! -z "$dir" ]; then
- local subparent=$(echo $child | awk -F '/' '{ print $1 }')
- parent=${parent}"/"${subparent}
- child=$dir
- fi
-
if [ ! -d ${parent} ]; then
if [ "$option" == "-p" ]; then
mkdir -p ${parent}
fi
if [ $MDSCOUNT -le 1 ]; then
- mkdir $option ${parent}/${child} || rc=$?
+ mkdir $option $parent/$child || rc=$?
else
local mdt_idx=$($LFS getstripe -M $parent)
+ # derive a stripe index from the test number for round-robin
+ local test_num=$(echo $testnum | sed -e 's/[^0-9]*//g')
if [ "$mdt_idx" -ne 0 ]; then
- mkdir $option ${parent}/${child} || rc=$?
- return $rc
+ mkdir $option $parent/$child || rc=$?
+ else
+ mdt_idx=$((test_num % MDSCOUNT))
+ echo "mkdir $mdt_idx for $parent/$child"
+ $LFS setdirstripe -i $mdt_idx $parent/$child || rc=$?
fi
-
- local test_num=$(echo $testnum | sed -e 's/[^0-9]*//g')
- local mdt_idx=$((test_num % MDSCOUNT))
- echo "mkdir $mdt_idx for ${parent}/${child}"
- $LFS setdirstripe -i $mdt_idx ${parent}/${child} || rc=$?
fi
return $rc
}