ONLY=${ONLY:-"$*"}
-# bug number for skipped test: LU-2828
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 59 64"
+# bug number for skipped test:
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
is_sles11() # LU-2181
run_test 1 "start up ost twice (should return errors)"
test_2() {
- start_mdt 1 || error "MDT0 start fail"
+ start_mds || error "MDT start failed"
echo "start mds second time.."
- start_mdt 1 && error "2nd MDT start should fail"
+ start_mds && error "2nd MDT start should fail"
start_ost || error "OST start failed"
mount_client $MOUNT || error "mount_client failed to start client"
check_mount || error "check_mount failed"
# the MDS must remain up until last MDT
stop_mds
MDS=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
- awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -n1)
+ awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
[ -z "$MDS" ] && error "No MDT"
cleanup_fs2
cleanup_nocli || error "cleanup_nocli failed with rc $?"
# Check 3.
# prepare a non-symlink parameter in the OSD
- name="lma_self_repair"
+ name="auto_scrub"
param="$device.osd.$name"
cmd="$LCTL get_param -n osd-*.$device.$name"
done
echo "waiting for dev on $node: dev $devices loop $loops given up"
do_rpc_nodes $node "losetup -a"
- do_rpc_nodes $node "$LCTL devices_list"
+ do_rpc_nodes $node "$LCTL device_list"
return 1
}
local img_bspace
local img_ispace
local fsname=t32fs
- local nid=$($r $LCTL list_nids | head -n1)
+ local nid=$($r $LCTL list_nids | head -1)
local mopts
local uuid
local nrpcs_orig
echo " Commit: $img_commit"
echo " Kernel: $img_kernel"
echo " Arch: $img_arch"
- echo "OST version: $(get_lustre_version ost1)"
+ echo "OST version: $(lustre_build_version ost1)"
# The conversion can be made only when both of the following
# conditions are satisfied:
"(Need MGS version at least 2.3.59)"; return 0; }
local osthost=$(facet_active_host ost1)
- local ostnid=$(do_node $osthost $LCTL list_nids |
- head -n1)
+ local ostnid=$(do_node $osthost $LCTL list_nids | head -1)
mopts=nosvc
if [ $fstype == "ldiskfs" ]; then
log "Set up a fake failnode for the MDS"
FAKENID="127.0.0.2"
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
- awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -n1)
+ awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
do_facet mgs "$LCTL conf_param \
${device}.failover.node=$(h2$NETTYPE $FAKENID)" ||
error "Setting ${device}.failover.node=\
log "Set up a fake failnode for the MDS"
FAKENID="127.0.0.2"
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
- awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -n1)
+ awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
do_facet mgs "$LCTL conf_param \
${device}.failover.node=$(h2$NETTYPE $FAKENID)" ||
error "Set ${device}.failover.node=\
local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
- start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
+ start_mdt 1 -o nosvc -n
+ if [ $MDSCOUNT -ge 2 ]; then
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num || return
+ done
+ fi
start ost1 $(ostdevname 1) $OST_MOUNT_OPTS
- start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force
+ start_mdt 1 -o nomgs,force
mount_client $MOUNT || error "mount_client $MOUNT failed"
sleep 5
reformat
local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
- start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
+ start_mdt 1 -o nosvc -n
+ if [ $MDSCOUNT -ge 2 ]; then
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num || return
+ done
+ fi
start_ost || error "Unable to start OST1"
- start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force
+ start_mdt 1 -o nomgs,force
mount_client $MOUNT || error "mount_client $MOUNT failed"
sleep 5
{ skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+"; return; }
cleanup
- # MDT concurent start
+ # MDT concurrent start
#define OBD_FAIL_TGT_DELAY_CONNECT 0x703
do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x703"
start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
echo "2nd MDT start succeed"
else
stop mds1 -f
- error "unexpected concurent MDT mounts result, rc=$rc rc2=$rc2"
+ error "unexpected concurrent MDT mounts result, rc=$rc rc2=$rc2"
+ fi
+
+ if [ $MDSCOUNT -ge 2 ]; then
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num || return
+ done
fi
- # OST concurent start
+ # OST concurrent start
+
#define OBD_FAIL_TGT_DELAY_CONNECT 0x703
do_facet ost1 "$LCTL set_param fail_loc=0x703"
start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
echo "1st OST start failed with EALREADY"
echo "2nd OST start succeed"
else
- stop mds1 -f
+ stop_mds -f
stop ost1 -f
- error "unexpected concurent OST mounts result, rc=$rc rc2=$rc2"
+ error "unexpected concurrent OST mounts result, rc=$rc rc2=$rc2"
fi
# cleanup
- stop mds1 -f
+ stop_mds
stop ost1 -f
# verify everything ok
start_mds
if [ $? != 0 ]
then
- stop mds1 -f
+ stop_mds
error "MDT(s) start failed"
fi
start_ost
if [ $? != 0 ]
then
- stop mds1 -f
+ stop_mds
stop ost1 -f
error "OST(s) start failed"
fi
mount_client $MOUNT
if [ $? != 0 ]
then
- stop mds1 -f
+ stop_mds
stop ost1 -f
error "client start failed"
fi
check_mount
if [ $? != 0 ]
then
- stop mds1 -f
+ stop_mds
stop ost1 -f
error "client mount failed"
fi
cleanup
}
-run_test 41c "concurent mounts of MDT/OST should all fail but one"
+run_test 41c "concurrent mounts of MDT/OST should all fail but one"
test_42() { #bug 14693
setup
cleanup_48() {
trap 0
- # reformat after this test is needed - if test will failed
+ # reformat after this test is needed - if the test fails,
# we will have unkillable file at FS
reformat_and_config
}
[ $? -eq 0 ] || { error "Unable to move objects"; return 14; }
# recover objects dry-run
- echo "ll_recover_lost_found_objs dry_run"
- do_node $ost1node "ll_recover_lost_found_objs -n -d $ost1mnt/O" ||
- error "ll_recover_lost_found_objs failed"
+ if [ $(lustre_version_code ost1) -ge $(version_code 2.5.56) ]; then
+ echo "ll_recover_lost_found_objs dry_run"
+ do_node $ost1node \
+ "ll_recover_lost_found_objs -n -d $ost1mnt/O" ||
+ error "ll_recover_lost_found_objs failed"
+ fi
# recover objects
echo "ll_recover_lost_found_objs fix run"
}
run_test 52 "check recovering objects from lost+found"
-thread_param_get() {
- local facet=$1
- local pbase=$2
- local param=$3
-
- do_facet $facet "lctl get_param -n $pbase.$param" || echo 0
-}
-
-cleanup_thread_sanity() {
- trap 0
- cleanup
-}
-
# Checks threads_min/max/started for some service
#
# Arguments: service name (OST or MDT), facet (e.g., ost1, $SINGLEMDS), and a
# parameter pattern prefix like 'ost.*.ost'.
thread_sanity() {
- local modname=$1
- local facet=$2
- local ppat=$3
- local opts=$4
+ local modname=$1
+ local facet=$2
+ local parampat=$3
+ local opts=$4
local basethr=$5
- local tmin
- local tmin2
- local tmax
- local tmax2
- local tstarted
- local tstarted2
- local pname
- local pbase
- local msg="Insane $modname thread counts"
- local ncpts
+ local tmin
+ local tmin2
+ local tmax
+ local tmax2
+ local tstarted
+ local paramp
+ local msg="Insane $modname thread counts"
+ local ncpts=$(check_cpt_number $facet)
local nthrs
- shift 4
-
- setup
- check_mount || { error "filesystem is not mounted"; return 40; }
- trap cleanup_thread_sanity EXIT
+ shift 4
- ncpts=$(check_cpt_number $facet)
+ check_mount || return 41
- # We need to expand $ppat, but it may match multiple parameters,
- # so we'll pick the first one
- pname=$(do_facet $facet "$LCTL get_param -N $ppat.threads_min" |
- head -n1)
- [ -n "$pname" ] || { error "Can't expand $ppat.threads_min"; return 20;}
+ # We need to expand $parampat, but it may match multiple parameters, so
+ # we'll pick the first one
+ if ! paramp=$(do_facet $facet "lctl get_param -N ${parampat}.threads_min"|head -1); then
+ error "Couldn't expand ${parampat}.threads_min parameter name"
+ return 22
+ fi
# Remove the .threads_min part
- pbase=${pname%.threads_min}
+ paramp=${paramp%.threads_min}
# Check for sanity in defaults
- tmin=$(thread_param_get $facet $pbase threads_min)
- tmax=$(thread_param_get $facet $pbase threads_max)
- tstarted=$(thread_param_get $facet $pbase threads_started)
- lassert 21 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' ||
+ tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" ||
+ echo 0)
+ tmax=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" ||
+ echo 0)
+ tstarted=$(do_facet $facet "$LCTL get_param \
+ -n ${paramp}.threads_started" || echo 0)
+ lassert 23 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' ||
return $?
- lassert 22 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax))' ||
+ lassert 24 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' ||
return $?
nthrs=$(expr $tmax - $tmin)
if [ $nthrs -lt $ncpts ]; then
[ $tmin -eq $tmax -a $tmin -eq $tstarted ] &&
skip_env "module parameter forced $facet thread count" &&
- return 0
+ tmin=3 && tmax=$((3 * tmax))
# Check that we can change min/max
- do_facet $facet "$LCTL set_param $pbase.threads_min=$((tmin + nthrs))"
- do_facet $facet "$LCTL set_param $pbase.threads_max=$((tmax - nthrs))"
- tmin2=$(thread_param_get $facet $pbase threads_min)
- tmax2=$(thread_param_get $facet $pbase threads_max)
- lassert 23 "$msg" '(($tmin2 == ($tmin + $nthrs)))' || return $?
- lassert 24 "$msg" '(($tmax2 == ($tmax - $nthrs)))' || return $?
-
- sleep 3 # give threads a chance to start
- tstarted=$(thread_param_get $facet $pbase threads_started)
- lassert 25 "$msg" '(($tstarted >= $tmin2))' || return $?
+ do_facet $facet "$LCTL set_param \
+ ${paramp}.threads_min=$((tmin + nthrs))"
+ do_facet $facet "$LCTL set_param \
+ ${paramp}.threads_max=$((tmax - nthrs))"
+ tmin2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" ||
+ echo 0)
+ tmax2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" ||
+ echo 0)
+ lassert 25 "$msg" '(($tmin2 == ($tmin + $nthrs) &&
+ $tmax2 == ($tmax - $nthrs)))' || return $?
# Check that we can set min/max to the same value
- do_facet $facet "$LCTL set_param $pbase.threads_max=$tmin2"
- tmax2=$(thread_param_get $facet $pbase threads_max)
- lassert 26 "$msg" '(($tmax2 == $tmin2))' || return $?
+ tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" ||
+ echo 0)
+ do_facet $facet "$LCTL set_param ${paramp}.threads_max=$tmin"
+ tmin2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" ||
+ echo 0)
+ tmax2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" ||
+ echo 0)
+ lassert 26 "$msg" '(($tmin2 == $tmin && $tmax2 == $tmin))' || return $?
# Check that we can't set max < min
- do_facet $facet "$LCTL set_param $pbase.threads_max=$((tmin - 1))"
- tmax2=$(thread_param_get $facet $pbase threads_max)
- lassert 27 "$msg" '(($tmax2 < $tmin))' || return $?
+ do_facet $facet "$LCTL set_param ${paramp}.threads_max=$((tmin - 1))"
+ tmin2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" ||
+ echo 0)
+ tmax2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" ||
+ echo 0)
+ lassert 27 "$msg" '(($tmin2 <= $tmax2))' || return $?
# We need to ensure that we get the module options desired; to do this
# we set LOAD_MODULES_REMOTE=true and we call setmodopts below.
LOAD_MODULES_REMOTE=true
cleanup
local oldvalue
- local newvalue="${opts}=$((basethr * ncpts))"
+ local newvalue="${opts}=$(expr $basethr \* $ncpts)"
setmodopts -a $modname "$newvalue" oldvalue
load_modules
setup
- check_mount || { error "filesystem failed remount"; return 41; }
+ check_mount || return 41
# Restore previous setting of MODOPTS_*
setmodopts $modname "$oldvalue"
# Check that $opts took
- tmin=$(thread_param_get $facet $pbase threads_min)
- tmax=$(thread_param_get $facet $pbase threads_max)
- tstarted=$(thread_param_get $facet $pbase threads_started)
- lassert 28 "$msg" '(($tstarted == $tmin && $tstarted == $tmax ))' ||
+ tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min")
+ tmax=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max")
+ tstarted=$(do_facet $facet \
+ "$LCTL get_param -n ${paramp}.threads_started")
+ lassert 28 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' ||
return $?
+ cleanup
- cleanup_thread_sanity || error "cleanup failed with rc $?"
load_modules
+ setup
}
test_53a() {
- thread_sanity OST ost1 'ost.*.ost' oss_num_threads 16
+ setup
+ thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads' '16'
+ cleanup || error "cleanup failed with rc $?"
}
run_test 53a "check OSS thread count params"
test_53b() {
- thread_sanity MDT $SINGLEMDS 'mds.*.*' mds_num_threads 16
+ setup
+ local mds=$(do_facet $SINGLEMDS "$LCTL get_param \
+ -N mds.*.*.threads_max 2>/dev/null")
+ if [ -z "$mds" ]; then
+ #running this on an old MDT
+ thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' 16
+ else
+ thread_sanity MDT $SINGLEMDS 'mds.*.*.' 'mds_num_threads' 16
+ fi
+ cleanup || error "cleanup failed with $?"
}
run_test 53b "check MDS thread count params"
{ skip "Need MGS version at least 2.3.59"; return 0; }
setup
- local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -n1)
- local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -n1)
+ local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1)
+ local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -1)
echo "replace_nids should fail if MDS, OSTs and clients are UP"
do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
start_mdt 1 || error "MDT0 start fail"
start_ost || error "OST0 start fail"
-
- start_mdt 2 || error "MDT1 start fail"
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num || return
+ done
mount_client $MOUNT || error "mount client fails"
start_ost || error "OST0 start fail"
- start_mdt 1 || error "MDT0 start fail"
- start_mdt 2 || error "MDT1 start fail"
+ start_mds || error "MDS start fail"
mount_client $MOUNT || error "mount client fails"
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
local MDTIDX=1
- start_mdt 1 || error "MDT0 start fail"
- start_mdt 2 || error "MDT1 start fail"
+ start_mds || error "MDS start fail"
start_ost || error "OST0 start fail"
mount_client $MOUNT || error "mount client fails"
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
local MDTIDX=1
- start_mdt 1 || error "MDT0 start fail"
- start_mdt 2 || error "MDT1 start fail"
+ start_mds || error "MDS start fail"
start_ost || error "OST0 start fail"
mount_client $MOUNT || error "mount client fails"
start_mdt 1 || error "MDT0 start fail"
start_ost || error "OST0 start fail"
- start_mdt 2 || error "MDT1 start fail"
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num || return
+ done
+
start_ost2 || error "OST1 start fail"
mount_client $MOUNT || error "mount client fails"
rm -rf $DIR/$tdir || error "delete dir fail"
umount_client $MOUNT || error "umount_client failed"
- stop_mdt 1 || error "MDT0 stop fail"
- stop_mdt 2 || error "MDT1 stop fail"
+ stop_mds || error "MDS stop fail"
stop_ost || error "OST0 stop fail"
stop_ost2 || error "OST1 stop fail"
}
fi
local MDTIDX=1
- start_mdt 2 || error "MDT1 start fail"
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num || return
+ done
start_ost || error "OST0 start fail"
start_mdt 1 || error "MDT0 start fail"
start_ost2 || error "OST1 start fail"
rm -rf $DIR/$tdir || error "delete dir fail"
umount_client $MOUNT || error "umount_client failed"
- stop_mdt 1 || error "MDT0 stop fail"
- stop_mdt 2 || error "MDT1 stop fail"
+ stop_mds || error "MDS stop fail"
stop_ost || error "OST0 stop fail"
stop_ost2 || error "OST1 stop fail"
}
start_ost || error "OST0 start fail"
start_ost2 || error "OST1 start fail"
- start_mdt 2 || error "MDT1 start fail"
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num || return
+ done
start_mdt 1 || error "MDT0 start fail"
mount_client $MOUNT || error "mount client fails"
rm -rf $DIR/$tdir || error "delete dir fail"
umount_client $MOUNT || error "umount_client failed"
- stop_mdt 1 || error "MDT0 stop fail"
- stop_mdt 2 || error "MDT1 stop fail"
+ stop_mds || error "MDS stop fail"
stop_ost || error "OST0 stop fail"
stop_ost2 || error "OST1 stop fail"
local MDTIDX=1
start_ost || error "OST0 start fail"
- start_mdt 2 || error "MDT0 start fail"
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num || return
+ done
start_mdt 1 || error "MDT0 start fail"
start_ost2 || error "OST1 start fail"
rm -rf $DIR/$tdir || error "delete dir fail"
umount_client $MOUNT || error "umount_client failed"
- stop_mdt 1 || error "MDT0 stop fail"
- stop_mdt 2 || error "MDT1 stop fail"
+ stop_mds || error "MDS stop fail"
stop_ost || error "OST0 stop fail"
stop_ost2 || error "OST1 stop fail"
local MDTIDX=1
start_ost || error "OST0 start fail"
- start_mdt 2 || error "MDT1 start fail"
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num || return
+ done
start_ost2 || error "OST1 start fail"
start_mdt 1 || error "MDT0 start fail"
rm -rf $DIR/$tdir || error "delete dir fail"
umount_client $MOUNT || error "umount_client failed"
- stop_mdt 1 || error "MDT0 stop fail"
- stop_mdt 2 || error "MDT1 stop fail"
+ stop_mds || error "MDS stop fail"
stop_ost || error "OST0 stop fail"
stop_ost2 || error "OST1 stop fail"
add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
--reformat $(mdsdevname $num) $(mdsvdevname $num) ||
error "add mds $num failed"
- $TUNE2FS -O extents $(mdsdevname $num)
+ do_facet mds${num} "$TUNE2FS -O extents $(mdsdevname $num)" ||
+ error "$TUNE2FS failed on mds${num}"
done
add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev ||
setup
local MDMB_PARAM="osc.*.max_dirty_mb"
echo "Change MGS params"
- local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -n1)
+ local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM |
+ head -1)
echo "max_dirty_mb: $MAX_DIRTY_MB"
local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB + MAX_DIRTY_MB))
echo "new_max_dirty_mb: $NEW_MAX_DIRTY_MB"
do_facet mgs $LCTL set_param -P $MDMB_PARAM=$NEW_MAX_DIRTY_MB
wait_update $HOSTNAME "$LCTL get_param -n $MDMB_PARAM |
- head -n1" $NEW_MAX_DIRTY_MB
- MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -n1)
+ head -1" $NEW_MAX_DIRTY_MB
+ MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1)
echo "$MAX_DIRTY_MB"
[ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] ||
error "error while apply max_dirty_mb"
stopall
setupall
wait_update $HOSTNAME "$LCTL get_param -n $MDMB_PARAM |
- head -n1" $NEW_MAX_DIRTY_MB
- MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -n1)
+ head -1" $NEW_MAX_DIRTY_MB
+ MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1)
[ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] ||
error "max_dirty_mb is not saved after remount"
CLIENT_PARAM="obdfilter.*.client_cache_count"
local CLIENT_CACHE_COUNT
CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM |
- head -n1)
+ head -1)
echo "client_cache_count: $CLIENT_CACHE_COUNT"
NEW_CLIENT_CACHE_COUNT=$((CLIENT_CACHE_COUNT+CLIENT_CACHE_COUNT))
echo "new_client_cache_count: $NEW_CLIENT_CACHE_COUNT"
do_facet mgs $LCTL set_param -P $CLIENT_PARAM=$NEW_CLIENT_CACHE_COUNT
wait_update $(facet_host ost1) "$LCTL get_param -n $CLIENT_PARAM |
- head -n1" $NEW_CLIENT_CACHE_COUNT
+ head -1" $NEW_CLIENT_CACHE_COUNT
CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM |
- head -n1)
+ head -1)
echo "$CLIENT_CACHE_COUNT"
[ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] ||
error "error while apply client_cache_count"
stopall
setupall
wait_update $(facet_host ost1) "$LCTL get_param -n $CLIENT_PARAM |
- head -n1" $NEW_CLIENT_CACHE_COUNT
+ head -1" $NEW_CLIENT_CACHE_COUNT
CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM |
- head -n1)
+ head -1)
echo "$CLIENT_CACHE_COUNT"
[ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] ||
error "client_cache_count is not saved after remount"
run_test 82b "specify OSTs for file with --pool and --ost-list options"
test_83() {
- local dev
- local ostmnt
- local fstype
- local mnt_opts
-
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.6.91) ]] ||
+ { skip "Need OST version at least 2.6.91" && return 0; }
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
skip "Only applicable to ldiskfs-based MDTs"
return
fi
+ local dev
+ local ostmnt
+ local fstype
+ local mnt_opts
+
dev=$(ostdevname 1)
ostmnt=$(facet_mntpt ost1)
fstype=$(facet_fstype ost1)