local o=$(echo $O | tr "[:upper:]" "[:lower:]")
o=${o//_/-}
o=${o//tyn/tyN}
- local log=${TMP}/${o}.log
+ local log=${TMP}/${o}.log
[ -f $log ] && skipped=$(grep excluded $log | awk '{ printf " %s", $3 }' | sed 's/test_//g')
[ -f $log ] && slow=$(grep SLOW $log | awk '{ printf " %s", $3 }' | sed 's/test_//g')
[ "${!O}" = "done" ] && \
export PATH=:$PATH:$LUSTRE/utils:$LUSTRE/utils/gss:$LUSTRE/tests
export LCTL=${LCTL:-"$LUSTRE/utils/lctl"}
- [ ! -f "$LCTL" ] && export LCTL=$(which lctl)
+ [ ! -f "$LCTL" ] && export LCTL=$(which lctl)
export LFS=${LFS:-"$LUSTRE/utils/lfs"}
- [ ! -f "$LFS" ] && export LFS=$(which lfs)
+ [ ! -f "$LFS" ] && export LFS=$(which lfs)
export L_GETIDENTITY=${L_GETIDENTITY:-"$LUSTRE/utils/l_getidentity"}
if [ ! -f "$L_GETIDENTITY" ]; then
if `which l_getidentity > /dev/null 2>&1`; then
fi
fi
export MKFS=${MKFS:-"$LUSTRE/utils/mkfs.lustre"}
- [ ! -f "$MKFS" ] && export MKFS=$(which mkfs.lustre)
+ [ ! -f "$MKFS" ] && export MKFS=$(which mkfs.lustre)
export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"}
- [ ! -f "$TUNEFS" ] && export TUNEFS=$(which tunefs.lustre)
+ [ ! -f "$TUNEFS" ] && export TUNEFS=$(which tunefs.lustre)
export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} "
export FSYTPE=${FSTYPE:-"ldiskfs"}
export NAME=${NAME:-local}
[ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] && \
export LGSSD=$(which lgssd)
export LSVCGSSD=${LSVCGSSD:-"$LUSTRE/utils/gss/lsvcgssd"}
- [ ! -f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd)
+ [ ! -f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd)
export KRB5DIR=${KRB5DIR:-"/usr/kerberos"}
export DIR2
;;
esac
- # Paths on remote nodes, if different
+ # Paths on remote nodes, if different
export RLUSTRE=${RLUSTRE:-$LUSTRE}
export RPWD=${RPWD:-$PWD}
export I_MOUNTED=${I_MOUNTED:-"no"}
# command line
-
- while getopts "rvf:" opt $*; do
+
+ while getopts "rvf:" opt $*; do
case $opt in
f) CONFIG=$OPTARG;;
r) REFORMAT=--reformat;;
}
-have_modules () {
- lsmod | grep -q lnet
-}
-
load_module() {
EXT=".ko"
module=$1
# use modprobe
return 0
fi
- # we already loaded ?
- have_modules && return 0
+ if [ "$HAVE_MODULES" = true ]; then
+ # we already loaded
+ return 0
+ fi
+ HAVE_MODULES=true
echo Loading modules from $LUSTRE
load_module ../lnet/libcfs/libcfs
MODULES=$($LCTL modules | awk '{ print $2 }')
if [ -n "$MODULES" ]; then
echo "Modules still loaded: "
- echo $MODULES
+ echo $MODULES
if [ -e $LPROC ]; then
echo "Lustre still loaded"
cat $LPROC/devices || true
wait_for_lnet || return 3
fi
fi
+ HAVE_MODULES=false
LEAK_LUSTRE=$(dmesg | tail -n 30 | grep "obd mem.*leaked" || true)
LEAK_PORTALS=$(dmesg | tail -n 20 | grep "Portals memory leaked" || true)
}
# Facet functions
-# start facet device options
+# start facet device options
start() {
facet=$1
shift
shift
echo "Starting ${facet}: $@ ${device} ${MOUNT%/*}/${facet}"
do_facet ${facet} mkdir -p ${MOUNT%/*}/${facet}
- do_facet ${facet} mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}
+ do_facet ${facet} mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}
RC=${PIPESTATUS[0]}
if [ $RC -ne 0 ]; then
- echo mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}
+ echo mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}
echo Start of ${device} on ${facet} failed ${RC}
- else
+ else
do_facet ${facet} "sysctl -w lnet.debug=$PTLDEBUG; \
sysctl -w lnet.subsystem_debug=${SUBSYSTEM# }; \
sysctl -w lnet.debug_mb=${DEBUG_SIZE}"
exit 1
fi
- echo "Starting client: $OPTIONS $device $mnt"
+ echo "Starting client: $OPTIONS $device $mnt"
do_node $client mkdir -p $mnt
do_node $client mount -t lustre $OPTIONS $device $mnt || return 1
facet=$1
if [ "$FAILURE_MODE" = HARD ]; then
$POWER_DOWN `facet_active_host $facet`
- sleep 2
+ sleep 2
elif [ "$FAILURE_MODE" = SOFT ]; then
stop $facet
fi
echo "Waiting $(($MAX - $WAIT)) secs for MDS recovery done"
done
echo "MDS recovery not done in $MAX sec"
- return 1
+ return 1
}
wait_exit_ST () {
df $MOUNT || error "post-failover df: $?"
}
# Fail over a facet without the post-failover "df" sanity check
# that fail() performs.  Useful when the client mount is expected
# to be unusable right after the failover.
fail_nodf() {
    facet_failover "${1}"
}
+
fail_abort() {
local facet=$1
stop $facet
# Map a node name to "name@net" notation.  The pseudo-nodes "client"
# and the literal string '*' both map to the wildcard '*' (quoted so
# later consumers do not glob-expand it).
h2name_or_ip() {
    local name=$1
    local net=$2
    case "$name" in
        client | "'*'")
            echo "'*'"
            ;;
        *)
            echo "${name}@${net}"
            ;;
    esac
}
fi
active=${!activevar}
- if [ -z "$active" ] ; then
+ if [ -z "$active" ] ; then
echo -n ${facet}
else
echo -n ${active}
change_active() {
local facet=$1
- failover=${facet}failover
+ failover=${facet}failover
host=`facet_host $failover`
[ -z "$host" ] && return
curactive=`facet_active $facet`
if [ $activemds != "mds1" ]; then
fail mds1
fi
-
- # assume client mount is local
+
+ # assume client mount is local
grep " $MOUNT " /proc/mounts && zconf_umount $HOSTNAME $MOUNT $*
grep " $MOUNT2 " /proc/mounts && zconf_umount $HOSTNAME $MOUNT2 $*
[ "$CLIENTONLY" ] && return
if [ "$ONLY" == "cleanup" -o "`mount | grep $MOUNT`" ]; then
sysctl -w lnet.debug=0 || true
cleanupall
- if [ "$ONLY" == "cleanup" ]; then
+ if [ "$ONLY" == "cleanup" ]; then
exit 0
fi
fi
check_and_cleanup_lustre() {
if [ "`mount | grep $MOUNT`" ]; then
- rm -rf $DIR/[Rdfs][0-9]*
- rm -f $DIR/${TESTSUITE}/[Rdfs][1-9]*
+ [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]*
fi
if [ "$I_MOUNTED" = "yes" ]; then
cleanupall -f || error "cleanup failed"
unset I_MOUNTED
}
-#######
+#######
# General functions
check_network() {
$LCTL mark "cancel_lru_locks $1 stop"
}
# Print the default ldlm namespace LRU size: 100 locks per CPU.
# The CPU count is read from /proc/cpuinfo; if that file is missing
# or holds no "processor" lines we fall back to 1 CPU rather than
# printing 0, so the result is always >= 100.
# Uses locals so the caller's NR_CPU/DEFAULT_LRU_SIZE are untouched.
default_lru_size()
{
    local nr_cpu
    nr_cpu=$(grep -c "processor" /proc/cpuinfo 2>/dev/null)
    # fallback when /proc/cpuinfo is unreadable (grep prints 0 or nothing)
    [ -n "$nr_cpu" ] && [ "$nr_cpu" -gt 0 ] 2>/dev/null || nr_cpu=1
    echo $((100 * nr_cpu))
}
+
# Enable ldlm lru-resize for every namespace whose name matches $1
# (connection-type substring, e.g. "mdc" or "osc"; defaults to "mdc")
# by writing 0 into its lru_size proc file under $LPROC.
# Globals: reads LPROC; calls log().
lru_resize_enable()
{
    local ns=${1:-mdc}
    local f
    for f in $LPROC/ldlm/namespaces/*$ns*/lru_size; do
        # an unmatched glob leaves the literal pattern behind -- skip it
        # instead of failing the redirection
        [ -e "$f" ] || continue
        log "Enable lru resize for $(basename "$(dirname "$f")")"
        echo "0" > "$f"
    done
}
+
# Disable ldlm lru-resize for every namespace whose name matches $1
# (connection-type substring, defaults to "mdc") by writing the
# static default LRU size into its lru_size proc file under $LPROC.
# Globals: reads LPROC; sets DEFAULT_LRU_SIZE; calls log() and
# default_lru_size().
lru_resize_disable()
{
    local ns=${1:-mdc}
    local f
    # the default size is identical for every namespace -- compute it
    # once instead of forking grep per loop iteration
    DEFAULT_LRU_SIZE=$(default_lru_size)
    for f in $LPROC/ldlm/namespaces/*$ns*/lru_size; do
        # an unmatched glob leaves the literal pattern behind -- skip it
        [ -e "$f" ] || continue
        log "Disable lru resize for $(basename "$(dirname "$f")")"
        echo "$DEFAULT_LRU_SIZE" > "$f"
    done
}
pgcache_empty() {
for a in /proc/fs/lustre/llite/*/dump_page_cache; do
}
##################################
-# Test interface
+# Test interface
##################################
error() {
fi
run_one $1 "$2"
-
+
return $?
}
log() {
echo "$*"
-
- local HAVE_MODULES=""
- lsmod | grep lnet > /dev/null || { load_modules && HAVE_MODULES="yes"; }
+ lsmod | grep lnet > /dev/null || load_modules
local MSG="$*"
# Get rif of '
for NODE in $NODES; do
do_node $NODE $LCTL mark "$MSG" 2> /dev/null || true
done
-
- [ -z "$HAVE_MODULES" ] || unload_modules
}
trace() {
testnum=$1
message=$2
tfile=f${testnum}
- export tdir=d${TESTSUITE}/d${base}
+ export tdir=d0.${TESTSUITE}/d${base}
local SAVE_UMASK=`umask`
umask 0022
mkdir -p $DIR/$tdir
}
# Flush dirty pages on both client mounts (DIR1/DIR2): sync inside
# each mount point, pause briefly, then sync again so the data
# reaches the servers.  Returns to SAVE_PWD when done.
sync_clients() {
    # NOTE: the unquoted "[ -d $DIR1 ]" degenerated to the always-true
    # one-argument test when DIR1 was unset, so the function cd'd to
    # $HOME and synced there; quoting restores the intended check.
    [ -d "$DIR1" ] && cd "$DIR1" && sync
    sleep 1
    sync
    [ -d "$DIR2" ] && cd "$DIR2" && sync
    sleep 1
    sync
    cd "$SAVE_PWD"
}
for i in `seq $OSTCOUNT`; do
$LFS setstripe $DIR1/${tfile}_check_grant_$i -i $(($i -1)) -c 1
dd if=/dev/zero of=$DIR1/${tfile}_check_grant_$i bs=4k \
- count=1 > /dev/null 2>&1
+ count=1 > /dev/null 2>&1
done
# sync all the data and make sure no pending data on server
sync_clients
- #get client grant and server grant
+ #get client grant and server grant
client_grant=0
- for d in ${LPROC}/osc/*/cur_grant_bytes; do
+ for d in ${LPROC}/osc/*/cur_grant_bytes; do
client_grant=$((client_grant + `cat $d`))
done
server_grant=0
rm $DIR1/${tfile}_check_grant_$i
done
- #check whether client grant == server grant
+ #check whether client grant == server grant
if [ $client_grant != $server_grant ]; then
echo "failed: client:${client_grant} server: ${server_grant}"
return 1
# FIXME. We need a list of clients
local myNODES=$HOSTNAME
local myNODES_sort
-
+
if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
myNODES="$myNODES $(osts_nodes) $(mdts_nodes)"
fi
chmod 0755 $DIR
chown $myRUNAS_ID:$myRUNAS_ID $DIR/d0_runas_test
$myRUNAS touch $DIR/d0_runas_test/f$$ || \
- error "unable to write to $DIR/d0_runas_test as UID $myRUNAS_ID.
- Please set RUNAS_ID to some UID which exists on MDS and client or
+ error "unable to write to $DIR/d0_runas_test as UID $myRUNAS_ID.
+ Please set RUNAS_ID to some UID which exists on MDS and client or
add user $myRUNAS_ID:$myRUNAS_ID on these nodes."
rm -rf $DIR/d0_runas_test
}