# bug number for skipped tests: LU-2036
ALWAYS_EXCEPT=" 76 $ALWAYS_EXCEPT"
-is_sles11() # LU-4351
-{
- if [ -r /etc/SuSE-release ]
- then
- local vers=`grep VERSION /etc/SuSE-release | awk '{print $3}'`
- if [ $vers -eq 11 ]
- then
- return 0
- fi
- fi
- return 1
-}
-
-if is_sles11; then # LU-4351
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 54c"
-fi
-
SRCDIR=$(cd $(dirname $0); echo $PWD)
export PATH=$PATH:/sbin
LFIND=${LFIND:-"$LFS find"}
LVERIFY=${LVERIFY:-ll_dirstripe_verify}
LCTL=${LCTL:-lctl}
-MCREATE=${MCREATE:-mcreate}
OPENFILE=${OPENFILE:-openfile}
OPENUNLINK=${OPENUNLINK:-openunlink}
export MULTIOP=${MULTIOP:-multiop}
local i
local rc=0
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.2.0) ] &&
[ $(lustre_version_code $SINGLEMDS) -le $(version_code 2.2.93) ] &&
skip "MDS 2.2.0-2.2.93 do not NUL-terminate symlinks" && return
test_17n() {
local i
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.2.0) ] &&
[ $(lustre_version_code $SINGLEMDS) -le $(version_code 2.2.93) ] &&
skip "MDS 2.2.0-2.2.93 do not NUL-terminate symlinks" && return
run_test 17n "run e2fsck against master/slave MDT which contains remote dir"
test_17o() {
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.64) ] &&
skip "Need MDS version at least 2.3.64" && return
}
run_test 27C "check full striping across all OSTs"
+# test_27D: exercise the llapi_layout user-space API end-to-end.
+# Builds an OST pool covering every OST (step 1 from OST 0 to OSTCOUNT-1),
+# points llapi_layout_test at a fresh test directory with that pool, then
+# tears the pools down.  Skipped on single-OST setups since pool striping
+# across OSTs is the point of the test.
+test_27D() {
+ [ $OSTCOUNT -lt 2 ] && skip "needs >= 2 OSTs" && return
+ local POOL=${POOL:-testpool}
+ local first_ost=0
+ local last_ost=$(($OSTCOUNT - 1))
+ local ost_step=1
+ local ost_list=$(seq $first_ost $ost_step $last_ost)
+ local ost_range="$first_ost $last_ost $ost_step"
+
+ mkdir -p $DIR/$tdir
+ pool_add $POOL || error "pool_add failed"
+ pool_add_targets $POOL $ost_range || error "pool_add_targets failed"
+ llapi_layout_test -d$DIR/$tdir -p$POOL -o$OSTCOUNT ||
+ error "llapi_layout_test failed"
+ cleanup_pools || error "cleanup_pools failed"
+}
+run_test 27D "validate llapi_layout API"
+
# createtest also checks that device nodes are created and
# then visible correctly (#2091)
test_28() { # bug 2091
run_test 79 "df report consistency check ======================="
test_80() { # bug 10718
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
# relax strong synchronous semantics for slow backends like ZFS
local soc="obdfilter.*.sync_on_lock_cancel"
run_test 133d "Verifying rename_stats ========================================"
test_133e() {
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
local testdir=$DIR/${tdir}/stats_testdir
local ctr f0 f1 bs=32768 count=42 sum
- remote_ost_nodsh && skip "remote OST with nodsh" && return
mkdir -p ${testdir} || error "mkdir failed"
$SETSTRIPE -c 1 -i 0 ${testdir}/${tfile}
run_test 133e "Verifying OST {read,write}_bytes nid stats ================="
test_133f() {
- local proc_dirs="/proc/fs/lustre/ /proc/sys/lnet/ /proc/sys/lustre/"
+ local proc_dirs
+
+ local dirs="/proc/fs/lustre/ /proc/sys/lnet/ /proc/sys/lustre/ \
+/sys/fs/lustre/ /sys/fs/lnet/"
+ local dir
+ for dir in $dirs; do
+ if [ -d $dir ]; then
+ proc_dirs="$proc_dirs $dir"
+ fi
+ done
+
local facet
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
# First without trusting modes.
find $proc_dirs -exec cat '{}' \; &> /dev/null
run_test 133f "Check for LBUGs/Oopses/unreadable files in /proc"
test_133g() {
- local proc_dirs="/proc/fs/lustre/ /proc/sys/lnet/ /proc/sys/lustre/"
+ local proc_dirs
+
+ local dirs="/proc/fs/lustre/ /proc/sys/lnet/ /proc/sys/lustre/ \
+/sys/fs/lustre/ /sys/fs/lnet/"
+ local dir
+ for dir in $dirs; do
+ if [ -d $dir ]; then
+ proc_dirs="$proc_dirs $dir"
+ fi
+ done
+
local facet
# Second verifying readability.
$OPENFILE -f O_LOV_DELAY_CREATE:O_CREAT $test_dir/$tfile-2
fid=$($LFS path2fid $test_dir/$tfile-2)
- echo "cp /etc/passwd $MOUNT/.lustre/fid/$fid"
- cp /etc/passwd $MOUNT/.lustre/fid/$fid &&
- error "create lov data thru .lustre should fail."
+
+ if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.50) ]
+ then # LU-5424
+ echo "cp /etc/passwd $MOUNT/.lustre/fid/$fid"
+ cp /etc/passwd $MOUNT/.lustre/fid/$fid ||
+ error "create lov data thru .lustre failed"
+ fi
echo "cp /etc/passwd $test_dir/$tfile-2"
cp /etc/passwd $test_dir/$tfile-2 ||
error "copy to $test_dir/$tfile-2 failed."
local rc=0
mkdir -p $DIR/$tdir
- $LFS mkdir -i $MDTIDX -c $MDSCOUNT $remote_dir ||
+ $LFS mkdir -i $MDTIDX $remote_dir ||
error "create remote directory failed"
cp /etc/hosts $remote_dir/$tfile
run_test 155h "Verify big file correctness: read cache:off write_cache:off"
test_156() {
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
local CPAGES=3
local BEFORE
run_test 161b "link ea sanity under remote directory"
test_161c() {
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
[[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.1.5) ]] &&
skip "Need MDS version at least 2.1.5" && return
test_180c() { # LU-2598
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
[[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.0) ]] &&
skip "Need MDS version at least 2.4.0" && return
run_test 182 "Disable MDC RPCs semaphore wouldn't crash client ================"
test_183() { # LU-2275
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
[[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.56) ]] &&
skip "Need MDS version at least 2.3.56" && return
}
run_test 187b "Test data version change on volatile file"
-# OST pools tests
-check_file_in_pool()
-{
- local file=$1
- local pool=$2
- local tlist="$3"
- local res=$($GETSTRIPE $file | grep 0x | cut -f2)
- for i in $res
- do
- for t in $tlist ; do
- [ "$i" -eq "$t" ] && continue 2
- done
-
- echo "pool list: $tlist"
- echo "striping: $res"
- error_noexit "$file not allocated in $pool"
- return 1
- done
- return 0
-}
-
-pool_add() {
- echo "Creating new pool"
- local pool=$1
-
- create_pool $FSNAME.$pool ||
- { error_noexit "No pool created, result code $?"; return 1; }
- [ $($LFS pool_list $FSNAME | grep -c $pool) -eq 1 ] ||
- { error_noexit "$pool not in lfs pool_list"; return 2; }
-}
-
-pool_add_targets() {
- echo "Adding targets to pool"
- local pool=$1
- local first=$2
- local last=$3
- local step=${4:-1}
-
- local list=$(seq $first $step $last)
-
- local t=$(for i in $list; do printf "$FSNAME-OST%04x_UUID " $i; done)
- do_facet mgs $LCTL pool_add \
- $FSNAME.$pool $FSNAME-OST[$first-$last/$step]
- wait_update $HOSTNAME "lctl get_param -n lov.$FSNAME-*.pools.$pool \
- | sort -u | tr '\n' ' ' " "$t" || {
- error_noexit "Add to pool failed"
- return 1
- }
- local lfscount=$($LFS pool_list $FSNAME.$pool | grep -c "\-OST")
- local addcount=$(((last - first) / step + 1))
- [ $lfscount -eq $addcount ] || {
- error_noexit "lfs pool_list bad ost count" \
- "$lfscount != $addcount"
- return 2
- }
-}
-
-pool_set_dir() {
- local pool=$1
- local tdir=$2
- echo "Setting pool on directory $tdir"
-
- $SETSTRIPE -c 2 -p $pool $tdir && return 0
-
- error_noexit "Cannot set pool $pool to $tdir"
- return 1
-}
-
-pool_check_dir() {
- local pool=$1
- local tdir=$2
- echo "Checking pool on directory $tdir"
-
- local res=$($GETSTRIPE --pool $tdir | sed "s/\s*$//")
- [ "$res" = "$pool" ] && return 0
-
- error_noexit "Pool on '$tdir' is '$res', not '$pool'"
- return 1
-}
-
-pool_dir_rel_path() {
- echo "Testing relative path works well"
- local pool=$1
- local tdir=$2
- local root=$3
-
- mkdir -p $root/$tdir/$tdir
- cd $root/$tdir
- pool_set_dir $pool $tdir || return 1
- pool_set_dir $pool ./$tdir || return 2
- pool_set_dir $pool ../$tdir || return 3
- pool_set_dir $pool ../$tdir/$tdir || return 4
- rm -rf $tdir; cd - > /dev/null
-}
-
-pool_alloc_files() {
- echo "Checking files allocation from directory pool"
- local pool=$1
- local tdir=$2
- local count=$3
- local tlist="$4"
-
- local failed=0
- for i in $(seq -w 1 $count)
- do
- local file=$tdir/file-$i
- touch $file
- check_file_in_pool $file $pool "$tlist" || \
- failed=$((failed + 1))
- done
- [ "$failed" = 0 ] && return 0
-
- error_noexit "$failed files not allocated in $pool"
- return 1
-}
-
-pool_create_files() {
- echo "Creating files in pool"
- local pool=$1
- local tdir=$2
- local count=$3
- local tlist="$4"
-
- mkdir -p $tdir
- local failed=0
- for i in $(seq -w 1 $count)
- do
- local file=$tdir/spoo-$i
- $SETSTRIPE -p $pool $file
- check_file_in_pool $file $pool "$tlist" || \
- failed=$((failed + 1))
- done
- [ "$failed" = 0 ] && return 0
-
- error_noexit "$failed files not allocated in $pool"
- return 1
-}
-
-pool_lfs_df() {
- echo "Checking 'lfs df' output"
- local pool=$1
-
- local t=$($LCTL get_param -n lov.$FSNAME-clilov-*.pools.$pool |
- tr '\n' ' ')
- local res=$($LFS df --pool $FSNAME.$pool |
- awk '{print $1}' |
- grep "$FSNAME-OST" |
- tr '\n' ' ')
- [ "$res" = "$t" ] && return 0
-
- error_noexit "Pools OSTs '$t' is not '$res' that lfs df reports"
- return 1
-}
-
-pool_file_rel_path() {
- echo "Creating files in a pool with relative pathname"
- local pool=$1
- local tdir=$2
-
- mkdir -p $tdir ||
- { error_noexit "unable to create $tdir"; return 1 ; }
- local file="/..$tdir/$tfile-1"
- $SETSTRIPE -p $pool $file ||
- { error_noexit "unable to create $file" ; return 2 ; }
-
- cd $tdir
- $SETSTRIPE -p $pool $tfile-2 || {
- error_noexit "unable to create $tfile-2 in $tdir"
- return 3
- }
-}
-
-pool_remove_first_target() {
- echo "Removing first target from a pool"
- local pool=$1
-
- local pname="lov.$FSNAME-*.pools.$pool"
- local t=$($LCTL get_param -n $pname | head -n1)
- do_facet mgs $LCTL pool_remove $FSNAME.$pool $t
- wait_update $HOSTNAME "lctl get_param -n $pname | grep $t" "" || {
- error_noexit "$t not removed from $FSNAME.$pool"
- return 1
- }
-}
-
-pool_remove_all_targets() {
- echo "Removing all targets from pool"
- local pool=$1
- local file=$2
- local pname="lov.$FSNAME-*.pools.$pool"
- for t in $($LCTL get_param -n $pname | sort -u)
- do
- do_facet mgs $LCTL pool_remove $FSNAME.$pool $t
- done
- wait_update $HOSTNAME "lctl get_param -n $pname" "" || {
- error_noexit "Pool $FSNAME.$pool cannot be drained"
- return 1
- }
- # striping on an empty/nonexistant pool should fall back
- # to "pool of everything"
- touch $file || {
- error_noexit "failed to use fallback striping for empty pool"
- return 2
- }
- # setstripe on an empty pool should fail
- $SETSTRIPE -p $pool $file 2>/dev/null && {
- error_noexit "expected failure when creating file" \
- "with empty pool"
- return 3
- }
- return 0
-}
-
-pool_remove() {
- echo "Destroying pool"
- local pool=$1
- local file=$2
-
- do_facet mgs $LCTL pool_destroy $FSNAME.$pool
-
- sleep 2
- # striping on an empty/nonexistant pool should fall back
- # to "pool of everything"
- touch $file || {
- error_noexit "failed to use fallback striping for missing pool"
- return 1
- }
- # setstripe on an empty pool should fail
- $SETSTRIPE -p $pool $file 2>/dev/null && {
- error_noexit "expected failure when creating file" \
- "with missing pool"
- return 2
- }
-
- # get param should return err once pool is gone
- if wait_update $HOSTNAME "lctl get_param -n \
- lov.$FSNAME-*.pools.$pool 2>/dev/null || echo foo" "foo"
- then
- remove_pool_from_list $FSNAME.$pool
- return 0
- fi
- error_noexit "Pool $FSNAME.$pool is not destroyed"
- return 3
-}
-
test_200() {
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
test_205() { # Job stats
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ -z "$(lctl get_param -n mdc.*.connect_flags | grep jobstats)" ] &&
skip "Server doesn't support jobstats" && return 0
[[ $JOBID_VAR = disable ]] && skip "jobstats is disabled" && return
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.4.52) ]] ||
{ skip "Need MDS version at least 2.4.52"; return 0; }
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
echo "==== test 1: verify get lease work"
$MULTIOP $DIR/$tfile oO_CREAT:O_RDWR:eRE+eU || error "get lease error"
# having "abc" as 1st arg, creates $TMP/lnet_abc.out and $TMP/lnet_abc.sys
create_lnet_proc_files() {
- cat /proc/sys/lnet/$1 >$TMP/lnet_$1.out || error "cannot read /proc/sys/lnet/$1"
+ lctl get_param -n $1 >$TMP/lnet_$1.out || error "cannot read lnet.$1"
sysctl lnet.$1 >$TMP/lnet_$1.sys_tmp || error "cannot read lnet.$1"
sed "s/^lnet.$1\ =\ //g" "$TMP/lnet_$1.sys_tmp" >$TMP/lnet_$1.sys
local L2 # regexp for 2nd line (optional)
local BR # regexp for the rest (body)
- # /proc/sys/lnet/stats should look as 11 space-separated non-negative numerics
+ # lnet.stats should look as 11 space-separated non-negative numerics
BR="^$N $N $N $N $N $N $N $N $N $N $N$"
create_lnet_proc_files "stats"
- check_lnet_proc_stats "stats.out" "/proc/sys/lnet/stats" "$BR"
check_lnet_proc_stats "stats.sys" "lnet.stats" "$BR"
remove_lnet_proc_files "stats"
- # /proc/sys/lnet/routes should look like this:
+ # lnet.routes should look like this:
# Routing disabled/enabled
# net hops priority state router
# where net is a string like tcp0, hops > 0, priority >= 0,
L2="^net +hops +priority +state +router$"
BR="^$NET +$N +(0|1) +(up|down) +$NID$"
create_lnet_proc_files "routes"
- check_lnet_proc_entry "routes.out" "/proc/sys/lnet/routes" "$BR" "$L1" "$L2"
check_lnet_proc_entry "routes.sys" "lnet.routes" "$BR" "$L1" "$L2"
remove_lnet_proc_files "routes"
- # /proc/sys/lnet/routers should look like this:
+ # lnet.routers should look like this:
# ref rtr_ref alive_cnt state last_ping ping_sent deadline down_ni router
# where ref > 0, rtr_ref > 0, alive_cnt >= 0, state is up/down,
# last_ping >= 0, ping_sent is boolean (0/1), deadline and down_ni are
L1="^ref +rtr_ref +alive_cnt +state +last_ping +ping_sent +deadline +down_ni +router$"
BR="^$P +$P +$N +(up|down) +$N +(0|1) +$I +$I +$NID$"
create_lnet_proc_files "routers"
- check_lnet_proc_entry "routers.out" "/proc/sys/lnet/routers" "$BR" "$L1"
check_lnet_proc_entry "routers.sys" "lnet.routers" "$BR" "$L1"
remove_lnet_proc_files "routers"
- # /proc/sys/lnet/peers should look like this:
+ # lnet.peers should look like this:
# nid refs state last max rtr min tx min queue
# where nid is a string like 192.168.1.1@tcp2, refs > 0,
# state is up/down/NA, max >= 0. last, rtr, min, tx, min are
L1="^nid +refs +state +last +max +rtr +min +tx +min +queue$"
BR="^$NID +$P +(up|down|NA) +$I +$N +$I +$I +$I +$I +$N$"
create_lnet_proc_files "peers"
- check_lnet_proc_entry "peers.out" "/proc/sys/lnet/peers" "$BR" "$L1"
check_lnet_proc_entry "peers.sys" "lnet.peers" "$BR" "$L1"
remove_lnet_proc_files "peers"
- # /proc/sys/lnet/buffers should look like this:
+ # lnet.buffers should look like this:
# pages count credits min
# where pages >=0, count >=0, credits and min are numeric (0 or >0 or <0)
L1="^pages +count +credits +min$"
BR="^ +$N +$N +$I +$I$"
create_lnet_proc_files "buffers"
- check_lnet_proc_entry "buffers.out" "/proc/sys/lnet/buffers" "$BR" "$L1"
check_lnet_proc_entry "buffers.sys" "lnet.buffers" "$BR" "$L1"
remove_lnet_proc_files "buffers"
- # /proc/sys/lnet/nis should look like this:
+ # lnet.nis should look like this:
# nid status alive refs peer rtr max tx min
# where nid is a string like 192.168.1.1@tcp2, status is up/down,
# alive is numeric (0 or >0 or <0), refs >= 0, peer >= 0,
L1="^nid +status +alive +refs +peer +rtr +max +tx +min$"
BR="^$NID +(up|down) +$I +$N +$N +$N +$N +$I +$I$"
create_lnet_proc_files "nis"
- check_lnet_proc_entry "nis.out" "/proc/sys/lnet/nis" "$BR" "$L1"
check_lnet_proc_entry "nis.sys" "lnet.nis" "$BR" "$L1"
remove_lnet_proc_files "nis"
- # can we successfully write to /proc/sys/lnet/stats?
- echo "0" >/proc/sys/lnet/stats || error "cannot write to /proc/sys/lnet/stats"
+ # can we successfully write to lnet.stats?
+ lctl set_param -n stats=0 || error "cannot write to lnet.stats"
sysctl -w lnet.stats=0 || error "cannot write to lnet.stats"
}
-run_test 215 "/proc/sys/lnet exists and has proper content - bugs 18102, 21079, 21517"
+run_test 215 "lnet exists and has proper content - bugs 18102, 21079, 21517"
test_216() { # bug 20317
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
# LU-1512 try to reuse idle OI blocks
test_228a() {
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
skip "non-ldiskfs backend" && return
test_228b() {
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
skip "non-ldiskfs backend" && return
#LU-1881
test_228c() {
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
skip "non-ldiskfs backend" && return
}
run_test 240 "race between ldlm enqueue and the connection RPC (no ASSERT)"
+# Helper for test_241: buffered-I/O reader.  Reads the first 40960 bytes of
+# $DIR/$tfile $1 times, cancelling the client's OSC LRU locks after every
+# pass so each iteration re-acquires locks rather than hitting cached state.
+test_241_bio() {
+ for LOOP in $(seq $1); do
+ dd if=$DIR/$tfile of=/dev/null bs=40960 count=1 2>/dev/null
+ cancel_lru_locks osc
+ done
+}
+
+# Helper for test_241: direct-I/O reader.  Reads the same 40960-byte region
+# of $DIR/$tfile $1 times with iflag=direct, bypassing the page cache, to
+# race against the buffered reader in test_241_bio.
+test_241_dio() {
+ for LOOP in $(seq $1); do
+ dd if=$DIR/$tfile of=/dev/null bs=40960 count=1 \
+ iflag=direct 2>/dev/null
+ done
+}
+
+# test_241: race buffered reads (with lock cancellation) against direct-I/O
+# reads on one file.  1000 buffered-read iterations run in the background
+# while 1000 direct-I/O iterations run in the foreground; wait reaps the
+# background job so a failure there is not silently lost.
+test_241() {
+ dd if=/dev/zero of=$DIR/$tfile count=1 bs=40960
+ ls -la $DIR/$tfile
+ cancel_lru_locks osc
+ test_241_bio 1000 &
+ PID=$!
+ test_241_dio 1000
+ wait $PID
+}
+run_test 241 "bio vs dio"
+
cleanup_test_300() {
trap 0
umask $SAVE_UMASK
local ls
#define OBD_FAIL_MGC_PAUSE_PROCESS_LOG 0x903
$LCTL set_param fail_loc=0x903
- # cancel_lru_locks mgc - does not work due to lctl set_param syntax
- for ls in /proc/fs/lustre/ldlm/namespaces/MGC*/lru_size; do
- echo "clear" > $ls
- done
+
+ cancel_lru_locks MGC
+
FAIL_ON_ERROR=true cleanup
FAIL_ON_ERROR=true setup
}