local xsize=${1:-1024} # in bytes
local file=$DIR/$tfile
- [ -z $(lctl get_param -n mdc.*.connect_flags | grep xattr) ] &&
+ [ -z "$(lctl get_param -n mdc.*.connect_flags | grep xattr)" ] &&
skip "must have user_xattr" && return 0
[ -z "$(which setfattr 2>/dev/null)" ] &&
skip_env "could not find setfattr" && return 0
test_116a() { # was previously test_116()
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
+
[[ $OSTCOUNT -lt 2 ]] && skip_env "$OSTCOUNT < 2 OSTs" && return
echo -n "Free space priority "
test_116b() { # LU-2093
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
+
#define OBD_FAIL_MDS_OSC_CREATE_FAIL 0x147
local old_rr=$(do_facet $SINGLEMDS lctl get_param -n \
lo*.$FSNAME-MDT0000-mdtlov.qos_threshold_rr | head -1)
local AFTER
local file="$DIR/$tfile"
- [ "$(facet_fstype ost1)" = "zfs" ] &&
- skip "LU-1956/LU-2261: stats unimplemented on OSD ZFS" &&
+ [ "$(facet_fstype ost1)" = "zfs" -a \
+ $(lustre_version_code ost1 -lt $(version_code 2.6.93)) ] &&
+ skip "LU-1956/LU-2261: stats not implemented on OSD ZFS" &&
return
roc_hit_init
- log "Turn on read and write cache"
- set_cache read on
- set_cache writethrough on
-
- log "Write data and read it back."
- log "Read should be satisfied from the cache."
- dd if=/dev/urandom of=$file bs=4k count=$CPAGES || error "dd failed"
- BEFORE=`roc_hit`
- cancel_lru_locks osc
- cat $file >/dev/null
- AFTER=`roc_hit`
- if ! let "AFTER - BEFORE == CPAGES"; then
- error "NOT IN CACHE: before: $BEFORE, after: $AFTER"
- else
- log "cache hits:: before: $BEFORE, after: $AFTER"
- fi
-
- log "Read again; it should be satisfied from the cache."
- BEFORE=$AFTER
- cancel_lru_locks osc
- cat $file >/dev/null
- AFTER=`roc_hit`
- if ! let "AFTER - BEFORE == CPAGES"; then
- error "NOT IN CACHE: before: $BEFORE, after: $AFTER"
- else
- log "cache hits:: before: $BEFORE, after: $AFTER"
- fi
+ log "Turn on read and write cache"
+ set_cache read on
+ set_cache writethrough on
+ log "Write data and read it back."
+ log "Read should be satisfied from the cache."
+ dd if=/dev/urandom of=$file bs=4k count=$CPAGES || error "dd failed"
+ BEFORE=$(roc_hit)
+ cancel_lru_locks osc
+ cat $file >/dev/null
+ AFTER=$(roc_hit)
+ if ! let "AFTER - BEFORE == CPAGES"; then
+ error "NOT IN CACHE: before: $BEFORE, after: $AFTER"
+ else
+ log "cache hits:: before: $BEFORE, after: $AFTER"
+ fi
- log "Turn off the read cache and turn on the write cache"
- set_cache read off
- set_cache writethrough on
+ log "Read again; it should be satisfied from the cache."
+ BEFORE=$AFTER
+ cancel_lru_locks osc
+ cat $file >/dev/null
+ AFTER=$(roc_hit)
+ if ! let "AFTER - BEFORE == CPAGES"; then
+ error "NOT IN CACHE: before: $BEFORE, after: $AFTER"
+ else
+ log "cache hits:: before: $BEFORE, after: $AFTER"
+ fi
- log "Read again; it should be satisfied from the cache."
- BEFORE=`roc_hit`
- cancel_lru_locks osc
- cat $file >/dev/null
- AFTER=`roc_hit`
- if ! let "AFTER - BEFORE == CPAGES"; then
- error "NOT IN CACHE: before: $BEFORE, after: $AFTER"
- else
- log "cache hits:: before: $BEFORE, after: $AFTER"
- fi
+ log "Turn off the read cache and turn on the write cache"
+ set_cache read off
+ set_cache writethrough on
- log "Read again; it should not be satisfied from the cache."
- BEFORE=$AFTER
- cancel_lru_locks osc
- cat $file >/dev/null
- AFTER=`roc_hit`
- if ! let "AFTER - BEFORE == 0"; then
- error "IN CACHE: before: $BEFORE, after: $AFTER"
- else
- log "cache hits:: before: $BEFORE, after: $AFTER"
- fi
+ log "Read again; it should be satisfied from the cache."
+ BEFORE=$(roc_hit)
+ cancel_lru_locks osc
+ cat $file >/dev/null
+ AFTER=$(roc_hit)
+ if ! let "AFTER - BEFORE == CPAGES"; then
+ error "NOT IN CACHE: before: $BEFORE, after: $AFTER"
+ else
+ log "cache hits:: before: $BEFORE, after: $AFTER"
+ fi
- log "Write data and read it back."
- log "Read should be satisfied from the cache."
- dd if=/dev/urandom of=$file bs=4k count=$CPAGES || error "dd failed"
- BEFORE=`roc_hit`
- cancel_lru_locks osc
- cat $file >/dev/null
- AFTER=`roc_hit`
- if ! let "AFTER - BEFORE == CPAGES"; then
- error "NOT IN CACHE: before: $BEFORE, after: $AFTER"
- else
- log "cache hits:: before: $BEFORE, after: $AFTER"
- fi
+ log "Read again; it should not be satisfied from the cache."
+ BEFORE=$AFTER
+ cancel_lru_locks osc
+ cat $file >/dev/null
+ AFTER=$(roc_hit)
+ if ! let "AFTER - BEFORE == 0"; then
+ error "IN CACHE: before: $BEFORE, after: $AFTER"
+ else
+ log "cache hits:: before: $BEFORE, after: $AFTER"
+ fi
- log "Read again; it should not be satisfied from the cache."
- BEFORE=$AFTER
- cancel_lru_locks osc
- cat $file >/dev/null
- AFTER=`roc_hit`
- if ! let "AFTER - BEFORE == 0"; then
- error "IN CACHE: before: $BEFORE, after: $AFTER"
- else
- log "cache hits:: before: $BEFORE, after: $AFTER"
- fi
+ log "Write data and read it back."
+ log "Read should be satisfied from the cache."
+ dd if=/dev/urandom of=$file bs=4k count=$CPAGES || error "dd failed"
+ BEFORE=$(roc_hit)
+ cancel_lru_locks osc
+ cat $file >/dev/null
+ AFTER=$(roc_hit)
+ if ! let "AFTER - BEFORE == CPAGES"; then
+ error "NOT IN CACHE: before: $BEFORE, after: $AFTER"
+ else
+ log "cache hits:: before: $BEFORE, after: $AFTER"
+ fi
+ log "Read again; it should not be satisfied from the cache."
+ BEFORE=$AFTER
+ cancel_lru_locks osc
+ cat $file >/dev/null
+ AFTER=$(roc_hit)
+ if ! let "AFTER - BEFORE == 0"; then
+ error "IN CACHE: before: $BEFORE, after: $AFTER"
+ else
+ log "cache hits:: before: $BEFORE, after: $AFTER"
+ fi
- log "Turn off read and write cache"
- set_cache read off
- set_cache writethrough off
+ log "Turn off read and write cache"
+ set_cache read off
+ set_cache writethrough off
- log "Write data and read it back"
- log "It should not be satisfied from the cache."
- rm -f $file
- dd if=/dev/urandom of=$file bs=4k count=$CPAGES || error "dd failed"
- cancel_lru_locks osc
- BEFORE=`roc_hit`
- cat $file >/dev/null
- AFTER=`roc_hit`
+ log "Write data and read it back"
+ log "It should not be satisfied from the cache."
+ rm -f $file
+ dd if=/dev/urandom of=$file bs=4k count=$CPAGES || error "dd failed"
+ cancel_lru_locks osc
+ BEFORE=$(roc_hit)
+ cat $file >/dev/null
+ AFTER=$(roc_hit)
if ! let "AFTER - BEFORE == 0"; then
error_ignore bz20762 "IN CACHE: before: $BEFORE, after: $AFTER"
else
log "cache hits:: before: $BEFORE, after: $AFTER"
fi
- log "Turn on the read cache and turn off the write cache"
- set_cache read on
- set_cache writethrough off
+ log "Turn on the read cache and turn off the write cache"
+ set_cache read on
+ set_cache writethrough off
- log "Write data and read it back"
- log "It should not be satisfied from the cache."
- rm -f $file
- dd if=/dev/urandom of=$file bs=4k count=$CPAGES || error "dd failed"
- BEFORE=`roc_hit`
- cancel_lru_locks osc
- cat $file >/dev/null
- AFTER=`roc_hit`
+ log "Write data and read it back"
+ log "It should not be satisfied from the cache."
+ rm -f $file
+ dd if=/dev/urandom of=$file bs=4k count=$CPAGES || error "dd failed"
+ BEFORE=$(roc_hit)
+ cancel_lru_locks osc
+ cat $file >/dev/null
+ AFTER=$(roc_hit)
if ! let "AFTER - BEFORE == 0"; then
error_ignore bz20762 "IN CACHE: before: $BEFORE, after: $AFTER"
else
log "cache hits:: before: $BEFORE, after: $AFTER"
fi
- log "Read again; it should be satisfied from the cache."
- BEFORE=`roc_hit`
- cancel_lru_locks osc
- cat $file >/dev/null
- AFTER=`roc_hit`
- if ! let "AFTER - BEFORE == CPAGES"; then
- error "NOT IN CACHE: before: $BEFORE, after: $AFTER"
- else
- log "cache hits:: before: $BEFORE, after: $AFTER"
- fi
+ log "Read again; it should be satisfied from the cache."
+ BEFORE=$(roc_hit)
+ cancel_lru_locks osc
+ cat $file >/dev/null
+ AFTER=$(roc_hit)
+ if ! let "AFTER - BEFORE == CPAGES"; then
+ error "NOT IN CACHE: before: $BEFORE, after: $AFTER"
+ else
+ log "cache hits:: before: $BEFORE, after: $AFTER"
+ fi
- rm -f $file
+ rm -f $file
}
-run_test 156 "Verification of tunables ============================"
+run_test 156 "Verification of tunables"
#Changelogs
err17935 () {
run_test 160b "Verify that very long rename doesn't crash in changelog"
test_160c() {
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
+
local rc=0
local server_version=$(lustre_version_code $SINGLEMDS)
run_test 185 "Volatile file support"
test_187a() {
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.0) ] &&
+ skip "Need MDS version at least 2.3.0" && return
+
local dir0=$DIR/$tdir/$testnum
mkdir -p $dir0 || error "creating dir $dir0"
run_test 187a "Test data version change"
test_187b() {
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.0) ] &&
+ skip "Need MDS version at least 2.3.0" && return
+
local dir0=$DIR/$tdir/$testnum
mkdir -p $dir0 || error "creating dir $dir0"
test_205() { # Job stats
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
+
[ -z "$(lctl get_param -n mdc.*.connect_flags | grep jobstats)" ] &&
skip "Server doesn't support jobstats" && return 0
[[ $JOBID_VAR = disable ]] && skip "jobstats is disabled" && return
# for now as only exclusive open is supported. After generic lease
# is done, this test suite should be revised. - Jinshan
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.4.52) ]] ||
{ skip "Need MDS version at least 2.4.52"; return 0; }
- remote_mds_nodsh && skip "remote MDS with nodsh" && return
echo "==== test 1: verify get lease work"
$MULTIOP $DIR/$tfile oO_CREAT:O_RDWR:eRE+eU || error "get lease error"
test_220() { #LU-325
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
remote_ost_nodsh && skip "remote OST with nodsh" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
local OSTIDX=0
- test_mkdir -p $DIR/$tdir
- local OST=$($LFS osts | grep ${OSTIDX}": " | \
- awk '{print $2}' | sed -e 's/_UUID$//')
+ # create on MDT0000 so the last_id and next_id are correct
+ mkdir $DIR/$tdir
+ local OST=$($LFS df $DIR | awk '/OST:'$OSTIDX'/ { print $1 }')
+ OST=${OST%_UUID}
# on the mdt's osc
local mdtosc_proc1=$(get_mdtosc_proc_path $SINGLEMDS $OST)
MDSSURVEY=${MDSSURVEY:-$(which mds-survey 2>/dev/null || true)}
test_225a () {
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
if [ -z ${MDSSURVEY} ]; then
skip_env "mds-survey not found" && return
fi
test_225b () {
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
-
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
if [ -z ${MDSSURVEY} ]; then
skip_env "mds-survey not found" && return
fi
ln -s $other_dir/$tfile $migrate_dir/${tfile}_ln_other
$LFS migrate -m $MDTIDX $migrate_dir ||
- error "migrate remote dir error"
+ error "fails on migrating remote dir to MDT1"
echo "migratate to MDT1, then checking.."
for ((i = 0; i < 10; i++)); do
#migrate back to MDT0
MDTIDX=0
+
$LFS migrate -m $MDTIDX $migrate_dir ||
- error "migrate remote dir error"
+ error "fails on migrating remote dir to MDT0"
echo "migrate back to MDT0, checking.."
for file in $(find $migrate_dir); do
test_230c() {
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
local MDTIDX=1
local mdt_index
local t=$(ls $migrate_dir | wc -l)
$LFS migrate --mdt-index $MDTIDX $migrate_dir &&
error "migrate should fail after 5 entries"
+
+ mkdir $migrate_dir/dir &&
+ error "mkdir succeeds under migrating directory"
+ touch $migrate_dir/file &&
+ error "touch file succeeds under migrating directory"
+
local u=$(ls $migrate_dir | wc -l)
[ "$u" == "$t" ] || error "$u != $t during migration"
}
run_test 244 "sendfile with group lock tests"
+# Verify that the MDC import for MDT0000 advertises the "multiple modify
+# RPCs" feature: the "multi_mod_rpcs" connect flag must be negotiated, and
+# when it is, the "max_mod_rpcs" connect data must be present as well.
+test_245() {
+	local flag_name="multi_mod_rpcs"
+	local data_name="max_mod_rpcs"
+	local import_info
+
+	# Dump the connect flags negotiated on the first MDT's import.
+	import_info=$($LCTL get_param mdc.$FSNAME-MDT0000-*.import |
+		grep "connect_flags:")
+	echo "$import_info"
+
+	# If the server never negotiated the flag there is nothing to check.
+	if ! echo "$import_info" | grep -qw $flag_name; then
+		echo "connect flag $flag_name is not set"
+		return
+	fi
+
+	# Flag is set, so the matching connect data must also be exported.
+	import_info=$($LCTL get_param mdc.$FSNAME-MDT0000-*.import)
+	echo "$import_info"
+
+	echo "$import_info" | grep -qw $data_name ||
+		error "import should have connect data $data_name"
+}
+run_test 245 "check mdc connection flag/data: multiple modify RPCs"
+
test_250() {
[ "$(facet_fstype ost$(($($GETSTRIPE -i $DIR/$tfile) + 1)))" = "zfs" ] \
&& skip "no 16TB file size limit on ZFS" && return
}
run_test 251 "Handling short read and write correctly"
+# Exercise the lr_reader tool (dumps last_rcvd data) against ldiskfs
+# targets: plain dump on OST0000, then "-c" (client list) and "-cr"
+# (client list + reply data) on MDT0000 when the tool supports options.
+test_252() {
+	local tgt
+	local dev
+	local out
+	local uuid
+	local num
+	local gen	# NOTE(review): declared but never used below — confirm
+
+	# lr_reader parses the ldiskfs last_rcvd file directly, so both
+	# server backends must be ldiskfs.
+	if [ "$(facet_fstype ost1)" != "ldiskfs" -o \
+		"$(facet_fstype mds1)" != "ldiskfs" ]; then
+		skip "can only run lr_reader on ldiskfs target"
+		return
+	fi
+
+	# check lr_reader on OST0000
+	tgt=ost1
+	dev=$(facet_device $tgt)
+	# $? here is the do_facet/lr_reader exit status propagated through
+	# the command substitution assignment.
+	out=$(do_facet $tgt $LR_READER $dev)
+	[ $? -eq 0 ] || error "$LR_READER failed on target $tgt device $dev"
+	echo "$out"
+	# The dumped uuid must match the UUID the filesystem knows for OST0.
+	uuid=$(echo "$out" | grep -i uuid | awk '{ print $2 }')
+	[ "$uuid" == "$(ostuuid_from_index 0)" ] ||
+		error "Invalid uuid returned by $LR_READER on target $tgt"
+	echo -e "uuid returned by $LR_READER is '$uuid'\n"
+
+	# check lr_reader -c on MDT0000
+	tgt=mds1
+	dev=$(facet_device $tgt)
+	# Older lr_reader builds take no options; detect via the help text.
+	if ! do_facet $tgt $LR_READER -h | grep -q OPTIONS; then
+		echo "$LR_READER does not support additional options"
+		return 0
+	fi
+	out=$(do_facet $tgt $LR_READER -c $dev)
+	[ $? -eq 0 ] || error "$LR_READER failed on target $tgt device $dev"
+	echo "$out"
+	# MDT0000 should list one mdtlov client per remote MDT.
+	num=$(echo "$out" | grep -c "mdtlov")
+	[ "$num" -eq $((MDSCOUNT - 1)) ] ||
+		error "Invalid number of mdtlov clients returned by $LR_READER"
+	echo -e "Number of mdtlov clients returned by $LR_READER is '$num'\n"
+
+	# check lr_reader -cr on MDT0000
+	out=$(do_facet $tgt $LR_READER -cr $dev)
+	[ $? -eq 0 ] || error "$LR_READER failed on target $tgt device $dev"
+	echo "$out"
+	# The reply-data section must exist; its entry count is informational
+	# only (no assertion on the number of client_generation records).
+	echo "$out" | grep -q "^reply_data:$" ||
+		error "$LR_READER should have returned 'reply_data' section"
+	num=$(echo "$out" | grep -c "client_generation")
+	echo -e "Number of reply data returned by $LR_READER is '$num'\n"
+}
+run_test 252 "check lr_reader tool"
+
+
cleanup_test_300() {
trap 0
umask $SAVE_UMASK
}
run_test 300k "test large striped directory"
+# Verify that a non-root user can create a directory under a striped
+# directory whose default layout has gone stale (OBD_FAIL_MDS_STALE_DIR_LAYOUT
+# forces the stale-layout path once), and that the new subdir still lands
+# on the MDT named by the default stripe (index 1).
+test_300l() {
+	[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+	local stripe_index
+
+	test_mkdir -p $DIR/$tdir/striped_dir
+	# Owned by $RUNAS_ID so the later mkdir runs as a non-root user.
+	chown $RUNAS_ID $DIR/$tdir/striped_dir ||
+		error "chown $RUNAS_ID failed"
+	$LFS setdirstripe -i 1 -D $DIR/$tdir/striped_dir ||
+		error "set default striped dir failed"
+
+	#define OBD_FAIL_MDS_STALE_DIR_LAYOUT 0x158
+	# 0x80000000 makes the fail_loc one-shot: it fires on the next
+	# create and then clears itself.
+	$LCTL set_param fail_loc=0x80000158
+	$RUNAS mkdir $DIR/$tdir/striped_dir/test_dir || error "create dir fails"
+
+	stripe_index=$($LFS getdirstripe -i $DIR/$tdir/striped_dir/test_dir)
+	# Bug fix: the message used "$dir", which is never defined in this
+	# function and expanded empty; name the directory actually checked.
+	[ $stripe_index -eq 1 ] ||
+		error "expect 1 get $stripe_index for $DIR/$tdir/striped_dir/test_dir"
+}
+run_test 300l "non-root user to create dir under striped dir with stale layout"
+
prepare_remote_file() {
mkdir $DIR/$tdir/src_dir ||
error "create remote source failed"