ONLY=${ONLY:-"$*"}
-# bug number for skipped test: LU-8972
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 101"
+# bug number for skipped test:
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-is_sles11() # LU-2181
-{
- if [ -r /etc/SuSE-release ]
- then
- local vers=$(grep VERSION /etc/SuSE-release | awk '{print $3}')
- local patchlev=$(grep PATCHLEVEL /etc/SuSE-release |
- awk '{ print $3 }')
- if [ $vers -eq 11 ] && [ $patchlev -eq 2 ]
- then
- return 0
- fi
- fi
- return 1
-}
-
-if [ "$FAILURE_MODE" = "HARD" ]; then
- CONFIG_EXCEPTIONS="24a " &&
- echo "Except the tests: $CONFIG_EXCEPTIONS for " \
- "FAILURE_MODE=$FAILURE_MODE, b=23573" &&
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
-fi
-
# bug number for skipped test:
# a tool to create lustre filesystem images
ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
STORED_MDSSIZE=$MDSSIZE
STORED_OSTSIZE=$OSTSIZE
MDSSIZE=200000
+[ $(facet_fstype $SINGLEMDS) = "zfs" ] && MDSSIZE=400000
OSTSIZE=200000
+[ $(facet_fstype ost1) = "zfs" ] && OSTSIZE=400000
fs2mds_HOST=$mds_HOST
fs2ost_HOST=$ost_HOST
[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
# bug number for skipped test:
ALWAYS_EXCEPT="$ALWAYS_EXCEPT"
+# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
init_logging
}
start_mgs () {
- echo "start mgs"
- start mgs $(mgsdevname) $MGS_MOUNT_OPTS
+ echo "start mgs service on $(facet_active_host mgs)"
+ start mgs $(mgsdevname) $MGS_MOUNT_OPTS $@
}
start_mdt() {
}
run_test 9 "test ptldebug and subsystem for mkfs"
-is_blkdev () {
- local facet=$1
- local dev=$2
- local size=${3:-""}
-
- local rc=0
- do_facet $facet "test -b $dev" || rc=1
- if [[ "$size" ]]; then
- local in=$(do_facet $facet "dd if=$dev of=/dev/null bs=1k \
- count=1 skip=$size 2>&1" |
- awk '($3 == "in") { print $1 }')
- [[ $in = "1+0" ]] || rc=1
- fi
- return $rc
-}
-
#
# Test 16 was to "verify that lustre will correct the mode of OBJECTS".
# But with new MDS stack we don't care about the mode of local objects
fi
mount_client $MOUNT || error "mount_client $MOUNT failed"
wait_osc_import_state mds ost FULL
- wait_osc_import_state client ost FULL
+ wait_osc_import_ready client ost
check_mount || error "check_mount failed"
pass
"MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs"
ps -ef | grep mount
fi
- stop_mds || error "stopping MDSes failed"
- stop_ost || error "stopping OSSes failed"
+ cleanup || error "cleanup failed with rc $?"
}
run_test 23a "interrupt client during recovery mount delay"
-umount_client $MOUNT
-cleanup_nocli
-
test_23b() { # was test_23
start_mds || error "MDS start failed"
start_ost || error "Unable to start OST1"
}
run_test 27b "Reacquire MGS lock after failover"
-test_28() {
+test_28A() { # was test_28
setup
TEST="$LCTL get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
error "third set_conf_param_and_check client failed"
cleanup || error "cleanup failed with rc $?"
}
-run_test 28 "permanent parameter setting"
+run_test 28A "permanent parameter setting"
test_28a() { # LU-4221
[[ $(lustre_version_code ost1) -ge $(version_code 2.5.52) ]] ||
local node=$1
local all_removed=false
local i=0
+ local fstype=$(facet_fstype $SINGLEMDS)
+
+ [ $fstype == "zfs" ] && do_rpc_nodes $node "service zed stop"
while ((i < 20)); do
echo "Unloading modules on $node: Attempt $i"
- do_rpc_nodes $node $LUSTRE_RMMOD $(facet_fstype $SINGLEMDS) &&
+ do_rpc_nodes $node $LUSTRE_RMMOD $fstype &&
all_removed=true
do_rpc_nodes $node check_mem_leak || return 1
if $all_removed; then
do_rpc_nodes $node load_modules
return 0
fi
+ if [ $fstype == "zfs" ]; then
+ do_rpc_nodes $node "$ZPOOL status -v"
+ fi
sleep 5
i=$((i + 1))
done
mdt_index=$($LFS getdirstripe -i $dir)
stripe_cnt=$($LFS getdirstripe -c $dir)
if [ $mdt_index = 0 -a $stripe_cnt -le 1 ]; then
- $LFS mv -M 1 $dir || {
+ $LFS migrate -m 1 $dir || {
popd
error_noexit "migrate MDT1 failed"
return 1
mdt_index=$($LFS getdirstripe -i $dir)
stripe_cnt=$($LFS getdirstripe -c $dir)
if [ $mdt_index = 1 -a $stripe_cnt -le 1 ]; then
- $LFS mv -M 0 $dir || {
+ $LFS migrate -m 0 $dir || {
popd
error_noexit "migrate MDT0 failed"
return 1
error_noexit "Unmounting the MDT2"
return 1
}
+ if [[ $fstype == zfs ]]; then
+ $r "$ZPOOL export t32fs-mdt2"
+ fi
shall_cleanup_mdt1=false
fi
error_noexit "Unmounting the MDT"
return 1
}
+ if [[ $fstype == zfs ]]; then
+ $r "$ZPOOL export t32fs-mdt1"
+ fi
shall_cleanup_mdt=false
$r $UMOUNT $tmp/mnt/ost || {
error_noexit "Unmounting the OST"
return 1
}
+ if [[ $fstype == zfs ]]; then
+ $r "$ZPOOL export t32fs-ost1"
+ fi
shall_cleanup_ost=false
t32_reload_modules $node || {
--reformat $fs2mgsdev $fs2mgsvdev || error "add fs2mgs failed"
start $fs2mgs $fs2mgsdev $MGS_MOUNT_OPTS || error "start fs2mgs failed"
stop $fs2mgs -f || error "stop fs2mgs failed"
+ cleanup || error "cleanup failed with $?"
}
run_test 43b "parse nosquash_nids with commas in expr_list"
-umount_client $MOUNT
-cleanup_nocli
-
test_44() { # 16317
setup
check_mount || error "check_mount"
# wait until osts in sync
for (( i=2; i<=$OSTCOUNT; i++ )); do
wait_osc_import_state mds ost$i FULL
- wait_osc_import_state client ost$i FULL
+ wait_osc_import_ready client ost$i
done
#second client see all ost's
[ $RC1 -ne 0 ] && log "lazystatfs multiop failed"
wait $PID || { RC1=$?; log "multiop return error "; }
- $LFS df &
+ $LFS df -l &
PID=$!
sleep 5
kill -s 0 $PID
setup
start_ost2 || error "Unable to start OST2"
wait_osc_import_state mds ost2 FULL
- wait_osc_import_state client ost2 FULL
+ wait_osc_import_ready client ost2
local PARAM="${FSNAME}-OST0001.osc.active"
done
echo
+ # sync all the data and make sure no pending data on the client,
+ # thus the SOM xattr would not be changed any more.
+ cancel_lru_locks osc
+
# backup files
echo backup files to $TMP/$tdir
local files=$(find $DIR/$tdir -type f -newer $TMP/modified_first)
setup_noconfig
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
createmany -o $DIR/$tdir/$tfile-%d 100
- # make sure that OSTs do not cancel llog cookies before we unmount the MDS
-#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
- do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x601"
unlinkmany $DIR/$tdir/$tfile-%d 100
stop_mds || error "Unable to stop MDS"
local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1)
local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -1)
+ # add EXCLUDE records to config log, they are not to be
+ # removed by lctl replace_nids
+ set_conf_param_and_check mds \
+ "$LCTL get_param -n osc.$FSNAME-OST0000-osc-MDT0000.active" \
+ "$FSNAME-OST0000.osc.active" \
+ "0"
+
echo "replace_nids should fail if MDS, OSTs and clients are UP"
do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
error "replace_nids fail"
stop_mds || error "Unable to stop MDS"
fi
- setup_noconfig
+ start_mgsmds || error "start mgsmds failed"
+ set_conf_param_and_check mds \
+ "$LCTL get_param -n osc.$FSNAME-OST0000-osc-MDT0000.active" \
+ "$FSNAME-OST0000.osc.active" \
+ "1"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "mount client failed"
+
check_mount || error "error after nid replace"
cleanup || error "cleanup failed"
reformat
local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM |
head -1)
echo "max_dirty_mb: $MAX_DIRTY_MB"
- local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB + MAX_DIRTY_MB))
+ local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB - 10))
echo "new_max_dirty_mb: $NEW_MAX_DIRTY_MB"
do_facet mgs $LCTL set_param -P $MDMB_PARAM=$NEW_MAX_DIRTY_MB
wait_update $HOSTNAME "$LCTL get_param -n $MDMB_PARAM |
run_test 86 "Replacing mkfs.lustre -G option"
test_87() { #LU-6544
- [[ $(lustre_version_code $SINGLEMDS1) -ge $(version_code 2.9.51) ]] ||
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.51) ]] ||
{ skip "Need MDS version at least 2.9.51" && return; }
[[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
{ skip "ldiskfs only test" && return; }
check_mount || error "check client $MOUNT failed"
#set xattr
- $SETSTRIPE -E 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
+ $SETSTRIPE -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
error "Create file with 3 components failed"
$TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed"
i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed"
#shows that osp code is buggy
do_facet mds1 $LCTL set_param fail_loc=0 fail_val=0
- cleanupall
+ stopall
}
run_test 106 "check osp llog processing when catalog is wrapped"
}
run_test 107 "Unknown config param should not fail target mounting"
+# Prepare scratch space on the $SINGLEMDS node for the t_108 migration
+# tests: recreate $tmp/{mnt,images} and make one ~200MB sparse image
+# file per facet.  Relies on $rcmd, $tmp and $facets set by the caller.
+t_108_prep() {
+	local facet
+
+	$rcmd rm -rf $tmp > /dev/null 2>&1
+	$rcmd mkdir -p $tmp/{mnt,images} || error "failed to mkdir remotely"
+
+	for facet in $facets; do
+		# fixed option order: "-f" belongs to the "export"
+		# subcommand, not zpool itself (matches t_108_cleanup);
+		# failure is ignored since the pool may not exist yet
+		[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
+			$rcmd $ZPOOL export -f lustre-$facet > /dev/null 2>&1
+		$rcmd mkdir $tmp/mnt/$facet ||
+			error "failed to mkdir $tmp/mnt/$facet"
+		# seek=199 bs=1M count=1 -> 200MB sparse image
+		$rcmd dd if=/dev/zero of=$tmp/images/$facet \
+			seek=199 bs=1M count=1 ||
+			error "failed to create $tmp/images/$facet"
+	done
+}
+
+# Format one migration-target device image for the t_108 tests.
+# $1 role ("mdt" or "ost"), $2 zero-based index, $3 backend fstype,
+# $4 either "mgs" or "mgsnode=<nid>" (passed through as --$4),
+# $5 optional zfs pool/dataset name placed before the image path.
+t_108_mkfs() {
+	local role=$1
+	local idx=$2
+	local bkfs=$3
+	local mgs=$4
+	local facet=${role}$((idx + 1))
+	local pool=""
+	[ $# -eq 5 ] && pool=$5
+
+	# --replace keeps the on-disk index of the restored image valid
+	do_facet $SINGLEMDS $MKFS --fsname=lustre --$mgs \
+		--$role --index=$idx --replace --backfstype=$bkfs \
+		--device-size=200000 --reformat $pool $tmp/images/$facet ||
+		error "failed to mkfs for $facet"
+}
+
+# Mount the migrated filesystem as a client and verify basic namespace
+# operations (list, overwrite, create) plus data integrity against the
+# checksums shipped inside the image ($MOUNT/checksum.src).
+t_108_check() {
+	echo "mounting client..."
+	mount -t lustre ${nid}:/lustre $MOUNT ||
+		error "failed to mount lustre"
+
+	echo "check list"
+	ls -l $MOUNT/local_dir || error "failed to list"
+
+	echo "check truncate && write"
+	echo "dummmmmmmmmmmmm" > $MOUNT/remote_dir/fsx.c ||
+		error "failed to truncate & write"
+
+	echo "check create"
+	touch $MOUNT/foooo ||
+		error "failed to create"
+
+	echo "check read && write && append"
+	sha1sum $MOUNT/conf-sanity.sh |
+		awk '{ print $1 }' > $MOUNT/checksum.new ||
+		error "failed to read(1)"
+	sha1sum $MOUNT/remote_dir/unlinkmany.c |
+		awk '{ print $1 }' >> $MOUNT/checksum.new ||
+		error "failed to read(2)"
+	sha1sum $MOUNT/striped_dir/lockahead_test.o |
+		awk '{ print $1 }' >> $MOUNT/checksum.new ||
+		error "failed to read(3)"
+
+	echo "verify data"
+	# checksum.src was computed before the migration; any mismatch
+	# means data was corrupted by the backend conversion
+	diff $MOUNT/checksum.new $MOUNT/checksum.src ||
+		error "failed to verify data"
+
+	echo "done."
+}
+
+# Undo t_108 state: clear the EXIT/ERR trap, force-unmount the client
+# and every facet, export the per-facet zpools (zfs only), and remove
+# the scratch directory created by t_108_prep.
+t_108_cleanup() {
+	trap 0
+	local facet
+
+	echo "cleanup..."
+	umount -f $MOUNT || error "failed to umount client"
+	for facet in $facets; do
+		$rcmd umount -f $tmp/mnt/$facet ||
+			error "failed to umount $facet"
+		if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then
+			$rcmd $ZPOOL export -f lustre-$facet ||
+				error "failed to export lustre-$facet"
+		fi
+	done
+
+	$rcmd rm -rf $tmp || error "failed to rm the dir $tmp"
+}
+
+# Verify migration of on-disk server data from ldiskfs to ZFS: restore
+# pre-built ldiskfs image tarballs into freshly formatted ZFS datasets,
+# rewrite the server NIDs, mount everything and check contents with
+# t_108_check().
+test_108a() {
+	[ "$CLIENTONLY" ] && skip "Client-only testing" && return
+
+	[ $(facet_fstype $SINGLEMDS) != "zfs" ] &&
+		skip "zfs only test" && return
+
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] &&
+		skip "Need server version at least 2.10.58" && return
+
+	stopall
+	load_modules
+
+	local tmp=$TMP/$tdir
+	local rcmd="do_facet $SINGLEMDS"
+	local facets="mdt1 mdt2 ost1 ost2"
+	local nid=$($rcmd $LCTL list_nids | head -1)
+	local facet
+
+	trap t_108_cleanup EXIT ERR
+	t_108_prep
+
+	t_108_mkfs mdt 0 zfs mgs lustre-mdt1/mdt1
+	t_108_mkfs mdt 1 zfs mgsnode=$nid lustre-mdt2/mdt2
+	t_108_mkfs ost 0 zfs mgsnode=$nid lustre-ost1/ost1
+	t_108_mkfs ost 1 zfs mgsnode=$nid lustre-ost2/ost2
+
+	for facet in $facets; do
+		# temporarily make the dataset mountable so the image
+		# contents can be untarred into it
+		$rcmd zfs set mountpoint=$tmp/mnt/$facet canmount=on \
+			lustre-$facet/$facet ||
+			error "failed to zfs set for $facet (1)"
+		$rcmd zfs mount lustre-$facet/$facet ||
+			error "failed to local mount $facet"
+		# source images are ldiskfs-era snapshots; assumed
+		# present in $LUSTRE/tests -- TODO confirm availability
+		$rcmd tar jxf $LUSTRE/tests/ldiskfs_${facet}_2_11.tar.bz2 \
+			--xattrs --xattrs-include="trusted.*" \
+			-C $tmp/mnt/$facet/ > /dev/null 2>&1 ||
+			error "failed to untar image for $facet"
+		# stale OI / LFSCK state must not survive the migration
+		$rcmd "cd $tmp/mnt/$facet && rm -rf oi.* OI_* lfsck_* LFSCK" ||
+			error "failed to cleanup for $facet"
+		$rcmd zfs umount lustre-$facet/$facet ||
+			error "failed to local umount $facet"
+		$rcmd zfs set canmount=off lustre-$facet/$facet ||
+			error "failed to zfs set $facet (2)"
+	done
+
+	echo "changing server nid..."
+	# nosvc mount starts only the MGS so replace_nids can run
+	$rcmd mount -t lustre -o nosvc lustre-mdt1/mdt1 $tmp/mnt/mdt1
+	$rcmd lctl replace_nids lustre-MDT0000 $nid
+	$rcmd lctl replace_nids lustre-MDT0001 $nid
+	$rcmd lctl replace_nids lustre-OST0000 $nid
+	$rcmd lctl replace_nids lustre-OST0001 $nid
+	$rcmd umount $tmp/mnt/mdt1
+
+	for facet in $facets; do
+		echo "mounting $facet from backup..."
+		$rcmd mount -t lustre -o abort_recov lustre-$facet/$facet \
+			$tmp/mnt/$facet || error "failed to mount $facet"
+	done
+
+	# ZFS backend can detect migration and trigger OI scrub automatically
+	# sleep 3 seconds for scrub done
+	sleep 3
+
+	t_108_check
+	t_108_cleanup
+}
+run_test 108a "migrate from ldiskfs to ZFS"
+
+# Mirror of test_108a in the other direction: restore pre-built ZFS
+# image tarballs onto freshly formatted ldiskfs loop devices, rewrite
+# server NIDs, start an explicit OI scrub, and verify with t_108_check().
+test_108b() {
+	[ "$CLIENTONLY" ] && skip "Client-only testing" && return
+
+	[ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+		skip "ldiskfs only test" && return
+
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] &&
+		skip "Need server version at least 2.10.58" && return
+
+	stopall
+	load_modules
+
+	local tmp=$TMP/$tdir
+	local rcmd="do_facet $SINGLEMDS"
+	local facets="mdt1 mdt2 ost1 ost2"
+	local scrub_list="MDT0000 MDT0001 OST0000 OST0001"
+	local nid=$($rcmd $LCTL list_nids | head -1)
+	local facet
+
+	trap t_108_cleanup EXIT ERR
+	t_108_prep
+
+	t_108_mkfs mdt 0 ldiskfs mgs
+	t_108_mkfs mdt 1 ldiskfs mgsnode=$nid
+	t_108_mkfs ost 0 ldiskfs mgsnode=$nid
+	t_108_mkfs ost 1 ldiskfs mgsnode=$nid
+
+	for facet in $facets; do
+		$rcmd mount -t ldiskfs -o loop $tmp/images/$facet \
+			$tmp/mnt/$facet ||
+			error "failed to local mount $facet"
+		# restore the ZFS-era snapshot with all xattrs intact
+		$rcmd tar jxf $LUSTRE/tests/zfs_${facet}_2_11.tar.bz2 \
+			--xattrs --xattrs-include="*.*" \
+			-C $tmp/mnt/$facet/ > /dev/null 2>&1 ||
+			error "failed to untar image for $facet"
+		# stale OI / LFSCK state must not survive the migration
+		$rcmd "cd $tmp/mnt/$facet && rm -rf oi.* OI_* lfsck_* LFSCK" ||
+			error "failed to cleanup for $facet"
+		$rcmd umount $tmp/mnt/$facet ||
+			error "failed to local umount $facet"
+	done
+
+	echo "changing server nid..."
+	# nosvc mount starts only the MGS so replace_nids can run
+	$rcmd mount -t lustre -o nosvc,loop $tmp/images/mdt1 $tmp/mnt/mdt1
+	$rcmd lctl replace_nids lustre-MDT0000 $nid
+	$rcmd lctl replace_nids lustre-MDT0001 $nid
+	$rcmd lctl replace_nids lustre-OST0000 $nid
+	$rcmd lctl replace_nids lustre-OST0001 $nid
+	$rcmd umount $tmp/mnt/mdt1
+
+	for facet in $facets; do
+		echo "mounting $facet from backup..."
+		$rcmd mount -t lustre -o loop,abort_recov $tmp/images/$facet \
+			$tmp/mnt/$facet || error "failed to mount $facet"
+	done
+
+	# unlike ZFS (test_108a), ldiskfs needs the OI scrub started
+	# explicitly after migration
+	for facet in $scrub_list; do
+		$rcmd $LCTL lfsck_start -M lustre-$facet -t scrub ||
+			error "failed to start OI scrub on $facet"
+	done
+
+	# sleep 3 seconds for scrub done
+	sleep 3
+
+	t_108_check
+	t_108_cleanup
+}
+run_test 108b "migrate from ZFS to ldiskfs"
+
+
+#
+# Set a number of permanent parameters so the MGS config log contains
+# superseded records for clear_conf to compact: each tunable is set
+# twice (only the second value must survive), and pool1 gets an OST
+# added, removed and re-added.
+#
+test_109_set_params() {
+	local fsname=$1
+
+	# atime_diff: 62 then final value 63
+	set_conf_param_and_check mds \
+	    "$LCTL get_param -n mdd.$fsname-MDT0000.atime_diff" \
+	    "$fsname-MDT0000.mdd.atime_diff" \
+	    "62"
+	set_conf_param_and_check mds \
+	    "$LCTL get_param -n mdd.$fsname-MDT0000.atime_diff" \
+	    "$fsname-MDT0000.mdd.atime_diff" \
+	    "63"
+	# max_read_ahead_mb: 32 then final value 64
+	set_conf_param_and_check client \
+	    "$LCTL get_param -n llite.$fsname*.max_read_ahead_mb" \
+	    "$fsname.llite.max_read_ahead_mb" \
+	    "32"
+	set_conf_param_and_check client \
+	    "$LCTL get_param -n llite.$fsname*.max_read_ahead_mb" \
+	    "$fsname.llite.max_read_ahead_mb" \
+	    "64"
+	# pool1 ends up containing OST0000 exactly once
+	create_pool $fsname.pool1 || error "create pool failed"
+	do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 ||
+		error "pool_add failed"
+	do_facet mgs $LCTL pool_remove $fsname.pool1 OST0000 ||
+		error "pool_remove failed"
+	do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 ||
+		error "pool_add failed"
+}
+
+#
+# Check that the final values set by test_109_set_params survived
+# clear_conf: atime_diff == 63, max_read_ahead_mb == 64, and pool1
+# still contains exactly OST0000.
+#
+test_109_test_params() {
+	local fsname=$1
+
+	local atime_diff=$(do_facet mds $LCTL \
+		get_param -n mdd.$fsname-MDT0000.atime_diff)
+	[ $atime_diff == 63 ] || error "wrong mdd parameter after clear_conf"
+	local max_read_ahead_mb=$(do_facet client $LCTL \
+		get_param -n llite.$fsname*.max_read_ahead_mb)
+	[ $max_read_ahead_mb == 64 ] ||
+		error "wrong llite parameter after clear_conf"
+	# pool_list prints a "Pool:" header plus one UUID per member
+	local ost_in_pool=$(do_facet mds $LCTL pool_list $fsname.pool1 |
+		grep -v "^Pool:" | sed 's/_UUID//')
+	[ $ost_in_pool = "$fsname-OST0000" ] ||
+		error "wrong pool after clear_conf"
+}
+
+#
+# Run "lctl clear_conf $1" on the MGS, dumping the CONFIGS directory
+# with debugfs before (conf1) and after (conf2) so callers can compare
+# config llog sizes via test_109_file_shortened.  Requires an ldiskfs
+# MGS device (debugfs); callers skip ZFS.
+#
+test_109_clear_conf()
+{
+	local clear_conf_arg=$1
+
+	local mgsdev
+	if ! combined_mgs_mds ; then
+		# separate MGS: restart with -o nosvc so the config
+		# device is mounted without serving the filesystem
+		mgsdev=$MGSDEV
+		stop_mgs || error "stop_mgs failed"
+		start_mgs "-o nosvc" || error "start_mgs nosvc failed"
+	else
+		mgsdev=$(mdsdevname 1)
+		start_mdt 1 "-o nosvc" || error "start_mdt 1 nosvc failed"
+	fi
+
+	# snapshot CONFIGS before clear_conf
+	do_facet mgs "rm -rf $TMP/${tdir}/conf1; mkdir -p $TMP/${tdir}/conf1;" \
+		"$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf1\\\" \
+		$mgsdev"
+
+	#
+	# the command being tested
+	#
+	do_facet mgs $LCTL clear_conf $clear_conf_arg ||
+		error "clear_conf failed"
+	if ! combined_mgs_mds ; then
+		stop_mgs || error "stop_mgs failed"
+	else
+		stop_mdt 1 || error "stop_mdt 1 failed"
+	fi
+
+	# snapshot CONFIGS after clear_conf for comparison
+	do_facet mgs "rm -rf $TMP/${tdir}/conf2; mkdir -p $TMP/${tdir}/conf2;" \
+		"$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf2\\\" \
+		$mgsdev"
+}
+
+# Return 0 iff config llog file $1 shrank between the conf1 (before
+# clear_conf) and conf2 (after) snapshots made by test_109_clear_conf.
+test_109_file_shortened() {
+	local file=$1
+	# sizes[0] = before, sizes[1] = after
+	local sizes=($(do_facet mgs "stat -c %s " \
+		"$TMP/${tdir}/conf1/CONFIGS/$file" \
+		"$TMP/${tdir}/conf2/CONFIGS/$file"))
+	[ ${sizes[1]} -lt ${sizes[0]} ] && return 0
+	return 1
+}
+
+# clear_conf by fsname: every config llog (MDT0000 and client) must be
+# compacted, and all final parameter values must survive a remount.
+test_109a()
+{
+	[ "$(facet_fstype mgs)" == "zfs" ] &&
+		skip "LU-8727: no implementation for ZFS" && return
+	stopall
+	reformat
+	setup_noconfig
+	client_up || error "client_up failed"
+
+	#
+	# set number of permanent parameters
+	#
+	test_109_set_params $FSNAME
+
+	umount_client $MOUNT || error "umount_client failed"
+	stop_ost || error "stop_ost failed"
+	stop_mds || error "stop_mds failed"
+
+	test_109_clear_conf $FSNAME
+	#
+	# make sure that all configs are cleared
+	#
+	test_109_file_shortened $FSNAME-MDT0000 ||
+		error "failed to clear MDT0000 config"
+	test_109_file_shortened $FSNAME-client ||
+		error "failed to clear client config"
+
+	setup_noconfig
+
+	#
+	# check that configurations are intact
+	#
+	test_109_test_params $FSNAME
+
+	#
+	# Destroy pool.
+	#
+	destroy_test_pools || error "destroy test pools failed"
+
+	cleanup
+}
+run_test 109a "test lctl clear_conf fsname"
+
+# clear_conf of a single target (fsname-MDT0000): only that config llog
+# may shrink; the client config must stay untouched.
+test_109b()
+{
+	[ "$(facet_fstype mgs)" == "zfs" ] &&
+		skip "LU-8727: no implementation for ZFS" && return
+	stopall
+	reformat
+	setup_noconfig
+	client_up || error "client_up failed"
+
+	#
+	# set number of permanent parameters
+	#
+	test_109_set_params $FSNAME
+
+	umount_client $MOUNT || error "umount_client failed"
+	stop_ost || error "stop_ost failed"
+	stop_mds || error "stop_mds failed"
+
+	test_109_clear_conf $FSNAME-MDT0000
+	#
+	# make sure that only one config is cleared
+	#
+	test_109_file_shortened $FSNAME-MDT0000 ||
+		error "failed to clear MDT0000 config"
+	# note: this arm fires when the client config DID shrink, i.e.
+	# clear_conf touched more than the one requested target
+	test_109_file_shortened $FSNAME-client &&
+		error "failed to clear client config"
+
+	setup_noconfig
+
+	#
+	# check that configurations are intact
+	#
+	test_109_test_params $FSNAME
+
+	#
+	# Destroy pool.
+	#
+	destroy_test_pools || error "destroy test pools failed"
+
+	cleanup
+}
+run_test 109b "test lctl clear_conf one config"
+
+# Cleanup for test_115: clear traps, stop all servers, remove the
+# sparse MDT image and reformat so later tests get pristine devices.
+# NOTE(review): the image was created via "do_facet $SINGLEMDS" but is
+# removed with a local rm -- assumes the MDS is local; confirm.
+cleanup_115()
+{
+	trap 0
+	stopall
+	rm -f $TMP/$tdir/lustre-mdt
+	formatall
+}
+
+# Format a sparse 3072GiB ldiskfs MDT (smallest size that can hold ~2B
+# inodes), push the allocator past 2^31 via inode_goal, then verify a
+# file carrying >5KB of hard-link xattr data lands on an inode number
+# >= 2147483947 (large-xattr support with huge inode numbers).
+test_115() {
+	IMAGESIZE=$((3072 << 30)) # 3072 GiB
+
+	if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+		skip "Only applicable to ldiskfs-based MDTs"
+		return
+	fi
+
+	stopall
+	# We need MDT size 3072GB, because it is smallest
+	# partition that can store 2B inodes
+	do_facet $SINGLEMDS "mkdir -p $TMP/$tdir"
+	local mdsimgname=$TMP/$tdir/lustre-mdt
+	do_facet $SINGLEMDS "rm -f $mdsimgname"
+	do_facet $SINGLEMDS "touch $mdsimgname"
+	trap cleanup_115 RETURN EXIT
+	do_facet $SINGLEMDS "$TRUNCATE $mdsimgname $IMAGESIZE" ||
+		{ skip "Backend FS doesn't support sparse files"; return 0; }
+	local mdsdev=$(do_facet $SINGLEMDS "losetup -f")
+	do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname"
+
+	local mds_opts="$(mkfs_opts mds1 ${mdsdev}) --device-size=$IMAGESIZE \
+		--mkfsoptions='-O lazy_itable_init,large_xattr,^resize_inode,meta_bg \
+		-i 1024'"
+	add mds1 $mds_opts --mgs --reformat $mdsdev ||
+		{ skip_env "format large MDT failed"; return 0; }
+	# fix: "$i" was never assigned before this point (the link loop
+	# below sets it later), yielding "--index="; the single OST in
+	# this test is index 0
+	add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=0 \
+		--reformat $(ostdevname 1) $(ostvdevname 1)
+
+	start $SINGLEMDS ${mdsdev} $MDS_MOUNT_OPTS || error "start MDS failed"
+	start_ost || error "start OSS failed"
+	mount_client $MOUNT || error "mount client failed"
+
+	mkdir -p $DIR/$tdir || error "mkdir $DIR/$tdir fail"
+	# steer new inode allocation beyond 2^31 on all ldiskfs instances
+	for goal in $(do_facet $SINGLEMDS "ls /sys/fs/ldiskfs/*/inode_goal"); do
+		do_facet $SINGLEMDS "echo 2147483947 >> $goal; grep . $goal"
+	done
+
+	touch $DIR/$tdir/$tfile
+
+	# Add > 5k bytes to xattr
+	for i in {1..30}; do
+		ln $DIR/$tdir/$tfile $DIR/$tdir/$(printf "link%0250d" $i) ||
+			error "Can't make link"
+	done
+
+	sync; sleep 5; sync
+
+	# read the inode number straight off the image with debugfs
+	local inode_num=$(do_facet $SINGLEMDS \
+			"$DEBUGFS -c -R 'stat ROOT/$tdir/$tfile' $mdsimgname" |
+			awk '/link =/ { print $4 }' |
+			sed -e 's/>//' -e 's/<//' -e 's/\"//')
+	echo "inode num: $inode_num"
+	[ $inode_num -ge 2147483947 ] || error "inode $inode_num too small"
+	do_facet $SINGLEMDS "losetup -d $mdsdev"
+	cleanup_115
+}
+run_test 115 "Access large xattr with inodes number over 2TB"
+
+# Big-MDT support: build mkfs options for a 17T MDT with a huge inode
+# count, run $MKFS against a sparse image hosted on a temporary xfs
+# filesystem, and verify the resulting ldiskfs has the extent feature.
+test_116() {
+	[ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+		skip "ldiskfs only test" && return
+
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.59) ] &&
+		skip "Need server version at least 2.10.59" && return
+
+	do_facet $SINGLEMDS which mkfs.xfs || {
+		skip_env "No mkfs.xfs installed"
+		return
+	}
+
+	stopall
+	load_modules
+
+	local tmpmnt=$TMP/$tdir
+	local mdtimg=$tfile-mdt0
+
+	do_facet $SINGLEMDS mkdir -p $tmpmnt
+	stack_trap "do_facet $SINGLEMDS rmdir $tmpmnt" EXIT
+
+	# xfs is used as host fs because it supports large sparse files
+	do_facet $SINGLEMDS touch $TMP/$mdtimg
+	stack_trap "do_facet $SINGLEMDS rm -f $TMP/$mdtimg" EXIT
+	do_facet $SINGLEMDS mkfs -t xfs -d file,size=1t,name=$TMP/$mdtimg ||
+		error "mkfs temporary xfs image"
+
+	do_facet $SINGLEMDS mount $TMP/$mdtimg $tmpmnt ||
+		error "mount temporary xfs image"
+	stack_trap "do_facet $SINGLEMDS umount $tmpmnt" EXIT
+	local old_mdssize=$MDSSIZE
+	local old_mdsisize=$MDSISIZE
+
+	# temporarily inflate the sizes only to generate the mkfs
+	# option string, then restore them
+	MDSSIZE=$((17 * 1024 * 1024 * 1024)) # 17T MDT
+	MDSISIZE=$((16 << 20))
+	local opts17t="$(mkfs_opts $SINGLEMDS)"
+
+	MDSSIZE=$old_mdssize
+	MDSISIZE=$old_mdsisize
+	do_facet $SINGLEMDS $MKFS $opts17t $tmpmnt/$mdtimg ||
+		error "failed to mkfs for $tmpmnt/$mdtimg"
+
+	do_facet $SINGLEMDS $TUNE2FS -l $tmpmnt/$mdtimg |
+		grep -qw 'features.*extent' || error "extent should be enabled"
+}
+run_test 116 "big size MDT support"
+
+# Check that the OST picks up a new object sequence correctly after the
+# fail_loc below disturbs object ID assignment on ost1.
+test_122() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+	[[ $(lustre_version_code ost1) -ge $(version_code 2.11.53) ]] ||
+		{ skip "Need OST version at least 2.11.53" && return 0; }
+
+
+	reformat
+	LOAD_MODULES_REMOTE=true load_modules
+#define OBD_FAIL_OFD_SET_OID 0x1e0
+	# presumably forces the OFD object ID so that creates must roll
+	# over to a new sequence -- confirm against the ofd fail_loc
+	do_facet ost1 $LCTL set_param fail_loc=0x00001e0
+
+	setupall
+	$LFS mkdir -i1 -c1 $DIR/$tdir
+	$LFS setstripe -i0 -c1 $DIR/$tdir
+	do_facet ost1 $LCTL set_param fail_loc=0
+	# 1000 creates must succeed, proving the sequence update worked
+	createmany -o $DIR/$tdir/file_ 1000 ||
+		error "Fail to create a new sequence"
+
+	reformat
+}
+run_test 122 "Check OST sequence update"
+
if ! combined_mgs_mds ; then
stop mgs
fi
reformat
complete $SECONDS
+check_and_cleanup_lustre
exit_status