ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 101"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-is_sles11() # LU-2181
-{
- if [ -r /etc/SuSE-release ]
- then
- local vers=$(grep VERSION /etc/SuSE-release | awk '{print $3}')
- local patchlev=$(grep PATCHLEVEL /etc/SuSE-release |
- awk '{ print $3 }')
- if [ $vers -eq 11 ] && [ $patchlev -eq 2 ]
- then
- return 0
- fi
- fi
- return 1
-}
-
if [ "$FAILURE_MODE" = "HARD" ]; then
CONFIG_EXCEPTIONS="24a " &&
echo "Except the tests: $CONFIG_EXCEPTIONS for " \
}
start_mgs () {
- echo "start mgs"
- start mgs $(mgsdevname) $MGS_MOUNT_OPTS
+ echo "start mgs service on $(facet_active_host mgs)"
+ start mgs $(mgsdevname) $MGS_MOUNT_OPTS $@
}
start_mdt() {
}
run_test 27b "Reacquire MGS lock after failover"
-test_28() {
+test_28A() { # was test_28
setup
TEST="$LCTL get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
error "third set_conf_param_and_check client failed"
cleanup || error "cleanup failed with rc $?"
}
-run_test 28 "permanent parameter setting"
+run_test 28A "permanent parameter setting"
test_28a() { # LU-4221
[[ $(lustre_version_code ost1) -ge $(version_code 2.5.52) ]] ||
mdt_index=$($LFS getdirstripe -i $dir)
stripe_cnt=$($LFS getdirstripe -c $dir)
if [ $mdt_index = 0 -a $stripe_cnt -le 1 ]; then
- $LFS mv -M 1 $dir || {
+ $LFS migrate -m 1 $dir || {
popd
error_noexit "migrate MDT1 failed"
return 1
mdt_index=$($LFS getdirstripe -i $dir)
stripe_cnt=$($LFS getdirstripe -c $dir)
if [ $mdt_index = 1 -a $stripe_cnt -le 1 ]; then
- $LFS mv -M 0 $dir || {
+ $LFS migrate -m 0 $dir || {
popd
error_noexit "migrate MDT0 failed"
return 1
check_mount || error "check client $MOUNT failed"
#set xattr
- $SETSTRIPE -E 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
+ $SETSTRIPE -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
error "Create file with 3 components failed"
$TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed"
i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed"
}
run_test 107 "Unknown config param should not fail target mounting"
+t_108_prep() {
+ local facet
+
+ $rcmd rm -rf $tmp > /dev/null 2>&1
+ $rcmd mkdir -p $tmp/{mnt,images} || error "failed to mkdir remotely"
+
+ for facet in $facets; do
+ [ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
+ $rcmd $ZPOOL -f export lustre-$facet > /dev/null 2>&1
+ $rcmd mkdir $tmp/mnt/$facet ||
+ error "failed to mkdir $tmp/mnt/$facet"
+ $rcmd dd if=/dev/zero of=$tmp/images/$facet \
+ seek=199 bs=1M count=1 ||
+ error "failed to create $tmp/images/$facet"
+ done
+}
+
+# Format one target image for test_108.
+#   $1 - role: "mdt" or "ost"
+#   $2 - zero-based target index
+#   $3 - backing fstype ("zfs" or "ldiskfs")
+#   $4 - MGS spec: "mgs" (co-located MGS) or "mgsnode=<nid>"
+#   $5 - optional ZFS pool/dataset name
+# --replace keeps the given index when reformatting the restored copy.
+t_108_mkfs() {
+	local role=$1
+	local idx=$2
+	local bkfs=$3
+	local mgs=$4
+	local facet=${role}$((idx + 1))	# facet names are 1-based
+	local pool=""
+	[ $# -eq 5 ] && pool=$5
+
+	do_facet $SINGLEMDS $MKFS --fsname=lustre --$mgs \
+		--$role --index=$idx --replace --backfstype=$bkfs \
+		--device-size=200000 --reformat $pool $tmp/images/$facet ||
+		error "failed to mkfs for $facet"
+}
+
+# Sanity-check a restored filesystem through a client mount: list a
+# directory, truncate/overwrite, create, read several files and compare
+# their checksums against the pre-recorded $MOUNT/checksum.src.
+# Uses caller-scoped $nid; errors out on the first failed operation.
+t_108_check() {
+	echo "mounting client..."
+	mount -t lustre ${nid}:/lustre $MOUNT ||
+		error "failed to mount lustre"
+
+	echo "check list"
+	ls -l $MOUNT/local_dir || error "failed to list"
+
+	echo "check truncate && write"
+	echo "dummmmmmmmmmmmm" > $MOUNT/remote_dir/fsx.c ||
+		error "failed to truncate & write"
+
+	echo "check create"
+	touch $MOUNT/foooo ||
+		error "failed to create"
+
+	echo "check read && write && append"
+	sha1sum $MOUNT/conf-sanity.sh |
+		awk '{ print $1 }' > $MOUNT/checksum.new ||
+		error "failed to read(1)"
+	sha1sum $MOUNT/remote_dir/unlinkmany.c |
+		awk '{ print $1 }' >> $MOUNT/checksum.new ||
+		error "failed to read(2)"
+	sha1sum $MOUNT/striped_dir/lockahead_test.o |
+		awk '{ print $1 }' >> $MOUNT/checksum.new ||
+		error "failed to read(3)"
+
+	echo "verify data"
+	diff $MOUNT/checksum.new $MOUNT/checksum.src ||
+		error "failed to verify data"
+
+	echo "done."
+}
+
+# Tear down test_108 state: unmount the client and every facet's local
+# mount (exporting its pool on ZFS backends), then remove the work area.
+# Installed as the EXIT/ERR trap by test_108a/108b.
+t_108_cleanup() {
+	trap 0	# clear the trap so cleanup cannot re-trigger itself
+	local facet
+
+	echo "cleanup..."
+	umount -f $MOUNT || error "failed to umount client"
+	for facet in $facets; do
+		$rcmd umount -f $tmp/mnt/$facet ||
+			error "failed to umount $facet"
+		if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then
+			$rcmd $ZPOOL export -f lustre-$facet ||
+				error "failed to export lustre-$facet"
+		fi
+	done
+
+	$rcmd rm -rf $tmp || error "failed to rm the dir $tmp"
+}
+
+# Restore pre-built 2.11 ldiskfs server backups onto freshly formatted
+# ZFS targets (ldiskfs -> ZFS migration), rewrite the server NIDs and
+# verify the restored data through a client mount.
+test_108a() {
+	[ "$CLIENTONLY" ] && skip "Client-only testing" && return
+
+	[ $(facet_fstype $SINGLEMDS) != "zfs" ] &&
+		skip "zfs only test" && return
+
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] &&
+		skip "Need server version at least 2.10.58" && return
+
+	stopall
+	load_modules
+
+	local tmp=$TMP/$tdir
+	local rcmd="do_facet $SINGLEMDS"
+	local facets="mdt1 mdt2 ost1 ost2"
+	local nid=$($rcmd $LCTL list_nids | head -1)
+	local facet
+
+	trap t_108_cleanup EXIT ERR
+	t_108_prep
+
+	t_108_mkfs mdt 0 zfs mgs lustre-mdt1/mdt1
+	t_108_mkfs mdt 1 zfs mgsnode=$nid lustre-mdt2/mdt2
+	t_108_mkfs ost 0 zfs mgsnode=$nid lustre-ost1/ost1
+	t_108_mkfs ost 1 zfs mgsnode=$nid lustre-ost2/ost2
+
+	for facet in $facets; do
+		$rcmd zfs set mountpoint=$tmp/mnt/$facet canmount=on \
+			lustre-$facet/$facet ||
+			error "failed to zfs set for $facet (1)"
+		$rcmd zfs mount lustre-$facet/$facet ||
+			error "failed to local mount $facet"
+		# unpack the backup image, preserving trusted.* xattrs
+		$rcmd tar jxf $LUSTRE/tests/ldiskfs_${facet}_2_11.tar.bz2 \
+			--xattrs --xattrs-include="trusted.*" \
+			-C $tmp/mnt/$facet/ > /dev/null 2>&1 ||
+			error "failed to untar image for $facet"
+		# drop object-index and LFSCK state from the old backend
+		$rcmd "cd $tmp/mnt/$facet && rm -rf oi.* OI_* lfsck_* LFSCK" ||
+			error "failed to cleanup for $facet"
+		$rcmd zfs umount lustre-$facet/$facet ||
+			error "failed to local umount $facet"
+		$rcmd zfs set canmount=off lustre-$facet/$facet ||
+			error "failed to zfs set $facet (2)"
+	done
+
+	# point every target's config at this node's NID
+	echo "changing server nid..."
+	$rcmd mount -t lustre -o nosvc lustre-mdt1/mdt1 $tmp/mnt/mdt1
+	$rcmd lctl replace_nids lustre-MDT0000 $nid
+	$rcmd lctl replace_nids lustre-MDT0001 $nid
+	$rcmd lctl replace_nids lustre-OST0000 $nid
+	$rcmd lctl replace_nids lustre-OST0001 $nid
+	$rcmd umount $tmp/mnt/mdt1
+
+	for facet in $facets; do
+		echo "mounting $facet from backup..."
+		$rcmd mount -t lustre -o abort_recov lustre-$facet/$facet \
+			$tmp/mnt/$facet || error "failed to mount $facet"
+	done
+
+	# ZFS backend can detect migration and trigger OI scrub automatically
+	# sleep 3 seconds for scrub done
+	sleep 3
+
+	t_108_check
+	t_108_cleanup
+}
+run_test 108a "migrate from ldiskfs to ZFS"
+
+# Restore pre-built 2.11 ZFS server backups onto freshly formatted
+# ldiskfs targets (ZFS -> ldiskfs migration), rewrite the server NIDs,
+# start OI scrub explicitly and verify data through a client mount.
+test_108b() {
+	[ "$CLIENTONLY" ] && skip "Client-only testing" && return
+
+	[ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+		skip "ldiskfs only test" && return
+
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] &&
+		skip "Need server version at least 2.10.58" && return
+
+	stopall
+	load_modules
+
+	local tmp=$TMP/$tdir
+	local rcmd="do_facet $SINGLEMDS"
+	local facets="mdt1 mdt2 ost1 ost2"
+	local scrub_list="MDT0000 MDT0001 OST0000 OST0001"
+	local nid=$($rcmd $LCTL list_nids | head -1)
+	local facet
+
+	trap t_108_cleanup EXIT ERR
+	t_108_prep
+
+	t_108_mkfs mdt 0 ldiskfs mgs
+	t_108_mkfs mdt 1 ldiskfs mgsnode=$nid
+	t_108_mkfs ost 0 ldiskfs mgsnode=$nid
+	t_108_mkfs ost 1 ldiskfs mgsnode=$nid
+
+	for facet in $facets; do
+		$rcmd mount -t ldiskfs -o loop $tmp/images/$facet \
+			$tmp/mnt/$facet ||
+			error "failed to local mount $facet"
+		# NOTE(review): test_108a restores only trusted.* xattrs;
+		# the broader "*.*" here looks intended for ZFS-sourced
+		# images — confirm
+		$rcmd tar jxf $LUSTRE/tests/zfs_${facet}_2_11.tar.bz2 \
+			--xattrs --xattrs-include="*.*" \
+			-C $tmp/mnt/$facet/ > /dev/null 2>&1 ||
+			error "failed to untar image for $facet"
+		# drop object-index and LFSCK state from the old backend
+		$rcmd "cd $tmp/mnt/$facet && rm -rf oi.* OI_* lfsck_* LFSCK" ||
+			error "failed to cleanup for $facet"
+		$rcmd umount $tmp/mnt/$facet ||
+			error "failed to local umount $facet"
+	done
+
+	# point every target's config at this node's NID
+	echo "changing server nid..."
+	$rcmd mount -t lustre -o nosvc,loop $tmp/images/mdt1 $tmp/mnt/mdt1
+	$rcmd lctl replace_nids lustre-MDT0000 $nid
+	$rcmd lctl replace_nids lustre-MDT0001 $nid
+	$rcmd lctl replace_nids lustre-OST0000 $nid
+	$rcmd lctl replace_nids lustre-OST0001 $nid
+	$rcmd umount $tmp/mnt/mdt1
+
+	for facet in $facets; do
+		echo "mounting $facet from backup..."
+		$rcmd mount -t lustre -o loop,abort_recov $tmp/images/$facet \
+			$tmp/mnt/$facet || error "failed to mount $facet"
+	done
+
+	# unlike ZFS (see test_108a), OI scrub must be started manually here
+	for facet in $scrub_list; do
+		$rcmd $LCTL lfsck_start -M lustre-$facet -t scrub ||
+			error "failed to start OI scrub on $facet"
+	done
+
+	# sleep 3 seconds for scrub done
+	sleep 3
+
+	t_108_check
+	t_108_cleanup
+}
+run_test 108b "migrate from ZFS to ldiskfs"
+
+
+#
+# Set a number of permanent parameters (conf_param records and a pool)
+# for fsname $1, as material for the clear_conf tests.  Each tunable is
+# set twice and the pool member is added/removed/re-added, so the config
+# llog contains superseded records.
+#   $1 - filesystem name
+#
+test_109_set_params() {
+	local fsname=$1
+
+	set_conf_param_and_check mds \
+	    "$LCTL get_param -n mdd.$fsname-MDT0000.atime_diff" \
+	    "$fsname-MDT0000.mdd.atime_diff" \
+	    "62"
+	set_conf_param_and_check mds \
+	    "$LCTL get_param -n mdd.$fsname-MDT0000.atime_diff" \
+	    "$fsname-MDT0000.mdd.atime_diff" \
+	    "63"
+	set_conf_param_and_check client \
+	    "$LCTL get_param -n llite.$fsname*.max_read_ahead_mb" \
+	    "$fsname.llite.max_read_ahead_mb" \
+	    "32"
+	set_conf_param_and_check client \
+	    "$LCTL get_param -n llite.$fsname*.max_read_ahead_mb" \
+	    "$fsname.llite.max_read_ahead_mb" \
+	    "64"
+	create_pool $fsname.pool1 || error "create pool failed"
+	do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 ||
+		error "pool_add failed"
+	do_facet mgs $LCTL pool_remove $fsname.pool1 OST0000 ||
+		error "pool_remove failed"
+	do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 ||
+		error "pool_add failed"
+}
+
+#
+# Check that the permanent parameters survived clear_conf: each value
+# must equal the last one set by test_109_set_params (atime_diff=63,
+# max_read_ahead_mb=64, pool1 containing only OST0000).
+#   $1 - filesystem name
+#
+test_109_test_params() {
+	local fsname=$1
+
+	local atime_diff=$(do_facet mds $LCTL \
+		get_param -n mdd.$fsname-MDT0000.atime_diff)
+	[ $atime_diff == 63 ] || error "wrong mdd parameter after clear_conf"
+	local max_read_ahead_mb=$(do_facet client $LCTL \
+		get_param -n llite.$fsname*.max_read_ahead_mb)
+	[ $max_read_ahead_mb == 64 ] ||
+		error "wrong llite parameter after clear_conf"
+	local ost_in_pool=$(do_facet mds $LCTL pool_list $fsname.pool1 |
+		grep -v "^Pool:" | sed 's/_UUID//')
+	[ $ost_in_pool = "$fsname-OST0000" ] ||
+		error "wrong pool after clear_conf"
+}
+
+#
+# Run "lctl clear_conf $1" on the MGS, dumping the CONFIGS directory
+# from the MGS device to $TMP/$tdir/conf1 before and conf2 after, for
+# comparison by test_109_file_shortened.
+#   $1 - argument for clear_conf: a fsname or a fsname-target
+# The MGS (MDT1 when MGS and MDS are combined) is mounted "-o nosvc"
+# so the device is attached without starting the target services.
+#
+test_109_clear_conf()
+{
+	local clear_conf_arg=$1
+
+	local mgsdev
+	if ! combined_mgs_mds ; then
+		mgsdev=$MGSDEV
+		stop_mgs || error "stop_mgs failed"
+		start_mgs "-o nosvc" || error "start_mgs nosvc failed"
+	else
+		mgsdev=$(mdsdevname 1)
+		start_mdt 1 "-o nosvc" || error "start_mdt 1 nosvc failed"
+	fi
+
+	# dump the config llogs as they are before clear_conf
+	do_facet mgs "rm -rf $TMP/${tdir}/conf1; mkdir -p $TMP/${tdir}/conf1;" \
+		"$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf1\\\" \
+		$mgsdev"
+
+	#
+	# the command being tested
+	#
+	do_facet mgs $LCTL clear_conf $clear_conf_arg ||
+		error "clear_conf failed"
+	if ! combined_mgs_mds ; then
+		stop_mgs || error "stop_mgs failed"
+	else
+		stop_mdt 1 || error "stop_mdt 1 failed"
+	fi
+
+	# dump the config llogs again after clear_conf
+	do_facet mgs "rm -rf $TMP/${tdir}/conf2; mkdir -p $TMP/${tdir}/conf2;" \
+		"$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf2\\\" \
+		$mgsdev"
+}
+
+# Return 0 iff config llog file $1 shrank between the conf1 (before
+# clear_conf) and conf2 (after) dumps made by test_109_clear_conf.
+test_109_file_shortened() {
+	local file=$1
+	local sizes=($(do_facet mgs "stat -c %s " \
+		"$TMP/${tdir}/conf1/CONFIGS/$file" \
+		"$TMP/${tdir}/conf2/CONFIGS/$file"))
+	[ ${sizes[1]} -lt ${sizes[0]} ] && return 0
+	return 1
+}
+
+# Verify that "lctl clear_conf <fsname>" shrinks both the MDT0000 and
+# the client config llogs, and that the remaining configuration still
+# restores every parameter set by test_109_set_params after remount.
+test_109a()
+{
+	[ "$(facet_fstype mgs)" == "zfs" ] &&
+		skip "LU-8727: no implementation for ZFS" && return
+	stopall
+	reformat
+	setup_noconfig
+	client_up || error "client_up failed"
+
+	#
+	# set number of permanent parameters
+	#
+	test_109_set_params $FSNAME
+
+	umount_client $MOUNT || error "umount_client failed"
+	stop_ost || error "stop_ost failed"
+	stop_mds || error "stop_mds failed"
+
+	test_109_clear_conf $FSNAME
+	#
+	# make sure that all configs are cleared
+	#
+	test_109_file_shortened $FSNAME-MDT0000 ||
+		error "failed to clear MDT0000 config"
+	test_109_file_shortened $FSNAME-client ||
+		error "failed to clear client config"
+
+	setup_noconfig
+
+	#
+	# check that configurations are intact
+	#
+	test_109_test_params $FSNAME
+
+	#
+	# Destroy pool.
+	#
+	destroy_test_pools || error "destroy test pools failed"
+
+	cleanup
+}
+run_test 109a "test lctl clear_conf fsname"
+
+test_109b()
+{
+ [ "$(facet_fstype mgs)" == "zfs" ] &&
+ skip "LU-8727: no implementation for ZFS" && return
+ stopall
+ reformat
+ setup_noconfig
+ client_up || error "client_up failed"
+
+ #
+ # set number of permanent parameters
+ #
+ test_109_set_params $FSNAME
+
+ umount_client $MOUNT || error "umount_client failed"
+ stop_ost || error "stop_ost failed"
+ stop_mds || error "stop_mds failed"
+
+ test_109_clear_conf $FSNAME-MDT0000
+ #
+ # make sure that only one config is cleared
+ #
+ test_109_file_shortened $FSNAME-MDT0000 ||
+ error "failed to clear MDT0000 config"
+ test_109_file_shortened $FSNAME-client &&
+ error "failed to clear client config"
+
+ setup_noconfig
+
+ #
+ # check that configurations are intact
+ #
+ test_109_test_params $FSNAME
+
+ #
+ # Destroy pool.
+ #
+ destroy_test_pools || error "destroy test pools failed"
+
+ cleanup
+}
+run_test 109b "test lctl clear_conf one config"
+
+# RETURN/EXIT trap for test_115: stop all targets, remove the sparse
+# MDT image and reformat so the next test starts from scratch.
+# NOTE(review): the image is created via do_facet $SINGLEMDS but removed
+# with a local rm — assumes $TMP is shared or MDS-local; confirm.
+cleanup_115()
+{
+	trap 0
+	stopall
+	rm -f $TMP/$tdir/lustre-mdt
+	formatall
+}
+
+# Format an MDT large enough (3072GB sparse image, -i 1024) to hold >2B
+# inodes, force inode allocation above 2^31 via inode_goal, and verify
+# a file carrying a large (>5KB) xattr gets a valid high inode number.
+test_115() {
+	IMAGESIZE=$((3072 << 30)) # 3072 GiB
+
+	if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+		skip "Only applicable to ldiskfs-based MDTs"
+		return
+	fi
+
+	stopall
+	# We need MDT size 3072GB, because it is smallest
+	# partition that can store 2B inodes
+	do_facet $SINGLEMDS "mkdir -p $TMP/$tdir"
+	local mdsimgname=$TMP/$tdir/lustre-mdt
+	do_facet $SINGLEMDS "rm -f $mdsimgname"
+	do_facet $SINGLEMDS "touch $mdsimgname"
+	trap cleanup_115 RETURN EXIT
+	do_facet $SINGLEMDS "$TRUNCATE $mdsimgname $IMAGESIZE" ||
+		{ skip "Backend FS doesn't support sparse files"; return 0; }
+	local mdsdev=$(do_facet $SINGLEMDS "losetup -f")
+	do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname"
+
+	local mds_opts="$(mkfs_opts mds1 ${mdsdev}) --device-size=$IMAGESIZE \
+		--mkfsoptions='-O lazy_itable_init,large_xattr,^resize_inode,meta_bg \
+		-i 1024'"
+	add mds1 $mds_opts --mgs --reformat $mdsdev ||
+		{ skip_env "format large MDT failed"; return 0; }
+	# NOTE(review): $i looks unset at this point (the {1..30} loop runs
+	# later in this function) — confirm the intended OST index
+	add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \
+		--reformat $(ostdevname 1) $(ostvdevname 1)
+
+	start $SINGLEMDS ${mdsdev} $MDS_MOUNT_OPTS || error "start MDS failed"
+	start_ost || error "start OSS failed"
+	mount_client $MOUNT || error "mount client failed"
+
+	mkdir -p $DIR/$tdir || error "mkdir $DIR/$tdir fail"
+	# push the allocator goal past 2^31 so new inodes get huge numbers
+	for goal in $(do_facet $SINGLEMDS "ls /sys/fs/ldiskfs/*/inode_goal"); do
+		do_facet $SINGLEMDS "echo 2147483947 >> $goal; grep . $goal"
+	done
+
+	touch $DIR/$tdir/$tfile
+
+	# Add > 5k bytes to xattr
+	for i in {1..30}; do
+		ln $DIR/$tdir/$tfile $DIR/$tdir/$(printf "link%0250d" $i) ||
+			error "Can't make link"
+	done
+
+	sync; sleep 5; sync
+
+	# read the test file's inode number straight from the image
+	local inode_num=$(do_facet $SINGLEMDS \
+			 "$DEBUGFS -c -R 'stat ROOT/$tdir/$tfile' $mdsimgname" |
+			 awk '/link =/ { print $4 }' |
+			 sed -e 's/>//' -e 's/<//' -e 's/\"//')
+	echo "inode num: $inode_num"
+	[ $inode_num -ge 2147483947 ] || error "inode $inode_num too small"
+	do_facet $SINGLEMDS "losetup -d $mdsdev"
+	cleanup_115
+}
+run_test 115 "Access large xattr with inodes number over 2TB"
+
+# Verify big-MDT support: generate mkfs options for a 17TB MDT, format
+# an image file hosted on a temporary 1TB xfs filesystem, and check the
+# extent feature was enabled on the result.
+test_116() {
+	[ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+		skip "ldiskfs only test" && return
+
+	stopall
+	load_modules
+
+	local tmpmnt=/mnt/$tdir
+	local mdtimg=$tfile-mdt0
+
+	do_facet $SINGLEMDS mkdir -p $tmpmnt
+	stack_trap "do_facet $SINGLEMDS rmdir $tmpmnt" EXIT
+
+	do_facet $SINGLEMDS touch $TMP/$mdtimg
+	stack_trap "do_facet $SINGLEMDS rm -f $TMP/$mdtimg" EXIT
+	do_facet $SINGLEMDS mkfs -t xfs -d file,size=1t,name=$TMP/$mdtimg ||
+		error "mkfs temporary xfs image"
+
+	do_facet $SINGLEMDS mount $TMP/$mdtimg $tmpmnt ||
+		error "mount temporary xfs image"
+	stack_trap "do_facet $SINGLEMDS umount $tmpmnt" EXIT
+	local old_mdssize=$MDSSIZE
+	local old_mdsisize=$MDSISIZE
+
+	MDSSIZE=$((17 * 1024 * 1024 * 1024)) # 17T MDT
+	MDSISIZE=$((16 << 20))
+	# mkfs_opts reads MDSSIZE/MDSISIZE, so swap in the big values just
+	# long enough to generate the 17T option string
+	local opts17t="$(mkfs_opts $SINGLEMDS)"
+
+	# restore the globals before they can affect later tests
+	MDSSIZE=$old_mdssize
+	MDSISIZE=$old_mdsisize
+	do_facet $SINGLEMDS $MKFS $opts17t $tmpmnt/$mdtimg ||
+		error "failed to mkfs for $tmpmnt/$mdtimg"
+
+	do_facet $SINGLEMDS $TUNE2FS -l $tmpmnt/$mdtimg |
+		grep -qw 'features.*extent' || error "extent should be enabled"
+}
+run_test 116 "big size MDT support"
+
if ! combined_mgs_mds ; then
stop mgs
fi