ONLY=${ONLY:-"$*"}
-# bug number for skipped test: LU-8972
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 101"
+# bug number for skipped test:
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-if [ "$FAILURE_MODE" = "HARD" ]; then
- CONFIG_EXCEPTIONS="24a " &&
- echo "Except the tests: $CONFIG_EXCEPTIONS for " \
- "FAILURE_MODE=$FAILURE_MODE, b=23573" &&
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
-fi
-
# bug number for skipped test:
# a tool to create lustre filesystem images
ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
STORED_MDSSIZE=$MDSSIZE
STORED_OSTSIZE=$OSTSIZE
MDSSIZE=200000
+[ $(facet_fstype $SINGLEMDS) = "zfs" ] && MDSSIZE=400000
OSTSIZE=200000
+[ $(facet_fstype ost1) = "zfs" ] && OSTSIZE=400000
fs2mds_HOST=$mds_HOST
fs2ost_HOST=$ost_HOST
[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
# bug number for skipped test:
ALWAYS_EXCEPT="$ALWAYS_EXCEPT"
+# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
init_logging
}
start_mgs () {
-	echo "start mgs"
-	start mgs $(mgsdevname) $MGS_MOUNT_OPTS
+	# start the MGS service on its active host; any extra arguments
+	# (e.g. "-o nosvc") are passed through to the mount command
+	echo "start mgs service on $(facet_active_host mgs)"
+	start mgs $(mgsdevname) $MGS_MOUNT_OPTS $@
}
start_mdt() {
}
run_test 9 "test ptldebug and subsystem for mkfs"
-is_blkdev () {
- local facet=$1
- local dev=$2
- local size=${3:-""}
-
- local rc=0
- do_facet $facet "test -b $dev" || rc=1
- if [[ "$size" ]]; then
- local in=$(do_facet $facet "dd if=$dev of=/dev/null bs=1k \
- count=1 skip=$size 2>&1" |
- awk '($3 == "in") { print $1 }')
- [[ $in = "1+0" ]] || rc=1
- fi
- return $rc
-}
-
#
# Test 16 was to "verify that lustre will correct the mode of OBJECTS".
# But with new MDS stack we don't care about the mode of local objects
fi
mount_client $MOUNT || error "mount_client $MOUNT failed"
wait_osc_import_state mds ost FULL
- wait_osc_import_state client ost FULL
+ wait_osc_import_ready client ost
check_mount || error "check_mount failed"
pass
"MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs"
ps -ef | grep mount
fi
- stop_mds || error "stopping MDSes failed"
- stop_ost || error "stopping OSSes failed"
+ cleanup || error "cleanup failed with rc $?"
}
run_test 23a "interrupt client during recovery mount delay"
-umount_client $MOUNT
-cleanup_nocli
-
test_23b() { # was test_23
start_mds || error "MDS start failed"
start_ost || error "Unable to start OST1"
}
run_test 27b "Reacquire MGS lock after failover"
-test_28() {
+test_28A() { # was test_28
setup
TEST="$LCTL get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
error "third set_conf_param_and_check client failed"
cleanup || error "cleanup failed with rc $?"
}
-run_test 28 "permanent parameter setting"
+run_test 28A "permanent parameter setting"
test_28a() { # LU-4221
[[ $(lustre_version_code ost1) -ge $(version_code 2.5.52) ]] ||
local node=$1
local all_removed=false
local i=0
+ local fstype=$(facet_fstype $SINGLEMDS)
+
+ [ $fstype == "zfs" ] && do_rpc_nodes $node "service zed stop"
while ((i < 20)); do
echo "Unloading modules on $node: Attempt $i"
- do_rpc_nodes $node $LUSTRE_RMMOD $(facet_fstype $SINGLEMDS) &&
+ do_rpc_nodes $node $LUSTRE_RMMOD $fstype &&
all_removed=true
do_rpc_nodes $node check_mem_leak || return 1
if $all_removed; then
do_rpc_nodes $node load_modules
return 0
fi
+ if [ $fstype == "zfs" ]; then
+ do_rpc_nodes $node "$ZPOOL status -v"
+ fi
sleep 5
i=$((i + 1))
done
error_noexit "Unmounting the MDT2"
return 1
}
+ if [[ $fstype == zfs ]]; then
+ $r "$ZPOOL export t32fs-mdt2"
+ fi
shall_cleanup_mdt1=false
fi
error_noexit "Unmounting the MDT"
return 1
}
+ if [[ $fstype == zfs ]]; then
+ $r "$ZPOOL export t32fs-mdt1"
+ fi
shall_cleanup_mdt=false
$r $UMOUNT $tmp/mnt/ost || {
error_noexit "Unmounting the OST"
return 1
}
+ if [[ $fstype == zfs ]]; then
+ $r "$ZPOOL export t32fs-ost1"
+ fi
shall_cleanup_ost=false
t32_reload_modules $node || {
--reformat $fs2mgsdev $fs2mgsvdev || error "add fs2mgs failed"
start $fs2mgs $fs2mgsdev $MGS_MOUNT_OPTS || error "start fs2mgs failed"
stop $fs2mgs -f || error "stop fs2mgs failed"
+ cleanup || error "cleanup failed with $?"
}
run_test 43b "parse nosquash_nids with commas in expr_list"
-umount_client $MOUNT
-cleanup_nocli
-
test_44() { # 16317
setup
check_mount || error "check_mount"
# wait until osts in sync
for (( i=2; i<=$OSTCOUNT; i++ )); do
wait_osc_import_state mds ost$i FULL
- wait_osc_import_state client ost$i FULL
+ wait_osc_import_ready client ost$i
done
#second client see all ost's
[ $RC1 -ne 0 ] && log "lazystatfs multiop failed"
wait $PID || { RC1=$?; log "multiop return error "; }
- $LFS df &
+ $LFS df -l &
PID=$!
sleep 5
kill -s 0 $PID
setup
start_ost2 || error "Unable to start OST2"
wait_osc_import_state mds ost2 FULL
- wait_osc_import_state client ost2 FULL
+ wait_osc_import_ready client ost2
local PARAM="${FSNAME}-OST0001.osc.active"
done
echo
+ # sync all the data and make sure no pending data on the client,
+ # thus the SOM xattr would not be changed any more.
+ cancel_lru_locks osc
+
# backup files
echo backup files to $TMP/$tdir
local files=$(find $DIR/$tdir -type f -newer $TMP/modified_first)
setup_noconfig
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
createmany -o $DIR/$tdir/$tfile-%d 100
- # make sure that OSTs do not cancel llog cookies before we unmount the MDS
-#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
- do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x601"
unlinkmany $DIR/$tdir/$tfile-%d 100
stop_mds || error "Unable to stop MDS"
local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1)
local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -1)
+ # add EXCLUDE records to config log, they are not to be
+ # removed by lctl replace_nids
+ set_conf_param_and_check mds \
+ "$LCTL get_param -n osc.$FSNAME-OST0000-osc-MDT0000.active" \
+ "$FSNAME-OST0000.osc.active" \
+ "0"
+
echo "replace_nids should fail if MDS, OSTs and clients are UP"
do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
error "replace_nids fail"
stop_mds || error "Unable to stop MDS"
fi
- setup_noconfig
+ start_mgsmds || error "start mgsmds failed"
+ set_conf_param_and_check mds \
+ "$LCTL get_param -n osc.$FSNAME-OST0000-osc-MDT0000.active" \
+ "$FSNAME-OST0000.osc.active" \
+ "1"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "mount client failed"
+
check_mount || error "error after nid replace"
cleanup || error "cleanup failed"
reformat
local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM |
head -1)
echo "max_dirty_mb: $MAX_DIRTY_MB"
- local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB + MAX_DIRTY_MB))
+ local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB - 10))
echo "new_max_dirty_mb: $NEW_MAX_DIRTY_MB"
do_facet mgs $LCTL set_param -P $MDMB_PARAM=$NEW_MAX_DIRTY_MB
wait_update $HOSTNAME "$LCTL get_param -n $MDMB_PARAM |
run_test 86 "Replacing mkfs.lustre -G option"
test_87() { #LU-6544
- [[ $(lustre_version_code $SINGLEMDS1) -ge $(version_code 2.9.51) ]] ||
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.51) ]] ||
{ skip "Need MDS version at least 2.9.51" && return; }
[[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
{ skip "ldiskfs only test" && return; }
check_mount || error "check client $MOUNT failed"
#set xattr
- $SETSTRIPE -E 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
+ $SETSTRIPE -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
error "Create file with 3 components failed"
$TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed"
i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed"
#shows that osp code is buggy
do_facet mds1 $LCTL set_param fail_loc=0 fail_val=0
- cleanupall
+ stopall
}
run_test 106 "check osp llog processing when catalog is wrapped"
[ $(facet_fstype $SINGLEMDS) != "zfs" ] &&
skip "zfs only test" && return
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] &&
+ skip "Need server version at least 2.10.58" && return
+
stopall
load_modules
[ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
skip "ldiskfs only test" && return
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] &&
+ skip "Need server version at least 2.10.58" && return
+
stopall
load_modules
}
run_test 108b "migrate from ZFS to ldiskfs"
+
+#
+# Set a number of permanent parameters (MDS, client and MGS pool config)
+# so that a later test_109_test_params call can verify which of them
+# survive "lctl clear_conf".
+#
+# Arguments: $1 - filesystem name
+#
+test_109_set_params() {
+	local fsname=$1
+
+	# set mdd.atime_diff twice so the config llog holds a superseded
+	# record (62) followed by the effective one (63); clear_conf is
+	# expected to drop the superseded record only
+	set_conf_param_and_check mds \
+	    "$LCTL get_param -n mdd.$fsname-MDT0000.atime_diff" \
+	    "$fsname-MDT0000.mdd.atime_diff" \
+	    "62"
+	set_conf_param_and_check mds \
+	    "$LCTL get_param -n mdd.$fsname-MDT0000.atime_diff" \
+	    "$fsname-MDT0000.mdd.atime_diff" \
+	    "63"
+	# same pattern for a client-side parameter: 32 superseded by 64
+	set_conf_param_and_check client \
+	    "$LCTL get_param -n llite.$fsname*.max_read_ahead_mb" \
+	    "$fsname.llite.max_read_ahead_mb" \
+	    "32"
+	set_conf_param_and_check client \
+	    "$LCTL get_param -n llite.$fsname*.max_read_ahead_mb" \
+	    "$fsname.llite.max_read_ahead_mb" \
+	    "64"
+	# add/remove/add the same OST so the pool config also contains
+	# records that clear_conf can squash
+	create_pool $fsname.pool1 || error "create pool failed"
+	do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 ||
+		error "pool_add failed"
+	do_facet mgs $LCTL pool_remove $fsname.pool1 OST0000 ||
+		error "pool_remove failed"
+	do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 ||
+		error "pool_add failed"
+}
+
+#
+# Check that the effective permanent parameters set by
+# test_109_set_params are still in force after "lctl clear_conf":
+# the final atime_diff (63), the final max_read_ahead_mb (64), and
+# exactly one OST present in pool1 after the add/remove/add sequence.
+#
+# Arguments: $1 - filesystem name
+#
+test_109_test_params() {
+	local fsname=$1
+
+	local atime_diff=$(do_facet mds $LCTL \
+		get_param -n mdd.$fsname-MDT0000.atime_diff)
+	# only the last set value must survive clear_conf
+	[ $atime_diff == 63 ] || error "wrong mdd parameter after clear_conf"
+	local max_read_ahead_mb=$(do_facet client $LCTL \
+		get_param -n llite.$fsname*.max_read_ahead_mb)
+	[ $max_read_ahead_mb == 64 ] ||
+		error "wrong llite parameter after clear_conf"
+	# strip the "Pool:" header line and the _UUID suffix from the
+	# pool_list output to get the bare OST name
+	local ost_in_pool=$(do_facet mds $LCTL pool_list $fsname.pool1 |
+		grep -v "^Pool:" | sed 's/_UUID//')
+	[ $ost_in_pool = "$fsname-OST0000" ] ||
+		error "wrong pool after clear_conf"
+}
+
+#
+# Run "lctl clear_conf" on the MGS device, dumping the CONFIGS directory
+# before and after into $TMP/$tdir/conf1 and conf2 respectively so that
+# callers can compare config llog sizes (see test_109_file_shortened).
+#
+# Arguments: $1 - argument for "lctl clear_conf": either a fsname or a
+#                 single config log name such as <fsname>-MDT0000
+#
+test_109_clear_conf()
+{
+	local clear_conf_arg=$1
+
+	local mgsdev
+	if ! combined_mgs_mds ; then
+		# separate MGS: remount it with "-o nosvc" so the device is
+		# mounted but services are not started, as clear_conf needs
+		mgsdev=$MGSDEV
+		stop_mgs || error "stop_mgs failed"
+		start_mgs "-o nosvc" || error "start_mgs nosvc failed"
+	else
+		# combined MGS/MDT: use MDT0000 in nosvc mode instead
+		mgsdev=$(mdsdevname 1)
+		start_mdt 1 "-o nosvc" || error "start_mdt 1 nosvc failed"
+	fi
+
+	# dump CONFIGS before clear_conf (debugfs reads the ldiskfs image)
+	do_facet mgs "rm -rf $TMP/${tdir}/conf1; mkdir -p $TMP/${tdir}/conf1;" \
+		"$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf1\\\" \
+		$mgsdev"
+
+	#
+	# the command being tested
+	#
+	do_facet mgs $LCTL clear_conf $clear_conf_arg ||
+		error "clear_conf failed"
+	if ! combined_mgs_mds ; then
+		stop_mgs || error "stop_mgs failed"
+	else
+		stop_mdt 1 || error "stop_mdt 1 failed"
+	fi
+
+	# dump CONFIGS after clear_conf for the size comparison
+	do_facet mgs "rm -rf $TMP/${tdir}/conf2; mkdir -p $TMP/${tdir}/conf2;" \
+		"$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf2\\\" \
+		$mgsdev"
+}
+
+# Return 0 if config log $1 got smaller after clear_conf, i.e. the copy
+# dumped into conf2 is strictly smaller than the one in conf1; 1 otherwise.
+# Relies on the conf1/conf2 dumps created by test_109_clear_conf.
+test_109_file_shortened() {
+	local file=$1
+	local sizes=($(do_facet mgs "stat -c %s " \
+		"$TMP/${tdir}/conf1/CONFIGS/$file" \
+		"$TMP/${tdir}/conf2/CONFIGS/$file"))
+	[ ${sizes[1]} -lt ${sizes[0]} ] && return 0
+	return 1
+}
+
+# Verify that "lctl clear_conf <fsname>" shrinks every config llog of the
+# filesystem while the effective (latest) permanent parameters survive.
+test_109a()
+{
+	[ "$(facet_fstype mgs)" == "zfs" ] &&
+		skip "LU-8727: no implementation for ZFS" && return
+	stopall
+	reformat
+	setup_noconfig
+	client_up || error "client_up failed"
+
+	#
+	# set number of permanent parameters
+	#
+	test_109_set_params $FSNAME
+
+	umount_client $MOUNT || error "umount_client failed"
+	stop_ost || error "stop_ost failed"
+	stop_mds || error "stop_mds failed"
+
+	test_109_clear_conf $FSNAME
+	#
+	# make sure that all configs are cleared
+	#
+	test_109_file_shortened $FSNAME-MDT0000 ||
+		error "failed to clear MDT0000 config"
+	test_109_file_shortened $FSNAME-client ||
+		error "failed to clear client config"
+
+	setup_noconfig
+
+	#
+	# check that configurations are intact
+	#
+	test_109_test_params $FSNAME
+
+	#
+	# Destroy pool.
+	#
+	destroy_test_pools || error "destroy test pools failed"
+
+	# report cleanup failures like the rest of this test suite does
+	cleanup || error "cleanup failed with rc $?"
+}
+run_test 109a "test lctl clear_conf fsname"
+
+# Verify that "lctl clear_conf <fsname>-MDT0000" shrinks only that one
+# config llog, leaving the client config untouched.
+test_109b()
+{
+	[ "$(facet_fstype mgs)" == "zfs" ] &&
+		skip "LU-8727: no implementation for ZFS" && return
+	stopall
+	reformat
+	setup_noconfig
+	client_up || error "client_up failed"
+
+	#
+	# set number of permanent parameters
+	#
+	test_109_set_params $FSNAME
+
+	umount_client $MOUNT || error "umount_client failed"
+	stop_ost || error "stop_ost failed"
+	stop_mds || error "stop_mds failed"
+
+	test_109_clear_conf $FSNAME-MDT0000
+	#
+	# make sure that only one config is cleared
+	#
+	test_109_file_shortened $FSNAME-MDT0000 ||
+		error "failed to clear MDT0000 config"
+	# the client config must NOT have been shortened
+	test_109_file_shortened $FSNAME-client &&
+		error "failed to clear client config"
+
+	setup_noconfig
+
+	#
+	# check that configurations are intact
+	#
+	test_109_test_params $FSNAME
+
+	#
+	# Destroy pool.
+	#
+	destroy_test_pools || error "destroy test pools failed"
+
+	# report cleanup failures like the rest of this test suite does
+	cleanup || error "cleanup failed with rc $?"
+}
+run_test 109b "test lctl clear_conf one config"
+
cleanup_115()
{
trap 0
}
run_test 115 "Access large xattr with inodes number over 2TB"
+# Check that mkfs can format a >16TiB ("big size") ldiskfs MDT; a sparse
+# file on a temporary xfs filesystem stands in for the huge device so no
+# real 17T of storage is needed.
+test_116() {
+	[ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+		skip "ldiskfs only test" && return
+
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.59) ] &&
+		skip "Need server version at least 2.10.59" && return
+
+	do_facet $SINGLEMDS which mkfs.xfs || {
+		skip_env "No mkfs.xfs installed"
+		return
+	}
+
+	stopall
+	load_modules
+
+	local tmpmnt=$TMP/$tdir
+	local mdtimg=$tfile-mdt0
+
+	do_facet $SINGLEMDS mkdir -p $tmpmnt
+	stack_trap "do_facet $SINGLEMDS rmdir $tmpmnt" EXIT
+
+	do_facet $SINGLEMDS touch $TMP/$mdtimg
+	stack_trap "do_facet $SINGLEMDS rm -f $TMP/$mdtimg" EXIT
+	# build a 1T xfs image file to host the sparse MDT image below
+	do_facet $SINGLEMDS mkfs -t xfs -d file,size=1t,name=$TMP/$mdtimg ||
+		error "mkfs temporary xfs image"
+
+	do_facet $SINGLEMDS mount $TMP/$mdtimg $tmpmnt ||
+		error "mount temporary xfs image"
+	stack_trap "do_facet $SINGLEMDS umount $tmpmnt" EXIT
+	local old_mdssize=$MDSSIZE
+	local old_mdsisize=$MDSISIZE
+
+	# temporarily override the sizes so mkfs_opts generates options
+	# for a 17T MDT, then restore them
+	MDSSIZE=$((17 * 1024 * 1024 * 1024)) # 17T MDT
+	MDSISIZE=$((16 << 20))
+	local opts17t="$(mkfs_opts $SINGLEMDS)"
+
+	MDSSIZE=$old_mdssize
+	MDSISIZE=$old_mdsisize
+	do_facet $SINGLEMDS $MKFS $opts17t $tmpmnt/$mdtimg ||
+		error "failed to mkfs for $tmpmnt/$mdtimg"
+
+	# a >16TiB ldiskfs target must have the extent feature enabled
+	do_facet $SINGLEMDS $TUNE2FS -l $tmpmnt/$mdtimg |
+		grep -qw 'features.*extent' || error "extent should be enabled"
+}
+run_test 116 "big size MDT support"
+
+# Check that the OST updates its last-allocated object ID (sequence)
+# correctly: OBD_FAIL_OFD_SET_OID forces the failure path during setup,
+# then a large createmany must still obtain a new sequence.
+test_122() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+	[[ $(lustre_version_code ost1) -ge $(version_code 2.11.53) ]] ||
+		{ skip "Need OST version at least 2.11.53" && return 0; }
+
+	reformat
+	LOAD_MODULES_REMOTE=true load_modules
+#define OBD_FAIL_OFD_SET_OID 0x1e0
+	do_facet ost1 $LCTL set_param fail_loc=0x00001e0
+
+	setupall
+	# check setup steps so a failure here is not misreported as a
+	# sequence-creation failure below
+	$LFS mkdir -i1 -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+	$LFS setstripe -i0 -c1 $DIR/$tdir ||
+		error "setstripe $DIR/$tdir failed"
+	do_facet ost1 $LCTL set_param fail_loc=0
+	createmany -o $DIR/$tdir/file_ 1000 ||
+		error "Fail to create a new sequence"
+
+	reformat
+}
+run_test 122 "Check OST sequence update"
+
if ! combined_mgs_mds ; then
stop mgs
fi
reformat
complete $SECONDS
+check_and_cleanup_lustre
exit_status