ONLY=${ONLY:-"$*"}
-# bug number for skipped test: LU-7428
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 84"
+# bug number for skipped test: LU-8972
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
is_sles11() # LU-2181
test_5d() {
grep " $MOUNT " /etc/mtab &&
- error false "unexpected entry in mtab before mount" && return 10
+ error "unexpected entry in mtab before mount"
start_ost || error "OST start failed"
start_mds || error "MDS start failed"
- stop_ost || error "Unable to stop OST1"
+ stop_ost -f || error "Unable to stop OST1"
mount_client $MOUNT || error "mount_client $MOUNT failed"
umount_client $MOUNT -f || error "umount_client $MOUNT failed"
cleanup_nocli || error "cleanup_nocli failed with $?"
- grep " $MOUNT " /etc/mtab &&
+ ! grep " $MOUNT " /etc/mtab ||
error "$MOUNT entry in mtab after unmount"
- pass
}
run_test 5d "mount with ost down"
}
run_test 5f "mds down, cleanup after failed mount (bug 2712)"
+test_5g() {
+ modprobe lustre
+ [ $(lustre_version_code client) -lt $(version_code 2.9.53) ] &&
+ { skip "automount of debugfs missing before 2.9.53" && return 0; }
+ umount /sys/kernel/debug
+ $LCTL get_param -n devices | egrep -v "error" ||
+ error "lctl can't access debugfs data"
+ grep " debugfs " /etc/mtab || error "debugfs failed to remount"
+}
+run_test 5g "handle missing debugfs"
+
test_6() {
setup
manual_umount_client
test_19b() {
start_ost || error "Unable to start OST1"
- stop_ost || error "Unable to stop OST1"
+ stop_ost -f || error "Unable to stop OST1"
}
run_test 19b "start/stop OSTs without MDS"
destroy_zpool $facet $poolname
done
fi
+ combined_mgs_mds || start_mgs || rc=$?
return $rc
}
local mdt2_is_available=false
local node=$(facet_active_host $SINGLEMDS)
local r="do_node $node"
- local node2=$(facet_active_host mds2)
local tmp=$TMP/t32
local img_commit
local img_kernel
local stripe_count
local dir
+ combined_mgs_mds || stop_mgs || error "Unable to stop MGS"
trap 'trap - RETURN; t32_test_cleanup' RETURN
load_modules
! $mdt2_is_available || poolname_list+=" t32fs-mdt2"
for poolname in $poolname_list; do
- $r "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ $r "modprobe zfs;
+ $ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL import -f -d $tmp $poolname"
done
mkfsoptions="--mkfsoptions=\\\"-J size=8\\\""
fi
- add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
+ add $SINGLEMDS $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
$mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || {
error_noexit "Mkfs new MDT failed"
return 1
}
- [[ $(facet_fstype mds1) != zfs ]] || import_zpool fs2mds
+ [[ $(facet_fstype mds1) != zfs ]] || import_zpool mds1
$r $TUNEFS --dryrun $fs2mdsdev || {
error_noexit "tunefs.lustre before mounting the MDT"
if [[ $fstype == zfs ]]; then
local poolname=t32fs-mdt1
- $r "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ $r "modprobe zfs;
+ $ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL import -f -d $tmp $poolname"
fi
$server_version -lt $(version_code 2.5.11) ]] ||
{ skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+"; return; }
+ # ensure mds1 and ost1 have been created even when running a sub-test standalone
cleanup
+ setup
+ cleanup || error "cleanup failed"
+
+ # use the mount command directly instead of the start() function, to
+ # avoid side effects from racing with other external tools or features
+ # ("zpool import", ...)
+
# MDT concurrent start
LOAD_MODULES_REMOTE=true load_modules
do_facet $SINGLEMDS "lsmod | grep -q libcfs" ||
error "MDT concurrent start: libcfs module not loaded"
+ local mds1dev=$(mdsdevname 1)
+ local mds1mnt=$(facet_mntpt mds1)
+ local mds1fstype=$(facet_fstype mds1)
+ local mds1opts=$MDS_MOUNT_OPTS
+
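+ # an ldiskfs target backed by a regular file (not a block device)
+ # must be mounted through a loop device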
+ if [ $mds1fstype == ldiskfs ] &&
+ ! do_facet mds1 test -b $mds1dev; then
+ mds1opts=$(csa_add "$mds1opts" -o loop)
+ fi
+ if [[ $mds1fstype == zfs ]]; then
+ import_zpool mds1 || return ${PIPESTATUS[0]}
+ fi
+
#define OBD_FAIL_TGT_MOUNT_RACE 0x716
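+ # 0x80000000 is OBD_FAIL_ONCE: the failure fires only on the first
+ # mount attempt, so no fail_loc reset is needed between the two mounts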
- do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x716"
- start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
+ do_facet mds1 "$LCTL set_param fail_loc=0x80000716"
+
+ do_facet mds1 mount -t lustre $mds1dev $mds1mnt $mds1opts &
local pid=$!
- start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
- do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x0"
- local pid2=$!
- wait $pid2
+
+ do_facet mds1 mount -t lustre $mds1dev $mds1mnt $mds1opts
local rc2=$?
wait $pid
local rc=$?
+ do_facet mds1 "$LCTL set_param fail_loc=0x0"
if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then
echo "1st MDT start succeed"
echo "2nd MDT start failed with $rc2"
do_rpc_nodes $oss_list "lsmod | grep -q libcfs" ||
error "OST concurrent start: libcfs module not loaded"
+ local ost1dev=$(ostdevname 1)
+ local ost1mnt=$(facet_mntpt ost1)
+ local ost1fstype=$(facet_fstype ost1)
+ local ost1opts=$OST_MOUNT_OPTS
+
+ if [ $ost1fstype == ldiskfs ] &&
+ ! do_facet ost1 test -b $ost1dev; then
+ ost1opts=$(csa_add "$ost1opts" -o loop)
+ fi
+ if [[ $ost1fstype == zfs ]]; then
+ import_zpool ost1 || return ${PIPESTATUS[0]}
+ fi
+
#define OBD_FAIL_TGT_MOUNT_RACE 0x716
- do_facet ost1 "$LCTL set_param fail_loc=0x716"
- start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
+ do_facet ost1 "$LCTL set_param fail_loc=0x80000716"
+
+ do_facet ost1 mount -t lustre $ost1dev $ost1mnt $ost1opts &
pid=$!
- start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
- do_facet ost1 "$LCTL set_param fail_loc=0x0"
- pid2=$!
- wait $pid2
+
+ do_facet ost1 mount -t lustre $ost1dev $ost1mnt $ost1opts
rc2=$?
wait $pid
rc=$?
+ do_facet ost1 "$LCTL set_param fail_loc=0x0"
if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then
echo "1st OST start succeed"
echo "2nd OST start failed with $rc2"
df -h $MOUNT &
log "sleep 60 sec"
sleep 60
-#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f
- do_facet client "$LCTL set_param fail_loc=0x50f fail_val=0"
+ #define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f
+ do_facet client "$LCTL set_param fail_loc=0x8000050f"
log "sleep 10 sec"
sleep 10
manual_umount_client --force || error "manual_umount_client failed"
return
fi
- local inode_slab=$(do_facet $SINGLEMDS \
- "awk '/ldiskfs_inode_cache/ { print \\\$5 }' /proc/slabinfo")
+ do_rpc_nodes $(facet_active_host $SINGLEMDS) load_module ldiskfs
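+ # /proc/slabinfo fields: $5 = objperslab, $6 = pagesperslab,
+ # so $5 / $6 is how many ldiskfs inodes fit in one page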
+ local inode_slab=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+ awk '/ldiskfs_inode_cache/ { print $5 / $6 }')
if [ -z "$inode_slab" ]; then
skip "ldiskfs module has not been loaded"
return
fi
- echo "$inode_slab ldisk inodes per page"
- [ "$inode_slab" -ge "3" ] ||
- error "ldisk inode size is too big, $inode_slab objs per page"
- return
+ echo "$inode_slab ldiskfs inodes per page"
+ [ "${inode_slab%.*}" -ge "3" ] && return 0
+
+ # If kmalloc-128 is also 1 per page - this is a debug kernel
+ # and so this is not an error.
+ local kmalloc128=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+ awk '/^(kmalloc|size)-128 / { print $5 / $6 }')
+ # 32 128-byte chunks in 4k
+ [ "${kmalloc128%.*}" -lt "32" ] ||
+ error "ldiskfs inode too big, only $inode_slab objs/page, " \
+ "kmalloc128 = $kmalloc128 objs/page"
}
-run_test 63 "Verify each page can at least hold 3 ldisk inodes"
+run_test 63 "Verify each page can at least hold 3 ldiskfs inodes"
test_64() {
start_mds || error "unable to start MDS"
mount_client $MOUNT || error "Unable to mount client"
stop_ost2 || error "Unable to stop second ost"
echo "$LFS df"
- $LFS df --lazy || error "lfs df failed"
+ $LFS df --lazy
umount_client $MOUNT -f || error "unmount $MOUNT failed"
cleanup_nocli || error "cleanup_nocli failed with $?"
#writeconf to remove all ost2 traces for subsequent tests
}
run_test 81 "sparse OST indexing"
-# Wait OSTs to be active on both client and MDT side.
-wait_osts_up() {
- local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd |
- awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
- wait_update $HOSTNAME "eval $cmd" $OSTCOUNT ||
- error "wait_update OSTs up on client failed"
-
- cmd="$LCTL get_param -n lod.$FSNAME-MDT*-*.target_obd | sort -u |
- awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
- wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT ||
- error "wait_update OSTs up on MDT failed"
-}
-
# Here we exercise the stripe placement functionality on a file system whose
# OSTs were formatted with random indices. The following functionality is
# tested:
wait_update $HOSTNAME "$LCTL get_param -n lov.$FSNAME-*.pools.$TESTNAME|
sort -u | tr '\n' ' ' " "$ost_targets_uuid" ||
error "wait_update $ost_pool failed"
- pool_list $ost_pool || error "list OST pool $ost_pool failed"
+ [[ -z $(list_pool $ost_pool) ]] &&
+ error "list OST pool $ost_pool failed"
# If [--pool|-p <pool_name>] is set with [--ost-list|-o <ost_indices>],
# then the OSTs must be the members of the pool.
}
run_test 97 "ldev returns correct output when querying based on role"
+test_98()
+{
+ local mountopt
+ local temp=$MDS_MOUNT_OPTS
+
+ setup
+ check_mount || error "mount failed"
+ mountopt="user_xattr"
+ for ((x = 1; x <= 400; x++)); do
+ mountopt="$mountopt,user_xattr"
+ done
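+ # mount(2) copies the options string into a single page, so ~400 copies
+ # of ",user_xattr" (well over 4KiB) must be rejected as "too long"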
+ remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+ error "Buffer overflow check failed"
+ cleanup || error "cleanup failed"
+}
+run_test 98 "Buffer-overflow check while parsing mount_opts"
+
test_99()
{
[[ $(facet_fstype ost1) != ldiskfs ]] &&
{ skip "Only applicable to ldiskfs-based OSTs" && return; }
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] ||
+ { skip "Need OST version at least 2.8.57" && return 0; }
local ost_opts="$(mkfs_opts ost1 $(ostdevname 1)) \
--reformat $(ostdevname 1) $(ostvdevname 1)"
}
run_test 99 "Adding meta_bg option"
+test_100() {
+ reformat
+ start_mds || error "MDS start failed"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "client start failed"
+ check_mount || error "check_mount failed"
+
+ # Desired output
+ # MGS:
+ # 0@lo
+ # lustre-MDT0000:
+ # 0@lo
+ # lustre-OST0000:
+ # 0@lo
+ do_facet mgs 'lshowmount -v' | awk 'BEGIN {rc=1} /MGS:/ {rc=0}
+ END {exit rc}' || error "lshowmount has no MGS output"
+
+ do_facet mds1 'lshowmount -v' | awk 'BEGIN {rc=1}
+ /-MDT0000:/ {rc=0} END {exit rc}' ||
+ error "lshowmount has no MDT0000 output"
+
+ do_facet ost1 'lshowmount -v' | awk 'BEGIN {rc=1}
+ /-OST0000:/ {rc=0} END {exit rc}' ||
+ error "lshowmount has no OST0000 output"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
+
+test_101() {
+ local createmany_oid
+ local dev=$FSNAME-OST0000-osc-MDT0000
+ setup
+
+ createmany -o $DIR1/$tfile-%d 50000 &
+ createmany_oid=$!
+ # MDT->OST reconnection causes MDT<->OST last_id synchronisation
+ # via osp_precreate_cleanup_orphans.
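+ # bouncing the osc device below forces repeated reconnects, racing
+ # orphan cleanup against the createmany still running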
+ for ((i = 0; i < 100; i++)); do
+ for ((k = 0; k < 10; k++)); do
+ do_facet $SINGLEMDS "$LCTL --device $dev deactivate;" \
+ "$LCTL --device $dev activate"
+ done
+
+ ls -asl $MOUNT | grep '???' &&
+ { kill -9 $createmany_oid &>/dev/null;
+ error "file has no object on OST"; }
+
+ kill -s 0 $createmany_oid || break
+ done
+ wait $createmany_oid
+ cleanup
+}
+run_test 101 "Race MDT->OST reconnection with create"
+
+test_102() {
+ cleanup || error "cleanup failed with $?"
+
+ local mds1dev=$(mdsdevname 1)
+ local mds1mnt=$(facet_mntpt mds1)
+ local mds1fstype=$(facet_fstype mds1)
+ local mds1opts=$MDS_MOUNT_OPTS
+
+ if [ $mds1fstype == ldiskfs ] &&
+ ! do_facet mds1 test -b $mds1dev; then
+ mds1opts=$(csa_add "$mds1opts" -o loop)
+ fi
+ if [[ $mds1fstype == zfs ]]; then
+ import_zpool mds1 || return ${PIPESTATUS[0]}
+ fi
+
+ # unload all and only load libcfs to allow fail_loc setting
+ do_facet mds1 lustre_rmmod || error "unable to unload modules"
+ do_facet mds1 modprobe libcfs || error "libcfs not loaded"
+ do_facet mds1 lsmod \| grep libcfs || error "libcfs not loaded"
+
+ #define OBD_FAIL_OBDCLASS_MODULE_LOAD 0x60a
+ do_facet mds1 "$LCTL set_param fail_loc=0x8000060a"
+
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts &&
+ error "mdt start must fail"
+ do_facet mds1 lsmod \| grep obdclass && error "obdclass must not load"
+
+ do_facet mds1 "$LCTL set_param fail_loc=0x0"
+
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts ||
+ error "mdt start must not fail"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 102 "obdclass module cleanup upon error"
+
if ! combined_mgs_mds ; then
stop mgs
fi