Since commit
2a223541d299bc8 there has not been automated loading
of lustre modules in the log() command, because this command only
needs libcfs loaded to use the "lctl mark" functionality.
When testing from a local build tree, conf-sanity.sh was failing
because it unloads the modules during cleanup and is not able to
automatically load the modules on demand if they are not installed,
and does not always manually load the modules for each running test.
Add a call to load_modules() in mount_facet() where the modules are
actually needed for the filesystem to mount.
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Change-Id: Ie7655c04a7bf42ec9969cb5c11fab3ac92f3dc0b
Reviewed-on: http://review.whamcloud.com/17603
Reviewed-by: Jian Yu <jian.yu@intel.com>
Tested-by: Jenkins
Reviewed-by: wangdi <di.wang@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Wei Liu <wei3.liu@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
stop ost1 -f || error "unable to stop OST1"
stop_mds || error "Unable to stop MDS"
stop_mds || error "Unable to stop MDS on second try"
stop ost1 -f || error "unable to stop OST1"
stop_mds || error "Unable to stop MDS"
stop_mds || error "Unable to stop MDS on second try"
- unload_modules_conf || error "unload_modules_conf failed"
}
run_test 41a "mount mds with --nosvc and --nomgs"
}
run_test 41a "mount mds with --nosvc and --nomgs"
do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
cleanup || error "stopping $FSNAME failed with invalid sys param"
do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
cleanup || error "stopping $FSNAME failed with invalid sys param"
setup
check_mount || error "client was not mounted with invalid sys param"
cleanup || error "stopping $FSNAME failed with invalid sys param"
setup
check_mount || error "client was not mounted with invalid sys param"
cleanup || error "stopping $FSNAME failed with invalid sys param"
[ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return
[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
[ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return
[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" ||
error "tunefs MDT2 failed"
start_mds || error "Unable to start MDT"
do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" ||
error "tunefs MDT2 failed"
start_mds || error "Unable to start MDT"
local newvalue="${opts}=$(expr $basethr \* $ncpts)"
setmodopts -a $modname "$newvalue" oldvalue
local newvalue="${opts}=$(expr $basethr \* $ncpts)"
setmodopts -a $modname "$newvalue" oldvalue
setup
check_mount || return 41
setup
check_mount || return 41
run_test 72 "test fast symlink with extents flag enabled"
test_73() { #LU-3006
run_test 72 "test fast symlink with extents flag enabled"
test_73() { #LU-3006
[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
do_facet ost1 "$TUNEFS --failnode=1.2.3.4@$NETTYPE $(ostdevname 1)" ||
error "1st tunefs failed"
[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
do_facet ost1 "$TUNEFS --failnode=1.2.3.4@$NETTYPE $(ostdevname 1)" ||
error "1st tunefs failed"
local correct_clients
local wrap_up=5
local correct_clients
local wrap_up=5
echo "start mds service on $(facet_active_host $facet)"
start_mds \
"-o recovery_time_hard=$time_min,recovery_time_soft=$time_min" $@ ||
echo "start mds service on $(facet_active_host $facet)"
start_mds \
"-o recovery_time_hard=$time_min,recovery_time_soft=$time_min" $@ ||
[[ $(lustre_version_code ost1) -ge $(version_code 2.7.63) ]] ||
{ skip "Need OST version at least 2.7.63" && return 0; }
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.63) ]] ||
[[ $(lustre_version_code ost1) -ge $(version_code 2.7.63) ]] ||
{ skip "Need OST version at least 2.7.63" && return 0; }
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.63) ]] ||
local mntpt=$(facet_mntpt $facet)
local opts="${!opt} $@"
local mntpt=$(facet_mntpt $facet)
local opts="${!opt} $@"
+ module_loaded lustre || load_modules
+
if [ $(facet_fstype $facet) == ldiskfs ] &&
! do_facet $facet test -b ${!dev}; then
opts=$(csa_add "$opts" -o loop)
if [ $(facet_fstype $facet) == ldiskfs ] &&
! do_facet $facet test -b ${!dev}; then
opts=$(csa_add "$opts" -o loop)
# start facet device options
start() {
# start facet device options
start() {
- local facet=$1
- shift
- local device=$1
- shift
- eval export ${facet}_dev=${device}
- eval export ${facet}_opt=\"$@\"
+ local facet=$1
+ shift
+ local device=$1
+ shift
+ eval export ${facet}_dev=${device}
+ eval export ${facet}_opt=\"$@\"
- local varname=${facet}failover_dev
- if [ -n "${!varname}" ] ; then
- eval export ${facet}failover_dev=${!varname}
- else
- eval export ${facet}failover_dev=$device
- fi
+ local varname=${facet}failover_dev
+ if [ -n "${!varname}" ] ; then
+ eval export ${facet}failover_dev=${!varname}
+ else
+ eval export ${facet}failover_dev=$device
+ fi
local mntpt=$(facet_mntpt $facet)
do_facet ${facet} mkdir -p $mntpt
local mntpt=$(facet_mntpt $facet)
do_facet ${facet} mkdir -p $mntpt