Several tests hard-code the file system name "lustre". These
tests, or some of the calls they make, fail when run on
systems where the file system name is not "lustre". Change
these tests to use $FSNAME instead.
Test-Parameters: trivial testlist=sanity,conf-sanity
Signed-off-by: James Nunez <jnunez@whamcloud.com>
Change-Id: I22263d2ae5ad29806cb709f462ef21837916c939
Reviewed-on: https://review.whamcloud.com/36694
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Emoly Liu <emoly@whamcloud.com>
Reviewed-by: Wei Liu <sarah@whamcloud.com>
# Desired output
# MGS:
# 0@lo
# Desired output
# MGS:
# 0@lo
# 0@lo
do_facet mgs 'lshowmount -v' | awk 'BEGIN {NR == 0; rc=1} /MGS:/ {rc=0}
END {exit rc}' || error "lshowmount have no output MGS"
# 0@lo
do_facet mgs 'lshowmount -v' | awk 'BEGIN {NR == 0; rc=1} /MGS:/ {rc=0}
END {exit rc}' || error "lshowmount have no output MGS"
echo "changing server nid..."
$rcmd mount -t lustre -o nosvc lustre-mdt1/mdt1 $tmp/mnt/mdt1
echo "changing server nid..."
$rcmd mount -t lustre -o nosvc lustre-mdt1/mdt1 $tmp/mnt/mdt1
- $rcmd lctl replace_nids lustre-MDT0000 $nid
- $rcmd lctl replace_nids lustre-MDT0001 $nid
- $rcmd lctl replace_nids lustre-OST0000 $nid
- $rcmd lctl replace_nids lustre-OST0001 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0000 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0001 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0000 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0001 $nid
$rcmd umount $tmp/mnt/mdt1
for facet in $facets; do
$rcmd umount $tmp/mnt/mdt1
for facet in $facets; do
echo "changing server nid..."
$rcmd mount -t lustre -o nosvc,loop $tmp/images/mdt1 $tmp/mnt/mdt1
echo "changing server nid..."
$rcmd mount -t lustre -o nosvc,loop $tmp/images/mdt1 $tmp/mnt/mdt1
- $rcmd lctl replace_nids lustre-MDT0000 $nid
- $rcmd lctl replace_nids lustre-MDT0001 $nid
- $rcmd lctl replace_nids lustre-OST0000 $nid
- $rcmd lctl replace_nids lustre-OST0001 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0000 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0001 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0000 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0001 $nid
$rcmd umount $tmp/mnt/mdt1
for facet in $facets; do
$rcmd umount $tmp/mnt/mdt1
for facet in $facets; do
done
for facet in $scrub_list; do
done
for facet in $scrub_list; do
- $rcmd $LCTL lfsck_start -M lustre-$facet -t scrub ||
+ $rcmd $LCTL lfsck_start -M $FSNAME-$facet -t scrub ||
error "failed to start OI scrub on $facet"
done
error "failed to start OI scrub on $facet"
done
# obdfilter-survey :
# case 1 (local disk):
# $ nobjhi=2 thrhi=2 size=1024
# obdfilter-survey :
# case 1 (local disk):
# $ nobjhi=2 thrhi=2 size=1024
- # targets="lustre-OST0000 lustre-OST0001 ..."
+ # targets="$nid:$FSNAME-OST0000 $nid:$FSNAME-OST0001 ..."
# sh obdfilter-survey
local_node && [ "$1" == "disk" ] || target=$nid:$target
targets="$targets $target"
# sh obdfilter-survey
local_node && [ "$1" == "disk" ] || target=$nid:$target
targets="$targets $target"
[ $PARALLEL == "yes" ] && skip "skip parallel run"
remote_ost_nodsh && skip "remote OST with nodsh"
[ $PARALLEL == "yes" ] && skip "skip parallel run"
remote_ost_nodsh && skip "remote OST with nodsh"
- # get ost1 size - lustre-OST0000
+ # get ost1 size - $FSNAME-OST0000
ost1_size=$(do_facet ost1 $LFS df | grep ${ost1_svc} |
awk '{ print $4 }')
# write 800M at maximum
ost1_size=$(do_facet ost1 $LFS df | grep ${ost1_svc} |
awk '{ print $4 }')
# write 800M at maximum
# this test needs a huge transaction
local kb
# this test needs a huge transaction
local kb
- kb=$(do_facet $SINGLEMDS lctl get_param -n osd*.lustre-MDT0000.kbytestotal)
- [ $kb -lt $((1024*1024)) ] && skip "too small mds: $kb"
+ kb=$(do_facet $SINGLEMDS "$LCTL get_param -n \
+ osd*.$FSNAME-MDT0000.kbytestotal")
+ [ $kb -lt $((1024*1024)) ] && skip "MDT0 too small: $kb"
local stripe_count
local file
local stripe_count
local file
$LFS setstripe -c 1 -i 0 $DIR/$tfile
$LFS setstripe -c 1 -i 0 $DIR/$tfile
- # get ost1 size - lustre-OST0000
+ # get ost1 size - $FSNAME-OST0000
local ost1_avail_size=$($LFS df | awk /${ost1_svc}/'{ print $4 }')
local blocks=$((ost1_avail_size/2/1024)) # half avail space by megabytes
[ $blocks -gt 1000 ] && blocks=1000 # 1G in maximum
local ost1_avail_size=$($LFS df | awk /${ost1_svc}/'{ print $4 }')
local blocks=$((ost1_avail_size/2/1024)) # half avail space by megabytes
[ $blocks -gt 1000 ] && blocks=1000 # 1G in maximum
lmv.*.qos_maxage=$lmv_qos_maxage > /dev/null" EXIT
lod_qos_prio_free=$(do_facet mds1 $LCTL get_param -n \
lmv.*.qos_maxage=$lmv_qos_maxage > /dev/null" EXIT
lod_qos_prio_free=$(do_facet mds1 $LCTL get_param -n \
- lod.lustre-MDT0000-mdtlov.mdt_qos_prio_free | head -n1)
+ lod.$FSNAME-MDT0000-mdtlov.mdt_qos_prio_free | head -n1)
lod_qos_prio_free=${lod_qos_prio_free%%%}
lod_qos_threshold_rr=$(do_facet mds1 $LCTL get_param -n \
lod_qos_prio_free=${lod_qos_prio_free%%%}
lod_qos_threshold_rr=$(do_facet mds1 $LCTL get_param -n \
- lod.lustre-MDT0000-mdtlov.mdt_qos_threshold_rr | head -n1)
+ lod.$FSNAME-MDT0000-mdtlov.mdt_qos_threshold_rr | head -n1)
lod_qos_threshold_rr=${lod_qos_threshold_rr%%%}
lod_qos_maxage=$(do_facet mds1 $LCTL get_param -n \
lod_qos_threshold_rr=${lod_qos_threshold_rr%%%}
lod_qos_maxage=$(do_facet mds1 $LCTL get_param -n \
- lod.lustre-MDT0000-mdtlov.qos_maxage | awk '{ print $1 }')
+ lod.$FSNAME-MDT0000-mdtlov.qos_maxage | awk '{ print $1 }')
stack_trap "do_nodes $mdts $LCTL set_param \
lod.*.mdt_qos_prio_free=$lod_qos_prio_free > /dev/null" EXIT
stack_trap "do_nodes $mdts $LCTL set_param \
stack_trap "do_nodes $mdts $LCTL set_param \
lod.*.mdt_qos_prio_free=$lod_qos_prio_free > /dev/null" EXIT
stack_trap "do_nodes $mdts $LCTL set_param \
local usedkb
local old
local quota
local usedkb
local old
local quota
- local pref="osd-zfs.lustre-MDT0000."
+ local pref="osd-zfs.$FSNAME-MDT0000."
# limit available space on MDS dataset to meet nospace issue
# quickly. then ZFS 0.7.2 can use reserved space if asked
# limit available space on MDS dataset to meet nospace issue
# quickly. then ZFS 0.7.2 can use reserved space if asked