# Desired output
# MGS:
# 0@lo
# $FSNAME-MDT0000:
# 0@lo
# $FSNAME-OST0000:
# 0@lo
# Verify the MGS actually reports something via lshowmount: rc starts at 1
# and is cleared only if a "MGS:" line is seen; awk's exit code feeds ||.
do_facet mgs 'lshowmount -v' | awk 'BEGIN {rc=1} /MGS:/ {rc=0}
END {exit rc}' || error "lshowmount have no output MGS"
echo "changing server nid..."
# Mount the MDT with -o nosvc (config-only, no services) so replace_nids
# is allowed to rewrite the server NIDs stored in the config logs.
# NOTE(review): the ZFS dataset name "lustre-mdt1/mdt1" is a pool/dataset
# path, not a target label, so it is intentionally not $FSNAME-based.
$rcmd mount -t lustre -o nosvc lustre-mdt1/mdt1 $tmp/mnt/mdt1
$rcmd lctl replace_nids $FSNAME-MDT0000 $nid
$rcmd lctl replace_nids $FSNAME-MDT0001 $nid
$rcmd lctl replace_nids $FSNAME-OST0000 $nid
$rcmd lctl replace_nids $FSNAME-OST0001 $nid
$rcmd umount $tmp/mnt/mdt1
for facet in $facets; do
echo "changing server nid..."
# Same NID rewrite as above, but against a loopback-mounted MDT image
# (-o nosvc,loop): config-only mount, then update each target's NIDs.
$rcmd mount -t lustre -o nosvc,loop $tmp/images/mdt1 $tmp/mnt/mdt1
$rcmd lctl replace_nids $FSNAME-MDT0000 $nid
$rcmd lctl replace_nids $FSNAME-MDT0001 $nid
$rcmd lctl replace_nids $FSNAME-OST0000 $nid
$rcmd lctl replace_nids $FSNAME-OST0001 $nid
$rcmd umount $tmp/mnt/mdt1
# NOTE(review): the surrounding for/done lines are context from adjacent
# hunks; their loop bodies live outside this chunk — confirm against the
# full file before restructuring.
for facet in $facets; do
done
# Kick off an OI scrub on every target in $scrub_list; facet names
# (e.g. MDT0000) are suffixed onto $FSNAME to form the target label.
for facet in $scrub_list; do
$rcmd $LCTL lfsck_start -M $FSNAME-$facet -t scrub ||
error "failed to start OI scrub on $facet"
done
[ $PARALLEL == "yes" ] && skip "skip parallel run"
remote_ost_nodsh && skip "remote OST with nodsh"
# get ost1 size - $FSNAME-OST0000
# Column 4 of 'lfs df' is the available space for the matching target.
ost1_size=$(do_facet ost1 $LFS df | grep ${ost1_svc} |
awk '{ print $4 }')
# write 800M at maximum
# this test needs a huge transaction
local kb
# Require at least 1 GiB of total space on MDT0 before running.
kb=$(do_facet $SINGLEMDS "$LCTL get_param -n \
osd*.$FSNAME-MDT0000.kbytestotal")
[ $kb -lt $((1024*1024)) ] && skip "MDT0 too small: $kb"
local stripe_count
local file
# Pin the test file to a single stripe on OST index 0.
$LFS setstripe -c 1 -i 0 $DIR/$tfile
# get ost1 size - $FSNAME-OST0000
local ost1_avail_size=$($LFS df | awk /${ost1_svc}/'{ print $4 }')
local blocks=$((ost1_avail_size/2/1024)) # half avail space by megabytes
[ $blocks -gt 1000 ] && blocks=1000 # 1G in maximum
# NOTE(review): the first line is the tail of a stack_trap string opened
# outside this chunk — confirm against the full file.
lmv.*.qos_maxage=$lmv_qos_maxage > /dev/null" EXIT
# Save current QoS tunables from MDT0's lod device so stack_trap can
# restore them on exit.
lod_qos_prio_free=$(do_facet mds1 $LCTL get_param -n \
lod.$FSNAME-MDT0000-mdtlov.mdt_qos_prio_free | head -n1)
lod_qos_prio_free=${lod_qos_prio_free%%%}
lod_qos_threshold_rr=$(do_facet mds1 $LCTL get_param -n \
lod.$FSNAME-MDT0000-mdtlov.mdt_qos_threshold_rr | head -n1)
lod_qos_threshold_rr=${lod_qos_threshold_rr%%%}
lod_qos_maxage=$(do_facet mds1 $LCTL get_param -n \
lod.$FSNAME-MDT0000-mdtlov.qos_maxage | awk '{ print $1 }')
stack_trap "do_nodes $mdts $LCTL set_param \
lod.*.mdt_qos_prio_free=$lod_qos_prio_free > /dev/null" EXIT
stack_trap "do_nodes $mdts $LCTL set_param \
local usedkb
local old
local quota
# Parameter-name prefix for MDT0's osd-zfs device, used with get/set_param.
local pref="osd-zfs.$FSNAME-MDT0000."
# limit available space on MDS dataset to meet nospace issue
# quickly. then ZFS 0.7.2 can use reserved space if asked