+t_108_prep() {
+ local facet
+
+ $rcmd rm -rf $tmp > /dev/null 2>&1
+ $rcmd mkdir -p $tmp/{mnt,images} || error "failed to mkdir remotely"
+
+ for facet in $facets; do
+ [ "$mds1_FSTYPE" = zfs ] &&
+ $rcmd $ZPOOL -f export lustre-$facet > /dev/null 2>&1
+ $rcmd mkdir $tmp/mnt/$facet ||
+ error "failed to mkdir $tmp/mnt/$facet"
+ $rcmd dd if=/dev/zero of=$tmp/images/$facet \
+ seek=199 bs=1M count=1 ||
+ error "failed to create $tmp/images/$facet"
+ done
+}
+
+# t_108_mkfs(): format one migration-test target image.
+# Arguments: $1 role (mdt|ost), $2 zero-based index, $3 backing fstype,
+# $4 MGS spec ("mgs" or "mgsnode=<nid>"), $5 optional pool/dataset name.
+# Uses caller-scoped $tmp; the image file must already exist (t_108_prep).
+t_108_mkfs() {
+ local role=$1
+ local idx=$2
+ local bkfs=$3
+ local mgs=$4
+ local facet=${role}$((idx + 1))
+ local pool=""
+ [ $# -eq 5 ] && pool=$5
+
+ do_facet $SINGLEMDS $MKFS --fsname=lustre --$mgs \
+ --$role --index=$idx --replace --backfstype=$bkfs \
+ --device-size=200000 --reformat $pool $tmp/images/$facet ||
+ error "failed to mkfs for $facet"
+}
+
+t_108_check() {
+ echo "mounting client..."
+ mount -t lustre ${nid}:/lustre $MOUNT ||
+ error "failed to mount lustre"
+
+ echo "check list"
+ ls -l $MOUNT/local_dir || error "failed to list"
+
+ echo "check truncate && write"
+ echo "dummmmmmmmmmmmm" > $MOUNT/remote_dir/fsx.c ||
+ error "failed to tuncate & write"
+
+ echo "check create"
+ touch $MOUNT/foooo ||
+ error "failed to create"
+
+ echo "check read && write && append"
+ sha1sum $MOUNT/conf-sanity.sh |
+ awk '{ print $1 }' > $MOUNT/checksum.new ||
+ error "failed to read(1)"
+ sha1sum $MOUNT/remote_dir/unlinkmany.c |
+ awk '{ print $1 }' >> $MOUNT/checksum.new ||
+ error "failed to read(2)"
+ sha1sum $MOUNT/striped_dir/lockahead_test.o |
+ awk '{ print $1 }' >> $MOUNT/checksum.new ||
+ error "failed to read(3)"
+
+ echo "verify data"
+ diff $MOUNT/checksum.new $MOUNT/checksum.src ||
+ error "failed to verify data"
+
+ echo "done."
+}
+
+# t_108_cleanup(): undo t_108 state — unmount the client and every target,
+# export the ZFS pools (zfs runs only), and remove the scratch directory.
+# Clears the EXIT/ERR trap installed by the caller. Uses caller-scoped
+# $rcmd, $tmp, $facets.
+t_108_cleanup() {
+ trap 0
+ local facet
+
+ echo "cleanup..."
+ umount -f $MOUNT || error "failed to umount client"
+ for facet in $facets; do
+ $rcmd umount -f $tmp/mnt/$facet ||
+ error "failed to umount $facet"
+ if [ "$mds1_FSTYPE" = zfs ]; then
+ $rcmd $ZPOOL export -f lustre-$facet ||
+ error "failed to export lustre-$facet"
+ fi
+ done
+
+ $rcmd rm -rf $tmp || error "failed to rm the dir $tmp"
+}
+
+# test_108a: migrate a filesystem from ldiskfs images to ZFS datasets.
+# Untars 2.11-era ldiskfs target images into freshly formatted ZFS targets,
+# rewrites the server NIDs, remounts, and verifies the data (t_108_check).
+test_108a() {
+ [ "$CLIENTONLY" ] && skip "Client-only testing"
+ [ "$mds1_FSTYPE" != zfs ] && skip "zfs only test"
+ [ "$MDS1_VERSION" -lt $(version_code 2.10.58) ] &&
+ skip "Need server version at least 2.10.58"
+
+ stopall
+ load_modules
+
+ # these locals are read by the t_108_* helpers (dynamic scoping)
+ local tmp=$TMP/$tdir
+ local rcmd="do_facet $SINGLEMDS"
+ local facets="mdt1 mdt2 ost1 ost2"
+ local nid=$($rcmd $LCTL list_nids | head -1)
+ local facet
+
+ trap t_108_cleanup EXIT ERR
+ t_108_prep
+
+ t_108_mkfs mdt 0 zfs mgs lustre-mdt1/mdt1
+ t_108_mkfs mdt 1 zfs mgsnode=$nid lustre-mdt2/mdt2
+ t_108_mkfs ost 0 zfs mgsnode=$nid lustre-ost1/ost1
+ t_108_mkfs ost 1 zfs mgsnode=$nid lustre-ost2/ost2
+
+ for facet in $facets; do
+ # mount each dataset locally, unpack the ldiskfs image into it,
+ # then drop the stale OI/LFSCK files so OI scrub rebuilds them
+ $rcmd zfs set mountpoint=$tmp/mnt/$facet canmount=on \
+ lustre-$facet/$facet ||
+ error "failed to zfs set for $facet (1)"
+ $rcmd zfs mount lustre-$facet/$facet ||
+ error "failed to local mount $facet"
+ $rcmd tar jxf $LUSTRE/tests/ldiskfs_${facet}_2_11.tar.bz2 \
+ --xattrs --xattrs-include="trusted.*" \
+ -C $tmp/mnt/$facet/ > /dev/null 2>&1 ||
+ error "failed to untar image for $facet"
+ $rcmd "cd $tmp/mnt/$facet && rm -rf oi.* OI_* lfsck_* LFSCK" ||
+ error "failed to cleanup for $facet"
+ $rcmd zfs umount lustre-$facet/$facet ||
+ error "failed to local umount $facet"
+ $rcmd zfs set canmount=off lustre-$facet/$facet ||
+ error "failed to zfs set $facet (2)"
+ done
+
+ echo "changing server nid..."
+ # nosvc mount gives access to the MGS config logs without starting MDT
+ $rcmd mount -t lustre -o nosvc lustre-mdt1/mdt1 $tmp/mnt/mdt1
+ $rcmd lctl replace_nids $FSNAME-MDT0000 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0001 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0000 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0001 $nid
+ $rcmd umount $tmp/mnt/mdt1
+
+ for facet in $facets; do
+ echo "mounting $facet from backup..."
+ $rcmd mount -t lustre -o abort_recov lustre-$facet/$facet \
+ $tmp/mnt/$facet || error "failed to mount $facet"
+ done
+
+ # ZFS backend can detect migration and trigger OI scrub automatically
+ # sleep 3 seconds for scrub done
+ sleep 3
+
+ t_108_check
+ t_108_cleanup
+}
+run_test 108a "migrate from ldiskfs to ZFS"
+
+# test_108b: migrate a filesystem from ZFS images to ldiskfs (reverse of
+# 108a). ldiskfs cannot auto-detect migration, so OI scrub is started
+# explicitly on every target via lfsck_start.
+test_108b() {
+ [ "$CLIENTONLY" ] && skip "Client-only testing"
+ [ "$mds1_FSTYPE" != ldiskfs ] && skip "ldiskfs only test"
+ [ "$MDS1_VERSION" -lt $(version_code 2.10.58) ] &&
+ skip "Need server version at least 2.10.58"
+
+ stopall
+ load_modules
+
+ # these locals are read by the t_108_* helpers (dynamic scoping)
+ local tmp=$TMP/$tdir
+ local rcmd="do_facet $SINGLEMDS"
+ local facets="mdt1 mdt2 ost1 ost2"
+ local scrub_list="MDT0000 MDT0001 OST0000 OST0001"
+ local nid=$($rcmd $LCTL list_nids | head -1)
+ local facet
+
+ trap t_108_cleanup EXIT ERR
+ t_108_prep
+
+ t_108_mkfs mdt 0 ldiskfs mgs
+ t_108_mkfs mdt 1 ldiskfs mgsnode=$nid
+ t_108_mkfs ost 0 ldiskfs mgsnode=$nid
+ t_108_mkfs ost 1 ldiskfs mgsnode=$nid
+
+ for facet in $facets; do
+ # loop-mount each image, unpack the ZFS backup into it, then
+ # drop stale OI/LFSCK files so OI scrub can rebuild them
+ $rcmd mount -t ldiskfs -o loop $tmp/images/$facet \
+ $tmp/mnt/$facet ||
+ error "failed to local mount $facet"
+
+ $rcmd tar jxf $LUSTRE/tests/zfs_${facet}_2_11.tar.bz2 \
+ --xattrs --xattrs-include="*.*" \
+ -C $tmp/mnt/$facet/ > /dev/null 2>&1 ||
+ error "failed to untar image for $facet"
+ $rcmd "cd $tmp/mnt/$facet && rm -rf oi.* OI_* lfsck_* LFSCK" ||
+ error "failed to cleanup for $facet"
+ $rcmd umount $tmp/mnt/$facet ||
+ error "failed to local umount $facet"
+ done
+
+ echo "changing server nid..."
+ # nosvc mount gives access to the MGS config logs without starting MDT
+ $rcmd mount -t lustre -o nosvc,loop $tmp/images/mdt1 $tmp/mnt/mdt1
+ $rcmd lctl replace_nids $FSNAME-MDT0000 $nid
+ $rcmd lctl replace_nids $FSNAME-MDT0001 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0000 $nid
+ $rcmd lctl replace_nids $FSNAME-OST0001 $nid
+ $rcmd umount $tmp/mnt/mdt1
+
+ for facet in $facets; do
+ echo "mounting $facet from backup..."
+ $rcmd mount -t lustre -o loop,abort_recov $tmp/images/$facet \
+ $tmp/mnt/$facet || error "failed to mount $facet"
+ done
+
+ # ldiskfs does not start OI scrub automatically after migration
+ for facet in $scrub_list; do
+ $rcmd $LCTL lfsck_start -M $FSNAME-$facet -t scrub ||
+ error "failed to start OI scrub on $facet"
+ done
+
+ # sleep 3 seconds for scrub done
+ sleep 3
+
+ t_108_check
+ t_108_cleanup
+}
+run_test 108b "migrate from ZFS to ldiskfs"
+
+
+#
+# test_109_set_params(): set a number of permanent (set_param -P /
+# conf_param) parameters and create a pool with some add/remove churn,
+# so clear_conf has redundant records to compact. $1 - filesystem name.
+#
+test_109_set_params() {
+ local fsname=$1
+
+ # set each parameter twice so the config log contains superseded
+ # records that clear_conf should remove
+ set_persistent_param_and_check mds \
+ "mdd.$fsname-MDT0000.atime_diff" \
+ "$fsname-MDT0000.mdd.atime_diff" \
+ "62"
+ set_persistent_param_and_check mds \
+ "mdd.$fsname-MDT0000.atime_diff" \
+ "$fsname-MDT0000.mdd.atime_diff" \
+ "63"
+ set_persistent_param_and_check client \
+ "llite.$fsname*.max_read_ahead_mb" \
+ "$fsname.llite.max_read_ahead_mb" \
+ "32"
+ set_persistent_param_and_check client \
+ "llite.$fsname*.max_read_ahead_mb" \
+ "$fsname.llite.max_read_ahead_mb" \
+ "64"
+ create_pool $fsname.pool1 || error "create pool failed"
+ do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 ||
+ error "pool_add failed"
+ do_facet mgs $LCTL pool_remove $fsname.pool1 OST0000 ||
+ error "pool_remove failed"
+ do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 ||
+ error "pool_add failed"
+}
+
+#
+# test_109_test_params(): verify that the final values set by
+# test_109_set_params survived clear_conf. $1 - filesystem name.
+#
+test_109_test_params() {
+ local fsname=$1
+
+ local atime_diff=$(do_facet mds $LCTL \
+ get_param -n mdd.$fsname-MDT0000.atime_diff)
+ [ $atime_diff == 63 ] || error "wrong mdd parameter after clear_conf"
+ local max_read_ahead_mb=$(do_facet client $LCTL \
+ get_param -n llite.$fsname*.max_read_ahead_mb)
+ [ $max_read_ahead_mb == 64 ] ||
+ error "wrong llite parameter after clear_conf"
+ # pool_list prints "<pool>_UUID" members after a "Pool:" header line
+ local ost_in_pool=$(do_facet mds $LCTL pool_list $fsname.pool1 |
+ grep -v "^Pool:" | sed 's/_UUID//')
+ [ $ost_in_pool = "$fsname-OST0000" ] ||
+ error "wrong pool after clear_conf"
+}
+
+#
+# test_109_clear_conf(): run lctl clear_conf against the MGS (mounted in
+# nosvc mode), dumping CONFIGS with debugfs to $TMP/$tdir/conf1 before and
+# conf2 after, so callers can compare log sizes. $1 - clear_conf argument
+# (fsname or a single config log name).
+#
+test_109_clear_conf()
+{
+ local clear_conf_arg=$1
+
+ local mgsdev
+ if ! combined_mgs_mds ; then
+ mgsdev=$(mgsdevname)
+ stop_mgs || error "stop_mgs failed"
+ start_mgs "-o nosvc" || error "start_mgs nosvc failed"
+ else
+ mgsdev=$(mdsdevname 1)
+ start_mdt 1 "-o nosvc" || error "start_mdt 1 nosvc failed"
+ fi
+
+ # snapshot CONFIGS before clear_conf
+ do_facet mgs "rm -rf $TMP/${tdir}/conf1; mkdir -p $TMP/${tdir}/conf1;" \
+ "$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf1\\\" \
+ $mgsdev"
+
+ #
+ # the command being tested
+ #
+ do_facet mgs $LCTL clear_conf $clear_conf_arg ||
+ error "clear_conf failed"
+ if ! combined_mgs_mds ; then
+ stop_mgs || error "stop_mgs failed"
+ else
+ stop_mdt 1 || error "stop_mdt 1 failed"
+ fi
+
+ # snapshot CONFIGS after clear_conf
+ do_facet mgs "rm -rf $TMP/${tdir}/conf2; mkdir -p $TMP/${tdir}/conf2;" \
+ "$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf2\\\" \
+ $mgsdev"
+}
+
+# test_109_file_shortened(): return 0 iff config log $1 shrank between the
+# conf1 (before) and conf2 (after) dumps made by test_109_clear_conf.
+test_109_file_shortened() {
+ local file=$1
+ # sizes[0] = before, sizes[1] = after
+ local sizes=($(do_facet mgs "stat -c %s " \
+ "$TMP/${tdir}/conf1/CONFIGS/$file" \
+ "$TMP/${tdir}/conf2/CONFIGS/$file"))
+ [ ${sizes[1]} -lt ${sizes[0]} ] && return 0
+ return 1
+}
+
+# test_109a: "lctl clear_conf <fsname>" must compact every config log of
+# the filesystem while preserving the effective parameter values.
+test_109a()
+{
+ [ $MDS1_VERSION -lt $(version_code 2.10.59) ] &&
+ skip "Needs MDS version 2.10.59 or later."
+ [ "$(facet_fstype mgs)" == zfs ] &&
+ skip "LU-8727: no implementation for ZFS"
+
+ stopall
+ reformat
+ setup_noconfig
+ client_up || error "client_up failed"
+
+ #
+ # set number of permanent parameters
+ #
+ test_109_set_params $FSNAME
+
+ umount_client $MOUNT || error "umount_client failed"
+ stop_ost || error "stop_ost failed"
+ stop_mds || error "stop_mds failed"
+
+ test_109_clear_conf $FSNAME
+ #
+ # make sure that all configs are cleared
+ #
+ test_109_file_shortened $FSNAME-MDT0000 ||
+ error "failed to clear MDT0000 config"
+ test_109_file_shortened $FSNAME-client ||
+ error "failed to clear client config"
+
+ setup_noconfig
+
+ #
+ # check that configurations are intact
+ #
+ test_109_test_params $FSNAME
+
+ #
+ # Destroy pool.
+ #
+ destroy_test_pools || error "destroy test pools failed"
+
+ cleanup
+}
+run_test 109a "test lctl clear_conf fsname"
+
+# test_109b: "lctl clear_conf <log>" must compact only the named config
+# log (MDT0000) and leave the client log untouched.
+test_109b()
+{
+ [ $MDS1_VERSION -lt $(version_code 2.10.59) ] &&
+ skip "Needs MDS version 2.10.59 or later."
+ [ "$(facet_fstype mgs)" == zfs ] &&
+ skip "LU-8727: no implementation for ZFS"
+
+ stopall
+ reformat
+ setup_noconfig
+ client_up || error "client_up failed"
+
+ #
+ # set number of permanent parameters
+ #
+ test_109_set_params $FSNAME
+
+ umount_client $MOUNT || error "umount_client failed"
+ stop_ost || error "stop_ost failed"
+ stop_mds || error "stop_mds failed"
+
+ test_109_clear_conf $FSNAME-MDT0000
+ #
+ # make sure that only one config is cleared
+ #
+ test_109_file_shortened $FSNAME-MDT0000 ||
+ error "failed to clear MDT0000 config"
+ # client log must NOT have shrunk
+ test_109_file_shortened $FSNAME-client &&
+ error "failed to clear client config"
+
+ setup_noconfig
+ #
+ # check that configurations are intact
+ #
+ test_109_test_params $FSNAME
+
+ #
+ # Destroy pool.
+ #
+ destroy_test_pools || error "destroy test pools failed"
+
+ cleanup
+}
+run_test 109b "test lctl clear_conf one config"
+
+# test_110: reformat a single MDT/OST pair with the ldiskfs large_dir
+# feature and create enough hard links in one directory to push the
+# directory into a 3-level htree, then e2fsck the MDT for consistency.
+test_110()
+{
+ [[ "$mds1_FSTYPE" != ldiskfs ]] &&
+ skip "Only applicable to ldiskfs-based MDTs"
+
+ do_facet $SINGLEMDS $DEBUGFS -w -R supported_features |grep large_dir ||
+ skip "large_dir option is not supported on MDS"
+ do_facet ost1 $DEBUGFS -w -R supported_features | grep large_dir ||
+ skip "large_dir option is not supported on OSS"
+
+ stopall # stop all targets before modifying the target counts
+ local old_mdscount=$MDSCOUNT
+ local old_ostcount=$OSTCOUNT
+ local replace=""
+ stack_trap "MDSCOUNT=$old_mdscount OSTCOUNT=$old_ostcount" EXIT
+ MDSCOUNT=1
+ OSTCOUNT=1
+
+ # ext4_dir_entry_2 struct size:264
+ # dx_root struct size:8
+ # dx_node struct size:8
+ # dx_entry struct size:8
+ # For 1024 bytes block size.
+ # First level directory entries: 126
+ # Second level directory entries: 127
+ # Entries in leaf: 3
+ # For 2 levels limit: 48006
+ # For 3 levels limit : 6096762
+ # Create 80000 files to safely exceed 2-level htree limit.
+ CONF_SANITY_110_LINKS=${CONF_SANITY_110_LINKS:-80000}
+
+ # can fit at most 3 filenames per 1KB leaf block, but each
+ # leaf/index block will only be 3/4 full before split at each level
+ (( MDSSIZE < CONF_SANITY_110_LINKS / 3 * 4/3 * 4/3 )) &&
+ CONF_SANITY_110_LINKS=$((MDSSIZE * 3 * 3/4 * 3/4))
+
+ combined_mgs_mds || replace=" --replace "
+ local opts="$(mkfs_opts mds1 $(mdsdevname 1)) \
+ $replace --reformat $(mdsdevname 1) $(mdsvdevname 1)"
+ # inject large_dir into --mkfsoptions, adding the option if absent
+ if [[ $opts != *large_dir* ]]; then
+ if [[ $opts != *mkfsoptions* ]]; then
+ opts+=" --mkfsoptions=\\\"-O large_dir -b 1024 -i 65536\\\""
+ else
+ opts="${opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O large_dir -b 1024 -i 65536 }"
+ fi
+ fi
+ echo "MDT params: $opts"
+ load_modules
+ combined_mgs_mds || start_mgs
+ add mds1 $opts || error "add mds1 failed with new params"
+ start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+
+ opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+ $replace --reformat $(ostdevname 1) $(ostvdevname 1)"
+
+ if [[ $opts != *large_dir* ]]; then
+ if [[ $opts != *mkfsoptions* ]]; then
+ opts+=" --mkfsoptions=\\\"-O large_dir\\\" "
+ else
+ opts="${opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O large_dir }"
+ fi
+ fi
+ echo "OST params: $opts"
+ add ost1 $opts || error "add ost1 failed with new params"
+ start ost1 $(ostdevname 1) $OST_MOUNT_OPTS
+
+ MOUNT_2=yes mountcli || error "mount clients failed"
+
+ mkdir -v $DIR/$tdir || error "cannot create $DIR/$tdir"
+ local pids count=0 group=0
+
+ echo "creating $CONF_SANITY_110_LINKS in total"
+ # create links in parallel batches of up to 40000, alternating
+ # between the two client mounts ($DIR1/$DIR2)
+ while (( count < CONF_SANITY_110_LINKS )); do
+ local len=$((253 - $(wc -c <<<"$tfile-$group-40000-")))
+ local dir=DIR$((group % 2 + 1))
+ local target=${!dir}/$tdir/$tfile-$group
+ local long=$target-$(generate_name $len)-
+ local create=$((CONF_SANITY_110_LINKS - count))
+
+ (( create > 40000 )) && create=40000
+ touch $target || error "creating $target failed"
+ echo "creating $create hard links to $target"
+ createmany -l $target $long $create &
+ pids+=" $!"
+
+ count=$((count + create))
+ group=$((group + 1))
+ done
+ echo "waiting for PIDs$pids to complete"
+ wait $pids || error "createmany failed after $group groups"
+
+ umount_client $MOUNT2 -f
+ cleanup
+
+ run_e2fsck $(facet_active_host mds1) $(mdsdevname 1) -n
+ MDSCOUNT=$old_mdscount
+ OSTCOUNT=$old_ostcount
+}
+run_test 110 "Adding large_dir with 3-level htree"
+
+# test_111: like test_110, but grow a large_dir directory past 2GB of
+# directory data, creating 60000-link batches from every client until the
+# size limit is hit or the ETA exceeds 20 minutes (then skip, not fail).
+test_111() {
+ [[ "$mds1_FSTYPE" != ldiskfs ]] &&
+ skip "Only applicable to ldiskfs-based MDTs"
+
+ is_dm_flakey_dev $SINGLEMDS $(mdsdevname 1) &&
+ skip "This test can not be executed on flakey dev"
+
+ do_facet $SINGLEMDS $DEBUGFS -w -R supported_features |grep large_dir ||
+ skip "large_dir option is not supported on MDS"
+
+ do_facet ost1 $DEBUGFS -w -R supported_features | grep large_dir ||
+ skip "large_dir option is not supported on OSS"
+
+ # cleanup before changing target counts
+ cleanup
+ local old_mdscount=$MDSCOUNT
+ local old_ostcount=$OSTCOUNT
+ local old_mdssize=$MDSSIZE
+ local replace=""
+ stack_trap "MDSSIZE=$MDSSIZE MDSCOUNT=$MDSCOUNT OSTCOUNT=$OSTCOUNT" EXIT
+ MDSCOUNT=1
+ OSTCOUNT=1
+ (( MDSSIZE < 2400000 )) && MDSSIZE=2400000 # need at least 2.4GB
+
+ local mdsdev=$(mdsdevname 1)
+ combined_mgs_mds || replace=" --replace "
+ local opts="$(mkfs_opts mds1 $(mdsdevname 1)) \
+ $replace --reformat $(mdsdevname 1) $(mdsvdevname 1)"
+ # inject large_dir into --mkfsoptions, adding the option if absent
+ if [[ $opts != *large_dir* ]]; then
+ if [[ $opts != *mkfsoptions* ]]; then
+ opts+=" --mkfsoptions=\\\"-O large_dir -i 1048576 \\\" "
+ else
+ opts="${opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O large_dir -i 1048576 }"
+ fi
+ fi
+ echo "MDT params: $opts"
+ load_modules
+ combined_mgs_mds || start_mgs
+ __touch_device mds 1
+ add mds1 $opts || error "add mds1 failed with new params"
+ start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+
+ opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+ $replace --reformat $(ostdevname 1) $(ostvdevname 1)"
+ if [[ $opts != *large_dir* ]]; then
+ if [[ $opts != *mkfsoptions* ]]; then
+ opts+=" --mkfsoptions=\\\"-O large_dir \\\""
+ else
+ opts="${opts//--mkfsoptions=\\\"/ --mkfsoptions=\\\"-O large_dir }"
+ fi
+ fi
+ echo "OST params: $opts"
+ __touch_device ost 1
+ add ost1 $opts || error "add ost1 failed with new params"
+ start ost1 $(ostdevname 1) $OST_MOUNT_OPTS
+
+ MOUNT_2=yes mountcli
+ mkdir $DIR/$tdir || error "cannot create $DIR/$tdir"
+ lfs df $DIR/$tdir
+ lfs df -i $DIR/$tdir
+
+ local group=0
+
+ local start=$SECONDS
+ local dirsize=0
+ local dirmax=$((2 << 30))
+ local needskip=0
+ local taken=0
+ local rate=0
+ local left=0
+ local num=0
+ # keep creating until the directory exceeds 2GB or the ETA is too long
+ while (( !needskip & dirsize < dirmax )); do
+ local pids=""
+
+ # two parallel createmany batches per client, one per mount
+ for cli in ${CLIENTS//,/ }; do
+ local len=$((253 - $(wc -c <<<"$cli-$group-60000-")))
+ local target=$cli-$group
+ local long=$DIR/$tdir/$target-$(generate_name $len)-
+
+ RPWD=$DIR/$tdir do_node $cli touch $target ||
+ error "creating $target failed"
+ echo "creating 60000 hardlinks to $target"
+ RPWD=$DIR/$tdir do_node $cli createmany -l $target $long 60000 &
+ pids+=" $!"
+
+ group=$((group + 1))
+ target=$cli-$group
+ long=$DIR2/$tdir/$target-$(generate_name $len)-
+
+ RPWD=$DIR2/$tdir do_node $cli touch $target ||
+ error "creating $target failed"
+ echo "creating 60000 hardlinks to $target"
+ RPWD=$DIR2/$tdir do_node $cli createmany -l $target $long 60000 &
+ pids+=" $!"
+
+ group=$((group + 1))
+ done
+ echo "waiting for PIDs$pids to complete"
+ wait $pids || error "createmany failed after $group groups"
+ dirsize=$(stat -c %s $DIR/$tdir)
+ taken=$((SECONDS - start))
+ rate=$((dirsize / taken))
+ left=$(((dirmax - dirsize) / rate))
+ num=$((group * 60000))
+ echo "estimate ${left}s left after $num files / ${taken}s"
+ # if the estimated time remaining is too large (it may change
+ # over time as the create rate is not constant) then exit
+ # without declaring a failure.
+ (( left > 1200 )) && needskip=1
+ done
+
+ umount_client $MOUNT2 -f
+ cleanup
+
+ (( $needskip )) && skip "ETA ${left}s after $num files / ${taken}s is too long"
+
+ run_e2fsck $(facet_active_host mds1) $(mdsdevname 1) -n
+ MDSCOUNT=$old_mdscount
+ OSTCOUNT=$old_ostcount
+ MDSSIZE=$old_mdssize
+}
+run_test 111 "Adding large_dir with over 2GB directory"
+
+# test_112: mount ost2 with -o no_precreate and verify that objects can
+# not be created on it until no_precreate is cleared, while ost1 keeps
+# working normally.
+test_112() {
+ start_mds || error "MDS start failed"
+ start_ost || error "OSS start failed"
+ echo "start ost2 service on $(facet_active_host ost2)"
+ start ost2 $(ostdevname 2) $(csa_add "$OST_MOUNT_OPTS" -o no_precreate) ||
+ error "start ost2 facet failed"
+ local val=$(do_facet ost2 \
+ "$LCTL get_param -n obdfilter.$FSNAME-OST0001*.no_precreate")
+ (( $val == 1 )) || error "obdfilter.$FSNAME-OST0001*.no_precreate=$val"
+
+ mount_client $MOUNT || error "mount client failed"
+ wait_osc_import_state client ost2 FULL
+
+ $LFS setstripe -i 0 $DIR/$tfile.0 ||
+ error "problem creating $tfile.0 on OST0000"
+ # creating on OST0001 must fail while no_precreate is set
+ $LFS setstripe -i 1 $DIR/$tfile.1 && $LFS getstripe $DIR/$tfile.1 &&
+ (( $($LFS getstripe -i $DIR/$tfile.1) == 1 )) &&
+ error "allowed to create $tfile.1 on OST0001"
+ do_facet ost2 $LCTL set_param obdfilter.*.no_precreate=0
+ sleep_maxage
+ $LFS setstripe -i 1 $DIR/$tfile.2 ||
+ error "failed to create $tfile.2 on ost1 facet"
+ stop_ost2 || error "stop ost2 facet failed"
+ cleanup
+}
+run_test 112 "mount OST with nocreate option"
+
+# cleanup_115(): stop all targets and remove the sparse MDT image created
+# by test_115; clears the trap installed there.
+cleanup_115()
+{
+ trap 0
+ stopall
+ rm -f $TMP/$tdir/lustre-mdt
+}
+
+# test_115: format a 3072GB sparse loopback MDT with ~2.25B inodes, force
+# inode allocation above 2^31 via inode_goal, and verify that a large
+# (16th) xattr on such a high-numbered inode is stored and visible.
+test_115() {
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ fi
+
+ local dbfs_ver=$(do_facet $SINGLEMDS $DEBUGFS -V 2>&1)
+
+ echo "debugfs version: $dbfs_ver"
+ echo "$dbfs_ver" | egrep -w "1.44.3.wc1|1.44.5.wc1|1.45.2.wc1" &&
+ skip_env "This version of debugfs doesn't show inode number"
+
+ is_dm_flakey_dev $SINGLEMDS $(mdsdevname 1) &&
+ skip "This test can not be executed on flakey dev"
+
+ IMAGESIZE=$((3072 << 30)) # 3072 GiB
+
+ stopall
+ # We need MDT size 3072GB, because it is smallest
+ # partition that can store 2B inodes
+ do_facet $SINGLEMDS "mkdir -p $TMP/$tdir"
+ local mdsimgname=$TMP/$tdir/lustre-mdt
+ do_facet $SINGLEMDS "rm -f $mdsimgname"
+ do_facet $SINGLEMDS "touch $mdsimgname"
+ trap cleanup_115 RETURN EXIT
+ do_facet $SINGLEMDS "$TRUNCATE $mdsimgname $IMAGESIZE" ||
+ skip "Backend FS doesn't support sparse files"
+ local mdsdev=$(do_facet $SINGLEMDS "losetup -f")
+ do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname"
+
+ local mds_opts="$(mkfs_opts mds1 $(mdsdevname 1)) --device-size=$IMAGESIZE \
+ --mkfsoptions='-O ea_inode,^resize_inode,meta_bg \
+ -N 2247484000 -E lazy_itable_init'"
+ add mds1 $mds_opts --mgs --reformat $mdsdev ||
+ skip_env "format large MDT failed"
+ opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+ $replace --reformat $(ostdevname 1) $(ostvdevname 1)"
+ add ost1 $opts || error "add ost1 failed with new params"
+ start $SINGLEMDS $mdsdev $MDS_MOUNT_OPTS || error "start MDS failed"
+ start_ost || error "start OSS failed"
+ mount_client $MOUNT || error "mount client failed"
+
+ mkdir -p $DIR/$tdir || error "mkdir $DIR/$tdir fail"
+ goal="/sys/fs/ldiskfs/$(basename $mdsdev)/inode_goal"
+echo goal: $goal
+ # 2147483648 is 0x80000000
+ do_facet $SINGLEMDS "echo 2147483648 >> $goal; grep . $goal"
+ touch $DIR/$tdir/$tfile
+
+ # attrs from 1 to 15 go to block, 16th - to inode
+ for i in {1..16}; do
+ local nm="trusted.ea$i"
+ setfattr -n $nm -v $(printf "xattr%0250d" $i) $DIR/$tdir/$tfile
+ done
+
+ # inode <2147483649> trusted.ea16 (255)
+ local inode_num=$(do_facet $SINGLEMDS \
+ "$DEBUGFS -c -R 'stat ROOT/$tdir/$tfile' $mdsdev" |
+ awk '/ea16/ { print $2 }' |
+ sed -e 's/>//' -e 's/<//' -e 's/\"//')
+ echo "inode num: $inode_num"
+ [ $inode_num -ge 2147483648 ] || error "inode $inode_num too small"
+ do_facet $SINGLEMDS "losetup -d $mdsdev"
+ cleanup_115
+}
+run_test 115 "Access large xattr with inodes number over 2TB"
+
+# test_116: verify that mkfs of a very large (17TB) MDT image, hosted on a
+# temporary sparse xfs filesystem, succeeds and enables the extent feature.
+test_116() {
+ [ "$mds1_FSTYPE" != ldiskfs ] && skip "ldiskfs only test"
+ [ "$MDS1_VERSION" -lt $(version_code 2.10.59) ] &&
+ skip "Need server version at least 2.10.59"
+ do_facet $SINGLEMDS which mkfs.xfs ||
+ skip_env "No mkfs.xfs installed"
+
+ stopall
+ load_modules
+
+ local tmpmnt=$TMP/$tdir
+ local mdtimg=$tfile-mdt0
+
+ do_facet $SINGLEMDS mkdir -p $tmpmnt
+ stack_trap "do_facet $SINGLEMDS rmdir $tmpmnt" EXIT
+
+ do_facet $SINGLEMDS touch $TMP/$mdtimg
+ stack_trap "do_facet $SINGLEMDS rm -f $TMP/$mdtimg" EXIT
+ # sparse 1TB xfs image to host the 17TB sparse MDT file
+ do_facet $SINGLEMDS mkfs -t xfs -d file,size=1t,name=$TMP/$mdtimg ||
+ error "mkfs temporary xfs image"
+
+ do_facet $SINGLEMDS mount $TMP/$mdtimg $tmpmnt ||
+ error "mount temporary xfs image"
+ stack_trap "do_facet $SINGLEMDS umount $tmpmnt" EXIT
+ local old_mdssize=$MDSSIZE
+ local old_mdsisize=$MDSISIZE
+
+ # temporarily inflate size/inode-ratio so mkfs_opts produces 17T options
+ MDSSIZE=$((17 * 1024 * 1024 * 1024)) # 17T MDT
+ MDSISIZE=$((16 << 20))
+ local opts17t="$(mkfs_opts $SINGLEMDS)"
+
+ MDSSIZE=$old_mdssize
+ MDSISIZE=$old_mdsisize
+ do_facet $SINGLEMDS $MKFS $opts17t $tmpmnt/$mdtimg ||
+ error "failed to mkfs for $tmpmnt/$mdtimg"
+
+ do_facet $SINGLEMDS $TUNE2FS -l $tmpmnt/$mdtimg |
+ grep -qw 'features.*extent' || error "extent should be enabled"
+ reformat_and_config
+}
+run_test 116 "big size MDT support"
+
+# test_117: after switching the NRS policy to fifo, reading the tbf rule
+# parameter must return an error rather than bogus data.
+test_117() {
+ setup
+ do_facet ost1 "$LCTL set_param ost.OSS.ost_io.nrs_policies=fifo"
+ do_facet ost1 "$LCTL get_param -n ost.OSS.ost_io.nrs_tbf_rule" &&
+ error "get_param should fail"
+ cleanup || error "cleanup failed with rc $?"
+}
+run_test 117 "lctl get_param return errors properly"
+
+# test_120 (LU-11130): a rename of a symlink across MDTs must not leave a
+# corrupted symlink behind; verified with a final read-only e2fsck on MDT1.
+test_120() { # LU-11130
+ [ "$MDSCOUNT" -lt 2 ] && skip "mdt count < 2"
+ [ "$mds1_FSTYPE" != ldiskfs ] &&
+ skip "ldiskfs only test"
+ [ "$MDS1_VERSION" -lt $(version_code 2.11.56) ] &&
+ skip "Need DNE2 capable MD target with LU-11130 fix"
+
+ setup
+
+ local mds1host=$(facet_active_host mds1)
+ local mds1dev=$(mdsdevname 1)
+
+ # parent dir on MDT1, subdir on MDT0 => rename crosses targets
+ $LFS mkdir -i 1 $DIR/$tdir
+ $LFS mkdir -i 0 $DIR/$tdir/mds1dir
+
+ ln -s foo $DIR/$tdir/bar
+ mv $DIR/$tdir/bar $DIR/$tdir/mds1dir/bar2 ||
+ error "cross-target rename failed"
+
+ stopall
+
+ run_e2fsck $mds1host $mds1dev "-n"
+}
+run_test 120 "cross-target rename should not create bad symlinks"
+
+# test_122: with OBD_FAIL_OFD_SET_OID injected on ost1, exhausting the
+# current object sequence must still allow a new sequence to be allocated
+# (1000 creates succeed after the fail_loc is cleared).
+test_122() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+ [[ "$OST1_VERSION" -ge $(version_code 2.11.53) ]] ||
+ skip "Need OST version at least 2.11.53"
+
+ reformat
+ LOAD_MODULES_REMOTE=true load_modules
+#define OBD_FAIL_OFD_SET_OID 0x1e0
+ do_facet ost1 $LCTL set_param fail_loc=0x00001e0
+
+ setup_noconfig
+ $LFS mkdir -i1 -c1 $DIR/$tdir
+ $LFS setstripe -i0 -c1 $DIR/$tdir
+ do_facet ost1 $LCTL set_param fail_loc=0
+ createmany -o $DIR/$tdir/file_ 1000 ||
+ error "Fail to create a new sequence"
+
+ cleanup
+}
+run_test 122 "Check OST sequence update"
+
+# test_123aa: llog_print must accept both the legacy "$<logname>" logid
+# syntax (with start/end indices) and the plain log name.
+test_123aa() {
+ remote_mgs_nodsh && skip "remote MGS with nodsh"
+ [ -d $MOUNT/.lustre ] || setup
+
+ # test old logid format until removal from llog_ioctl.c::str2logid()
+ if [ $MGS_VERSION -lt $(version_code 3.1.53) ]; then
+ do_facet mgs $LCTL dl | grep MGS
+ do_facet mgs "$LCTL --device %MGS llog_print \
+ \\\\\\\$$FSNAME-client 1 10" ||
+ error "old llog_print failed"
+ fi
+
+ # test new logid format
+ if [ $MGS_VERSION -ge $(version_code 2.9.53) ]; then
+ do_facet mgs "$LCTL --device MGS llog_print $FSNAME-client" ||
+ error "new llog_print failed"
+ fi
+}
+run_test 123aa "llog_print works with FIDs and simple names"
+
+# test_123ab: a value stored with "set_param -P" must show up correctly in
+# the "params" llog as printed by llog_print.
+test_123ab() {
+ remote_mgs_nodsh && skip "remote MGS with nodsh"
+ [[ $MGS_VERSION -gt $(version_code 2.11.51) ]] ||
+ skip "Need server with working llog_print support"
+
+ [ -d $MOUNT/.lustre ] || setup
+
+ local yaml
+ local orig_val
+
+ orig_val=$(do_facet mgs $LCTL get_param jobid_name)
+ do_facet mgs $LCTL set_param -P jobid_name="TESTNAME"
+
+ yaml=$(do_facet mgs $LCTL --device MGS llog_print params |
+ grep jobid_name | tail -n 1)
+
+ # fields 10/12 of the yaml record line are "param:" name and value;
+ # the param field keeps its trailing comma
+ local param=$(awk '{ print $10 }' <<< "$yaml")
+ local val=$(awk '{ print $12 }' <<< "$yaml")
+ #return to the default
+ do_facet mgs $LCTL set_param -P jobid_name=$orig_val
+ [ $val = "TESTNAME" ] || error "bad value: $val"
+ [ $param = "jobid_name," ] || error "Bad param: $param"
+}
+run_test 123ab "llog_print params output values from set_param -P"
+
+# test_123ac (LU-11566): llog_print --start/--end must only emit records
+# whose index lies within the requested [start, end] range.
+test_123ac() { # LU-11566
+ remote_mgs_nodsh && skip "remote MGS with nodsh"
+ do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start ||
+ skip "Need 'lctl llog_print --start' on MGS"
+
+ local start=10
+ local end=50
+
+ [ -d $MOUNT/.lustre ] || setup
+
+ # - { index: 10, event: add_uuid, nid: 192.168.20.1@tcp(0x20000c0a81401,
+ # node: 192.168.20.1@tcp }
+ do_facet mgs $LCTL --device MGS \
+ llog_print --start $start --end $end $FSNAME-client | tr -d , |
+ while read DASH BRACE INDEX idx EVENT BLAH BLAH BLAH; do
+ (( idx >= start )) || error "llog_print index $idx < $start"
+ (( idx <= end )) || error "llog_print index $idx > $end"
+ done
+}
+run_test 123ac "llog_print with --start and --end"
+
+# test_123ad (LU-11566): llog_print must print every record — the index of
+# the last printed record has to match llog_info's last_index.
+test_123ad() { # LU-11566
+ remote_mgs_nodsh && skip "remote MGS with nodsh"
+ # older versions of lctl may not print all records properly
+ do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start ||
+ skip "Need 'lctl llog_print --start' on MGS"
+
+ [ -d $MOUNT/.lustre ] || setup
+
+ # append a new record, to avoid issues if last record was cancelled
+ local old=$($LCTL get_param -n osc.*-OST0000-*.max_dirty_mb | head -1)
+ do_facet mgs $LCTL conf_param $FSNAME-OST0000.osc.max_dirty_mb=$old
+
+ # logid: [0x3:0xa:0x0]:0
+ # flags: 4 (plain)
+ # records_count: 72
+ # last_index: 72
+ local num=$(do_facet mgs $LCTL --device MGS llog_info $FSNAME-client |
+ awk '/last_index:/ { print $2 - 1 }')
+
+ # - { index: 71, event: set_timeout, num: 0x14, param: sys.timeout=20 }
+ local last=$(do_facet mgs $LCTL --device MGS llog_print $FSNAME-client |
+ tail -1 | awk '{ print $4 }' | tr -d , )
+ (( last == num )) || error "llog_print only showed $last/$num records"
+}
+run_test 123ad "llog_print shows all records"
+
+# test_123ae (LU-11566): llog_cancel must remove exactly the requested
+# record, both with the new --log_id/--log_idx options (params log) and
+# with the legacy positional arguments (client config log).
+test_123ae() { # LU-11566
+ remote_mgs_nodsh && skip "remote MGS with nodsh"
+ [ -d $MOUNT/.lustre ] || setupall
+
+ local max=$($LCTL get_param -n osc.*-OST0000-*.max_dirty_mb | head -1)
+
+ if do_facet mgs "$LCTL help llog_cancel" 2>&1| grep -q -- --log_id; then
+ # save one set_param -P record in case none exist
+ do_facet mgs $LCTL set_param -P osc.*.max_dirty_mb=$max
+
+ local log=params
+ local orig=$(do_facet mgs $LCTL --device MGS llog_print $log |
+ tail -1 | awk '{ print $4 }' | tr -d , )
+ do_facet mgs $LCTL set_param -P osc.*.max_dirty_mb=$max
+ do_facet mgs $LCTL --device MGS llog_print $log | tail -1 |
+ grep "parameter: osc.*.max_dirty_mb" ||
+ error "new set_param -P wasn't stored in params log"
+
+ # - { index: 71, event: set_param, device: general,
+ # param: osc.*.max_dirty_mb, value: 256 }
+ local id=$(do_facet mgs $LCTL --device MGS llog_print $log |
+ tail -1 | awk '{ print $4 }' | tr -d , )
+
+ # cancel the record just added; count must drop back to orig
+ do_facet mgs $LCTL --device MGS llog_cancel $log --log_idx=$id
+ local new=$(do_facet mgs $LCTL --device MGS llog_print $log |
+ tail -1 | awk '{ print $4 }' | tr -d , )
+ (( new == orig )) ||
+ error "new llog_cancel now $new, not at $orig records"
+ fi
+
+ # test old positional parameters for a while still
+ if [ "$MGS_VERSION" -le $(version_code 3.1.53) ]; then
+ log=$FSNAME-client
+ orig=$(do_facet mgs $LCTL --device MGS llog_print $log |
+ tail -1 | awk '{ print $4 }' | tr -d , )
+ do_facet mgs $LCTL conf_param $FSNAME-OST0000.osc.max_dirty_mb=$max
+ do_facet mgs $LCTL --device MGS llog_print $log |
+ tail -1 | grep "parameter: osc.max_dirty_mb" ||
+ error "old conf_param wasn't stored in params log"
+
+ # - { index: 71, event: conf_param, device: testfs-OST0000-osc,
+ # param: osc.max_dirty_mb=256 }
+ id=$(do_facet mgs $LCTL --device MGS llog_print $log |
+ tail -1 | awk '{ print $4 }' | tr -d , )
+ do_facet mgs $LCTL --device MGS llog_cancel $log $id
+ new=$(do_facet mgs $LCTL --device MGS llog_print $log |
+ tail -1 | awk '{ print $4 }' | tr -d , )
+ (( new == orig )) ||
+ error "old llog_cancel now $new, not at $orig records"
+ fi
+}
+run_test 123ae "llog_cancel can cancel requested record"
+
+# test_123F: dump all config logs to a yaml file, writeconf everything,
+# then restore the configuration with "set_param -F <yaml>" and verify a
+# previously-set -P parameter survived the round trip.
+test_123F() {
+ remote_mgs_nodsh && skip "remote MGS with nodsh"
+
+ [ -d $MOUNT/.lustre ] || setup
+ local yaml_file="$TMP/$tfile.yaml"
+ do_facet mgs rm "$yaml_file"
+ local cfgfiles=$(do_facet mgs "lctl --device MGS llog_catlist" |
+ sed 's/config_log://')
+
+ # set jobid_var to a different value for test
+ local orig_val=$(do_facet mgs $LCTL get_param jobid_var)
+ do_facet mgs $LCTL set_param -P jobid_var="TESTNAME"
+
+ for i in $cfgfiles params; do
+ do_facet mgs "lctl --device MGS llog_print ${i} >> $yaml_file"
+ done
+
+ echo "Unmounting FS"
+ stopall
+ echo "Writeconf"
+ writeconf_all
+ echo "Remounting"
+ mountmgs
+ mountmds
+ mountoss
+ mountcli
+
+ # Reapply the config from before
+ echo "Setting configuration parameters"
+ do_facet mgs "lctl set_param -F $yaml_file"
+
+ local set_val=$(do_facet mgs $LCTL get_param jobid_var)
+ do_facet mgs $LCTL set_param -P $orig_val
+
+ [ $set_val == "jobid_var=TESTNAME" ] ||
+ error "$set_val is not TESTNAME"
+
+ do_facet mgs rm "$yaml_file"
+ cleanup
+}
+run_test 123F "clear and reset all parameters using set_param -F"
+
+# test_124: register a failover NID for MDT0001 via replace_nids and check
+# that failover (fail mds2) plus a remote setdirstripe still work.
+test_124()
+{
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+ [ -z $mds2failover_HOST ] && skip "needs MDT failover setup"
+
+ setup
+ cleanup
+
+ load_modules
+ if combined_mgs_mds; then
+ # replace_nids needs the MGS up but the MDT service stopped
+ start_mdt 1 "-o nosvc" ||
+ error "starting mds with nosvc option failed"
+ fi
+ local nid=$(do_facet mds2 $LCTL list_nids | head -1)
+ local failover_nid=$(do_node $mds2failover_HOST $LCTL list_nids | head -1)
+ do_facet mgs $LCTL replace_nids $FSNAME-MDT0001 $nid:$failover_nid ||
+ error "replace_nids execution error"
+
+ if combined_mgs_mds; then
+ stop_mdt 1
+ fi
+
+ setup
+ fail mds2
+ echo "lfs setdirstripe"
+ $LFS setdirstripe -i 1 $MOUNT/$tdir || error "setdirstirpe error"
+ echo ok
+}
+run_test 124 "check failover after replace_nids"
+
+# get_max_sectors_kb(): print the max_sectors_kb queue limit of $2 on
+# facet $1 (empty output if the sysfs file does not exist).
+get_max_sectors_kb() {
+ local facet="$1"
+ local device="$2"
+ local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+ local max_sectors_path="/sys/block/${dev_base}/queue/max_sectors_kb"
+
+ do_facet ${facet} "[[ -e ${max_sectors_path} ]] &&
+ cat ${max_sectors_path}"
+}
+
+# get_max_hw_sectors_kb(): print the hardware max_hw_sectors_kb limit of
+# device $2 on facet $1 (empty output if the sysfs file does not exist).
+get_max_hw_sectors_kb() {
+ local facet="$1"
+ local device="$2"
+ local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+ local max_hw_path="/sys/block/${dev_base}/queue/max_hw_sectors_kb"
+
+ do_facet ${facet} "[[ -e ${max_hw_path} ]] && cat ${max_hw_path}"
+}
+
+set_max_sectors_kb() {
+ local facet="$1"
+ local device="$2"
+ local value="$3"
+ local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+ local max_sectors_path="/sys/block/${dev_base}/queue/max_sectors_kb"
+
+ do_facet ${facet} "[[ -e ${max_sectors_path} ]] &&
+ echo ${value} > ${max_sectors_path}"
+ rc=$?
+
+ [[ $rc -ne 0 ]] && echo "Failed to set ${max_sectors_path} to ${value}"
+
+ return $rc
+}
+
+# Return 0 if all slave devices have max_sectors_kb == max_hw_sectors_kb
+# Otherwise return > 0
+check_slaves_max_sectors_kb()
+{
+ local facet="$1"
+ local device="$2"
+ local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+ local slaves_dir=/sys/block/${dev_base}/slaves
+ local slave_devices=$(do_facet ${facet} "ls ${slaves_dir} 2>/dev/null")
+ [[ -z ${slave_devices} ]] && return 0
+
+ local slave max_sectors new_max_sectors max_hw_sectors path
+ local rc=0
+ for slave in ${slave_devices}; do
+ path="/dev/${slave}"
+ ! is_blkdev ${facet} ${path} && continue
+ max_sectors=$(get_max_sectors_kb ${facet} ${path})
+ max_hw_sectors=$(get_max_hw_sectors_kb ${facet} ${path})
+ new_max_sectors=${max_hw_sectors}
+ [[ ${new_max_sectors} -gt ${RQ_SIZE_LIMIT} ]] &&
+ new_max_sectors=${RQ_SIZE_LIMIT}
+
+ if [[ ${max_sectors} -ne ${new_max_sectors} ]]; then
+ echo "${path} ${max_sectors} ${new_max_sectors}"
+ ((rc++))
+ fi
+ check_slaves_max_sectors_kb ${facet} ${path}
+ ((rc + $?))
+ done
+
+ return $rc
+}
+
+test_125()
+{
+ local facet_list="mgs mds1 ost1"
+ combined_mgs_mds && facet_list="mgs ost1"
+
+ local facet
+ for facet in ${facet_list}; do
+ [[ $(facet_fstype ${facet}) != ldiskfs ]] &&
+ skip "ldiskfs only test" &&
+ return 0
+ ! is_blkdev ${facet} $(facet_device ${facet}) &&
+ skip "requires all real devices" &&
+ return 0
+ done
+
+ local rc=0
+ # We don't increase IO request size limit past 16MB. See comments in
+ # lustre/utils/libmount_utils_ldiskfs.c:tune_max_sectors_kb()
+ RQ_SIZE_LIMIT=$((16 * 1024))
+ local device old_max_sectors new_max_sectors max_hw_sectors
+ for facet in ${facet_list}; do
+ device=$(facet_device ${facet})
+ old_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+ max_hw_sectors=$(get_max_hw_sectors_kb ${facet} ${device})
+
+ # The expected value after l_tunedisk is executed
+ new_max_sectors=$old_max_sectors
+ [[ ${new_max_sectors_kb} -gt ${RQ_SIZE_LIMIT} ]] &&
+ new_max_sectors_kb=${RQ_SIZE_LIMIT}
+
+ # Ensure the current value of max_sectors_kb does not equal
+ # max_hw_sectors_kb, so we can tell whether l_tunedisk did
+ # anything
+ set_max_sectors_kb ${facet} ${device} $((new_max_sectors - 1))
+
+ # Value before l_tunedisk
+ local pre_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+ if [[ ${pre_max_sectors} -ne $((new_max_sectors - 1)) ]]; then
+ echo "unable to satsify test pre-condition:"
+ echo "${pre_max_sectors} != $((new_max_sectors - 1))"
+ ((rc++))
+ continue
+ fi
+
+ echo "Before: ${facet} ${device} ${pre_max_sectors} ${max_hw_sectors}"
+
+ do_facet ${facet} "libtool execute l_tunedisk ${device}"
+
+ # Value after l_tunedisk
+ local post_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+
+ echo "After: ${facet} ${device} ${post_max_sectors} ${max_hw_sectors}"
+
+ if [[ ${facet} != ost1 ]]; then
+ if [[ ${post_max_sectors} -ne ${pre_max_sectors} ]]; then
+ echo "l_tunedisk modified max_sectors_kb of ${facet}"
+ ((rc++))
+ fi
+
+ set_max_sectors_kb ${facet} ${device} ${old_max_sectors}
+ else
+ if [[ ${post_max_sectors} -eq ${pre_max_sectors} ]]; then
+ echo "l_tunedisk failed to modify max_sectors_kb of ${facet}"
+ ((rc++))
+ fi
+
+ check_slaves_max_sectors_kb ${facet} ${device} ||
+ ((rc++))
+ fi
+ done
+
+ return $rc
+}
+run_test 125 "check l_tunedisk only tunes OSTs and their slave devices"
+