+# Verify mkfs support for a very large (17TB) ldiskfs MDT.
+# A sparse file on a scratch XFS filesystem backs the oversized MDT
+# image so no real 17TB device is needed.
+test_116() {
+	[ "$mds1_FSTYPE" != ldiskfs ] && skip "ldiskfs only test"
+	[ "$MDS1_VERSION" -lt $(version_code 2.10.59) ] &&
+		skip "Need server version at least 2.10.59"
+	do_facet $SINGLEMDS which mkfs.xfs ||
+		skip_env "No mkfs.xfs installed"
+
+	# tear down any running filesystem; we format from scratch below
+	stopall
+	load_modules
+
+	local tmpmnt=$TMP/$tdir
+	local mdtimg=$tfile-mdt0
+
+	do_facet $SINGLEMDS mkdir -p $tmpmnt
+	stack_trap "do_facet $SINGLEMDS rmdir $tmpmnt" EXIT
+
+	# scratch XFS image (sparse, 1TB) to host the sparse MDT file
+	do_facet $SINGLEMDS touch $TMP/$mdtimg
+	stack_trap "do_facet $SINGLEMDS rm -f $TMP/$mdtimg" EXIT
+	do_facet $SINGLEMDS mkfs -t xfs -d file,size=1t,name=$TMP/$mdtimg ||
+		error "mkfs temporary xfs image"
+
+	do_facet $SINGLEMDS mount $TMP/$mdtimg $tmpmnt ||
+		error "mount temporary xfs image"
+	stack_trap "do_facet $SINGLEMDS umount $tmpmnt" EXIT
+	local old_mdssize=$MDSSIZE
+	local old_mdsisize=$MDSISIZE
+
+	# temporarily inflate MDSSIZE (KB units) and MDSISIZE so that
+	# mkfs_opts generates options for a 17TB target
+	MDSSIZE=$((17 * 1024 * 1024 * 1024)) # 17T MDT
+	MDSISIZE=$((16 << 20))
+	local opts17t="$(mkfs_opts $SINGLEMDS)"
+
+	MDSSIZE=$old_mdssize
+	MDSISIZE=$old_mdsisize
+	do_facet $SINGLEMDS $MKFS $opts17t $tmpmnt/$mdtimg ||
+		error "failed to mkfs for $tmpmnt/$mdtimg"
+
+	# ldiskfs targets >16TB require the extent feature to be enabled
+	do_facet $SINGLEMDS $TUNE2FS -l $tmpmnt/$mdtimg |
+		grep -qw 'features.*extent' || error "extent should be enabled"
+	reformat_and_config
+}
+run_test 116 "big size MDT support"
+
+test_117() {
+ setup
+ do_facet ost1 "$LCTL set_param ost.OSS.ost_io.nrs_policies=fifo"
+ do_facet ost1 "$LCTL get_param -n ost.OSS.ost_io.nrs_tbf_rule" &&
+ error "get_param should fail"
+ cleanup || error "cleanup failed with rc $?"
+}
+run_test 117 "lctl get_param return errors properly"
+
+# LU-11130: a cross-MDT rename of a symlink must not leave behind a
+# corrupt symlink on the source MDT; verified with a read-only e2fsck.
+test_120() { # LU-11130
+	[ "$MDSCOUNT" -lt 2 ] && skip "mdt count < 2"
+	[ "$mds1_FSTYPE" != ldiskfs ] &&
+		skip "ldiskfs only test"
+	[ "$MDS1_VERSION" -lt $(version_code 2.11.56) ] &&
+		skip "Need DNE2 capable MD target with LU-11130 fix"
+
+	setup
+
+	local mds1host=$(facet_active_host mds1)
+	local mds1dev=$(mdsdevname 1)
+
+	# parent dir on MDT1, child dir on MDT0 => the mv below crosses MDTs
+	$LFS mkdir -i 1 $DIR/$tdir
+	$LFS mkdir -i 0 $DIR/$tdir/mds1dir
+
+	ln -s foo $DIR/$tdir/bar
+	mv $DIR/$tdir/bar $DIR/$tdir/mds1dir/bar2 ||
+		error "cross-target rename failed"
+
+	stopall
+
+	# read-only ("-n") fsck of MDT0 detects any bad symlink left behind
+	run_e2fsck $mds1host $mds1dev "-n"
+}
+run_test 120 "cross-target rename should not create bad symlinks"
+
+# Check OST object sequence update: with OBD_FAIL_OFD_SET_OID armed the
+# OFD fails to store the last-object ID, so subsequent creates must
+# allocate a new sequence, which createmany then exercises.
+test_122() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+	[[ "$OST1_VERSION" -ge $(version_code 2.11.53) ]] ||
+		skip "Need OST version at least 2.11.53"
+
+	reformat
+	LOAD_MODULES_REMOTE=true load_modules
+#define OBD_FAIL_OFD_SET_OID 0x1e0
+	do_facet ost1 $LCTL set_param fail_loc=0x00001e0
+
+	setup_noconfig
+	# single-stripe dir on MDT1 with objects placed on OST0
+	$LFS mkdir -i1 -c1 $DIR/$tdir
+	$LFS setstripe -i0 -c1 $DIR/$tdir
+	do_facet ost1 $LCTL set_param fail_loc=0
+	createmany -o $DIR/$tdir/file_ 1000 ||
+		error "Fail to create a new sequence"
+
+	cleanup
+}
+run_test 122 "Check OST sequence update"
+
+# llog_print must accept both the old logid syntax (escaped '$' name)
+# and the newer plain-name/FID syntax for naming a configuration log.
+test_123aa() {
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	[ -d $MOUNT/.lustre ] || setup
+
+	# test old logid format until removal from llog_ioctl.c::str2logid()
+	if [ $MGS_VERSION -lt $(version_code 3.1.53) ]; then
+		do_facet mgs $LCTL dl | grep MGS
+		# the stacked backslashes survive multiple shell expansions
+		# so that a literal '$' reaches lctl on the MGS node
+		do_facet mgs "$LCTL --device %MGS llog_print \
+			\\\\\\\$$FSNAME-client 1 10" ||
+			error "old llog_print failed"
+	fi
+
+	# test new logid format
+	if [ $MGS_VERSION -ge $(version_code 2.9.53) ]; then
+		do_facet mgs "$LCTL --device MGS llog_print $FSNAME-client" ||
+			error "new llog_print failed"
+	fi
+}
+run_test 123aa "llog_print works with FIDs and simple names"
+
+# Verify that a value stored via "lctl set_param -P" shows up correctly
+# in the MGS "params" llog as reported by llog_print.
+test_123ab() {
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	[[ $MGS_VERSION -gt $(version_code 2.11.51) ]] ||
+		skip "Need server with working llog_print support"
+
+	[ -d $MOUNT/.lustre ] || setup
+
+	local yaml
+	local orig_val
+
+	# get_param (no -n) returns "jobid_name=<value>", saved whole so it
+	# can be reapplied verbatim with set_param -P below
+	orig_val=$(do_facet mgs $LCTL get_param jobid_name)
+	do_facet mgs $LCTL set_param -P jobid_name="TESTNAME"
+
+	yaml=$(do_facet mgs $LCTL --device MGS llog_print params |
+		grep jobid_name | tail -n 1)
+
+	# yaml line looks like: - { index: N, event: set_param,
+	#   device: general, param: jobid_name, value: TESTNAME }
+	local param=$(awk '{ print $10 }' <<< "$yaml")
+	local val=$(awk '{ print $12 }' <<< "$yaml")
+	# return to the default; orig_val already carries "jobid_name=..."
+	# so it must not be prefixed again (was "jobid_name=$orig_val",
+	# which set jobid_name to the literal string "jobid_name=<old>")
+	do_facet mgs $LCTL set_param -P $orig_val
+	[ "$val" = "TESTNAME" ] || error "bad value: $val"
+	[ "$param" = "jobid_name," ] || error "Bad param: $param"
+}
+run_test 123ab "llog_print params output values from set_param -P"
+
+# LU-11566: llog_print --start/--end must restrict output to records
+# whose index lies inside the requested range.
+test_123ac() { # LU-11566
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start ||
+		skip "Need 'lctl llog_print --start' on MGS"
+
+	local start=10
+	local end=50
+
+	[ -d $MOUNT/.lustre ] || setup
+
+	# - { index: 10, event: add_uuid, nid: 192.168.20.1@tcp(0x20000c0a81401,
+	# node: 192.168.20.1@tcp }
+	# NOTE(review): the while loop runs in a pipeline subshell, so
+	# error() aborts only that subshell -- confirm this is intended
+	do_facet mgs $LCTL --device MGS \
+		llog_print --start $start --end $end $FSNAME-client | tr -d , |
+	while read DASH BRACE INDEX idx EVENT BLAH BLAH BLAH; do
+		(( idx >= start )) || error "llog_print index $idx < $start"
+		(( idx <= end )) || error "llog_print index $idx > $end"
+	done
+}
+run_test 123ac "llog_print with --start and --end"
+
+# LU-11566: llog_print with no range must display every record, i.e. the
+# index of the last printed record equals llog_info's last_index.
+test_123ad() { # LU-11566
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	# older versions of lctl may not print all records properly
+	do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start ||
+		skip "Need 'lctl llog_print --start' on MGS"
+
+	[ -d $MOUNT/.lustre ] || setup
+
+	# append a new record, to avoid issues if last record was cancelled
+	local old=$($LCTL get_param -n osc.*-OST0000-*.max_dirty_mb | head -1)
+	do_facet mgs $LCTL conf_param $FSNAME-OST0000.osc.max_dirty_mb=$old
+
+	# sample llog_info output:
+	# logid: [0x3:0xa:0x0]:0
+	# flags: 4 (plain)
+	# records_count: 72
+	# last_index: 72
+	local num=$(do_facet mgs $LCTL --device MGS llog_info $FSNAME-client |
+		awk '/last_index:/ { print $2 - 1 }')
+
+	# - { index: 71, event: set_timeout, num: 0x14, param: sys.timeout=20 }
+	local last=$(do_facet mgs $LCTL --device MGS llog_print $FSNAME-client |
+		tail -1 | awk '{ print $4 }' | tr -d , )
+	(( last == num )) || error "llog_print only showed $last/$num records"
+}
+run_test 123ad "llog_print shows all records"
+
+# LU-11566: llog_cancel must remove exactly the requested record, both
+# via the new --log_idx option (params log) and via the old positional
+# argument syntax (client config log).
+test_123ae() { # LU-11566
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	[ -d $MOUNT/.lustre ] || setupall
+
+	local max=$($LCTL get_param -n osc.*-OST0000-*.max_dirty_mb | head -1)
+
+	if do_facet mgs "$LCTL help llog_cancel" 2>&1| grep -q -- --log_id; then
+		# save one set_param -P record in case none exist
+		do_facet mgs $LCTL set_param -P osc.*.max_dirty_mb=$max
+
+		local log=params
+		# index of the current last record, before appending another
+		local orig=$(do_facet mgs $LCTL --device MGS llog_print $log |
+			tail -1 | awk '{ print $4 }' | tr -d , )
+		do_facet mgs $LCTL set_param -P osc.*.max_dirty_mb=$max
+		do_facet mgs $LCTL --device MGS llog_print $log | tail -1 |
+			grep "parameter: osc.*.max_dirty_mb" ||
+			error "new set_param -P wasn't stored in params log"
+
+		# - { index: 71, event: set_param, device: general,
+		# param: osc.*.max_dirty_mb, value: 256 }
+		local id=$(do_facet mgs $LCTL --device MGS llog_print $log |
+			tail -1 | awk '{ print $4 }' | tr -d , )
+
+		# cancel the record just appended; count must drop back
+		do_facet mgs $LCTL --device MGS llog_cancel $log --log_idx=$id
+		local new=$(do_facet mgs $LCTL --device MGS llog_print $log |
+			tail -1 | awk '{ print $4 }' | tr -d , )
+		(( new == orig )) ||
+			error "new llog_cancel now $new, not at $orig records"
+	fi
+
+	# test old positional parameters for a while still
+	if [ "$MGS_VERSION" -le $(version_code 3.1.53) ]; then
+		log=$FSNAME-client
+		orig=$(do_facet mgs $LCTL --device MGS llog_print $log |
+			tail -1 | awk '{ print $4 }' | tr -d , )
+		do_facet mgs $LCTL conf_param $FSNAME-OST0000.osc.max_dirty_mb=$max
+		do_facet mgs $LCTL --device MGS llog_print $log |
+			tail -1 | grep "parameter: osc.max_dirty_mb" ||
+			error "old conf_param wasn't stored in params log"
+
+		# - { index: 71, event: conf_param, device: testfs-OST0000-osc,
+		# param: osc.max_dirty_mb=256 }
+		id=$(do_facet mgs $LCTL --device MGS llog_print $log |
+			tail -1 | awk '{ print $4 }' | tr -d , )
+		do_facet mgs $LCTL --device MGS llog_cancel $log $id
+		new=$(do_facet mgs $LCTL --device MGS llog_print $log |
+			tail -1 | awk '{ print $4 }' | tr -d , )
+		(( new == orig )) ||
+			error "old llog_cancel now $new, not at $orig records"
+	fi
+}
+run_test 123ae "llog_cancel can cancel requested record"
+
+# Dump every MGS config log to a YAML file, wipe the configuration with
+# writeconf, remount, then reapply the dump with "lctl set_param -F" and
+# verify a set_param -P value (jobid_var) was restored from it.
+test_123F() {
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+
+	[ -d $MOUNT/.lustre ] || setup
+	local yaml_file="$TMP/$tfile.yaml"
+	# remove any stale dump left over from a previous run
+	do_facet mgs rm "$yaml_file"
+	local cfgfiles=$(do_facet mgs "lctl --device MGS llog_catlist" |
+		sed 's/config_log://')
+
+	# set jobid_var to a different value for test
+	local orig_val=$(do_facet mgs $LCTL get_param jobid_var)
+	do_facet mgs $LCTL set_param -P jobid_var="TESTNAME"
+
+	for i in $cfgfiles params; do
+		do_facet mgs "lctl --device MGS llog_print ${i} >> $yaml_file"
+	done
+
+	echo "Unmounting FS"
+	stopall
+	echo "Writeconf"
+	writeconf_all
+	echo "Remounting"
+	mountmgs
+	mountmds
+	mountoss
+	mountcli
+
+	# Reapply the config from before
+	echo "Setting configuration parameters"
+	do_facet mgs "lctl set_param -F $yaml_file"
+
+	# orig_val holds "jobid_var=<old>", so -P restores the old setting
+	local set_val=$(do_facet mgs $LCTL get_param jobid_var)
+	do_facet mgs $LCTL set_param -P $orig_val
+
+	[ $set_val == "jobid_var=TESTNAME" ] ||
+		error "$set_val is not TESTNAME"
+
+	do_facet mgs rm "$yaml_file"
+	cleanup
+}
+run_test 123F "clear and reset all parameters using set_param -F"
+
+# Verify the MDT remains usable after its NIDs are replaced with a
+# primary:failover pair via "lctl replace_nids" and a failover occurs.
+test_124()
+{
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+	[ -z "$mds2failover_HOST" ] && skip "needs MDT failover setup"
+
+	setup
+	cleanup
+
+	load_modules
+	if combined_mgs_mds; then
+		# replace_nids needs the MGS up but the MDT service stopped
+		start_mdt 1 "-o nosvc" ||
+			error "starting mds with nosvc option failed"
+	fi
+	local nid=$(do_facet mds2 $LCTL list_nids | head -1)
+	local failover_nid=$(do_node $mds2failover_HOST $LCTL list_nids | head -1)
+	do_facet mgs $LCTL replace_nids $FSNAME-MDT0001 $nid:$failover_nid ||
+		error "replace_nids execution error"
+
+	if combined_mgs_mds; then
+		stop_mdt 1
+	fi
+
+	setup
+	fail mds2
+	echo "lfs setdirstripe"
+	# creating a dir on MDT1 proves the failover target is functional
+	$LFS setdirstripe -i 1 $MOUNT/$tdir || error "setdirstripe error"
+	echo ok
+}
+run_test 124 "check failover after replace_nids"
+
+# Echo the current max_sectors_kb of the block device backing "$2" on
+# facet "$1"; prints nothing if the sysfs attribute does not exist.
+get_max_sectors_kb() {
+	local tgt=$1
+	local dev=$2
+	local blkdev
+	local max_sectors_path
+
+	blkdev=$(basename $(do_facet ${tgt} "readlink -f ${dev}"))
+	max_sectors_path="/sys/block/${blkdev}/queue/max_sectors_kb"
+	do_facet ${tgt} "[[ -e ${max_sectors_path} ]] &&
+		cat ${max_sectors_path}"
+}
+
+# Echo the hardware limit max_hw_sectors_kb of the block device backing
+# "$2" on facet "$1"; prints nothing if the sysfs attribute is absent.
+get_max_hw_sectors_kb() {
+	local tgt=$1
+	local dev=$2
+	local blkdev
+	local max_hw_path
+
+	blkdev=$(basename $(do_facet ${tgt} "readlink -f ${dev}"))
+	max_hw_path="/sys/block/${blkdev}/queue/max_hw_sectors_kb"
+	do_facet ${tgt} "[[ -e ${max_hw_path} ]] && cat ${max_hw_path}"
+}
+
+# Set max_sectors_kb of the block device backing ${device} on ${facet}
+# to ${value}.  Logs a message and returns non-zero on failure.
+set_max_sectors_kb() {
+	local facet="$1"
+	local device="$2"
+	local value="$3"
+	local rc
+	local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+	local max_sectors_path="/sys/block/${dev_base}/queue/max_sectors_kb"
+
+	do_facet ${facet} "[[ -e ${max_sectors_path} ]] &&
+		echo ${value} > ${max_sectors_path}"
+	# declared local above so the status doesn't leak into the caller
+	rc=$?
+
+	[[ $rc -ne 0 ]] && echo "Failed to set ${max_sectors_path} to ${value}"
+
+	return $rc
+}
+
+# Return 0 if all slave devices have max_sectors_kb == max_hw_sectors_kb
+# Otherwise return > 0
+check_slaves_max_sectors_kb()
+{
+ local facet="$1"
+ local device="$2"
+ local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+ local slaves_dir=/sys/block/${dev_base}/slaves
+ local slave_devices=$(do_facet ${facet} "ls ${slaves_dir} 2>/dev/null")
+ [[ -z ${slave_devices} ]] && return 0
+
+ local slave max_sectors new_max_sectors max_hw_sectors path
+ local rc=0
+ for slave in ${slave_devices}; do
+ path="/dev/${slave}"
+ ! is_blkdev ${facet} ${path} && continue
+ max_sectors=$(get_max_sectors_kb ${facet} ${path})
+ max_hw_sectors=$(get_max_hw_sectors_kb ${facet} ${path})
+ new_max_sectors=${max_hw_sectors}
+ [[ ${new_max_sectors} -gt ${RQ_SIZE_LIMIT} ]] &&
+ new_max_sectors=${RQ_SIZE_LIMIT}
+
+ if [[ ${max_sectors} -ne ${new_max_sectors} ]]; then
+ echo "${path} ${max_sectors} ${new_max_sectors}"
+ ((rc++))
+ fi
+ check_slaves_max_sectors_kb ${facet} ${path}
+ ((rc + $?))
+ done
+
+ return $rc
+}
+
+test_125()
+{
+ local facet_list="mgs mds1 ost1"
+ combined_mgs_mds && facet_list="mgs ost1"
+
+ local facet
+ for facet in ${facet_list}; do
+ [[ $(facet_fstype ${facet}) != ldiskfs ]] &&
+ skip "ldiskfs only test" &&
+ return 0
+ ! is_blkdev ${facet} $(facet_device ${facet}) &&
+ skip "requires all real devices" &&
+ return 0
+ done
+
+ local rc=0
+ # We don't increase IO request size limit past 16MB. See comments in
+ # lustre/utils/libmount_utils_ldiskfs.c:tune_max_sectors_kb()
+ RQ_SIZE_LIMIT=$((16 * 1024))
+ local device old_max_sectors new_max_sectors max_hw_sectors
+ for facet in ${facet_list}; do
+ device=$(facet_device ${facet})
+ old_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+ max_hw_sectors=$(get_max_hw_sectors_kb ${facet} ${device})
+
+ # The expected value after l_tunedisk is executed
+ new_max_sectors=$old_max_sectors
+ [[ ${new_max_sectors_kb} -gt ${RQ_SIZE_LIMIT} ]] &&
+ new_max_sectors_kb=${RQ_SIZE_LIMIT}
+
+ # Ensure the current value of max_sectors_kb does not equal
+ # max_hw_sectors_kb, so we can tell whether l_tunedisk did
+ # anything
+ set_max_sectors_kb ${facet} ${device} $((new_max_sectors - 1))
+
+ # Value before l_tunedisk
+ local pre_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+ if [[ ${pre_max_sectors} -ne $((new_max_sectors - 1)) ]]; then
+ echo "unable to satsify test pre-condition:"
+ echo "${pre_max_sectors} != $((new_max_sectors - 1))"
+ ((rc++))
+ continue
+ fi
+
+ echo "Before: ${facet} ${device} ${pre_max_sectors} ${max_hw_sectors}"
+
+ do_facet ${facet} "libtool execute l_tunedisk ${device}"
+
+ # Value after l_tunedisk
+ local post_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+
+ echo "After: ${facet} ${device} ${post_max_sectors} ${max_hw_sectors}"
+
+ if [[ ${facet} != ost1 ]]; then
+ if [[ ${post_max_sectors} -ne ${pre_max_sectors} ]]; then
+ echo "l_tunedisk modified max_sectors_kb of ${facet}"
+ ((rc++))
+ fi
+
+ set_max_sectors_kb ${facet} ${device} ${old_max_sectors}
+ else
+ if [[ ${post_max_sectors} -eq ${pre_max_sectors} ]]; then
+ echo "l_tunedisk failed to modify max_sectors_kb of ${facet}"
+ ((rc++))
+ fi
+
+ check_slaves_max_sectors_kb ${facet} ${device} ||
+ ((rc++))
+ fi
+ done
+
+ return $rc
+}
+run_test 125 "check l_tunedisk only tunes OSTs and their slave devices"
+