+ LOAD_MODULES_REMOTE=true load_modules
+#define OBD_FAIL_OFD_SET_OID 0x1e0
+ do_facet ost1 $LCTL set_param fail_loc=0x00001e0
+
+ stack_trap cleanup EXIT
+ setup_noconfig
+ do_facet ost1 $LCTL set_param obdfilter.*.precreate_batch=256
+ $LFS mkdir -i0 -c1 $DIR/$tdir || error "failed to create directory"
+ $LFS setstripe -i0 -c1 $DIR/$tdir || error "failed to setstripe"
+ do_facet ost1 $LCTL set_param fail_loc=0
+ # overflow IDIF 32bit and create > OST_MAX_PRECREATE*5
+ # so a new wrong sequence would differ from an original with error
+ #define OST_MAX_PRECREATE 20000
+ local ost_max_precreate=20100
+ local num_create=$(( ost_max_precreate * 5 ))
+
+ # Check the number of inodes available on OST0
+ local files=0
+ local ifree=$($LFS df -i $MOUNT |
+ awk '/OST0000/ { print $4 }'; exit ${PIPESTATUS[0]})
+
+ log "On OST0, $ifree inodes available. Want $num_create. rc=$?"
+
+ if [ $ifree -lt 10000 ]; then
+ files=$(( ifree - 50 ))
+ else
+ files=10000
+ fi
+
+ local j=$((num_create / files + 1))
+
+ for i in $(seq 1 $j); do
+ createmany -o $DIR/$tdir/$tfile-$i- $files ||
+ error "createmany fail create $files files: $?"
+ unlinkmany $DIR/$tdir/$tfile-$i- $files ||
+ error "unlinkmany failed unlink $files files"
+ done
+ sync
+ touch $DIR/$tdir/$tfile
+ do_facet ost1 sync
+ #we need a write req during recovery for ofd_seq_load
+ replay_barrier ost1
+ dd if=/dev/urandom of=$DIR/$tdir/$tfile bs=1024k count=1 oflag=sync ||
+ error "failed to write file"
+
+ # OBD_FAIL_OST_CREATE_NET 0x204
+ do_facet ost1 $LCTL set_param fail_loc=0x80000204
+ fail ost1
+ createmany -o $DIR/$tdir/file_ 100
+ sync
+
+ err=$(do_facet ost1 dmesg | tac | sed "/Recovery over/,$ d" |
+ grep "OST replaced or reformatted")
+ [ -z "$err" ] || error $err
+}
+run_test 122b "Check OST sequence wouldn't change when IDIF 32bit overflows"
+
+# Exercise "lctl llog_print" log-name parsing: the legacy escaped-logid
+# syntax (kept until llog_ioctl.c::str2logid() drops it) and the plain
+# "<fsname>-client" config log name, gated on the MGS version.
+test_123aa() {
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	[ -d $MOUNT/.lustre ] || setup
+
+	# test old logid format until removal from llog_ioctl.c::str2logid()
+	if [ $MGS_VERSION -lt $(version_code 3.1.53) ]; then
+		do_facet mgs $LCTL dl | grep MGS
+		# the backslash pile survives shell + ssh + remote-shell
+		# expansion so the remote lctl sees a literal '$'
+		do_facet mgs "$LCTL --device %MGS llog_print \
+			\\\\\\\$$FSNAME-client 1 10" ||
+			error "old llog_print failed"
+	fi
+
+	# test new logid format
+	if [ $MGS_VERSION -ge $(version_code 2.9.53) ]; then
+		do_facet mgs "$LCTL --device MGS llog_print $FSNAME-client" ||
+			error "new llog_print failed"
+	fi
+}
+run_test 123aa "llog_print works with FIDs and simple names"
+
+# Check that a value stored with "set_param -P" appears in the params
+# llog with the expected "parameter:" and "value:" fields.
+test_123ab() {
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	[[ $MGS_VERSION -gt $(version_code 2.11.51) ]] ||
+		skip "Need server with working llog_print support"
+
+	[ -d $MOUNT/.lustre ] || setup
+
+	local yaml
+	local orig_val
+	local mgs_arg=""
+
+	# 2.13.54+ lctl no longer needs an explicit --device for llog ops
+	[[ $MGS_VERSION -gt $(version_code 2.13.54) ]] ||
+		mgs_arg="--device MGS"
+
+	# NOTE(review): get_param output is "jobid_name=<val>"; the restore
+	# below feeds it back through set_param -P unchanged -- confirm
+	# lctl accepts that form
+	orig_val=$(do_facet mgs $LCTL get_param jobid_name)
+	do_facet mgs $LCTL set_param -P jobid_name="TESTNAME"
+
+	# take the most recent jobid_name record from the params log
+	yaml=$(do_facet mgs $LCTL $mgs_arg llog_print params |
+	       grep jobid_name | tail -n 1)
+
+	# - { index: N, event: set_param, ..., param: jobid_name, value: X }
+	local param=$(awk '{ print $10 }' <<< "$yaml")
+	local val=$(awk '{ print $12 }' <<< "$yaml")
+	#return to the default
+	do_facet mgs $LCTL set_param -P jobid_name=$orig_val
+	# quote both sides: an empty $val/$param would otherwise make the
+	# "[" test fail with a syntax error instead of a clean error()
+	[ "$val" = "TESTNAME" ] || error "bad value: $val"
+	[ "$param" = "jobid_name," ] || error "Bad param: $param"
+}
+run_test 123ab "llog_print params output values from set_param -P"
+
+# LU-11566: llog_print must honor --start/--end and only emit records
+# whose index lies within the requested range.
+test_123ac() { # LU-11566
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start ||
+		skip "Need 'lctl llog_print --start' on MGS"
+
+	local start=10
+	local end=50
+	local mgs_arg=""
+
+	# 2.13.54+ lctl no longer needs an explicit --device for llog ops
+	[[ $MGS_VERSION -gt $(version_code 2.13.54) ]] ||
+		mgs_arg="--device MGS"
+
+	[ -d $MOUNT/.lustre ] || setup
+
+	# - { index: 10, event: add_uuid, nid: 192.168.20.1@tcp(0x20000c0a81401,
+	# node: 192.168.20.1@tcp }
+	# tr strips commas so the 4th field is the bare index number
+	# NOTE(review): the while loop runs in a pipeline subshell; confirm
+	# the framework's error() propagates failures out of a subshell
+	do_facet mgs $LCTL $mgs_arg \
+		llog_print --start $start --end $end $FSNAME-client | tr -d , |
+	while read DASH BRACE INDEX idx EVENT BLAH BLAH BLAH; do
+		(( idx >= start )) || error "llog_print index $idx < $start"
+		(( idx <= end )) || error "llog_print index $idx > $end"
+	done
+}
+run_test 123ac "llog_print with --start and --end"
+
+# LU-11566: llog_print must show every record, i.e. the index of its
+# final line must match what llog_info reports for the log.
+test_123ad() { # LU-11566
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	# older versions of lctl may not print all records properly
+	do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start ||
+		skip "Need 'lctl llog_print --start' on MGS"
+
+	[ -d $MOUNT/.lustre ] || setup
+
+	# append a new record, to avoid issues if last record was cancelled
+	local old=$($LCTL get_param -n osc.*-OST0000-*.max_dirty_mb | head -1)
+	do_facet mgs $LCTL conf_param $FSNAME-OST0000.osc.max_dirty_mb=$old
+
+	# llog_info output looks like:
+	# logid: [0x3:0xa:0x0]:0
+	# flags: 4 (plain)
+	# records_count: 72
+	# last_index: 72
+	# NOTE(review): expected final printed index is last_index - 1 --
+	# confirm why the off-by-one offset is needed here
+	local num=$(do_facet mgs $LCTL --device MGS llog_info $FSNAME-client |
+		    awk '/last_index:/ { print $2 - 1 }')
+
+	# - { index: 71, event: set_timeout, num: 0x14, param: sys.timeout=20 }
+	local last=$(do_facet mgs $LCTL --device MGS llog_print $FSNAME-client |
+		     tail -1 | awk '{ print $4 }' | tr -d , )
+	(( last == num )) || error "llog_print only showed $last/$num records"
+}
+run_test 123ad "llog_print shows all records"
+
+# LU-11566: llog_cancel must remove the requested record, both via the
+# new --log_idx option on the params log and via the old positional
+# syntax on the client config log.
+test_123ae() { # LU-11566
+	local max
+	local mgs_arg=""
+	local log
+	local id
+	local orig
+	local new
+	local pgs
+
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	[ -d $MOUNT/.lustre ] || setupall
+
+	max=$($LCTL get_param -n osc.*-OST0000-*.max_dirty_mb | head -1)
+	pgs=$($LCTL get_param -n osc.*-OST0000-*.max_pages_per_rpc | head -1)
+	[[ $MGS_VERSION -gt $(version_code 2.13.54) ]] ||
+		mgs_arg="--device MGS"
+
+	if do_facet mgs "$LCTL help llog_cancel" 2>&1| grep -q -- --log_id; then
+		# save one set_param -P record in case none exist
+
+		do_facet mgs $LCTL set_param -P osc.*.max_pages_per_rpc=$pgs
+		stack_trap "do_facet mgs $LCTL set_param -P -d \
+			osc.*.max_pages_per_rpc"
+
+		log=params
+		orig=$(do_facet mgs $LCTL $mgs_arg llog_print $log |
+		       tail -1 | awk '{ print $4 }' | tr -d , )
+		do_facet mgs $LCTL set_param -P osc.*.max_dirty_mb=$max
+		do_facet mgs $LCTL $mgs_arg llog_print $log | tail -1 |
+			grep "parameter: osc.*.max_dirty_mb" ||
+			error "new set_param -P wasn't stored in params log"
+
+		# - { index: 71, event: set_param, device: general,
+		#   param: osc.*.max_dirty_mb, value: 256 }
+		id=$(do_facet mgs $LCTL $mgs_arg llog_print $log |
+		     tail -1 | awk '{ print $4 }' | tr -d , )
+		do_facet mgs $LCTL $mgs_arg llog_cancel $log --log_idx=$id
+		# cancelling the record we just added must bring the last
+		# index back to what it was before the set_param above
+		new=$(do_facet mgs $LCTL $mgs_arg llog_print $log |
+		      tail -1 | awk '{ print $4 }' | tr -d , )
+		(( new == orig )) ||
+			error "new llog_cancel now $new, not at $orig records"
+	fi
+
+	# test old positional parameters for a while still
+	if [ "$MGS_VERSION" -le $(version_code 3.1.53) ]; then
+		log=$FSNAME-client
+
+		do_facet mgs $LCTL conf_param \
+			$FSNAME-OST0000.osc.max_pages_per_rpc=$pgs
+		stack_trap "do_facet mgs $LCTL conf_param -d \
+			$FSNAME-OST0000.osc.max_pages_per_rpc"
+
+		orig=$(do_facet mgs $LCTL --device MGS llog_print $log |
+		       tail -1 | awk '{ print $4 }' | tr -d , )
+		do_facet mgs $LCTL conf_param $FSNAME-OST0000.osc.max_dirty_mb=$max
+		do_facet mgs $LCTL --device MGS llog_print $log |
+			tail -1 | grep "parameter: osc.max_dirty_mb" ||
+			error "old conf_param wasn't stored in params log"
+		do_facet mgs $LCTL --device MGS llog_print $log
+		# - { index: 71, event: conf_param, device: testfs-OST0000-osc,
+		#   param: osc.max_dirty_mb=256 }
+		id=$(do_facet mgs $LCTL --device MGS llog_print $log |
+		     tail -1 | awk '{ print $4 }' | tr -d , )
+		do_facet mgs $LCTL --device MGS llog_cancel $log $id
+		do_facet mgs $LCTL --device MGS llog_print $log
+		new=$(do_facet mgs $LCTL --device MGS llog_print $log |
+		      tail -1 | awk '{ print $4 }' | tr -d , )
+		(( new == orig )) ||
+			error "old llog_cancel now $new, not at $orig records"
+	fi
+}
+run_test 123ae "llog_cancel can cancel requested record"
+
+# LU-13609: with OBD_FAIL_CATLIST set to skip the first catalog record,
+# llog_catlist must return exactly the original list minus that record,
+# on both the MGS and an MDT.
+test_123af() { #LU-13609
+	[ "$MGS_VERSION" -ge $(version_code 2.13.54) -a \
+	   "$MDS1_VERSION" -ge $(version_code 2.13.54) ] ||
+		skip "Need both MGS and MDS version at least 2.13.54"
+
+	[ -d $MOUNT/.lustre ] || setupall
+	# clear fail_loc on both facets the loop below may touch
+	stack_trap "do_facet mgs $LCTL set_param fail_loc=0" EXIT
+	stack_trap "do_facet mds1 $LCTL set_param fail_loc=0" EXIT
+
+	local device
+	local facet
+	local cmd
+	local orig_clist
+	local orig_count
+	local new_clist
+	local new_count
+	local i
+	local j
+
+	for device in "MGS" "$FSNAME-MDT0000"; do
+		cmd="--device $device llog_catlist"
+		echo "lctl $cmd ..."
+		if [ "$device" = "MGS" ]; then
+			facet="mgs"
+		else
+			facet="mds1"
+		fi
+		orig_clist=($(do_facet $facet $LCTL $cmd | awk '{ print $2 }'))
+		orig_count=${#orig_clist[@]}
+		echo "orig_clist: ${orig_clist[@]}"
+
+		#define OBD_FAIL_CATLIST 0x131b
+		#fetch to llog records from the second one
+		do_facet $facet $LCTL set_param fail_loc=0x131b fail_val=2
+
+		new_clist=($(do_facet $facet $LCTL $cmd | awk '{ print $2 }'))
+		new_count=${#new_clist[@]}
+		echo "new_clist: ${new_clist[@]}"
+
+		# skipping the first record must drop the count by exactly 1
+		[ $new_count -eq $((orig_count - 1)) ] ||
+			error "$new_count != $orig_count - 1"
+		# compare element-wise: new list == original shifted by one.
+		# iterate valid indices only (0 .. new_count-1); the old
+		# "seq 0 $new_count" loop read one element past the end
+		for ((i = 0; i < new_count; i++)); do
+			j=$((i + 1))
+			[ "${orig_clist[$j]}" = "${new_clist[$i]}" ] ||
+				error "${orig_clist[$j]} != ${new_clist[$i]}"
+		done
+		# reset fail_loc on the facet under test; the old code
+		# always cleared mds1, leaving it set on a separate MGS
+		do_facet $facet $LCTL set_param fail_loc=0
+		echo "done"
+	done
+}
+run_test 123af "llog_catlist can show all config files correctly"
+
+# LU-15142: a parameter removed with "set_param -P -d" must no longer
+# appear in "llog_print params" output, for both the bare-name and the
+# ordinary "name=value" deletion syntax.
+test_123ag() { # LU-15142
+	local rec
+	local orig_val
+
+	remote_mgs_nodsh && skip "remote MGS with nodsh"
+	(( $MGS_VERSION >= $(version_code 2.14.55) )) ||
+		skip "Need server version least 2.14.55"
+
+	[ -d $MOUNT/.lustre ] || setup
+
+	orig_val=$(do_facet mgs $LCTL get_param jobid_name)
+	stack_trap "do_facet mgs $LCTL set_param -P jobid_name=$orig_val"
+
+	do_facet mgs $LCTL set_param -P jobid_name="TESTNAME1"
+	# delete by bare parameter name; llog_print must then skip it
+	do_facet mgs $LCTL set_param -P -d jobid_name
+	rec=$(do_facet mgs $LCTL --device MGS llog_print params |
+	      grep -c jobid_name)
+	(( rec == 0 )) || error "parameter was not deleted, check #1"
+	# re-add so the second deletion form has something to remove
+	do_facet mgs $LCTL set_param -P jobid_name="TESTNAME1"
+	rec=$(do_facet mgs $LCTL --device MGS llog_print params |
+	      grep -c jobid_name)
+	(( rec == 1)) || error "parameter is not set"
+	# usage with ordinary set_param format works too
+	do_facet mgs $LCTL set_param -P -d jobid_name="ANY"
+	rec=$(do_facet mgs $LCTL --device MGS llog_print params |
+	      grep -c jobid_name)
+	(( rec == 0 )) || error "parameter was not deleted, check #2"
+}
+run_test 123ag "llog_print skips values deleted by set_param -P -d"
+
+test_123F() {
+ remote_mgs_nodsh && skip "remote MGS with nodsh"
+
+ [ -d $MOUNT/.lustre ] || setup
+ local yaml_file="$TMP/$tfile.yaml"
+ do_facet mgs rm "$yaml_file"
+ local cfgfiles=$(do_facet mgs "lctl --device MGS llog_catlist" |
+ sed 's/config_log://')
+
+ # set jobid_var to a different value for test
+ local orig_val=$(do_facet mgs $LCTL get_param jobid_var)
+ do_facet mgs $LCTL set_param -P jobid_var="TESTNAME"
+
+ for i in $cfgfiles params; do
+ do_facet mgs "lctl --device MGS llog_print ${i} >> $yaml_file"
+ done
+
+ echo "Unmounting FS"
+ stopall
+ echo "Writeconf"
+ writeconf_all
+ echo "Remounting"
+ setup_noconfig
+
+ # Reapply the config from before
+ echo "Setting configuration parameters"
+ do_facet mgs "lctl set_param -F $yaml_file"
+
+ local set_val=$(do_facet mgs $LCTL get_param jobid_var)
+ do_facet mgs $LCTL set_param -P $orig_val
+
+ [ $set_val == "jobid_var=TESTNAME" ] ||
+ error "$set_val is not TESTNAME"
+
+ do_facet mgs rm "$yaml_file"
+ cleanup
+}
+run_test 123F "clear and reset all parameters using set_param -F"
+
+# Inject a fake failover nid via conf_param, replace it with the real
+# nids using replace_nids, then verify failover to mds2 still works.
+test_124()
+{
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+	[ -z $mds2failover_HOST ] && skip "needs MDT failover setup"
+
+	setup
+	# the client config log must not contain the fake nid yet
+	do_facet mgs $LCTL --device MGS llog_print $FSNAME-client |
+		grep 1.2.3.4@tcp && error "Should not be fake nid"
+	do_facet mgs $LCTL conf_param $FSNAME-MDT0001.failover.node=1.2.3.4@tcp\
+		|| error "Set params error"
+	do_facet mgs $LCTL --device MGS llog_print $FSNAME-client |
+		grep 1.2.3.4@tcp || error "Fake nid should be added"
+	cleanup
+
+	load_modules
+	if combined_mgs_mds; then
+		# nosvc: bring up only the MGS service on the combined
+		# device so replace_nids can run with the MDT offline
+		start_mdt 1 "-o nosvc" ||
+			error "starting mds with nosvc option failed"
+	fi
+	local nid=$(do_facet mds2 $LCTL list_nids | head -1)
+	local failover_nid=$(do_node $mds2failover_HOST $LCTL list_nids | head -1)
+	# replace_nids rewrites the config, dropping the fake 1.2.3.4@tcp
+	do_facet mgs $LCTL replace_nids $FSNAME-MDT0001 $nid:$failover_nid ||
+		error "replace_nids execution error"
+
+	if combined_mgs_mds; then
+		stop_mdt 1
+	fi
+
+	setup
+	fail mds2
+	echo "lfs setdirstripe"
+	# a striped dir on MDT1 proves the failed-over MDT is functional
+	$LFS setdirstripe -i 1 $MOUNT/$tdir || error "setdirstirpe error"
+	echo ok
+}
+run_test 124 "check failover after replace_nids"
+
+# Print the current max_sectors_kb of a facet's block device, read from
+# sysfs on the facet's node; prints nothing if the sysfs file is absent.
+get_max_sectors_kb() {
+	local facet="$1"
+	local device="$2"
+	local dev_base
+	local sysfs_path
+
+	dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+	sysfs_path="/sys/block/${dev_base}/queue/max_sectors_kb"
+
+	do_facet ${facet} "[[ -e ${sysfs_path} ]] && cat ${sysfs_path}"
+}
+
+# Print the hardware limit max_hw_sectors_kb of a facet's block device,
+# read from sysfs on the facet's node; empty output if the file is absent.
+get_max_hw_sectors_kb() {
+	local facet="$1"
+	local device="$2"
+	local dev_base
+	local sysfs_path
+
+	dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+	sysfs_path="/sys/block/${dev_base}/queue/max_hw_sectors_kb"
+
+	do_facet ${facet} "[[ -e ${sysfs_path} ]] && cat ${sysfs_path}"
+}
+
+# Set max_sectors_kb of a facet's block device via sysfs.
+# Arguments: facet, device path, value (KiB)
+# Returns: 0 on success; prints a diagnostic and returns non-zero when
+# the sysfs file is missing or the write fails.
+set_max_sectors_kb() {
+	local facet="$1"
+	local device="$2"
+	local value="$3"
+	local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+	local max_sectors_path="/sys/block/${dev_base}/queue/max_sectors_kb"
+
+	do_facet ${facet} "[[ -e ${max_sectors_path} ]] &&
+		echo ${value} > ${max_sectors_path}"
+	# capture the remote status; rc previously leaked as a global
+	local rc=$?
+
+	[[ $rc -ne 0 ]] && echo "Failed to set ${max_sectors_path} to ${value}"
+
+	return $rc
+}
+
+# Return 0 if all slave devices have max_sectors_kb == max_hw_sectors_kb
+# Otherwise return > 0
+check_slaves_max_sectors_kb()
+{
+ local facet="$1"
+ local device="$2"
+ local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+ local slaves_dir=/sys/block/${dev_base}/slaves
+ local slave_devices=$(do_facet ${facet} "ls ${slaves_dir} 2>/dev/null")
+ [[ -z ${slave_devices} ]] && return 0
+
+ local slave max_sectors new_max_sectors max_hw_sectors path
+ local rc=0
+ for slave in ${slave_devices}; do
+ path="/dev/${slave}"
+ ! is_blkdev ${facet} ${path} && continue
+ max_sectors=$(get_max_sectors_kb ${facet} ${path})
+ max_hw_sectors=$(get_max_hw_sectors_kb ${facet} ${path})
+ new_max_sectors=${max_hw_sectors}
+ [[ ${new_max_sectors} -gt ${RQ_SIZE_LIMIT} ]] &&
+ new_max_sectors=${RQ_SIZE_LIMIT}
+
+ if [[ ${max_sectors} -ne ${new_max_sectors} ]]; then
+ echo "${path} ${max_sectors} ${new_max_sectors}"
+ ((rc++))
+ fi
+ check_slaves_max_sectors_kb ${facet} ${path}
+ ((rc + $?))
+ done
+
+ return $rc
+}
+
+test_125()
+{
+ local facet_list="mgs mds1 ost1"
+ combined_mgs_mds && facet_list="mgs ost1"
+
+ local facet
+ for facet in ${facet_list}; do
+ [[ $(facet_fstype ${facet}) != ldiskfs ]] &&
+ skip "ldiskfs only test" &&
+ return 0
+ ! is_blkdev ${facet} $(facet_device ${facet}) &&
+ skip "requires all real devices" &&
+ return 0
+ done
+
+ local rc=0
+ # We don't increase IO request size limit past 16MB. See comments in
+ # lustre/utils/libmount_utils_ldiskfs.c:tune_max_sectors_kb()
+ RQ_SIZE_LIMIT=$((16 * 1024))
+ local device old_max_sectors new_max_sectors max_hw_sectors
+ for facet in ${facet_list}; do
+ device=$(facet_device ${facet})
+ old_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+ max_hw_sectors=$(get_max_hw_sectors_kb ${facet} ${device})
+
+ # The expected value after l_tunedisk is executed
+ new_max_sectors=$old_max_sectors
+ [[ ${new_max_sectors_kb} -gt ${RQ_SIZE_LIMIT} ]] &&
+ new_max_sectors_kb=${RQ_SIZE_LIMIT}
+
+ # Ensure the current value of max_sectors_kb does not equal
+ # max_hw_sectors_kb, so we can tell whether l_tunedisk did
+ # anything
+ set_max_sectors_kb ${facet} ${device} $((new_max_sectors - 1))
+
+ # Value before l_tunedisk
+ local pre_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+ if [[ ${pre_max_sectors} -ne $((new_max_sectors - 1)) ]]; then
+ echo "unable to satsify test pre-condition:"
+ echo "${pre_max_sectors} != $((new_max_sectors - 1))"
+ ((rc++))
+ continue
+ fi
+
+ echo "Before: ${facet} ${device} ${pre_max_sectors} ${max_hw_sectors}"
+
+ do_facet ${facet} "l_tunedisk ${device}"
+
+ # Value after l_tunedisk
+ local post_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+
+ echo "After: ${facet} ${device} ${post_max_sectors} ${max_hw_sectors}"
+
+ if [[ ${facet} != ost1 ]]; then
+ if [[ ${post_max_sectors} -ne ${pre_max_sectors} ]]; then
+ echo "l_tunedisk modified max_sectors_kb of ${facet}"
+ ((rc++))
+ fi
+
+ set_max_sectors_kb ${facet} ${device} ${old_max_sectors}
+ else
+ if [[ ${post_max_sectors} -eq ${pre_max_sectors} ]]; then
+ echo "l_tunedisk failed to modify max_sectors_kb of ${facet}"
+ ((rc++))
+ fi
+
+ check_slaves_max_sectors_kb ${facet} ${device} ||
+ ((rc++))
+ fi
+ done
+
+ return $rc
+}
+run_test 125 "check l_tunedisk only tunes OSTs and their slave devices"
+
+# Racing module load against mount must not crash: OBD_FAIL_OBD_SETUP
+# delays obd setup while load_modules runs in parallel with the mount.
+test_126() {
+	[[ "$MDS1_VERSION" -ge $(version_code 2.13.52) ]] ||
+		skip "Need MDS version at least 2.13.52"
+
+	cleanup
+	# load only libcfs first so fail_loc can be armed before the rest
+	# of the module stack comes up
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) load_module ../libcfs/libcfs/libcfs
+	#define OBD_FAIL_OBD_SETUP 0x60d
+	do_facet mds1 $LCTL set_param fail_loc=0x60d
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) load_modules &
+	# wait up to 40s for the osd module to appear before mounting
+	for i in {1..40}; do
+		do_facet mds1 lsmod | grep -q osd_$mds1_FSTYPE && break
+		sleep 1
+	done
+	# clear the fail_loc in the background while the mount is stalled
+	clear_failloc $SINGLEMDS 20 &
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+}
+run_test 126 "mount in parallel shouldn't cause a crash"
+
+# Fill an OST to (nearly) capacity, then verify that an O_DIRECT
+# overwrite of the existing file still succeeds -- rewriting allocated
+# blocks must not require new space.
+test_127() {
+	[[ "$ost1_FSTYPE" == ldiskfs ]] || skip "ldiskfs only test"
+
+	cleanup
+	setup
+	zconf_umount_clients $RCLIENTS $MOUNT
+
+	wait_osp_active ost ${FSNAME}-OST0000 0 1
+	local osc_tgt="$FSNAME-OST0000-osc-$($LFS getname -i $DIR)"
+	local avail1=($($LCTL get_param -n osc.${osc_tgt}.kbytesavail))
+
+	# fill OST0000 with one file until dd stops (expected ENOSPC)
+	$LFS setstripe -i 0 $DIR/$tfile || error "failed creating $DIR/$tfile"
+	dd if=/dev/zero of=$DIR/$tfile bs=1M oflag=direct || true
+
+	local avail2=($($LCTL get_param -n osc.${osc_tgt}.kbytesavail))
+
+	# the OST should now be more than 99% full
+	if ((avail2 * 100 / avail1 > 1)); then
+		lfs df $DIR
+		ls -l $DIR/$tfile
+		error "more than 1% space left: before=$avail1 after=$avail2"
+	fi
+
+	local mbs=$(($(stat -c %s $DIR/$tfile) / (1024 * 1024)))
+
+	# overwrite in place (notrunc) -- must succeed on the full OST
+	dd if=/dev/zero of=$DIR/$tfile bs=1M count=$mbs conv=notrunc \
+		oflag=direct || error "overwrite failed"
+}
+run_test 127 "direct io overwrite on full ost"
+
+# Verify the tunefs --nolocallogs flag: it is rejected on a virgin
+# target, forces fetching config logs from the MGS on a configured one,
+# and is cleared again after a successful MGS connection.
+test_128()
+{
+	combined_mgs_mds && skip "need separate mgs device"
+	[ "$ost2_FSTYPE" == zfs ] && import_zpool ost2
+
+	format_ost 2
+	# Try to apply nolocallogs to the virgin OST. Should fail.
+	do_facet ost2 "$TUNEFS --nolocallogs $(ostdevname 2)" &&
+		error "nolocallogs should not be allowed on the virgin target"
+
+	setupall
+	stopall
+
+	[ "$ost1_FSTYPE" == zfs ] && import_zpool ost1
+	# Start OST without MGS (local configs)
+	do_facet ost1 "$TUNEFS --dryrun $(ostdevname 1)"
+	start_ost || error "unable to start OST1"
+	stop_ost || error "Unable to stop OST1"
+
+	[ "$ost1_FSTYPE" == zfs ] && import_zpool ost1
+	# Do not allow reading local configs, should fail
+	do_facet ost1 "$TUNEFS --nolocallogs $(ostdevname 1)" ||
+		error "Can not set nolocallogs"
+	start_ost && error "OST1 started, but should fail"
+
+	# Connect to MGS successfully, reset nolocallogs flag
+	[ "$ost1_FSTYPE" == zfs ] && import_zpool ost1
+	start_mgs || error "unable to start MGS"
+	start_ost || error "unable to start OST1"
+
+	# --dryrun prints the current flags; nolocallogs must be gone
+	do_facet ost1 "$TUNEFS --dryrun $(ostdevname 1)" | grep "nolocallogs" &&
+		error "nolocallogs expected to be reset"
+
+	stop_ost || error "Unable to stop OST1"
+}
+run_test 128 "Force using remote logs with --nolocallogs"
+
+# A freshly formatted OST reuses an index already registered with the
+# MGS, so mounting it must be rejected until a writeconf regenerates
+# its configuration.
+test_129()
+{
+	stopall
+	start_mds || error "MDS start failed"
+	format_ost 1
+	# the rejection must hold across repeated mount attempts
+	if start ost1 $(ostdevname 1) $OST_MOUNT_OPTS; then
+		error "start ost1 should fail"
+	fi
+	if start ost1 $(ostdevname 1) $OST_MOUNT_OPTS; then
+		error "second start ost1 should fail"
+	fi
+	# after writeconf the OST re-registers and the mount succeeds
+	do_facet ost1 "$TUNEFS --writeconf $(ostdevname 1)"
+	start ost1 $(ostdevname 1) $OST_MOUNT_OPTS ||
+		error "start ost1 failed"
+	stop ost1
+	stop_mds
+}
+run_test 129 "attempt to connect an OST with the same index should fail"
+
+test_130()
+{
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+ setupall
+ stop_mdt 2 || error "mdt2 stop failed"
+ do_facet mds2 "$TUNEFS --writeconf $(mdsdevname 2)"
+ start_mdt 2 || error "mdt2 start failed"
+ do_facet mds2 "$LCTL dl" | grep MDT0001-osp-MDT0001 &&
+ error "Illegal OSP device created" || true
+}
+run_test 130 "re-register an MDT after writeconf"
+
+test_131() {
+ [ "$mds1_FSTYPE" == "ldiskfs" ] || skip "ldiskfs only test"
+ do_facet mds1 $DEBUGFS -R features $(mdsdevname 1) |
+ grep -q project || skip "skip project quota not supported"
+
+ local projid
+
+ setupall
+ test_mkdir -c $MDSCOUNT -p $DIR/$tdir
+ $LFS project -p 1000 $DIR/$tdir || error "set dir project id failed"
+ createmany -o $DIR/$tdir/f 512
+ for ((i = 0; i < 512; ++i)); do
+ $LFS project -p $i $DIR/$tdir/f${i} ||
+ error "set f${i} project id failed"
+ done
+
+ test_mkdir -c $MDSCOUNT -p $DIR/$tdir.inherit
+ $LFS project -p 1001 -s $DIR/$tdir.inherit
+ createmany -o $DIR/$tdir.inherit/f 128
+ (( $($LFS project $DIR/$tdir.inherit/f* |
+ awk '$1 == 1001 { print }' | wc -l) == 128 )) ||
+ error "files did not inherit projid 1001"
+
+ stopall
+
+ for i in $(seq $MDSCOUNT); do
+ mds_backup_restore mds$i ||
+ error "Backup/restore on mds$i failed"
+ done
+
+ setupall
+
+ projid=($($LFS project -d $DIR/$tdir))
+ [ ${projid[0]} == "1000" ] ||
+ error "projid expected 1000 not ${projid[0]}"
+ for ((i = 0; i < 512; ++i)); do
+ projid=($($LFS project $DIR/$tdir/f${i}))
+ [ ${projid[0]} == "$i" ] ||
+ error "projid expected $i not ${projid[0]}"
+ done
+
+ (( $($LFS project $DIR/$tdir.inherit/f* |
+ awk '$1 == 1001 { print }' | wc -l) == 128 )) ||
+ error "restore did not copy projid 1001"