+run_test 73a "default limits at OST Pool Quotas"
+
+# Verify that a default (-U) OST pool quota limit is applied to a user
+# that has no per-user limit yet, and that a first write from such a
+# user does not panic/deadlock in nested lqe lookups.
+test_73b()
+{
+ local TESTFILE1="$DIR/$tdir/$tfile-1"
+ local limit=20 #20M
+ local qpool="qpool1"
+
+ mds_supports_qp
+
+ setup_quota_test || error "setup quota failed with $?"
+ quota_init
+ set_ost_qtype $QTYPE || error "enable ost quota failed"
+
+ # pool quotas don't work properly without global limit
+ $LFS setquota -u $TSTUSR -b 0 -B ${limit}M -i 0 -I 0 $DIR ||
+ error "set global limit failed"
+
+ pool_add $qpool || error "pool_add failed"
+ pool_add_targets $qpool 0 $((OSTCOUNT - 1)) ||
+ error "pool_add_targets failed"
+
+ log "set default quota for $qpool"
+ $LFS setquota -U --pool $qpool -b ${limit}M -B ${limit}M $DIR ||
+ error "set default quota failed"
+
+ log "Write from user that hasn't lqe"
+ # Check that it doesn't cause a panic or a deadlock
+ # due to nested lqe lookups that rewrite 1st lqe in qti_lqes array.
+ # Have to use RUNAS_ID as resetquota creates lqes in
+ # the beginning for TSTUSR/TSTUSR2 when sets limits to 0.
+ runas -u $RUNAS_ID -g $RUNAS_GID $DD of=$TESTFILE1 count=10
+
+ cancel_lru_locks osc
+ sync; sync_all_data || true
+}
+run_test 73b "default OST Pool Quotas limit for new user"
+
+test_74()
+{
+ local global_limit=200 # 200M
+ local limit=10 # 10M
+ local limit2=50 # 50M
+ local qpool="qpool1"
+ local qpool2="qpool2"
+ local tmp=0
+
+ mds_supports_qp
+ setup_quota_test || error "setup quota failed with $?"
+
+ # enable ost quota
+ set_ost_qtype $QTYPE || error "enable ost quota failed"
+
+ $LFS setquota -u $TSTUSR -b 0 -B ${global_limit}M -i 0 -I 0 $DIR ||
+ error "set user quota failed"
+
+ pool_add $qpool || error "pool_add failed"
+ pool_add_targets $qpool 0 1 ||
+ error "pool_add_targets failed"
+
+ $LFS setquota -u $TSTUSR -B ${limit}M --pool $qpool $DIR ||
+ error "set user quota failed"
+
+ pool_add $qpool2 || error "pool_add failed"
+ pool_add_targets $qpool2 1 1 ||
+ error "pool_add_targets failed"
+
+ $LFS setquota -u $TSTUSR -B ${limit2}M --pool $qpool2 $DIR ||
+ error "set user quota failed"
+
+ tmp=$(getquota -u $TSTUSR global bhardlimit)
+ [ $tmp -eq $((global_limit * 1024)) ] ||
+ error "wrong global limit $global_limit"
+
+ tmp=$(getquota -u $TSTUSR global bhardlimit $qpool)
+ [ $tmp -eq $((limit * 1024)) ] || error "wrong limit $tmp for $qpool"
+
+ tmp=$(getquota -u $TSTUSR global bhardlimit $qpool2)
+ [ $tmp -eq $((limit2 * 1024)) ] || error "wrong limit $tmp for $qpool2"
+
+ # check limits in pools
+ tmp=$($LFS quota -u $TSTUSR --pool $DIR | \
+ grep -A4 $qpool | awk 'NR == 4{print $4}')
+ echo "pool limit for $qpool $tmp"
+ [ $tmp -eq $((limit * 1024)) ] || error "wrong limit:tmp for $qpool"
+ tmp=$($LFS quota -u $TSTUSR --pool $DIR | \
+ grep -A4 $qpool2 | awk 'NR == 4{print $4}')
+ echo "pool limit for $qpool2 $tmp"
+ [ $tmp -eq $((limit2 * 1024)) ] || error "wrong limit:$tmp for $qpool2"
+}
+run_test 74 "check quota pools per user"
+
+# Undo the nodemap changes made by test_75: restore admin/trusted on the
+# default nodemap, squash ids back to 99, deactivate nodemaps, and clear
+# TSTUSR's quota limits.
+function cleanup_quota_test_75()
+{
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property trusted --value 1
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property squash_uid --value 99
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property squash_gid --value 99
+
+ wait_nm_sync default admin_nodemap
+ wait_nm_sync default trusted_nodemap
+
+ do_facet mgs $LCTL nodemap_activate 0
+ wait_nm_sync active
+
+ resetquota -u $TSTUSR
+}
+
+# DoM part of test_75: check quota enforcement when space is consumed on
+# MDT (Data-on-MDT files under $tdir_dom) alone and mixed with OST files.
+# NOTE(review): relies on $tdir_dom and the quota limits set up by the
+# caller (test_75).
+test_dom_75() {
+ local dd_failed=false
+ local LIMIT=20480 #20M
+ local qid=$TSTID
+
+ for ((i = 0; i < $((LIMIT/2048-1)); i++)); do
+ $DD of=$DIR/$tdir_dom/$tfile-$i count=1 \
+ oflag=sync || dd_failed=true
+ done
+
+ $dd_failed && quota_error u $qid "write failed, expect succeed (1)"
+
+ for ((i = $((LIMIT/2048-1)); i < $((LIMIT/1024 + 10)); i++)); do
+ $DD of=$DIR/$tdir_dom/$tfile-$i count=1 \
+ oflag=sync || dd_failed=true
+ done
+
+ $dd_failed || quota_error u $qid "write succeed, expect EDQUOT (1)"
+
+ rm -f $DIR/$tdir_dom/*
+
+ # flush cache, ensure noquota flag is set on client
+ cancel_lru_locks
+ sync; sync_all_data || true
+
+ dd_failed=false
+
+ $DD of=$DIR/$tdir/file count=$((LIMIT/2048-1)) oflag=sync ||
+ quota_error u $qid "write failed, expect succeed (2)"
+
+ for ((i = 0; i < $((LIMIT/2048 + 10)); i++)); do
+ $DD of=$DIR/$tdir_dom/$tfile-$i count=1 \
+ oflag=sync || dd_failed=true
+ done
+
+ # use $qid consistently with the other checks in this function
+ $dd_failed || quota_error u $qid "write succeed, expect EDQUOT (2)"
+
+ rm -f $DIR/$tdir/*
+ rm -f $DIR/$tdir_dom/*
+
+ # flush cache, ensure noquota flag is set on client
+ cancel_lru_locks
+ sync; sync_all_data || true
+
+ dd_failed=false
+
+ for ((i = 0; i < $((LIMIT/2048-1)); i++)); do
+ $DD of=$DIR/$tdir_dom/$tfile-$i count=1 \
+ oflag=sync || dd_failed=true
+ done
+
+ $dd_failed && quota_error u $qid "write failed, expect succeed (3)"
+
+ $DD of=$DIR/$tdir/file count=$((LIMIT/2048 + 10)) oflag=sync &&
+ quota_error u $qid "write succeed, expect EDQUOT (3)"
+ true
+}
+
+test_75()
+{
+ local soft_limit=10 # MB
+ local hard_limit=20 # MB
+ local limit=$soft_limit
+ local testfile="$DIR/$tdir/$tfile-0"
+ local grace=20 # seconds
+ local tdir_dom=${tdir}_dom
+
+ if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then
+ grace=60
+ fi
+
+ setup_quota_test || error "setup quota failed with $?"
+ stack_trap cleanup_quota_test_75 EXIT
+
+ # enable ost quota
+ set_ost_qtype $QTYPE || error "enable ost quota failed"
+ set_mdt_qtype $QTYPE || error "enable mdt quota failed"
+
+ local used=$(getquota -u $TSTID global curspace)
+ $LFS setquota -t -u --block-grace $grace --inode-grace \
+ $MAX_IQ_TIME $DIR || error "set user grace time failed"
+ $LFS setquota -u $TSTUSR -b $((soft_limit+used/1024))M \
+ -B $((hard_limit+used/1024))M -i 0 -I 0 $DIR ||
+ error "set user quota failed"
+
+ chmod 777 $DIR/$tdir || error "chmod 777 $DIR/$tdir failed"
+ mkdir $DIR/$tdir_dom
+ chmod 777 $DIR/$tdir_dom
+ $LFS setstripe -E 1M -L mdt $DIR/$tdir_dom ||
+ error "setstripe $tdir_dom failed"
+
+ do_facet mgs $LCTL nodemap_activate 1
+ wait_nm_sync active
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property admin --value 0
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property trusted --value 0
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property deny_unknown --value 0
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property squash_uid --value $TSTID
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property squash_gid --value $TSTID
+ cancel_lru_locks mdc
+ wait_nm_sync default admin_nodemap
+ wait_nm_sync default trusted_nodemap
+ wait_nm_sync default squash_uid
+
+ # mmap write when over soft limit
+ limit=$soft_limit
+ $DD of=$testfile count=${limit} ||
+ quota_error a "root write failure, but expect success (1)"
+ OFFSET=$((limit * 1024))
+ cancel_lru_locks osc
+
+ echo "Write to exceed soft limit"
+ dd if=/dev/zero of=$testfile bs=1K count=10 seek=$OFFSET ||
+ quota_error a $TSTUSR "root write failure, but expect success (2)"
+ OFFSET=$((OFFSET + 1024)) # make sure we don't write to same block
+ cancel_lru_locks osc
+
+ echo "mmap write when over soft limit"
+ $MULTIOP $testfile.mmap OT40960SMW ||
+ quota_error a $TSTUSR "mmap write failure, but expect success"
+ cancel_lru_locks osc
+ rm -f $testfile*
+ wait_delete_completed || error "wait_delete_completed failed (1)"
+ sync_all_data || true
+
+ # test for user hard limit
+ limit=$hard_limit
+ log "Write..."
+ $DD of=$testfile bs=1M count=$((limit/2)) ||
+ quota_error u $TSTID \
+ "root write failure, but expect success (3)"
+
+ log "Write out of block quota ..."
+ # possibly a cache write, ignore failure
+ $DD of=$testfile bs=1M count=$((limit/2)) seek=$((limit/2)) || true
+ # flush cache, ensure noquota flag is set on client
+ cancel_lru_locks osc
+ sync; sync_all_data || true
+ # sync forced cache flush, but did not guarantee that slave
+ # got new edquot through glimpse, so wait to make sure
+ sleep 5
+ $DD of=$testfile bs=1M count=1 seek=$limit conv=fsync &&
+ quota_error u $TSTID \
+ "user write success, but expect EDQUOT"
+ rm -f $testfile
+ wait_delete_completed || error "wait_delete_completed failed (2)"
+ sync_all_data || true
+ [ $(getquota -u $TSTUSR global curspace) -eq $used ] ||
+ quota_error u $TSTID "user quota not released after deletion"
+
+ test_dom_75
+}
+run_test 75 "nodemap squashed root respects quota enforcement"
+
+# Setting the reserved project ID 4294967295 (2^32 - 1) on a file must
+# be rejected.
+test_76() {
+ ! is_project_quota_supported &&
+ skip "skip project quota unsupported"
+
+ setup_quota_test || error "setup quota failed with $?"
+ quota_init
+
+ local testfile="$DIR/$tdir/$tfile-0"
+
+ touch $testfile
+ $LFS project -p 4294967295 $testfile &&
+ error "set project ID should fail"
+ return 0
+}
+run_test 76 "project ID 4294967295 should be not allowed"
+
+# setquota on a read-only client mount must fail.
+test_77()
+{
+ # fail early if the second (read-only) mount cannot be set up
+ mount_client $MOUNT2 "ro" || error "failed to mount $MOUNT2 read-only"
+ $LFS setquota -u quota_usr -b 100M -B 100M -i 10K -I 10K $MOUNT2 &&
+ error "lfs setquota should fail in read-only Lustre mount"
+ umount $MOUNT2
+}
+run_test 77 "lfs setquota should fail in Lustre mount with 'ro'"
+
+# Check that fallocate'd (preallocated) space is charged to the user's
+# block quota.
+test_78()
+{
+ (( $OST1_VERSION >= $(version_code 2.14.55) )) ||
+ skip "need OST at least 2.14.55"
+ check_set_fallocate_or_skip
+
+ setup_quota_test || error "setup quota failed with $?"
+
+ # enable ost quota
+ set_ost_qtype $QTYPE || error "enable ost quota failed"
+
+ mkdir -p $DIR/$tdir || error "failed to create $tdir"
+ chown $TSTUSR $DIR/$tdir || error "failed to chown $tdir"
+
+ # setup quota limit
+ $LFS setquota -u $TSTUSR -b25M -B25M $DIR/$tdir ||
+ error "lfs setquota failed"
+
+ # call fallocate
+ runas -u $TSTUSR -g $TSTUSR fallocate -l 204800 $DIR/$tdir/$tfile
+
+ local kbytes=$($LFS quota -u $TSTUSR $DIR |
+ awk -v pattern=$DIR 'match($0, pattern) {printf $2}')
+ echo "kbytes returned:$kbytes"
+
+ # For file size of 204800. We should be having roughly 200 kbytes
+ # returned. Anything alarmingly low (50 taken as arbitrary value)
+ # would bail out this TC. This also avoids $kbytes of 0
+ # to be used in calculation below.
+ (( $kbytes > 50 )) ||
+ error "fallocate did not use quota. kbytes returned:$kbytes"
+
+ local expect_lo=$(($kbytes * 95 / 100)) # 5% below
+ local expect_hi=$(($kbytes * 105 / 100)) # 5% above
+
+ # Verify kbytes is 200 (204800/1024). With a permitted 5% drift
+ (( $kbytes >= $expect_lo && $kbytes <= $expect_hi )) ||
+ error "fallocate did not use quota correctly"
+}
+run_test 78 "Check fallocate increase quota usage"
+
+# Check that fallocate'd (preallocated) space is charged to the
+# directory's project quota.
+test_78a()
+{
+ (( $CLIENT_VERSION >= $(version_code 2.15.0) )) ||
+ skip "need client at least 2.15.0"
+ (( $OST1_VERSION >= $(version_code 2.15.0) )) ||
+ skip "need OST at least 2.15.0"
+ check_set_fallocate_or_skip
+
+ setup_quota_test || error "setup quota failed with $?"
+
+ # enable ost quota
+ set_ost_qtype $QTYPE || error "enable ost quota failed"
+
+ mkdir -p $DIR/$tdir || error "failed to create $tdir"
+
+ local projectid=5200 # Random project id to test
+
+ change_project -sp $projectid $DIR/$tdir
+
+ # setup quota limit
+ $LFS setquota -p $projectid -b25M -B25M $DIR/$tdir ||
+ error "lfs setquota project failed"
+
+ # call fallocate
+ fallocate -l 204800 $DIR/$tdir/$tfile
+
+ # Get curspace (kbytes) for $projectid
+ local kbytes=$(getquota -p $projectid global curspace)
+
+ echo "kbytes returned:$kbytes"
+
+ # For file size of 204800. We should be having roughly 200 kbytes
+ # returned. Anything alarmingly low (50 taken as arbitrary value)
+ # would bail out this TC. This also avoids $kbytes of 0
+ # to be used in calculation below.
+ (( $kbytes > 50 )) ||
+ error "fallocate did not use projectid. kbytes returned:$kbytes"
+
+ local expect_lo=$(($kbytes * 95 / 100)) # 5% below
+ local expect_hi=$(($kbytes * 105 / 100)) # 5% above
+
+ # Verify kbytes is 200 (204800/1024). With a permitted 5% drift
+ (( $kbytes >= $expect_lo && $kbytes <= $expect_hi )) ||
+ error "fallocate did not use quota projectid correctly"
+}
+run_test 78a "Check fallocate increase projectid usage"
+
+# Hammer the qmt dt-pool "info" proc file in a loop while the pool is
+# being created; reading the not-yet-existing file must not panic the
+# server.
+test_79()
+{
+ local qpool="qpool1"
+ local cmd="$LCTL get_param -n qmt.$FSNAME-QMT0000.dt-$qpool.info"
+ local stopf=$TMP/$tfile
+
+ # the background reader loops until the stop file is removed
+ do_facet mds1 "touch $stopf"
+ stack_trap "do_facet mds1 'rm -f $stopf'"
+ do_facet mds1 "while [ -e $stopf ]; do $cmd &>/dev/null; done"&
+ local pid=$!
+ pool_add $qpool || error "pool_add failed"
+ do_facet mds1 "rm $stopf"
+ wait $pid
+}
+run_test 79 "access to non-existed dt-pool/info doesn't cause a panic"
+
+test_80()
+{
+ local dir1="$DIR/$tdir/dir1"
+ local dir2="$DIR/$tdir/dir2"
+ local TESTFILE0="$dir1/$tfile-0"
+ local TESTFILE1="$dir1/$tfile-1"
+ local TESTFILE2="$dir1/$tfile-2"
+ local TESTFILE3="$dir2/$tfile-0"
+ local global_limit=100 # 100M
+ local limit=10 # 10M
+ local qpool="qpool1"
+
+ [ "$OSTCOUNT" -lt "2" ] && skip "needs >= 2 OSTs"
+ mds_supports_qp
+ [ "$ost1_FSTYPE" == zfs ] &&
+ skip "ZFS grants some block space together with inode"
+ setup_quota_test || error "setup quota failed with $?"
+ set_ost_qtype $QTYPE || error "enable ost quota failed"
+
+ # make sure the system is clean
+ local used=$(getquota -u $TSTUSR global curspace)
+ [ $used -ne 0 ] && error "Used space($used) for user $TSTUSR is not 0."
+
+ pool_add $qpool || error "pool_add failed"
+ pool_add_targets $qpool 0 1 ||
+ error "pool_add_targets failed"
+
+ $LFS setquota -u $TSTUSR -b 0 -B ${global_limit}M -i 0 -I 0 $DIR ||
+ error "set user quota failed"
+
+ $LFS setquota -u $TSTUSR -B ${global_limit}M --pool $qpool $DIR ||
+ error "set user quota failed"
+ $LFS setquota -u $TSTUSR -B ${limit}M --pool $qpool $DIR ||
+ error "set user quota failed"
+
+ mkdir -p $dir1 || error "failed to mkdir"
+ chown $TSTUSR.$TSTUSR $dir1 || error "chown $dir1 failed"
+ mkdir -p $dir2 || error "failed to mkdir"
+ chown $TSTUSR.$TSTUSR $dir2 || error "chown $dir2 failed"
+
+ $LFS setstripe $dir1 -i 1 -c 1|| error "setstripe $testfile failed"
+ $LFS setstripe $dir2 -i 0 -c 1|| error "setstripe $testfile failed"
+ lfs getstripe $dir1
+ lfs getstripe $dir2
+ sleep 3
+
+ $LFS quota -uv $TSTUSR $DIR
+ #define OBD_FAIL_QUOTA_PREACQ 0xA06
+ do_facet mds1 $LCTL set_param fail_loc=0xa06
+ $RUNAS $DD of=$TESTFILE3 count=3 ||
+ quota_error u $TSTUSR "write failed"
+ $RUNAS $DD of=$TESTFILE2 count=7 ||
+ quota_error u $TSTUSR "write failed"
+ $RUNAS $DD of=$TESTFILE1 count=1 oflag=direct ||
+ quota_error u $TSTUSR "write failed"
+ sync
+ sleep 3
+ $LFS quota -uv --pool $qpool $TSTUSR $DIR
+
+ rm -f $TESTFILE2
+ stop ost2
+ do_facet mds1 $LCTL set_param fail_loc=0
+ start ost2 $(ostdevname 2) $OST_MOUNT_OPTS || error "start ost2 failed"
+ $LFS quota -uv $TSTUSR --pool $qpool $DIR
+ # OST0 needs some time to update quota usage after removing TESTFILE2
+ sleep 4
+ $LFS quota -uv $TSTUSR --pool $qpool $DIR
+ $RUNAS $DD of=$TESTFILE0 count=2 oflag=direct ||
+ quota_error u $TSTUSR "write failure, but expect success"
+}
+run_test 80 "check for EDQUOT after OST failover"
+
+# Race qmt_start_pool_recalc (delayed via OBD_FAIL_QUOTA_RECALC) against
+# qmt_pool_free on MDS umount; must not crash.
+test_81() {
+ local global_limit=20 # 20M
+ local testfile="$DIR/$tdir/$tfile-0"
+ local qpool="qpool1"
+
+ mds_supports_qp
+ setup_quota_test || error "setup quota failed with $?"
+
+ # enable ost quota
+ set_ost_qtype $QTYPE || error "enable ost quota failed"
+
+ # test for user
+ log "User quota (block hardlimit:$global_limit MB)"
+ $LFS setquota -u $TSTUSR -B 1G $DIR || error "set user quota failed"
+
+ pool_add $qpool || error "pool_add failed"
+ #define OBD_FAIL_QUOTA_RECALC 0xA07
+ do_facet mds1 $LCTL set_param fail_loc=0x80000A07 fail_val=30
+ # adding an OST causes pool recalculation to start
+ pool_add_targets $qpool 0 0 1
+ stop mds1 -f || error "MDS umount failed"
+
+ #start mds1 back to destroy created pool
+ start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+ clients_up || true
+}
+run_test 81 "Race qmt_start_pool_recalc with qmt_pool_free"
+
+test_82()
+{
+ (( $MDS1_VERSION >= $(version_code 2.14.55) )) ||
+ skip "need MDS 2.14.55 or later"
+ is_project_quota_supported ||
+ skip "skip project quota unsupported"
+
+ setup_quota_test || error "setup quota failed with $?"
+ stack_trap cleanup_quota_test
+ quota_init
+
+ local parent_dir="$DIR/$tdir.parent"
+ local child_dir="$parent_dir/child"
+
+ mkdir -p $child_dir
+ stack_trap "chown -R 0:0 $parent_dir"
+
+ chown $TSTUSR:$TSTUSR $parent_dir ||
+ error "failed to chown on $parent_dir"
+ chown $TSTUSR2:$TSTUSRS2 $child_dir ||
+ error "failed to chown on $parent_dir"
+
+ $LFS project -p 1000 $parent_dir ||
+ error "failed to set project id on $parent_dir"
+ $LFS project -p 1001 $child_dir ||
+ error "failed to set project id on $child_dir"
+
+ rmdir $child_dir || error "cannot remove child dir, test failed"
+}
+run_test 82 "verify more than 8 qids for single operation"