- resetquota -u $TSTUSR
-}
-run_test_with_stat 29 "unhandled quotactls must not hang lustre client (19778) ========"
-
-test_30()
-{
- local output
- local LIMIT=1024
- local TESTFILE="$DIR/$tdir/$tfile"
- local GRACE=10
-
- set_blk_tunesz 512
- set_blk_unitsz 1024
-
- mkdir -p $DIR/$tdir
- chmod 0777 $DIR/$tdir
-
- $LFS setstripe $TESTFILE -i 0 -c 1
- chown $TSTUSR.$TSTUSR $TESTFILE
-
- $LFS setquota -t -u --block-grace $GRACE --inode-grace $MAX_IQ_TIME $DIR
- $LFS setquota -u $TSTUSR -b $LIMIT -B 0 -i 0 -I 0 $DIR
- $RUNAS dd if=/dev/zero of=$TESTFILE bs=1024 count=$((LIMIT * 2)) || true
- cancel_lru_locks osc
- sleep $GRACE
- $LFS setquota -u $TSTUSR -B 0 $DIR
- # over-quota flag has not yet settled since we do not trigger async events
- # based on grace time period expiration
- $SHOW_QUOTA_USER
- $RUNAS dd if=/dev/zero of=$TESTFILE conv=notrunc oflag=append bs=1048576 count=1 || true
- cancel_lru_locks osc
- # now over-quota flag should be settled and further writes should fail
- $SHOW_QUOTA_USER
- $RUNAS dd if=/dev/zero of=$TESTFILE conv=notrunc oflag=append bs=1048576 count=1 && error "grace times were reset"
- rm -f $TESTFILE
- resetquota -u $TSTUSR
- $LFS setquota -t -u --block-grace $MAX_DQ_TIME --inode-grace $MAX_IQ_TIME $DIR
-
- set_blk_unitsz $((128 * 1024))
- set_blk_tunesz $((128 * 1024 / 2))
-}
-run_test_with_stat 30 "hard limit updates should not reset grace times ================"
-
-# test duplicate quota releases b=18630
-test_31() {
- mkdir -p $DIR/$tdir
- chmod 0777 $DIR/$tdir
-
- LIMIT=$(( $BUNIT_SZ * $(($OSTCOUNT + 1)) * 10)) # 10 bunits each sever
- TESTFILE="$DIR/$tdir/$tfile-0"
- TESTFILE2="$DIR/$tdir/$tfile-1"
-
- wait_delete_completed
-
- log " User quota (limit: $LIMIT kbytes)"
- $LFS setquota -u $TSTUSR -b 0 -B $LIMIT -i 0 -I 0 $DIR
-
- $LFS setstripe $TESTFILE -i 0 -c 1
- chown $TSTUSR.$TSTUSR $TESTFILE
- $LFS setstripe $TESTFILE2 -i 0 -c 1
- chown $TSTUSR.$TSTUSR $TESTFILE2
-
- log " step1: write out of block quota ..."
- $RUNAS dd if=/dev/zero of=$TESTFILE bs=$BLK_SZ count=5120
- $RUNAS dd if=/dev/zero of=$TESTFILE2 bs=$BLK_SZ count=5120
-
- #define OBD_FAIL_QUOTA_DELAY_SD 0xA04
- #define OBD_FAIL_SOME 0x10000000 /* fail N times */
- lustre_fail ost $((0x00000A04 | 0x10000000)) 1
-
- log " step2: delete two files so that triggering duplicate quota release ..."
- rm -f $TESTFILE $TESTFILE2
- sync; sleep 5; sync # OBD_FAIL_QUOTA_DELAY_SD will delay for 5 seconds
- wait_delete_completed
-
- log " step3: verify if the ost failed"
- do_facet ost1 dmesg > $TMP/lustre-log-${TESTNAME}.log
- watchdog=`awk '/test 31/ {start = 1;}
- /release quota error/ {
- if (start) {
- print;
- }
- }' $TMP/lustre-log-${TESTNAME}.log`
- [ "$watchdog" ] && error "$watchdog"
- rm -f $TMP/lustre-log-${TESTNAME}.log
-
- lustre_fail ost 0
- resetquota -u $TSTUSR
-}
-run_test_with_stat 31 "test duplicate quota releases ==="
+ echo "Write file..."
+ $RUNAS $DD of=$DIR/$tdir/$tfile count=$BLK_CNT 2>/dev/null ||
+ error "write failed"
+ cancel_lru_locks osc
+ sync; sync_all_data || true
+
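+ # Usage accounting is stored on disk by the quota slaves, so the values
+ # sampled here should survive a remount of the filesystem.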
+ echo "Save disk usage before restart"
+ local ORIG_USR_SPACE=$(getquota -u $TSTID global curspace)
+ [ $ORIG_USR_SPACE -eq 0 ] &&
+ error "Used space for user $TSTID is 0, expected ${BLK_CNT}M"
+ local ORIG_USR_INODES=$(getquota -u $TSTID global curinodes)
+ [ $ORIG_USR_INODES -eq 0 ] &&
+ error "Used inodes for user $TSTID is 0, expected 1"
+ echo "User $TSTID: ${ORIG_USR_SPACE}KB $ORIG_USR_INODES inodes"
+ local ORIG_GRP_SPACE=$(getquota -g $TSTID global curspace)
+ [ $ORIG_GRP_SPACE -eq 0 ] &&
+ error "Used space for group $TSTID is 0, expected ${BLK_CNT}M"
+ local ORIG_GRP_INODES=$(getquota -g $TSTID global curinodes)
+ [ $ORIG_GRP_INODES -eq 0 ] &&
+ error "Used inodes for group $TSTID is 0, expected 1"
+ echo "Group $TSTID: ${ORIG_GRP_SPACE}KB $ORIG_GRP_INODES inodes"
+
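+ # Remount without reformatting: REFORMAT is cleared temporarily so that
+ # cleanup_and_setup_lustre preserves the on-disk quota state.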
+ log "Restart..."
+ local ORIG_REFORMAT=$REFORMAT
+ REFORMAT=""
+ cleanup_and_setup_lustre
+ REFORMAT=$ORIG_REFORMAT
+ quota_init
+
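+ # No files were created or removed across the restart, so the usage read
+ # back now must match the values saved above exactly.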
+ echo "Verify disk usage after restart"
+ local USED=$(getquota -u $TSTID global curspace)
+ [ $USED -eq $ORIG_USR_SPACE ] ||
+ error "Used space for user $TSTID changed from " \
+ "$ORIG_USR_SPACE to $USED"
+ USED=$(getquota -u $TSTID global curinodes)
+ [ $USED -eq $ORIG_USR_INODES ] ||
+ error "Used inodes for user $TSTID changed from " \
+ "$ORIG_USR_INODES to $USED"
+ USED=$(getquota -g $TSTID global curspace)
+ [ $USED -eq $ORIG_GRP_SPACE ] ||
+ error "Used space for group $TSTID changed from " \
+ "$ORIG_GRP_SPACE to $USED"
+ USED=$(getquota -g $TSTID global curinodes)
+ [ $USED -eq $ORIG_GRP_INODES ] ||
+ error "Used inodes for group $TSTID changed from " \
+ "$ORIG_GRP_INODES to $USED"
+
+ # check that vfs_dq_init() is called before writing
+ echo "Append to the same file..."
+ $RUNAS $DD of=$DIR/$tdir/$tfile count=$BLK_CNT seek=1 2>/dev/null ||
+ error "write failed"
+ cancel_lru_locks osc
+ sync; sync_all_data || true
+
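+ # The appended data must show up in the accounting, proving quota usage
+ # is still updated for a file created before the restart.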
+ echo "Verify space usage is increased"
+ USED=$(getquota -u $TSTID global curspace)
+ [ $USED -gt $ORIG_USR_SPACE ] ||
+ error "Used space for user $TSTID isn't increased" \
+ "orig:$ORIG_USR_SPACE, now:$USED"
+ USED=$(getquota -g $TSTID global curspace)
+ [ $USED -gt $ORIG_GRP_SPACE ] ||
+ error "Used space for group $TSTID isn't increased" \
+ "orig:$ORIG_GRP_SPACE, now:$USED"
+
+ cleanup_quota_test
+}
+run_test 35 "Usage is still accessible across reboot"
+
+# test migrating old admin quota files (in Linux quota file format v2) into new
+# quota global index (in IAM format)
+test_36() {
+ [ $(facet_fstype $SINGLEMDS) != ldiskfs ] && \
+ skip "skipping migration test for non-ldiskfs MDT" && return
+
+ # get the mdt0 device name
+ local mdt0_node=$(facet_active_host $SINGLEMDS)
+ local mdt0_dev=$(mdsdevname ${SINGLEMDS//mds/})
+
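+ # Reformat first so the only quota state comes from the old-format admin
+ # files copied in below.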
+ echo "Reformat..."
+ formatall
+
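+ # Mount the MDT backend filesystem directly (with -o loop if the target
+ # is not a block device) and place the sample v2 admin quota files in
+ # OBJECTS, where the old quota code kept them.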
+ echo "Copy admin quota files into MDT0..."
+ local mntpt=$(facet_mntpt $SINGLEMDS)
+ local mdt0_fstype=$(facet_fstype $SINGLEMDS)
+ local opt
+ if ! do_node $mdt0_node test -b $mdt0_dev; then
+ opt="-o loop"
+ fi
+ echo "$mdt0_node, $mdt0_dev, $mntpt, $opt"
+ do_node $mdt0_node mount -t $mdt0_fstype $opt $mdt0_dev $mntpt
+ do_node $mdt0_node mkdir $mntpt/OBJECTS
+ do_node $mdt0_node cp $LUSTRE/tests/admin_quotafile_v2.usr $mntpt/OBJECTS
+ do_node $mdt0_node cp $LUSTRE/tests/admin_quotafile_v2.grp $mntpt/OBJECTS
+ do_node $mdt0_node umount -f $mntpt
+
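+ # Mounting the targets again should trigger migration of the v2 admin
+ # files into the new global indexes.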
+ echo "Setup all..."
+ setupall
+
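+ # The sample admin files are expected to yield 401 entries per global
+ # index, with known limits for test IDs 1 (inode) and 60001 (block).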
+ echo "Verify global limits..."
+ local id_cnt
+ local limit
+
+ local proc="qmt.*.md-0x0.glb-usr"
+ id_cnt=$(do_node $mdt0_node $LCTL get_param -n $proc | wc -l)
+ [ $id_cnt -eq 401 ] || error "Migrate inode user limit failed: $id_cnt"
+ limit=$(getquota -u 1 global isoftlimit)
+ [ $limit -eq 1024 ] || error "User inode softlimit: $limit"
+ limit=$(getquota -u 1 global ihardlimit)
+ [ $limit -eq 2048 ] || error "User inode hardlimit: $limit"
+
+ proc="qmt.*.md-0x0.glb-grp"
+ id_cnt=$(do_node $mdt0_node $LCTL get_param -n $proc | wc -l)
+ [ $id_cnt -eq 401 ] || error "Migrate inode group limit failed: $id_cnt"
+ limit=$(getquota -g 1 global isoftlimit)
+ [ $limit -eq 1024 ] || error "Group inode softlimit: $limit"
+ limit=$(getquota -g 1 global ihardlimit)
+ [ $limit -eq 2048 ] || error "Group inode hardlimit: $limit"
+
+ proc=" qmt.*.dt-0x0.glb-usr"
+ id_cnt=$(do_node $mdt0_node $LCTL get_param -n $proc | wc -l)
+ [ $id_cnt -eq 401 ] || error "Migrate block user limit failed: $id_cnt"
+ limit=$(getquota -u 60001 global bsoftlimit)
+ [ $limit -eq 10485760 ] || error "User block softlimit: $limit"
+ limit=$(getquota -u 60001 global bhardlimit)
+ [ $limit -eq 20971520 ] || error "User block hardlimit: $limit"
+
+ proc="qmt.*.dt-0x0.glb-grp"
+ id_cnt=$(do_node $mdt0_node $LCTL get_param -n $proc | wc -l)
+ [ $id_cnt -eq 401 ] || error "Migrate block user limit failed: $id_cnt"
+ limit=$(getquota -g 60001 global bsoftlimit)
+ [ $limit -eq 10485760 ] || error "Group block softlimit: $limit"
+ limit=$(getquota -g 60001 global bhardlimit)
+ [ $limit -eq 20971520 ] || error "Group block hardlimit: $limit"
+
+ echo "Cleanup..."
+ formatall
+ setupall
+}
+run_test 36 "Migrate old admin files into new global indexes"