+ setup_quota_test
+ trap cleanup_quota_test EXIT
+
+ echo "Write file..."
+ $RUNAS $DD of=$DIR/$tdir/$tfile count=$BLK_CNT 2>/dev/null ||
+ error "write failed"
+ cancel_lru_locks osc
+ sync; sync_all_data || true
+
+ echo "Save disk usage before restart"
+ local ORIG_USR_SPACE=$(getquota -u $TSTID global curspace)
+ [ $ORIG_USR_SPACE -eq 0 ] &&
+ error "Used space for user $TSTID is 0, expected ${BLK_CNT}M"
+ local ORIG_USR_INODES=$(getquota -u $TSTID global curinodes)
+ [ $ORIG_USR_INODES -eq 0 ] &&
+ error "Used inodes for user $TSTID is 0, expected 1"
+ echo "User $TSTID: ${ORIG_USR_SPACE}KB $ORIG_USR_INODES inodes"
+ local ORIG_GRP_SPACE=$(getquota -g $TSTID global curspace)
+ [ $ORIG_GRP_SPACE -eq 0 ] &&
+ error "Used space for group $TSTID is 0, expected ${BLK_CNT}M"
+ local ORIG_GRP_INODES=$(getquota -g $TSTID global curinodes)
+ [ $ORIG_GRP_INODES -eq 0 ] &&
+ error "Used inodes for group $TSTID is 0, expected 1"
+ echo "Group $TSTID: ${ORIG_GRP_SPACE}KB $ORIG_GRP_INODES inodes"
+
+ log "Restart..."
+ local ORIG_REFORMAT=$REFORMAT
+ REFORMAT=""
+ cleanup_and_setup_lustre
+ REFORMAT=$ORIG_REFORMAT
+ quota_init
+
+ echo "Verify disk usage after restart"
+ local USED=$(getquota -u $TSTID global curspace)
+ [ $USED -eq $ORIG_USR_SPACE ] ||
+ error "Used space for user $TSTID changed from " \
+ "$ORIG_USR_SPACE to $USED"
+ USED=$(getquota -u $TSTID global curinodes)
+ [ $USED -eq $ORIG_USR_INODES ] ||
+ error "Used inodes for user $TSTID changed from " \
+ "$ORIG_USR_INODES to $USED"
+ USED=$(getquota -g $TSTID global curspace)
+ [ $USED -eq $ORIG_GRP_SPACE ] ||
+ error "Used space for group $TSTID changed from " \
+ "$ORIG_GRP_SPACE to $USED"
+ USED=$(getquota -g $TSTID global curinodes)
+ [ $USED -eq $ORIG_GRP_INODES ] ||
+ error "Used inodes for group $TSTID changed from " \
+ "$ORIG_GRP_INODES to $USED"
+
+ # check if the vfs_dq_init() is called before writing
+ echo "Append to the same file..."
+ $RUNAS $DD of=$DIR/$tdir/$tfile count=$BLK_CNT seek=1 2>/dev/null ||
+ error "write failed"
+ cancel_lru_locks osc
+ sync; sync_all_data || true
+
+ echo "Verify space usage is increased"
+ USED=$(getquota -u $TSTID global curspace)
+ [ $USED -gt $ORIG_USR_SPACE ] ||
+ error "Used space for user $TSTID isn't increased" \
+ "orig:$ORIG_USR_SPACE, now:$USED"
+ USED=$(getquota -g $TSTID global curspace)
+ [ $USED -gt $ORIG_GRP_SPACE ] ||
+ error "Used space for group $TSTID isn't increased" \
+ "orig:$ORIG_GRP_SPACE, now:$USED"
+
+ cleanup_quota_test
+}
+run_test 35 "Usage is still accessible across reboot"
+
+# test migrating old admin quota files (in Linux quota file format v2) into new
+# quota global index (in IAM format)
+test_36() {
+ # Migration test: seed MDT0 with legacy v2 admin quota files, then verify
+ # that on setup the limits are migrated into the new per-target global
+ # quota indexes exposed under qmt.*.  (Function continues past this hunk.)
+ # Legacy admin quota files only exist on ldiskfs-backed MDTs.
+ [ $(facet_fstype $SINGLEMDS) != ldiskfs ] && \
+ skip "skipping migration test" && return
+
+ # get the mdt0 device name
+ local mdt0_node=$(facet_active_host $SINGLEMDS)
+ local mdt0_dev=$(mdsdevname ${SINGLEMDS//mds/})
+
+ echo "Reformat..."
+ formatall
+
+ echo "Copy admin quota files into MDT0..."
+ # Mount the MDT device directly (as ldiskfs) and drop the canned v2
+ # quota files into OBJECTS/ where the migration code expects them.
+ local mntpt=$(facet_mntpt $SINGLEMDS)
+ local mdt0_fstype=$(facet_fstype $SINGLEMDS)
+ local opt
+ # NOTE(review): 'test -b $mdt0_fstype' tests the fstype *string* (e.g.
+ # "ldiskfs"), which is never a block device, so "-o loop" is always
+ # added. Presumably this should be 'test -b $mdt0_dev' — TODO confirm.
+ if ! do_node $mdt0_node test -b $mdt0_fstype; then
+ opt="-o loop"
+ fi
+ echo "$mdt0_node, $mdt0_dev, $mntpt, $opt"
+ do_node $mdt0_node mount -t $mdt0_fstype $opt $mdt0_dev $mntpt
+ do_node $mdt0_node mkdir $mntpt/OBJECTS
+ do_node $mdt0_node cp $LUSTRE/tests/admin_quotafile_v2.usr $mntpt/OBJECTS
+ do_node $mdt0_node cp $LUSTRE/tests/admin_quotafile_v2.grp $mntpt/OBJECTS
+ do_node $mdt0_node umount -f $mntpt
+
+ echo "Setup all..."
+ # Mounting the targets triggers the quota file migration.
+ setupall
+
+ echo "Verify global limits..."
+ local id_cnt
+ local limit
+
+ # The canned quota files contain 403 ids; spot-check id 1 (inodes)
+ # and id 60001 (blocks) against their known limits.
+ local proc="qmt.*.md-0x0.glb-usr"
+ id_cnt=$(do_node $mdt0_node $LCTL get_param -n $proc | wc -l)
+ [ $id_cnt -eq 403 ] || error "Migrate inode user limit failed: $id_cnt"
+ limit=$(getquota -u 1 global isoftlimit)
+ [ $limit -eq 1024 ] || error "User inode softlimit: $limit"
+ limit=$(getquota -u 1 global ihardlimit)
+ [ $limit -eq 2048 ] || error "User inode hardlimit: $limit"
+
+ proc="qmt.*.md-0x0.glb-grp"
+ id_cnt=$(do_node $mdt0_node $LCTL get_param -n $proc | wc -l)
+ [ $id_cnt -eq 403 ] || error "Migrate inode group limit failed: $id_cnt"
+ limit=$(getquota -g 1 global isoftlimit)
+ [ $limit -eq 1024 ] || error "Group inode softlimit: $limit"
+ limit=$(getquota -g 1 global ihardlimit)
+ [ $limit -eq 2048 ] || error "Group inode hardlimit: $limit"
+
+ # NOTE(review): stray leading space inside the pattern; it only works
+ # because $proc is expanded unquoted (word splitting strips it).
+ # Consider removing the space for consistency with the other patterns.
+ proc=" qmt.*.dt-0x0.glb-usr"
+ id_cnt=$(do_node $mdt0_node $LCTL get_param -n $proc | wc -l)
+ [ $id_cnt -eq 403 ] || error "Migrate block user limit failed: $id_cnt"
+ limit=$(getquota -u 60001 global bsoftlimit)
+ [ $limit -eq 10485760 ] || error "User block softlimit: $limit"
+ limit=$(getquota -u 60001 global bhardlimit)
+ [ $limit -eq 20971520 ] || error "User block hardlimit: $limit"
+
+ proc="qmt.*.dt-0x0.glb-grp"
+ id_cnt=$(do_node $mdt0_node $LCTL get_param -n $proc | wc -l)
+ # NOTE(review): message says "user" but this is the *group* block-limit
+ # check — looks like a copy-paste from the glb-usr branch above.
+ [ $id_cnt -eq 403 ] || error "Migrate block user limit failed: $id_cnt"
+ limit=$(getquota -g 60001 global bsoftlimit)
+ [ $limit -eq 10485760 ] || error "Group block softlimit: $limit"
+ limit=$(getquota -g 60001 global bhardlimit)
+ [ $limit -eq 20971520 ] || error "Group block hardlimit: $limit"
+
+ echo "Cleanup..."
+ # Reformat to discard the injected legacy quota files.
+ formatall
+ setupall