+ int i;
+ struct qunit_data qdata[MAXQUOTAS];
+ int mb = 0;
+ int rc = 0, rc2[2] = { 0, 0 };
+ ENTRY;
+
+ cfs_spin_lock(&qctxt->lqc_lock);
+ if (!qctxt->lqc_valid){
+ cfs_spin_unlock(&qctxt->lqc_lock);
+ RETURN(rc);
+ }
+ cfs_spin_unlock(&qctxt->lqc_lock);
+
+ for (i = 0; i < MAXQUOTAS; i++) {
+ struct lustre_qunit_size *lqs = NULL;
+
+ qdata[i].qd_id = id[i];
+ qdata[i].qd_flags = i;
+ if (isblk)
+ QDATA_SET_BLK(&qdata[i]);
+ qdata[i].qd_count = 0;
+
+ /* ignore root user */
+ if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
+ continue;
+
+ lqs = quota_search_lqs(LQS_KEY(i, id[i]), qctxt, 0);
+ if (lqs == NULL || IS_ERR(lqs))
+ continue;
+
+ if (IS_ERR(lqs)) {
+ CERROR("can not find lqs for check_common: "
+ "[id %u] [%c] [isblk %d] [count %d] [rc %ld]\n",
+ id[i], i % 2 ? 'g': 'u', isblk, count,
+ PTR_ERR(lqs));
+ RETURN(PTR_ERR(lqs));
+ }
+
+ rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
+ cfs_spin_lock(&lqs->lqs_lock);
+ if (!cycle) {
+ if (isblk) {
+ pending[i] = count * CFS_PAGE_SIZE;
+ /* in order to complete this write, we need extra
+ * meta blocks. This function can get it through
+ * data needed to be written b=16542 */
+ if (inode) {
+ mb = pending[i];
+ rc = fsfilt_get_mblk(obd, qctxt->lqc_sb,
+ &mb, inode,
+ frags);
+ if (rc)
+ CERROR("%s: can't get extra "
+ "meta blocks\n",
+ obd->obd_name);
+ else
+ pending[i] += mb;
+ }
+ lqs->lqs_bwrite_pending += pending[i];
+ } else {
+ pending[i] = count;
+ lqs->lqs_iwrite_pending += pending[i];
+ }
+ }
+
+ /* if xx_rec < 0, that means quota are releasing,
+ * and it may return before we use quota. So if
+ * we find this situation, we assuming it has
+ * returned b=18491 */
+ if (isblk && lqs->lqs_blk_rec < 0) {
+ if (qdata[i].qd_count < -lqs->lqs_blk_rec)
+ qdata[i].qd_count = 0;
+ else
+ qdata[i].qd_count += lqs->lqs_blk_rec;
+ }
+ if (!isblk && lqs->lqs_ino_rec < 0) {
+ if (qdata[i].qd_count < -lqs->lqs_ino_rec)
+ qdata[i].qd_count = 0;
+ else
+ qdata[i].qd_count += lqs->lqs_ino_rec;
+ }
+
+ CDEBUG(D_QUOTA, "[id %u] [%c] [isblk %d] [count %d]"
+ " [lqs pending: %lu] [qd_count: "LPU64"] [metablocks: %d]"
+ " [pending: %d]\n", id[i], i % 2 ? 'g': 'u', isblk, count,
+ isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
+ qdata[i].qd_count, mb, pending[i]);
+ if (rc2[i] == QUOTA_RET_OK) {
+ if (isblk && qdata[i].qd_count < lqs->lqs_bwrite_pending)
+ rc2[i] = QUOTA_RET_ACQUOTA;
+ if (!isblk && qdata[i].qd_count <
+ lqs->lqs_iwrite_pending)
+ rc2[i] = QUOTA_RET_ACQUOTA;
+ }
+
+ cfs_spin_unlock(&lqs->lqs_lock);
+
+ if (lqs->lqs_blk_rec < 0 &&
+ qdata[i].qd_count <
+ lqs->lqs_bwrite_pending - lqs->lqs_blk_rec - mb)
+ OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);
+
+ /* When cycle is zero, lqs_*_pending will be changed. We will
+ * get reference of the lqs here and put reference of lqs in
+ * quota_pending_commit b=14784 */
+ if (!cycle)
+ lqs_getref(lqs);
+
+ /* this is for quota_search_lqs */
+ lqs_putref(lqs);
+ }
+
+ if (rc2[0] == QUOTA_RET_ACQUOTA || rc2[1] == QUOTA_RET_ACQUOTA)
+ RETURN(QUOTA_RET_ACQUOTA);
+ else
+ RETURN(rc);
+}
+
+/**
+ * quota_is_set - test whether any per-id lqs entry has the given flag set.
+ * @obd:  obd device whose quota context (obt_qctxt) is searched
+ * @id:   per-quota-type identifiers, indexed 0..MAXQUOTAS-1
+ *        (presumably uid/gid per slot — matches LQS_KEY usage; confirm)
+ * @flag: lqs_flags bit(s) to test for
+ *
+ * Returns 1 if at least one valid lqs entry for the ids carries @flag,
+ * 0 otherwise (including when no quota type is active on the superblock).
+ *
+ * Fix vs. previous version: the early-exit path used the Lustre RETURN()
+ * exit-tracing macro, which must be paired with an ENTRY this function
+ * never issues, while the normal path used a plain return — producing
+ * unbalanced debug traces.  Both paths now use plain return.
+ */
+int quota_is_set(struct obd_device *obd, const unsigned int id[], int flag)
+{
+        struct lustre_qunit_size *lqs;
+        int i, q_set = 0;
+
+        /* nothing can be set when no quota type is active on this sb */
+        if (!ll_sb_any_quota_active(obd->u.obt.obt_qctxt.lqc_sb))
+                return 0;
+
+        for (i = 0; i < MAXQUOTAS; i++) {
+                lqs = quota_search_lqs(LQS_KEY(i, id[i]),
+                                       &obd->u.obt.obt_qctxt, 0);
+                /* lookup may yield NULL or an ERR_PTR; only count valid
+                 * entries, and silently skip the rest as before */
+                if (lqs && !IS_ERR(lqs)) {
+                        if (lqs->lqs_flags & flag)
+                                q_set = 1;
+                        /* drop the reference taken by quota_search_lqs */
+                        lqs_putref(lqs);
+                }
+        }
+
+        return q_set;
+}
+
+static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
+ const unsigned int id[], int pending[],
+ int count, quota_acquire acquire,
+ struct obd_trans_info *oti, int isblk,
+ struct inode *inode, int frags)
+{
+ struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
+ struct timeval work_start;
+ struct timeval work_end;
+ long timediff;
+ struct l_wait_info lwi = { 0 };
+ int rc = 0, cycle = 0, count_err = 1;