/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
*
- * Copyright (c) 2012, 2015, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*
* Code originally extracted from quota directory
*/
}
}
-int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
+int osc_quota_setdq(struct client_obd *cli, __u64 xid, const unsigned int qid[],
u64 valid, u32 flags)
{
int type;
if ((valid & (OBD_MD_FLALLQUOTA)) == 0)
RETURN(0);
+ mutex_lock(&cli->cl_quota_mutex);
+ /* Replies may be processed out of order at the OST, so ignore a stale
+ * (older-xid) reply that would clear the edquot flags — but still honor
+ * it when it marks quota as exceeded (OBD_FL_NO_QUOTA_ALL), since the
+ * old request could have been processed after the new one at the OST.
+ * The side effect is that the following request will be processed
+ * synchronously, but this does not break quota enforcement. */
+ if (cli->cl_quota_last_xid > xid && !(flags & OBD_FL_NO_QUOTA_ALL))
+ GOTO(out_unlock, rc);
+
+ if (cli->cl_quota_last_xid < xid)
+ cli->cl_quota_last_xid = xid;
+
for (type = 0; type < LL_MAXQUOTAS; type++) {
struct osc_quota_info *oqi;
}
}
+out_unlock:
+ mutex_unlock(&cli->cl_quota_mutex);
RETURN(rc);
}
int i, type;
ENTRY;
+ mutex_init(&cli->cl_quota_mutex);
+
for (type = 0; type < LL_MAXQUOTAS; type++) {
cli->cl_quota_hash[type] = cfs_hash_create("QUOTA_HASH",
HASH_QUOTA_CUR_BITS,