/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*
* Code originally extracted from quota directory
*/
#include <obd.h>
+#include <lustre_osc.h>
+
#include "osc_internal.h"
-static inline struct osc_quota_info *osc_oqi_alloc(obd_uid id)
+static inline struct osc_quota_info *osc_oqi_alloc(u32 id)
{
struct osc_quota_info *oqi;
int type;
ENTRY;
- for (type = 0; type < MAXQUOTAS; type++) {
+ for (type = 0; type < LL_MAXQUOTAS; type++) {
struct osc_quota_info *oqi;
oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
RETURN(QUOTA_OK);
}
-#define MD_QUOTA_FLAG(type) ((type == USRQUOTA) ? OBD_MD_FLUSRQUOTA \
- : OBD_MD_FLGRPQUOTA)
-#define FL_QUOTA_FLAG(type) ((type == USRQUOTA) ? OBD_FL_NO_USRQUOTA \
- : OBD_FL_NO_GRPQUOTA)
+/* Map a quota type (USRQUOTA/GRPQUOTA/PRJQUOTA) to the corresponding
+ * OBD_MD_FL*QUOTA "valid" bit; returns 0 for an unrecognized type.
+ * Replaces the old MD_QUOTA_FLAG() macro, extended for project quota. */
+static inline u32 md_quota_flag(int qtype)
+{
+	switch (qtype) {
+	case USRQUOTA:
+		return OBD_MD_FLUSRQUOTA;
+	case GRPQUOTA:
+		return OBD_MD_FLGRPQUOTA;
+	case PRJQUOTA:
+		return OBD_MD_FLPRJQUOTA;
+	default:
+		return 0;
+	}
+}
-int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
- obd_flag valid, obd_flag flags)
+/* Map a quota type (USRQUOTA/GRPQUOTA/PRJQUOTA) to the corresponding
+ * OBD_FL_NO_*QUOTA flag bit; returns 0 for an unrecognized type.
+ * Replaces the old FL_QUOTA_FLAG() macro, extended for project quota. */
+static inline u32 fl_quota_flag(int qtype)
+{
+	switch (qtype) {
+	case USRQUOTA:
+		return OBD_FL_NO_USRQUOTA;
+	case GRPQUOTA:
+		return OBD_FL_NO_GRPQUOTA;
+	case PRJQUOTA:
+		return OBD_FL_NO_PRJQUOTA;
+	default:
+		return 0;
+	}
+}
+
+int osc_quota_setdq(struct client_obd *cli, __u64 xid, const unsigned int qid[],
+ u64 valid, u32 flags)
{
int type;
int rc = 0;
+
ENTRY;
- if ((valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) == 0)
+ if ((valid & (OBD_MD_FLALLQUOTA)) == 0)
RETURN(0);
- for (type = 0; type < MAXQUOTAS; type++) {
+ mutex_lock(&cli->cl_quota_mutex);
+ /* Still mark the quota as running out for the old request, because it
+ * could be processed after the new request at the OST; the side effect
+ * is that subsequent requests will be processed synchronously, but
+ * this does not break quota enforcement. */
+ if (cli->cl_quota_last_xid > xid && !(flags & OBD_FL_NO_QUOTA_ALL))
+ GOTO(out_unlock, rc);
+
+ if (cli->cl_quota_last_xid < xid)
+ cli->cl_quota_last_xid = xid;
+
+ for (type = 0; type < LL_MAXQUOTAS; type++) {
struct osc_quota_info *oqi;
- if ((valid & MD_QUOTA_FLAG(type)) == 0)
+ if ((valid & md_quota_flag(type)) == 0)
continue;
/* lookup the ID in the per-type hash table */
oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
- if ((flags & FL_QUOTA_FLAG(type)) != 0) {
+ if ((flags & fl_quota_flag(type)) != 0) {
/* This ID is getting close to its quota limit, let's
* switch to sync I/O */
if (oqi != NULL)
}
CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n",
- cli->cl_import->imp_obd->obd_name,
- type == USRQUOTA ? "user" : "group",
- qid[type], rc);
+ cli_name(cli), qtype_name(type), qid[type], rc);
} else {
/* This ID is now off the hook, let's remove it from
* the hash table */
OBD_SLAB_FREE_PTR(oqi, osc_quota_kmem);
CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n",
- cli->cl_import->imp_obd->obd_name,
- type == USRQUOTA ? "user" : "group",
- qid[type], oqi);
+ cli_name(cli), qtype_name(type), qid[type], oqi);
}
}
+out_unlock:
+ mutex_unlock(&cli->cl_quota_mutex);
RETURN(rc);
}
* Hash operations for uid/gid <-> osc_quota_info
*/
static unsigned
-oqi_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
{
return cfs_hash_u32_hash(*((__u32*)key), mask);
}
oqi_keycmp(const void *key, struct hlist_node *hnode)
{
struct osc_quota_info *oqi;
- obd_uid uid;
+ u32 uid;
LASSERT(key != NULL);
- uid = *((obd_uid*)key);
+ uid = *((u32 *)key);
oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
return uid == oqi->oqi_id;
}
static void
-oqi_get(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
}
static void
-oqi_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
}
static void
-oqi_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct osc_quota_info *oqi;
#define HASH_QUOTA_CUR_BITS 5
#define HASH_QUOTA_MAX_BITS 15
-static cfs_hash_ops_t quota_hash_ops = {
+static struct cfs_hash_ops quota_hash_ops = {
.hs_hash = oqi_hashfn,
.hs_keycmp = oqi_keycmp,
.hs_key = oqi_key,
int i, type;
ENTRY;
- for (type = 0; type < MAXQUOTAS; type++) {
+ mutex_init(&cli->cl_quota_mutex);
+
+ for (type = 0; type < LL_MAXQUOTAS; type++) {
cli->cl_quota_hash[type] = cfs_hash_create("QUOTA_HASH",
HASH_QUOTA_CUR_BITS,
HASH_QUOTA_MAX_BITS,
break;
}
- if (type == MAXQUOTAS)
+ if (type == LL_MAXQUOTAS)
RETURN(0);
for (i = 0; i < type; i++)
int type;
ENTRY;
- for (type = 0; type < MAXQUOTAS; type++)
+ for (type = 0; type < LL_MAXQUOTAS; type++)
cfs_hash_putref(cli->cl_quota_hash[type]);
RETURN(0);
RETURN(rc);
}
-
-int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct ptlrpc_request *req;
- struct obd_quotactl *body;
- int rc;
- ENTRY;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
- OST_QUOTACHECK);
- if (req == NULL)
- RETURN(-ENOMEM);
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *body = *oqctl;
-
- ptlrpc_request_set_replen(req);
-
- /* the next poll will find -ENODATA, that means quotacheck is
- * going on */
- cli->cl_qchk_stat = -ENODATA;
- rc = ptlrpc_queue_wait(req);
- if (rc)
- cli->cl_qchk_stat = rc;
- ptlrpc_req_finished(req);
- RETURN(rc);
-}
-
-int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- int rc;
- ENTRY;
-
- qchk->obd_uuid = cli->cl_target_uuid;
- memcpy(qchk->obd_type, LUSTRE_OST_NAME, strlen(LUSTRE_OST_NAME));
-
- rc = cli->cl_qchk_stat;
- /* the client is not the previous one */
- if (rc == CL_NOT_QUOTACHECKED)
- rc = -EINTR;
- RETURN(rc);
-}