Whamcloud - gitweb
git://git.whamcloud.com
/
fs
/
lustre-release.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
LU-13600 ptlrpc: limit rate of lock replays
[fs/lustre-release.git]
/
lustre
/
quota
/
qsd_handler.c
diff --git
a/lustre/quota/qsd_handler.c
b/lustre/quota/qsd_handler.c
index
0982e93
..
02ebad1
100644
(file)
--- a/
lustre/quota/qsd_handler.c
+++ b/
lustre/quota/qsd_handler.c
@@
-21,7
+21,7
@@
* GPL HEADER END
*/
/*
* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 201
4
, Intel Corporation.
+ * Copyright (c) 2012, 201
7
, Intel Corporation.
* Use is subject to license terms.
*
* Author: Johann Lombardi <johann.lombardi@intel.com>
* Use is subject to license terms.
*
* Author: Johann Lombardi <johann.lombardi@intel.com>
@@
-402,7
+402,7
@@
out:
adjust = qsd_adjust_needed(lqe);
if (reqbody && req_is_acq(reqbody->qb_flags) && ret != -EDQUOT) {
lqe->lqe_acq_rc = ret;
adjust = qsd_adjust_needed(lqe);
if (reqbody && req_is_acq(reqbody->qb_flags) && ret != -EDQUOT) {
lqe->lqe_acq_rc = ret;
- lqe->lqe_acq_time =
cfs_time_current_64
();
+ lqe->lqe_acq_time =
ktime_get_seconds
();
}
out_noadjust:
qsd_request_exit(lqe);
}
out_noadjust:
qsd_request_exit(lqe);
@@
-463,8
+463,7
@@
static int qsd_acquire_local(struct lquota_entry *lqe, __u64 space)
* sometimes due to the race reply of dqacq vs. id lock glimpse
* (see LU-4505), so we revalidate it every 5 seconds. */
} else if (lqe->lqe_edquot &&
* sometimes due to the race reply of dqacq vs. id lock glimpse
* (see LU-4505), so we revalidate it every 5 seconds. */
} else if (lqe->lqe_edquot &&
- cfs_time_before_64(cfs_time_shift_64(-5),
- lqe->lqe_edquot_time)) {
+ (lqe->lqe_edquot_time > ktime_get_seconds() - 5)) {
rc = -EDQUOT;
}else {
rc = -EAGAIN;
rc = -EDQUOT;
}else {
rc = -EAGAIN;
@@
-563,7
+562,7
@@
static int qsd_acquire_remote(const struct lu_env *env,
/* check whether an acquire request completed recently */
if (lqe->lqe_acq_rc != 0 &&
/* check whether an acquire request completed recently */
if (lqe->lqe_acq_rc != 0 &&
-
cfs_time_before_64(cfs_time_shift_64(-1), lqe->lqe_acq_time)
) {
+
lqe->lqe_acq_time > ktime_get_seconds() - 1
) {
lqe_write_unlock(lqe);
LQUOTA_DEBUG(lqe, "using cached return code %d", lqe->lqe_acq_rc);
RETURN(lqe->lqe_acq_rc);
lqe_write_unlock(lqe);
LQUOTA_DEBUG(lqe, "using cached return code %d", lqe->lqe_acq_rc);
RETURN(lqe->lqe_acq_rc);
@@
-653,6
+652,13
@@
static bool qsd_acquire(const struct lu_env *env, struct lquota_entry *lqe,
* rc < 0, something bad happened */
break;
* rc < 0, something bad happened */
break;
+ /* if we have gotten some quota and still wait more quota,
+ * it's better to give QMT some time to reclaim from clients */
+ if (count > 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
+
/* need to acquire more quota space from master */
rc = qsd_acquire_remote(env, lqe);
}
/* need to acquire more quota space from master */
rc = qsd_acquire_remote(env, lqe);
}
@@
-685,11
+691,12
@@
static bool qsd_acquire(const struct lu_env *env, struct lquota_entry *lqe,
*/
static int qsd_op_begin0(const struct lu_env *env, struct qsd_qtype_info *qqi,
struct lquota_id_info *qid, long long space,
*/
static int qsd_op_begin0(const struct lu_env *env, struct qsd_qtype_info *qqi,
struct lquota_id_info *qid, long long space,
-
int *
flags)
+
enum osd_quota_local_flags *local_
flags)
{
{
- struct lquota_entry *lqe;
- int rc, ret = -EINPROGRESS;
- struct l_wait_info lwi;
+ struct lquota_entry *lqe;
+ struct l_wait_info lwi;
+ enum osd_quota_local_flags qtype_flag = 0;
+ int rc, ret = -EINPROGRESS;
ENTRY;
if (qid->lqi_qentry != NULL) {
ENTRY;
if (qid->lqi_qentry != NULL) {
@@
-715,7
+722,7
@@
static int qsd_op_begin0(const struct lu_env *env, struct qsd_qtype_info *qqi,
* quota space. That said, we still want to perform space
* adjustments in qsd_op_end, so we return here, but with
* a reference on the lqe */
* quota space. That said, we still want to perform space
* adjustments in qsd_op_end, so we return here, but with
* a reference on the lqe */
- if (flags != NULL) {
+ if (
local_
flags != NULL) {
rc = qsd_refresh_usage(env, lqe);
GOTO(out_flags, rc);
}
rc = qsd_refresh_usage(env, lqe);
GOTO(out_flags, rc);
}
@@
-746,10
+753,10
@@
static int qsd_op_begin0(const struct lu_env *env, struct qsd_qtype_info *qqi,
lqe_write_lock(lqe);
lqe->lqe_waiting_write -= space;
lqe_write_lock(lqe);
lqe->lqe_waiting_write -= space;
- if (flags && lqe->lqe_pending_write != 0)
+ if (
local_
flags && lqe->lqe_pending_write != 0)
/* Inform OSD layer that there are pending writes.
* It might want to retry after a sync if appropriate */
/* Inform OSD layer that there are pending writes.
* It might want to retry after a sync if appropriate */
- *flags |= QUOTA_FL_SYNC;
+ *
local_
flags |= QUOTA_FL_SYNC;
lqe_write_unlock(lqe);
/* convert recoverable error into -EINPROGRESS, client will
lqe_write_unlock(lqe);
/* convert recoverable error into -EINPROGRESS, client will
@@
-768,25
+775,29
@@
static int qsd_op_begin0(const struct lu_env *env, struct qsd_qtype_info *qqi,
}
}
}
}
- if (flags != NULL) {
+ if (
local_
flags != NULL) {
out_flags:
LASSERT(qid->lqi_is_blk);
if (rc != 0) {
out_flags:
LASSERT(qid->lqi_is_blk);
if (rc != 0) {
- *
flags |= LQUOTA_OVER_FL
(qqi->qqi_qtype);
+ *
local_flags |= lquota_over_fl
(qqi->qqi_qtype);
} else {
__u64 usage;
lqe_read_lock(lqe);
} else {
__u64 usage;
lqe_read_lock(lqe);
- usage = lqe->lqe_usage;
- usage += lqe->lqe_pending_write;
+ usage = lqe->lqe_pending_write;
usage += lqe->lqe_waiting_write;
usage += lqe->lqe_waiting_write;
- usage += qqi->qqi_qsd->qsd_sync_threshold;
+ if (lqe->lqe_qunit != 0 && (usage % lqe->lqe_qunit >
+ qqi->qqi_qsd->qsd_sync_threshold))
+ usage += qqi->qqi_qsd->qsd_sync_threshold;
+
+ usage += lqe->lqe_usage;
+ qtype_flag = lquota_over_fl(qqi->qqi_qtype);
/* if we should notify client to start sync write */
if (usage >= lqe->lqe_granted - lqe->lqe_pending_rel)
/* if we should notify client to start sync write */
if (usage >= lqe->lqe_granted - lqe->lqe_pending_rel)
- *
flags |= LQUOTA_OVER_FL(qqi->qqi_qtype)
;
+ *
local_flags |= qtype_flag
;
else
else
- *
flags &= ~LQUOTA_OVER_FL(qqi->qqi_qtype)
;
+ *
local_flags &= ~qtype_flag
;
lqe_read_unlock(lqe);
}
}
lqe_read_unlock(lqe);
}
}
@@
-799,7
+810,7
@@
out_flags:
static inline bool qid_equal(struct lquota_id_info *q1,
struct lquota_id_info *q2)
{
static inline bool qid_equal(struct lquota_id_info *q1,
struct lquota_id_info *q2)
{
- if (q1->lqi_type != q2->lqi_type)
+ if (q1->lqi_
is_blk != q2->lqi_is_blk || q1->lqi_
type != q2->lqi_type)
return false;
return (q1->lqi_id.qid_uid == q2->lqi_id.qid_uid) ? true : false;
}
return false;
return (q1->lqi_id.qid_uid == q2->lqi_id.qid_uid) ? true : false;
}
@@
-824,7
+835,7
@@
static inline bool qid_equal(struct lquota_id_info *q1,
*/
int qsd_op_begin(const struct lu_env *env, struct qsd_instance *qsd,
struct lquota_trans *trans, struct lquota_id_info *qi,
*/
int qsd_op_begin(const struct lu_env *env, struct qsd_instance *qsd,
struct lquota_trans *trans, struct lquota_id_info *qi,
-
int *
flags)
+
enum osd_quota_local_flags *local_
flags)
{
int i, rc;
bool found = false;
{
int i, rc;
bool found = false;
@@
-854,7
+865,7
@@
int qsd_op_begin(const struct lu_env *env, struct qsd_instance *qsd,
* or - the user/group is root
* or - quota accounting isn't enabled */
if (!qsd_type_enabled(qsd, qi->lqi_type) || qi->lqi_id.qid_uid == 0 ||
* or - the user/group is root
* or - quota accounting isn't enabled */
if (!qsd_type_enabled(qsd, qi->lqi_type) || qi->lqi_id.qid_uid == 0 ||
-
qsd->qsd
_acct_failed)
+
(qsd->qsd_type_array[qi->lqi_type])->qqi
_acct_failed)
RETURN(0);
LASSERTF(trans->lqt_id_cnt <= QUOTA_MAX_TRANSIDS, "id_cnt=%d\n",
RETURN(0);
LASSERTF(trans->lqt_id_cnt <= QUOTA_MAX_TRANSIDS, "id_cnt=%d\n",
@@
-863,8
+874,6
@@
int qsd_op_begin(const struct lu_env *env, struct qsd_instance *qsd,
for (i = 0; i < trans->lqt_id_cnt; i++) {
if (qid_equal(qi, &trans->lqt_ids[i])) {
found = true;
for (i = 0; i < trans->lqt_id_cnt; i++) {
if (qid_equal(qi, &trans->lqt_ids[i])) {
found = true;
- /* make sure we are not mixing inodes & blocks */
- LASSERT(trans->lqt_ids[i].lqi_is_blk == qi->lqi_is_blk);
break;
}
}
break;
}
}
@@
-885,7
+894,7
@@
int qsd_op_begin(const struct lu_env *env, struct qsd_instance *qsd,
/* manage quota enforcement for this ID */
rc = qsd_op_begin0(env, qsd->qsd_type_array[qi->lqi_type],
/* manage quota enforcement for this ID */
rc = qsd_op_begin0(env, qsd->qsd_type_array[qi->lqi_type],
- &trans->lqt_ids[i], qi->lqi_space, flags);
+ &trans->lqt_ids[i], qi->lqi_space,
local_
flags);
RETURN(rc);
}
EXPORT_SYMBOL(qsd_op_begin);
RETURN(rc);
}
EXPORT_SYMBOL(qsd_op_begin);