* GPL HEADER END
*/
/*
 * Copyright (c) 2012, Intel Corporation.
* Use is subject to license terms.
*/
#include <obd.h>
-#include <lquota.h>
+#include <lustre_quota.h>
#ifndef _LQUOTA_INTERNAL_H
#define _LQUOTA_INTERNAL_H
/* r/w semaphore used to protect concurrent access to the quota
* parameters which are stored on disk */
- cfs_rw_semaphore_t lme_sem;
+ struct rw_semaphore lme_sem;
/* quota space that may be released after glimpse */
__u64 lme_may_rel;
unsigned int lse_pending_req;
/* rw spinlock protecting in-memory counters (i.e. lse_pending*) */
- cfs_rwlock_t lse_lock;
+ rwlock_t lse_lock;
/* waiter for pending request done */
cfs_waitq_t lse_waiters;
/* time to trigger quota adjust */
__u64 lse_adjust_time;
+
+ /* return code of latest acquire RPC */
+ int lse_acq_rc;
+
+ /* when latest acquire RPC completed */
+ __u64 lse_acq_time;
};
/* In-memory entry for each enforced quota id
#define lqe_usage u.se.lse_usage
#define lqe_adjust_time u.se.lse_adjust_time
#define lqe_lockh u.se.lse_lockh
+#define lqe_acq_rc u.se.lse_acq_rc
+#define lqe_acq_time u.se.lse_acq_time
#define LQUOTA_BUMP_VER 0x1
#define LQUOTA_SET_VER 0x2
/**
 * Take the per-entry lock in write (exclusive) mode.
 *
 * On the quota master the entry state lives behind an rw_semaphore
 * (lqe_sem) — per the field comment it guards quota parameters stored
 * on disk, so the holder may sleep.  Otherwise a non-sleeping rwlock
 * (lqe_lock) protects the in-memory counters.
 * NOTE(review): master/slave split inferred from lqe_is_master() —
 * confirm against struct lquota_entry's union layout.
 *
 * \param lqe  quota entry to lock
 */
static inline void lqe_write_lock(struct lquota_entry *lqe)
{
	if (lqe_is_master(lqe))
		down_write(&lqe->lqe_sem);
	else
		write_lock(&lqe->lqe_lock);
}
/**
 * Release the per-entry lock taken by lqe_write_lock().
 *
 * Must mirror the lock side exactly: rw_semaphore (lqe_sem) on the
 * master, rwlock (lqe_lock) otherwise.
 *
 * \param lqe  quota entry to unlock
 */
static inline void lqe_write_unlock(struct lquota_entry *lqe)
{
	if (lqe_is_master(lqe))
		up_write(&lqe->lqe_sem);
	else
		write_unlock(&lqe->lqe_lock);
}
/**
 * Take the per-entry lock in read (shared) mode.
 *
 * Sleeping rw_semaphore (lqe_sem) on the quota master, non-sleeping
 * rwlock (lqe_lock) otherwise — same split as lqe_write_lock().
 *
 * \param lqe  quota entry to lock
 */
static inline void lqe_read_lock(struct lquota_entry *lqe)
{
	if (lqe_is_master(lqe))
		down_read(&lqe->lqe_sem);
	else
		read_lock(&lqe->lqe_lock);
}
/**
 * Release the per-entry lock taken by lqe_read_lock().
 *
 * Must mirror the lock side exactly: rw_semaphore (lqe_sem) on the
 * master, rwlock (lqe_lock) otherwise.
 *
 * \param lqe  quota entry to unlock
 */
static inline void lqe_read_unlock(struct lquota_entry *lqe)
{
	if (lqe_is_master(lqe))
		up_read(&lqe->lqe_sem);
	else
		read_unlock(&lqe->lqe_lock);
}
/*
/* Smallest qunit for a resource type: 1K inodes for metadata, otherwise
 * the quota-block equivalent of the maximum bulk RPC size.
 * NOTE(review): callers must pass a side-effect-free 'type' expression —
 * it is evaluated once per macro but unparenthesized; confirm callers. */
#define LQUOTA_LEAST_QUNIT(type) \
	(type == LQUOTA_RES_MD ? (1 << 10) : toqb(PTLRPC_MAX_BRW_SIZE))

/* Map a quota type (USRQUOTA/GRPQUOTA) to its "over quota" flag. */
#define LQUOTA_OVER_FL(type) \
	(type == USRQUOTA ? QUOTA_FL_OVER_USRQUOTA : QUOTA_FL_OVER_GRPQUOTA)

/* Common data shared by quota-level handlers. This is allocated per-thread to
* reduce stack consumption */
struct lquota_thread_info {