This patch fixes a race condition in which a lock may be used again
after the LRU cancellation policy check has passed. In that case, the
lock may have locked or dirty pages, which renders the policy check
useless. The problem is solved by re-checking l_last_used at
cancellation time to verify that the lock has not been used in the
meantime.
Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Signed-off-by: Vitaly Fertman <vitaly_fertman@xyratex.com>
Change-Id: I3c7e2a3cc84fa9f549cf002e751018c022493606
Reviewed-on: http://review.whamcloud.com/12603
Tested-by: Jenkins
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
ldlm_desc_ast_t ast_type);
int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq);
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
ldlm_desc_ast_t ast_type);
int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq);
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock,
+ cfs_time_t last_use);
+#define ldlm_lock_remove_from_lru(lock) ldlm_lock_remove_from_lru_check(lock, 0)
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock);
void ldlm_lock_add_to_lru(struct ldlm_lock *lock);
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock);
void ldlm_lock_add_to_lru(struct ldlm_lock *lock);
/**
* Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
/**
* Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
+ *
+ * If \a last_use is non-zero, it will remove the lock from LRU only if
+ * it matches lock's l_last_used.
+ *
+ * \retval 0 if \a last_use is set, the lock is not in LRU list or \a last_use
+ * doesn't match lock's l_last_used;
+ * otherwise, the lock hasn't been in the LRU list.
+ * \retval 1 the lock was in LRU list and removed.
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, cfs_time_t last_use)
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
ENTRY;
if (ldlm_is_ns_srv(lock)) {
ENTRY;
if (ldlm_is_ns_srv(lock)) {
}
spin_lock(&ns->ns_lock);
}
spin_lock(&ns->ns_lock);
- rc = ldlm_lock_remove_from_lru_nolock(lock);
+ if (last_use == 0 || last_use == lock->l_last_used)
+ rc = ldlm_lock_remove_from_lru_nolock(lock);
spin_unlock(&ns->ns_lock);
spin_unlock(&ns->ns_lock);
while (!list_empty(&ns->ns_unused_list)) {
ldlm_policy_res_t result;
while (!list_empty(&ns->ns_unused_list)) {
ldlm_policy_res_t result;
+ cfs_time_t last_use = 0;
/* all unused locks */
if (remained-- <= 0)
/* all unused locks */
if (remained-- <= 0)
/* already processed */
continue;
/* already processed */
continue;
+ last_use = lock->l_last_used;
+ if (last_use == cfs_time_current())
+ continue;
+
/* Somebody is already doing CANCEL. No need for this
* lock in LRU, do not traverse it again. */
if (!ldlm_is_canceling(lock))
/* Somebody is already doing CANCEL. No need for this
* lock in LRU, do not traverse it again. */
if (!ldlm_is_canceling(lock))
lock_res_and_lock(lock);
/* Check flags again under the lock. */
if (ldlm_is_canceling(lock) ||
lock_res_and_lock(lock);
/* Check flags again under the lock. */
if (ldlm_is_canceling(lock) ||
- (ldlm_lock_remove_from_lru(lock) == 0)) {
+ ldlm_lock_remove_from_lru_check(lock, last_use) == 0) {
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
- * by itself, or the lock is no longer unused. */
+ * by itself, or the lock is no longer unused or
+ * the lock has been used since the pf() call and
+ * pages could be put under it. */
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference, __FUNCTION__, current);
LDLM_LOCK_RELEASE(lock);
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference, __FUNCTION__, current);
LDLM_LOCK_RELEASE(lock);