#define l_flock_waitq l_lru
static CFS_LIST_HEAD(ldlm_flock_waitq);
+spinlock_t ldlm_flock_waitq_lock = SPIN_LOCK_UNLOCKED;
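Unlike the per-resource lock taken elsewhere in this file, ldlm_flock_waitq is a single list shared by every resource, so it gets its own dedicated spinlock. A minimal userspace sketch of the pattern being introduced (hypothetical names; a pthread mutex stands in for the kernel spinlock):

#include <pthread.h>

/* Hypothetical stand-ins: a circular doubly linked list like the kernel's
 * list_head, guarded by one global lock for the one global list. */
struct waiter {
        struct waiter *next, *prev;
};

static struct waiter waitq = { &waitq, &waitq };
static pthread_mutex_t waitq_lock = PTHREAD_MUTEX_INITIALIZER;

static void waitq_add_tail(struct waiter *w)
{
        pthread_mutex_lock(&waitq_lock);
        w->prev = waitq.prev;
        w->next = &waitq;
        waitq.prev->next = w;
        waitq.prev = w;
        pthread_mutex_unlock(&waitq_lock);
}

static void waitq_del_init(struct waiter *w)
{
        pthread_mutex_lock(&waitq_lock);
        w->prev->next = w->next;
        w->next->prev = w->prev;
        w->next = w->prev = w;   /* empty again, like list_del_init() */
        pthread_mutex_unlock(&waitq_lock);
}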
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag);
LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
mode, flags);
+ /* Safe to check without taking the waitq lock: by the time a lock is
+ * destroyed, its waitq entry should already be empty. */
LASSERT(list_empty(&lock->l_flock_waitq));
list_del_init(&lock->l_res_link);
pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
struct ldlm_lock *lock;
+ spin_lock(&ldlm_flock_waitq_lock);
restart:
list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
    (lock->l_export != blocking_export))
        continue;
blocking_pid = lock->l_policy_data.l_flock.blocking_pid;
blocking_export = (struct obd_export *)(long)
lock->l_policy_data.l_flock.blocking_export;
- if (blocking_pid == req_pid && blocking_export == req_export)
+ if (blocking_pid == req_pid && blocking_export == req_export) {
+ spin_unlock(&ldlm_flock_waitq_lock);
return 1;
+ }
goto restart;
}
+ spin_unlock(&ldlm_flock_waitq_lock);
return 0;
}
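The loop above walks the blocked-on chain: starting from the owner that blocks the new request, it checks whether that owner is itself asleep on someone else's lock, hops to that owner, and rescans (the goto restart); if the chain ever leads back to the requester, granting the wait would close a cycle. A self-contained sketch of the same walk, assuming a toy table of (owner, blocked_by) pairs instead of the real (pid, export) identification:

/* Hypothetical toy model: each entry says owner X is asleep waiting on
 * owner Y's lock.  The real code identifies owners by (pid, export). */
struct flock_waiter {
        int owner;
        int blocked_by;
};

/* Return 1 if letting `requester` sleep on `holder` would close a cycle. */
static int would_deadlock(const struct flock_waiter *table, int n,
                          int requester, int holder)
{
        int steps, i;

        for (steps = 0; steps < n; steps++) {   /* bound the walk */
                int found = 0;

                if (holder == requester)
                        return 1;               /* chain leads back: deadlock */
                for (i = 0; i < n; i++) {       /* the "goto restart" rescan */
                        if (table[i].owner == holder) {
                                holder = table[i].blocked_by;
                                found = 1;
                                break;
                        }
                }
                if (!found)
                        return 0;               /* chain ends: no cycle */
        }
        return 0;
}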
req->l_policy_data.l_flock.blocking_export =
        (long)(void *)lock->l_export;
LASSERT(list_empty(&req->l_flock_waitq));
+ spin_lock(&ldlm_flock_waitq_lock);
list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);
+ spin_unlock(&ldlm_flock_waitq_lock);
ldlm_resource_add_lock(res, &res->lr_waiting, req);
*flags |= LDLM_FL_BLOCK_GRANTED;
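This is the producer side of the detector: before the request sleeps, it records which owner blocks it (the blocking_pid/blocking_export fields the scan above reads) and joins the global waitq under the new lock. A toy continuation of the sketches above (hypothetical, not the Lustre API):

/* Record the wait-for edge that would_deadlock() follows, then park this
 * request's slot on the shared queue. */
static void park_request(struct flock_waiter *w, int requester, int holder)
{
        w->owner = requester;
        w->blocked_by = holder;
        /* ...append w to the shared list under waitq_lock, as in
         * waitq_add_tail(), then sleep until granted or cancelled... */
}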
/* In case we had slept on this lock request, take it off the
 * deadlock detection waitq. */
+ spin_lock(&ldlm_flock_waitq_lock);
list_del_init(&req->l_flock_waitq);
+ spin_unlock(&ldlm_flock_waitq_lock);
/* Scan the locks owned by this process that overlap this request.
* We may have to merge or split existing locks. */
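Merging and splitting is where POSIX semantics bite: a new lock from the same owner may swallow an existing one, truncate it, or punch a hole through its middle. A minimal sketch of the range arithmetic involved, assuming inclusive byte ranges (hypothetical helper, not the Lustre implementation):

#include <stdint.h>

struct range { uint64_t start, end; };  /* inclusive byte range */

/* Apply a new same-owner lock `n` over an existing lock `old` of a
 * different mode.  Writes the surviving pieces of `old` to `out` and
 * returns how many there are (0, 1, or 2); two means `n` bisected it. */
static int split_range(struct range old, struct range n, struct range out[2])
{
        int k = 0;

        if (n.end < old.start || n.start > old.end) {
                out[k++] = old;                 /* no overlap: unchanged */
                return k;
        }
        if (old.start < n.start) {              /* left remainder survives */
                out[k].start = old.start;
                out[k].end = n.start - 1;
                k++;
        }
        if (old.end > n.end) {                  /* right remainder survives */
                out[k].start = n.end + 1;
                out[k].end = old.end;
                k++;
        }
        return k;
}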
lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
/* take lock off the deadlock detection waitq. */
+ spin_lock(&ldlm_flock_waitq_lock);
list_del_init(&lock->l_flock_waitq);
+ spin_unlock(&ldlm_flock_waitq_lock);
/* client side - set flag to prevent lock from being put on lru list */
lock->l_flags |= LDLM_FL_CBPENDING;
CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
flags, data, getlk);
+ /* Import invalidation. We need to actually release the lock
+ * references being held, so that the lock can go away. There is
+ * no point in holding the lock even if the application still
+ * believes it has it, since the server has already dropped it.
+ * This applies only to granted locks. */
+ lock_res_and_lock(lock);
+ if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
+ (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
+ unlock_res_and_lock(lock);
+ if (lock->l_req_mode == lock->l_granted_mode &&
+ lock->l_granted_mode != LCK_NL)
+ ldlm_lock_decref_internal(lock, lock->l_req_mode);
+ RETURN(0);
+ }
+ unlock_res_and_lock(lock);
+
LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
               LDLM_FL_BLOCK_CONV)))
        goto granted;
lock_res(lock->l_resource);
/* take lock off the deadlock detection waitq. */
+ spin_lock(&ldlm_flock_waitq_lock);
list_del_init(&lock->l_flock_waitq);
+ spin_unlock(&ldlm_flock_waitq_lock);
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
list_del_init(&lock->l_res_link);
ns = lock->l_resource->lr_namespace;
/* take lock off the deadlock detection waitq. */
- lock_res_and_lock(lock);
+ spin_lock(&ldlm_flock_waitq_lock);
list_del_init(&lock->l_flock_waitq);
- unlock_res_and_lock(lock);
+ spin_unlock(&ldlm_flock_waitq_lock);
RETURN(0);
}