-void l_lock_init(struct lustre_lock *lock)
-{
- sema_init(&lock->l_sem, 1);
- spin_lock_init(&lock->l_spin);
-}
-
-void l_lock(struct lustre_lock *lock)
-{
- int owner = 0;
-
- spin_lock(&lock->l_spin);
- if (lock->l_owner == current)
- owner = 1;
- spin_unlock(&lock->l_spin);
-
- /* This is safe to increment outside the spinlock because we
- * can only have 1 CPU running on the current task
- * (i.e. l_owner == current), regardless of the number of CPUs.
- */
- if (owner) {
- ++lock->l_depth;
- } else {
- down(&lock->l_sem);
- spin_lock(&lock->l_spin);
- lock->l_owner = current;
- lock->l_depth = 0;
- spin_unlock(&lock->l_spin);
- }
-}
-
-void l_unlock(struct lustre_lock *lock)
-{
- LASSERT(lock->l_owner == current);
- LASSERT(lock->l_depth >= 0);
-
- spin_lock(&lock->l_spin);
- if (--lock->l_depth < 0) {
- lock->l_owner = NULL;
- spin_unlock(&lock->l_spin);
- up(&lock->l_sem);
- return;
- }
- spin_unlock(&lock->l_spin);
-}
-
-int l_has_lock(struct lustre_lock *lock)
+/**
+ * Lock a lock and its resource.
+ *
+ * LDLM locking uses the resource to serialize access to locks,
+ * but there is a case where we change the resource of a lock upon
+ * an enqueue reply. We rely on lock->l_resource = new_res
+ * being an atomic operation.
+ */
+struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)