Whamcloud - gitweb
Branch: b_new_cmd
[fs/lustre-release.git] / lustre / ldlm / l_lock.c
index e8ffd5b..b652097 100644 (file)
@@ -20,6 +20,8 @@
  *
  */
 
+#define DEBUG_SUBSYSTEM S_LDLM
+#ifdef __KERNEL__
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/smp_lock.h>
+#else
+#include <liblustre.h>
+#endif
 
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include <linux/obd_class.h>
+#include <linux/lustre_dlm.h>
 #include <linux/lustre_lib.h>
 
-/* invariants:
- - only the owner of the lock changes l_owner/l_depth
- - if a non-owner changes or checks the variables a spin lock is taken
-*/
-
-void l_lock_init(struct lustre_lock *lock)
-{
-        sema_init(&lock->l_sem, 1);
-        spin_lock_init(&lock->l_spin);
-}
-
-void l_lock(struct lustre_lock *lock)
+/*
+ * ldlm locking uses resource to serialize access to locks
+ * but there is a case when we change resource of lock upon
+ * enqueue reply. we rely on that lock->l_resource = new_res
+ * is atomic
+ */
+struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock)
 {
-        int owner = 0;
+        struct ldlm_resource *res = lock->l_resource;
 
-        spin_lock(&lock->l_spin);
-        if (lock->l_owner == current)
-                owner = 1;
-        spin_unlock(&lock->l_spin);
+        if (!res->lr_namespace->ns_client) {
+                /* on server-side resource of lock doesn't change */
+                lock_res(res);
+                return res;
+        }
 
-        /* This is safe to increment outside the spinlock because we
-         * can only have 1 CPU running on the current task
-         * (i.e. l_owner == current), regardless of the number of CPUs.
-         */
-        if (owner) {
-                ++lock->l_depth;
-        } else {
-                down(&lock->l_sem);
-                spin_lock(&lock->l_spin);
-                lock->l_owner = current;
-                lock->l_depth = 0;
-                spin_unlock(&lock->l_spin);
-        }
+        lock_bitlock(lock);
+        res = lock->l_resource;
+        lock_res(res);
+        return res;
 }
 
-void l_unlock(struct lustre_lock *lock)
+void unlock_res_and_lock(struct ldlm_lock *lock)
 {
-        LASSERT(lock->l_owner == current);
-        LASSERT(lock->l_depth >= 0);
+        struct ldlm_resource *res = lock->l_resource;
 
-        spin_lock(&lock->l_spin);
-        if (--lock->l_depth < 0) {
-                lock->l_owner = NULL;
-                spin_unlock(&lock->l_spin);
-                up(&lock->l_sem);
+        if (!res->lr_namespace->ns_client) {
+                /* on server-side resource of lock doesn't change */
+                unlock_res(res);
                 return;
         }
-        spin_unlock(&lock->l_spin);
-}
-
-int l_has_lock(struct lustre_lock *lock)
-{
-        int depth = -1, owner = 0;
 
-        spin_lock(&lock->l_spin);
-        if (lock->l_owner == current) {
-                depth = lock->l_depth;
-                owner = 1;
-        }
-        spin_unlock(&lock->l_spin);
-
-        if (depth >= 0)
-                CDEBUG(D_INFO, "lock_depth: %d\n", depth);
-        return owner;
+        unlock_res(res);
+        unlock_bitlock(lock);
 }
+