1. Adding iput(cache_inode) in smfs_clear_inode for clearing cache inode according...
[fs/lustre-release.git] / lustre / ldlm / l_lock.c
index 2566280..2a4f832 100644
@@ -20,8 +20,8 @@
  *
  */
 
-
-
+#define DEBUG_SUBSYSTEM S_LDLM
+#ifdef __KERNEL__
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/smp_lock.h>
+#else 
+#include <liblustre.h>
+#endif
 
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include <linux/obd_class.h>
+#include <linux/lustre_dlm.h>
 #include <linux/lustre_lib.h>
 
 /* invariants:
@@ -61,14 +62,19 @@ void l_lock_init(struct lustre_lock *lock)
 void l_lock(struct lustre_lock *lock)
 {
         int owner = 0;
+
         spin_lock(&lock->l_spin);
-        if (lock->l_owner == current) { 
+        if (lock->l_owner == current)
                 owner = 1;
-        }
         spin_unlock(&lock->l_spin);
-        if (owner)
-                 ++lock->l_depth;
-        else { 
+
+        /* This is safe to increment outside the spinlock because we
+         * can only have 1 CPU running on the current task
+         * (i.e. l_owner == current), regardless of the number of CPUs.
+         */
+        if (owner) {
+                ++lock->l_depth;
+        } else {
                 down(&lock->l_sem);
                 spin_lock(&lock->l_spin);
                 lock->l_owner = current;
@@ -79,17 +85,54 @@ void l_lock(struct lustre_lock *lock)
 
 void l_unlock(struct lustre_lock *lock)
 {
-        if (lock->l_owner != current)
-                LBUG();
-        if (lock->l_depth < 0)
-                LBUG();
+        LASSERT(lock->l_owner == current);
+        LASSERT(lock->l_depth >= 0);
 
-        spin_lock(&lock->l_spin); 
-        if (--lock->l_depth < 0) { 
+        spin_lock(&lock->l_spin);
+        if (--lock->l_depth < 0) {
                 lock->l_owner = NULL;
                 spin_unlock(&lock->l_spin);
                 up(&lock->l_sem);
-                return ;
+                return;
+        }
+        spin_unlock(&lock->l_spin);
+}
+
+int l_has_lock(struct lustre_lock *lock)
+{
+        int depth = -1, owner = 0;
+
+        spin_lock(&lock->l_spin);
+        if (lock->l_owner == current) {
+                depth = lock->l_depth;
+                owner = 1;
         }
         spin_unlock(&lock->l_spin);
+
+        if (depth >= 0)
+                CDEBUG(D_INFO, "lock_depth: %d\n", depth);
+        return owner;
+}
+
+#ifdef __KERNEL__
+#include <linux/lustre_version.h>
+void l_check_no_ns_lock(struct ldlm_namespace *ns)
+{
+        static unsigned long next_msg;
+
+        if (l_has_lock(&ns->ns_lock) && time_after(jiffies, next_msg)) {
+                CERROR("namespace %s lock held during RPCs; tell phil\n",
+                       ns->ns_name);
+#if (LUSTRE_KERNEL_VERSION >= 30)
+                CERROR(portals_debug_dumpstack());
+#endif
+                next_msg = jiffies + 60 * HZ;
+        }
+}
+
+#else
+void l_check_no_ns_lock(struct ldlm_namespace *ns)
+{
+#warning "FIXME: check lock in user space??"
 }
+#endif /* __KERNEL__ */
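
For context (not part of the patch): l_lock() and l_unlock() implement a recursive lock, so the same task may nest acquisitions and only the outermost l_unlock() releases the semaphore. The new comment in l_lock() notes why ++lock->l_depth is safe outside l_spin: only the owning task can reach that branch, so it never races with itself. A minimal sketch of the intended nesting, with hypothetical caller names, might look like this:

/* Illustration only -- hypothetical callers, not code from this patch.
 * Assumes the Lustre headers used above (struct lustre_lock, l_lock,
 * l_unlock from <linux/lustre_lib.h>). */

static void example_inner(struct lustre_lock *lock)
{
        l_lock(lock);           /* l_owner == current, so only l_depth is bumped */
        /* ... work that also needs the lock ... */
        l_unlock(lock);         /* depth drops back; lock still held by the outer caller */
}

static void example_outer(struct lustre_lock *lock)
{
        l_lock(lock);           /* first acquisition: down(&lock->l_sem), owner = current */
        example_inner(lock);    /* safe re-entry on the same task */
        l_unlock(lock);         /* depth goes negative: owner cleared, up(&lock->l_sem) */
}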
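
Similarly, l_check_no_ns_lock() is intended for paths that are about to block (for example, before issuing an RPC): it warns, rate-limited to one message per 60 seconds, if the caller still holds the namespace lock. A hypothetical call site, purely illustrative:

/* Hypothetical call site -- not part of this patch.
 * Assumes struct ldlm_namespace from <linux/lustre_dlm.h>. */

static int example_before_rpc(struct ldlm_namespace *ns)
{
        /* Complain (at most once a minute) if ns->ns_lock is still
         * held by the current task at this point. */
        l_check_no_ns_lock(ns);

        /* ... build and send the request ... */
        return 0;
}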