/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.sf.net/projects/lustre/
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/version.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <asm/segment.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#else
#include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/lustre_lib.h>

/* Invariants:
 *  - only the owner of the lock changes l_owner/l_depth
 *  - if a non-owner changes or checks the variables, the spinlock is taken
 */

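/*
 * The functions below only assume that struct lustre_lock (defined in
 * <linux/lustre_lib.h>) provides roughly the fields sketched here; this is
 * inferred from the code, and the exact definition in the header may differ:
 *
 *      struct lustre_lock {
 *              int                 l_depth;   recursion depth of the owner
 *              struct task_struct *l_owner;   task currently holding l_sem
 *              struct semaphore    l_sem;     provides the actual exclusion
 *              spinlock_t          l_spin;    protects l_owner/l_depth for
 *                                             readers that are not the owner
 *      };
 */
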
void l_lock_init(struct lustre_lock *lock)
{
        sema_init(&lock->l_sem, 1);
        spin_lock_init(&lock->l_spin);
}

void l_lock(struct lustre_lock *lock)
{
        int owner = 0;

        spin_lock(&lock->l_spin);
        if (lock->l_owner == current)
                owner = 1;
        spin_unlock(&lock->l_spin);

        /* It is safe to increment l_depth outside the spinlock because
         * only one CPU can be running the current task
         * (i.e. l_owner == current), regardless of the number of CPUs.
         */
        if (owner) {
                ++lock->l_depth;
        } else {
                down(&lock->l_sem);
                spin_lock(&lock->l_spin);
                lock->l_owner = current;
                lock->l_depth = 0;
                spin_unlock(&lock->l_spin);
        }
}
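
/*
 * Usage sketch (illustrative only; the function name below is hypothetical):
 * l_lock() may be called again by the task that already owns the lock, and
 * every l_lock() must be matched by an l_unlock().
 */
#if 0
static void l_lock_usage_sketch(struct lustre_lock *lock)
{
        l_lock(lock);           /* first acquire: takes l_sem, l_depth = 0 */
        l_lock(lock);           /* re-entry by the owner: l_depth = 1 */
        l_unlock(lock);         /* l_depth back to 0, l_sem still held */
        l_unlock(lock);         /* l_depth drops below 0: l_sem released */
}
#endif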

void l_unlock(struct lustre_lock *lock)
{
        LASSERTF(lock->l_owner == current, "lock %p, current %p\n",
                 lock->l_owner, current);
        LASSERTF(lock->l_depth >= 0, "depth %d\n", lock->l_depth);
        spin_lock(&lock->l_spin);
        if (--lock->l_depth < 0) {
                lock->l_owner = NULL;
                spin_unlock(&lock->l_spin);
                up(&lock->l_sem);
                return;
        }
        spin_unlock(&lock->l_spin);
}

/* Return 1 if the current task owns the lock, 0 otherwise. */
int l_has_lock(struct lustre_lock *lock)
{
        int depth = -1, owner = 0;

        spin_lock(&lock->l_spin);
        if (lock->l_owner == current) {
                depth = lock->l_depth;
                owner = 1;
        }
        spin_unlock(&lock->l_spin);

        if (depth >= 0)
                CDEBUG(D_INFO, "lock_depth: %d\n", depth);
        return owner;
}

#ifdef __KERNEL__
#include <linux/lustre_version.h>
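/*
 * Warn if the current task holds the namespace lock at a point where it must
 * not; the static next_msg throttles the message to at most once per minute.
 */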
void l_check_no_ns_lock(struct ldlm_namespace *ns)
{
        static unsigned long next_msg;

        if (l_has_lock(&ns->ns_lock) && time_after(jiffies, next_msg)) {
                CERROR("namespace %s lock held illegally; tell phil\n",
                       ns->ns_name);
                next_msg = jiffies + 60 * HZ;
        }
}

#else
void l_check_no_ns_lock(struct ldlm_namespace *ns)
{
        if (l_has_lock(&ns->ns_lock)) {
                CERROR("namespace %s lock held illegally; tell phil\n",
                       ns->ns_name);
        }
}
#endif /* __KERNEL__ */