1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This code is issued under the GNU General Public License.
7 * See the file COPYING in this distribution
9 * by Cluster File Systems, Inc.
12 #define DEBUG_SUBSYSTEM S_LDLM
14 #include <linux/lustre_dlm.h>
15 #include <linux/obd_class.h>
/* Slab caches for resources and locks; created by the module setup code. */
kmem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;

/* Protects ldlm_namespace_list, the global list of active namespaces. */
spinlock_t ldlm_namespace_lock = SPIN_LOCK_UNLOCKED;
struct list_head ldlm_namespace_list = LIST_HEAD_INIT(ldlm_namespace_list);

/* /proc/lustre/ldlm/namespaces directory; created in ldlm_proc_setup(). */
static struct proc_dir_entry *ldlm_ns_proc_dir = NULL;
/* Create the "namespaces" directory under the obd's /proc entry.
 * NOTE(review): this excerpt is elided — the error-path bodies and the
 * return statements are not visible here. */
int ldlm_proc_setup(struct obd_device *obd)
        /* Presumably bails out when the obd has no /proc entry yet
         * (the taken branch is elided). */
        if (obd->obd_proc_entry == NULL)
        ldlm_ns_proc_dir = proc_mkdir("namespaces", obd->obd_proc_entry);
        if (ldlm_ns_proc_dir == NULL) {
                CERROR("Couldn't create /proc/lustre/ldlm/namespaces\n");
/* Tear down the per-obd "namespaces" /proc directory created in
 * ldlm_proc_setup(). */
void ldlm_proc_cleanup(struct obd_device *obd)
        proc_lustre_remove_obd_entry("namespaces", obd);
/* FIXME: This can go away when we start to really use lprocfs */
/* Trivial read_proc handler: render the __u64 pointed at by 'data' into
 * 'page'.  NOTE(review): the declaration of 'len' and the return are in
 * elided lines; also confirm '%Lu' matches this kernel's vsnprintf
 * handling of unsigned long long. */
static int lprocfs_ll_rd(char *page, char **start, off_t off,
                         int count, int *eof, void *data)
        __u64 *temp = (__u64 *)data;

        len = snprintf(page, count, "%Lu\n", *temp);
/* Allocate and initialize a new lock namespace named 'name'; 'client'
 * distinguishes client-side from server-side namespaces.  Registers the
 * namespace on the global list and creates its /proc counters.  The
 * allocation-failure paths (and their gotos) are elided in this excerpt;
 * the tail below is their shared cleanup. */
struct ldlm_namespace *ldlm_namespace_new(char *name, __u32 client)
        struct ldlm_namespace *ns = NULL;
        struct list_head *bucket;
        struct proc_dir_entry *proc_entry;

        OBD_ALLOC(ns, sizeof(*ns));

        /* Resource hash table; its size is accounted in obd_memory. */
        ns->ns_hash = vmalloc(sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        obd_memory += sizeof(*ns->ns_hash) * RES_HASH_SIZE;

        /* Private copy of the namespace name. */
        OBD_ALLOC(ns->ns_name, strlen(name) + 1);
        strcpy(ns->ns_name, name);

        INIT_LIST_HEAD(&ns->ns_root_list);
        l_lock_init(&ns->ns_lock);
        ns->ns_client = client;
        spin_lock_init(&ns->ns_counter_lock);

        /* Initialize every hash bucket, iterating from the last down. */
        for (bucket = ns->ns_hash + RES_HASH_SIZE - 1; bucket >= ns->ns_hash;
                INIT_LIST_HEAD(bucket);

        /* Publish the namespace on the global list. */
        spin_lock(&ldlm_namespace_lock);
        list_add(&ns->ns_list_chain, &ldlm_namespace_list);
        spin_unlock(&ldlm_namespace_lock);

        ns->ns_proc_dir = proc_mkdir(ns->ns_name, ldlm_ns_proc_dir);
        if (ns->ns_proc_dir == NULL)
                CERROR("Unable to create proc directory for namespace.\n");
        /* NOTE(review): create_proc_entry() can return NULL; the
         * dereferences below are unchecked and would oops on failure. */
        proc_entry = create_proc_entry("resource_count", 0444, ns->ns_proc_dir);
        proc_entry->read_proc = lprocfs_ll_rd;
        proc_entry->data = &ns->ns_resources;
        proc_entry = create_proc_entry("lock_count", 0444, ns->ns_proc_dir);
        proc_entry->read_proc = lprocfs_ll_rd;
        proc_entry->data = &ns->ns_locks;

        /* Error path: undo whichever allocations succeeded. */
        if (ns && ns->ns_hash) {
                obd_memory -= sizeof(*ns->ns_hash) * RES_HASH_SIZE;
        if (ns && ns->ns_name)
                OBD_FREE(ns->ns_name, strlen(name) + 1);
        OBD_FREE(ns, sizeof(*ns));
121 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
/* If 'local_only' is true, don't try to tell the server, just cleanup. */
/* Cancel and destroy every lock on queue 'q' (one of lr_granted,
 * lr_converting, lr_waiting) of 'res'.  On a client namespace, locks
 * are cancelled through the DLM client first; on error, or when
 * local_only is set, they are torn down locally anyway. */
static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
        struct list_head *tmp, *pos;
        int rc = 0, client = res->lr_namespace->ns_client;

        /* _safe iteration: each lock is unlinked as we go. */
        list_for_each_safe(tmp, pos, q) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                /* Client path: cancel via a handle so the server is told
                 * (the branch condition around this is elided). */
                struct lustre_handle lockh;
                ldlm_lock2handle(lock, &lockh);
                rc = ldlm_cli_cancel(&lockh);
                        CERROR("ldlm_cli_cancel: %d\n", rc);
                /* Force local cleanup on errors, too. */
                if (local_only || rc != ELDLM_OK)
                        ldlm_lock_cancel(lock);

                LDLM_DEBUG(lock, "Freeing a lock still held by a "
                ldlm_resource_unlink_lock(lock);
                ldlm_lock_destroy(lock);
/* Walk every hash bucket and clean up all locks on every resource in
 * the namespace.  'local_only' (only used by recovery) suppresses
 * server notification and the forced teardown of lingering resources. */
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int local_only)
        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *pos;
                list_for_each_safe(tmp, pos, &(ns->ns_hash[i])) {
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        /* Hold a ref so the resource survives the three
                         * queue sweeps below. */
                        ldlm_resource_getref(res);

                        cleanup_resource(res, &res->lr_granted, local_only);
                        cleanup_resource(res, &res->lr_converting, local_only);
                        cleanup_resource(res, &res->lr_waiting, local_only);

                        /* XXX this is a bit counter-intuitive and should
                         * probably be cleaner: don't force cleanup if we're
                         * local_only (which is only used by recovery). We
                         * probably still have outstanding lock refs which
                         * reference these resources. -phil */
                        if (!ldlm_resource_put(res) && !local_only) {
                                CERROR("Resource refcount nonzero (%d) after "
                                       "lock cleanup; forcing cleanup.\n",
                                       atomic_read(&res->lr_refcount));
                                ldlm_resource_dump(res);
                                /* Force the count to one and drop it, so the
                                 * resource is actually freed this time. */
                                atomic_set(&res->lr_refcount, 1);
                                ldlm_resource_put(res);
        l_unlock(&ns->ns_lock);
/* Cleanup, but also free, the namespace */
int ldlm_namespace_free(struct ldlm_namespace *ns)
        /* Unhook from the global list and tear down the /proc entries.
         * NOTE(review): remove_proc_entry() is called while holding a
         * spinlock — confirm it cannot sleep on this kernel. */
        spin_lock(&ldlm_namespace_lock);
        list_del(&ns->ns_list_chain);
        remove_proc_entry("resource_count", ns->ns_proc_dir);
        remove_proc_entry("lock_count", ns->ns_proc_dir);
        remove_proc_entry(ns->ns_name, ldlm_ns_proc_dir);
        spin_unlock(&ldlm_namespace_lock);

        /* Not local_only: remaining locks are cancelled at the server. */
        ldlm_namespace_cleanup(ns, 0);

        /* Release the hash table, the name copy, and the namespace itself,
         * reversing the allocations made in ldlm_namespace_new(). */
        vfree(ns->ns_hash /* , sizeof(*ns->ns_hash) * RES_HASH_SIZE */);
        obd_memory -= sizeof(*ns->ns_hash) * RES_HASH_SIZE;
        OBD_FREE(ns->ns_name, strlen(ns->ns_name) + 1);
        OBD_FREE(ns, sizeof(*ns));
/* Tear down the DLM client state hanging off this export (cleans up the
 * export's ptlrpc import; the return is elided in this excerpt). */
int ldlm_client_free(struct obd_export *exp)
        struct ldlm_export_data *led = &exp->exp_ldlm_data;
        ptlrpc_cleanup_client(&led->led_import);
/* Hash a (parent, name) pair into a bucket index bounded by
 * RES_HASH_MASK.  NOTE(review): the per-word mixing inside the loop is
 * elided in this excerpt; only the parent contribution is visible. */
static __u32 ldlm_hash_fn(struct ldlm_resource *parent, __u64 *name)
        for (i = 0; i < RES_NAME_SIZE; i++)
        /* Mix in the parent pointer so identical names under different
         * parents land in different buckets ( >> 4 drops alignment bits). */
        hash += (__u32)((unsigned long)parent >> 4);
        return (hash & RES_HASH_MASK);
/* Allocate a zeroed resource from the slab with all of its list heads
 * initialized and an initial refcount of 1 (held by the caller).
 * NOTE(review): the allocation-failure check sits in elided lines. */
static struct ldlm_resource *ldlm_resource_new(void)
        struct ldlm_resource *res;

        res = kmem_cache_alloc(ldlm_resource_slab, SLAB_KERNEL);
        memset(res, 0, sizeof(*res));

        /* Tree linkage and the three lock queues all start empty. */
        INIT_LIST_HEAD(&res->lr_children);
        INIT_LIST_HEAD(&res->lr_childof);
        INIT_LIST_HEAD(&res->lr_granted);
        INIT_LIST_HEAD(&res->lr_converting);
        INIT_LIST_HEAD(&res->lr_waiting);

        /* The caller gets the first reference. */
        atomic_set(&res->lr_refcount, 1);
/* Args: locked namespace
 * Returns: newly-allocated, referenced, unlocked resource */
static struct ldlm_resource *ldlm_resource_add(struct ldlm_namespace *ns,
                                               struct ldlm_resource *parent,
                                               __u64 *name, __u32 type)
        struct list_head *bucket;
        struct ldlm_resource *res;

        /* Reject out-of-range lock types (the error body is elided). */
        if (type < LDLM_MIN_TYPE || type > LDLM_MAX_TYPE) {

        res = ldlm_resource_new();

        /* ns_resources accounting; the increment between these two lines
         * is elided in this excerpt. */
        spin_lock(&ns->ns_counter_lock);
        spin_unlock(&ns->ns_counter_lock);

        memcpy(res->lr_name, name, sizeof(res->lr_name));
        res->lr_namespace = ns;
        res->lr_most_restr = LCK_NL;

        /* Insert into the namespace hash, keyed by (parent, name). */
        bucket = ns->ns_hash + ldlm_hash_fn(parent, name);
        list_add(&res->lr_hash, bucket);

        /* Tree linkage: root list when parentless, otherwise under the
         * parent (the if/else around these lines is elided). */
        list_add(&res->lr_childof, &ns->ns_root_list);
        res->lr_parent = parent;
        list_add(&res->lr_childof, &parent->lr_children);
/* Args: unlocked namespace
 * Locks: takes and releases ns->ns_lock and res->lr_lock
 * Returns: referenced, unlocked ldlm_resource or NULL */
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                        struct ldlm_resource *parent,
                                        __u64 *name, __u32 type, int create)
        struct list_head *bucket;
        /* NOTE(review): 'bucket' is still uninitialized here, so this
         * initializer reads an indeterminate value; list_for_each below
         * overwrites 'tmp' before use, but '= NULL' would be correct. */
        struct list_head *tmp = bucket;
        struct ldlm_resource *res = NULL;

        /* Dead or uninitialized namespace: bail out (body elided). */
        if (ns == NULL || ns->ns_hash == NULL) {

        l_lock(&ns->ns_lock);
        bucket = ns->ns_hash + ldlm_hash_fn(parent, name);

        /* Linear scan of the bucket for an exact name match. */
        list_for_each(tmp, bucket) {
                struct ldlm_resource *chk;
                chk = list_entry(tmp, struct ldlm_resource, lr_hash);

                /* Match: take a reference on the existing resource.
                 * NOTE(review): the 'res = chk;' assignment is elided in
                 * this excerpt — as shown, 'res' would still be NULL. */
                if (memcmp(chk->lr_name, name, sizeof(chk->lr_name)) == 0) {
                        atomic_inc(&res->lr_refcount);

        /* Not found: create it if the caller asked us to. */
        if (res == NULL && create)
                res = ldlm_resource_add(ns, parent, name, type);
        l_unlock(&ns->ns_lock);
/* Take an extra reference on 'res'; presumably returns 'res' (the
 * return statement is elided in this excerpt). */
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
        atomic_inc(&res->lr_refcount);
/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_put(struct ldlm_resource *res)
        if (atomic_dec_and_test(&res->lr_refcount)) {
                struct ldlm_namespace *ns = res->lr_namespace;

                l_lock(&ns->ns_lock);

                /* Re-check under ns_lock: the resource may have been
                 * re-referenced between the decrement and the lock. */
                if (atomic_read(&res->lr_refcount) != 0) {
                        /* We lost the race. */
                        l_unlock(&ns->ns_lock);

                /* Sanity: nothing may still be queued on a resource being
                 * freed (the actions on these checks are elided). */
                if (!list_empty(&res->lr_granted))
                if (!list_empty(&res->lr_converting))
                if (!list_empty(&res->lr_waiting))
                if (!list_empty(&res->lr_children))

                /* Unhash, unlink from the tree, and return to the slab. */
                list_del(&res->lr_hash);
                list_del(&res->lr_childof);

                kmem_cache_free(ldlm_resource_slab, res);
                l_unlock(&ns->ns_lock);

                /* ns_resources accounting (the decrement is elided). */
                spin_lock(&ns->ns_counter_lock);
                spin_unlock(&ns->ns_counter_lock);

        /* NOTE(review): this reads lr_refcount even after the free path
         * above, i.e. potentially freed memory — underflow sanity check. */
        if (atomic_read(&res->lr_refcount) < 0)
/* Link 'lock' onto queue 'head' (one of the resource's granted,
 * converting or waiting lists), under the owning namespace's lock. */
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                            struct ldlm_lock *lock)
        l_lock(&res->lr_namespace->ns_lock);

        /* Debugging: dump both sides of the link being made. */
        ldlm_resource_dump(res);
        ldlm_lock_dump(lock);

        /* A lock may sit on only one resource queue at a time (the
         * action taken on a violation is elided in this excerpt). */
        if (!list_empty(&lock->l_res_link))

        list_add(&lock->l_res_link, head);
        l_unlock(&res->lr_namespace->ns_lock);
415 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
417 l_lock(&lock->l_resource->lr_namespace->ns_lock);
418 list_del_init(&lock->l_res_link);
419 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
422 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
424 desc->lr_type = res->lr_type;
425 memcpy(desc->lr_name, res->lr_name, sizeof(desc->lr_name));
426 memcpy(desc->lr_version, res->lr_version, sizeof(desc->lr_version));
429 void ldlm_dump_all_namespaces(void)
431 struct list_head *tmp;
433 spin_lock(&ldlm_namespace_lock);
435 list_for_each(tmp, &ldlm_namespace_list) {
436 struct ldlm_namespace *ns;
437 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
438 ldlm_namespace_dump(ns);
441 spin_unlock(&ldlm_namespace_lock);
444 void ldlm_namespace_dump(struct ldlm_namespace *ns)
446 struct list_head *tmp;
448 l_lock(&ns->ns_lock);
449 CDEBUG(D_OTHER, "--- Namespace: %s (rc: %d, client: %d)\n", ns->ns_name,
450 ns->ns_refcount, ns->ns_client);
452 list_for_each(tmp, &ns->ns_root_list) {
453 struct ldlm_resource *res;
454 res = list_entry(tmp, struct ldlm_resource, lr_childof);
456 /* Once we have resources with children, this should really dump
457 * them recursively. */
458 ldlm_resource_dump(res);
460 l_unlock(&ns->ns_lock);
/* Debug helper: dump one resource and every lock on its granted,
 * converting and waiting queues via CDEBUG.  (The tail of this function
 * runs past this excerpt; the 'name' buffer declaration is elided.) */
void ldlm_resource_dump(struct ldlm_resource *res)
        struct list_head *tmp;

        /* The "%Lx %Lx %Lx" format below hard-codes three name words
         * (the action taken on this check is elided). */
        if (RES_NAME_SIZE != 3)

        snprintf(name, sizeof(name), "%Lx %Lx %Lx",
                 (unsigned long long)res->lr_name[0],
                 (unsigned long long)res->lr_name[1],
                 (unsigned long long)res->lr_name[2]);

        CDEBUG(D_OTHER, "--- Resource: %p (%s) (rc: %d)\n", res, name,
               atomic_read(&res->lr_refcount));
        CDEBUG(D_OTHER, "Namespace: %p (%s)\n", res->lr_namespace,
               res->lr_namespace->ns_name);
        CDEBUG(D_OTHER, "Parent: %p, root: %p\n", res->lr_parent, res->lr_root);

        CDEBUG(D_OTHER, "Granted locks:\n");
        list_for_each(tmp, &res->lr_granted) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                ldlm_lock_dump(lock);

        CDEBUG(D_OTHER, "Converting locks:\n");
        list_for_each(tmp, &res->lr_converting) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                ldlm_lock_dump(lock);

        CDEBUG(D_OTHER, "Waiting locks:\n");
        list_for_each(tmp, &res->lr_waiting) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                ldlm_lock_dump(lock);