/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Phil Schwan <phil@clusterfs.com>
 *   Author: Peter Braam <braam@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
#include <linux/lustre_dlm.h>
#else
#include <liblustre.h>
#endif

#include <linux/obd_class.h>

kmem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;

spinlock_t ldlm_namespace_lock = SPIN_LOCK_UNLOCKED;
struct list_head ldlm_namespace_list = LIST_HEAD_INIT(ldlm_namespace_list);
static struct proc_dir_entry *ldlm_ns_proc_dir = NULL;

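/* Attach the OBD device to lprocfs and remember its /proc entry as the
 * parent directory for the per-namespace entries added later. */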
int ldlm_proc_setup(struct obd_device *obd)
{
        int rc;
        ENTRY;
        LASSERT(ldlm_ns_proc_dir == NULL);
        LASSERT(obd != NULL);
        rc = lprocfs_obd_attach(obd, 0);
        if (rc) {
                CERROR("LProcFS failed in ldlm-init\n");
                RETURN(rc);
        }
        ldlm_ns_proc_dir = obd->obd_proc_entry;
        RETURN(0);
}

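/* Detach the /proc entries registered in ldlm_proc_setup(). */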
void ldlm_proc_cleanup(struct obd_device *obd)
{
        if (ldlm_ns_proc_dir) {
                lprocfs_obd_detach(obd);
                ldlm_ns_proc_dir = NULL;
        }
}

#ifdef __KERNEL__
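/* /proc read callback that prints the unsigned int pointed to by 'data'. */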
static int lprocfs_uint_rd(char *page, char **start, off_t off,
                           int count, int *eof, void *data)
{
        unsigned int *temp = (unsigned int *)data;
        return snprintf(page, count, "%u\n", *temp);
}


#define MAX_STRING_SIZE 128
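/* Create the per-namespace /proc entries (resource_count, lock_count and
 * lock_unused_count) under the directory saved by ldlm_proc_setup(). */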
void ldlm_proc_namespace(struct ldlm_namespace *ns)
{
        struct lprocfs_vars lock_vars[2];
        char lock_name[MAX_STRING_SIZE + 1];

        LASSERT(ns != NULL);
        LASSERT(ns->ns_name != NULL);

        lock_name[MAX_STRING_SIZE] = '\0';

        memset(lock_vars, 0, sizeof(lock_vars));
        lock_vars[0].read_fptr = lprocfs_rd_u64;

        lock_vars[0].name = lock_name;

        snprintf(lock_name, MAX_STRING_SIZE, "%s/resource_count", ns->ns_name);

        lock_vars[0].data = &ns->ns_resources;
        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

        snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_count", ns->ns_name);

        lock_vars[0].data = &ns->ns_locks;
        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

        snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_unused_count",
                 ns->ns_name);
        lock_vars[0].data = &ns->ns_nr_unused;
        lock_vars[0].read_fptr = lprocfs_uint_rd;
        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
}
#endif
#undef MAX_STRING_SIZE

#define LDLM_MAX_UNUSED 100
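/* Allocate and initialize a namespace: hash table, name, locks and counters.
 * The namespace is linked onto the global list and gets its /proc entries. */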
struct ldlm_namespace *ldlm_namespace_new(char *name, __u32 client)
{
        struct ldlm_namespace *ns = NULL;
        struct list_head *bucket;
        ENTRY;

        OBD_ALLOC(ns, sizeof(*ns));
        if (!ns)
                RETURN(NULL);

        OBD_VMALLOC(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        if (!ns->ns_hash)
                GOTO(out_ns, NULL);

        OBD_ALLOC(ns->ns_name, strlen(name) + 1);
        if (!ns->ns_name)
                GOTO(out_hash, NULL);

        strcpy(ns->ns_name, name);

        INIT_LIST_HEAD(&ns->ns_root_list);
        l_lock_init(&ns->ns_lock);
        ns->ns_refcount = 0;
        ns->ns_client = client;
        spin_lock_init(&ns->ns_counter_lock);
        ns->ns_locks = 0;
        ns->ns_resources = 0;

        for (bucket = ns->ns_hash + RES_HASH_SIZE - 1; bucket >= ns->ns_hash;
             bucket--)
                INIT_LIST_HEAD(bucket);

        INIT_LIST_HEAD(&ns->ns_unused_list);
        ns->ns_nr_unused = 0;
        ns->ns_max_unused = LDLM_MAX_UNUSED;

        spin_lock(&ldlm_namespace_lock);
        list_add(&ns->ns_list_chain, &ldlm_namespace_list);
        spin_unlock(&ldlm_namespace_lock);
#ifdef __KERNEL__
        ldlm_proc_namespace(ns);
#endif
        RETURN(ns);

out_hash:
        POISON(ns->ns_hash, 0x5a, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
out_ns:
        OBD_FREE(ns, sizeof(*ns));
        return NULL;
}

extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);

/* If 'local_only' is true, don't try to tell the server, just cleanup.
 * This is currently only used for recovery, and we make certain assumptions
 * as a result--notably, that we shouldn't cancel locks with refs. -phil
 *
 * Called with the ns_lock held. */
static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
                             int local_only)
{
        struct list_head *tmp, *pos;
        int rc = 0, client = res->lr_namespace->ns_client;
        ENTRY;

        list_for_each_safe(tmp, pos, q) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                LDLM_LOCK_GET(lock);

                if (local_only && (lock->l_readers || lock->l_writers)) {
                        /* This is a little bit gross, but much better than the
                         * alternative: pretend that we got a blocking AST from
                         * the server, so that when the lock is decref'd, it
                         * will go away ... */
                        lock->l_flags |= LDLM_FL_CBPENDING;
                        /* ... without sending a CANCEL message. */
                        lock->l_flags |= LDLM_FL_LOCAL_ONLY;
                        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
                        /* ... and without calling the cancellation callback */
                        lock->l_flags |= LDLM_FL_CANCEL;
                        LDLM_LOCK_PUT(lock);
                        continue;
                }

                /* At shutdown time, don't call the cancellation callback */
                lock->l_flags |= LDLM_FL_CANCEL;

                if (client) {
                        struct lustre_handle lockh;
                        ldlm_lock2handle(lock, &lockh);
                        if (!local_only) {
                                rc = ldlm_cli_cancel(&lockh);
                                if (rc)
                                        CERROR("ldlm_cli_cancel: %d\n", rc);
                        }
                        /* Force local cleanup on errors, too. */
                        if (local_only || rc != ELDLM_OK)
                                ldlm_lock_cancel(lock);
                } else {
                        LDLM_DEBUG(lock, "Freeing a lock still held by a "
                                   "client node");

                        ldlm_resource_unlink_lock(lock);
                        ldlm_lock_destroy(lock);
                }
                LDLM_LOCK_PUT(lock);
        }
        EXIT;
}

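/* Cancel or destroy every lock on every resource in the namespace.  With
 * 'local_only' set (recovery), the server is not told about the cancels. */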
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int local_only)
{
        int i;

        if (ns == NULL) {
                CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
                return ELDLM_OK;
        }

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *pos;
                list_for_each_safe(tmp, pos, &(ns->ns_hash[i])) {
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        ldlm_resource_getref(res);

                        cleanup_resource(res, &res->lr_granted, local_only);
                        cleanup_resource(res, &res->lr_converting, local_only);
                        cleanup_resource(res, &res->lr_waiting, local_only);

                        /* XXX what a mess: don't force cleanup if we're
                         * local_only (which is only used by recovery).  In that
                         * case, we probably still have outstanding lock refs
                         * which reference these resources. -phil */
                        if (!ldlm_resource_putref(res) && !local_only) {
                                CERROR("Resource refcount nonzero (%d) after "
                                       "lock cleanup; forcing cleanup.\n",
                                       atomic_read(&res->lr_refcount));
                                ldlm_resource_dump(res);
                                atomic_set(&res->lr_refcount, 1);
                                ldlm_resource_putref(res);
                        }
                }
        }
        l_unlock(&ns->ns_lock);

        return ELDLM_OK;
}

/* Clean up the namespace, then also free it. */
int ldlm_namespace_free(struct ldlm_namespace *ns)
{
        if (!ns)
                RETURN(ELDLM_OK);

        spin_lock(&ldlm_namespace_lock);
        list_del(&ns->ns_list_chain);
        spin_unlock(&ldlm_namespace_lock);

        ldlm_namespace_cleanup(ns, 0);

        POISON(ns->ns_hash, 0x5a, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        OBD_FREE(ns->ns_name, strlen(ns->ns_name) + 1);
        OBD_FREE(ns, sizeof(*ns));

        return ELDLM_OK;
}

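/* Hash a resource name (plus its parent pointer) into a bucket index. */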
static __u32 ldlm_hash_fn(struct ldlm_resource *parent, struct ldlm_res_id name)
{
        __u32 hash = 0;
        int i;

        for (i = 0; i < RES_NAME_SIZE; i++)
                hash += name.name[i];

        hash += (__u32)((unsigned long)parent >> 4);

        return (hash & RES_HASH_MASK);
}

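/* Allocate a resource from the slab cache and initialize its lists and
 * refcount.  The caller fills in the name, type and namespace. */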
static struct ldlm_resource *ldlm_resource_new(void)
{
        struct ldlm_resource *res;

        OBD_SLAB_ALLOC(res, ldlm_resource_slab, SLAB_KERNEL, sizeof *res);
        if (res == NULL) {
                LBUG();
                return NULL;
        }
        memset(res, 0, sizeof(*res));

        INIT_LIST_HEAD(&res->lr_children);
        INIT_LIST_HEAD(&res->lr_childof);
        INIT_LIST_HEAD(&res->lr_granted);
        INIT_LIST_HEAD(&res->lr_converting);
        INIT_LIST_HEAD(&res->lr_waiting);

        atomic_set(&res->lr_refcount, 1);

        return res;
}

/* Args: locked namespace
 * Returns: newly-allocated, referenced, unlocked resource */
static struct ldlm_resource *
ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                  struct ldlm_res_id name, __u32 type)
{
        struct list_head *bucket;
        struct ldlm_resource *res;
        ENTRY;

        if (type < LDLM_MIN_TYPE || type > LDLM_MAX_TYPE) {
                LBUG();
                RETURN(NULL);
        }

        res = ldlm_resource_new();
        if (!res) {
                LBUG();
                RETURN(NULL);
        }

        spin_lock(&ns->ns_counter_lock);
        ns->ns_resources++;
        spin_unlock(&ns->ns_counter_lock);

        l_lock(&ns->ns_lock);
        memcpy(&res->lr_name, &name, sizeof(res->lr_name));
        res->lr_namespace = ns;
        ns->ns_refcount++;

        res->lr_type = type;
        res->lr_most_restr = LCK_NL;

        bucket = ns->ns_hash + ldlm_hash_fn(parent, name);
        list_add(&res->lr_hash, bucket);

        if (parent == NULL) {
                list_add(&res->lr_childof, &ns->ns_root_list);
        } else {
                res->lr_parent = parent;
                list_add(&res->lr_childof, &parent->lr_children);
        }
        l_unlock(&ns->ns_lock);

        RETURN(res);
}

/* Args: unlocked namespace
 * Locks: takes and releases ns->ns_lock and res->lr_lock
 * Returns: referenced, unlocked ldlm_resource or NULL */
struct ldlm_resource *
ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                  struct ldlm_res_id name, __u32 type, int create)
{
        struct list_head *bucket, *tmp;
        struct ldlm_resource *res = NULL;
        ENTRY;

        LASSERT(ns != NULL);
        LASSERT(ns->ns_hash != NULL);

        l_lock(&ns->ns_lock);
        bucket = ns->ns_hash + ldlm_hash_fn(parent, name);

        list_for_each(tmp, bucket) {
                res = list_entry(tmp, struct ldlm_resource, lr_hash);

                if (memcmp(&res->lr_name, &name, sizeof(res->lr_name)) == 0) {
                        ldlm_resource_getref(res);
                        l_unlock(&ns->ns_lock);
                        RETURN(res);
                }
        }

        if (create)
                res = ldlm_resource_add(ns, parent, name, type);
        else
                res = NULL;

        l_unlock(&ns->ns_lock);

        RETURN(res);
}

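/* Take an additional reference on the resource. */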
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
{
        atomic_inc(&res->lr_refcount);
        CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
               atomic_read(&res->lr_refcount));
        return res;
}

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref(struct ldlm_resource *res)
{
        int rc = 0;
        ENTRY;

        CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
               atomic_read(&res->lr_refcount) - 1);
        LASSERT(atomic_read(&res->lr_refcount) > 0);
        LASSERT(atomic_read(&res->lr_refcount) < 0x5a5a5a5a);

        if (atomic_dec_and_test(&res->lr_refcount)) {
                struct ldlm_namespace *ns = res->lr_namespace;
                ENTRY;

                l_lock(&ns->ns_lock);

                if (atomic_read(&res->lr_refcount) != 0) {
                        /* We lost the race. */
                        l_unlock(&ns->ns_lock);
                        RETURN(rc);
                }

                if (!list_empty(&res->lr_granted)) {
                        ldlm_resource_dump(res);
                        LBUG();
                }

                if (!list_empty(&res->lr_converting)) {
                        ldlm_resource_dump(res);
                        LBUG();
                }

                if (!list_empty(&res->lr_waiting)) {
                        ldlm_resource_dump(res);
                        LBUG();
                }

                if (!list_empty(&res->lr_children)) {
                        ldlm_resource_dump(res);
                        LBUG();
                }

                ns->ns_refcount--;
                list_del_init(&res->lr_hash);
                list_del_init(&res->lr_childof);

                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
                l_unlock(&ns->ns_lock);

                spin_lock(&ns->ns_counter_lock);
                ns->ns_resources--;
                spin_unlock(&ns->ns_counter_lock);

                rc = 1;
                EXIT;
        }

        RETURN(rc);
}

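/* Append 'lock' to the given lock list of the resource (granted, converting
 * or waiting), unless the lock has already been destroyed. */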
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                            struct ldlm_lock *lock)
{
        l_lock(&res->lr_namespace->ns_lock);

        ldlm_resource_dump(res);
        CDEBUG(D_OTHER, "About to add this lock:\n");
        ldlm_lock_dump(D_OTHER, lock);

        if (lock->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                /* Don't leak the ns_lock on the early-return path. */
                l_unlock(&res->lr_namespace->ns_lock);
                return;
        }

        LASSERT(list_empty(&lock->l_res_link));

        list_add_tail(&lock->l_res_link, head);
        l_unlock(&res->lr_namespace->ns_lock);
}

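/* Remove 'lock' from whichever resource list it is currently on. */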
void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        list_del_init(&lock->l_res_link);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}

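/* Copy the resource's type, name and version into the given descriptor. */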
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
{
        desc->lr_type = res->lr_type;
        memcpy(&desc->lr_name, &res->lr_name, sizeof(desc->lr_name));
        memcpy(desc->lr_version, res->lr_version, sizeof(desc->lr_version));
}

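/* Dump every namespace on the global list to the debug log. */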
void ldlm_dump_all_namespaces(void)
{
        struct list_head *tmp;

        spin_lock(&ldlm_namespace_lock);

        list_for_each(tmp, &ldlm_namespace_list) {
                struct ldlm_namespace *ns;
                ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
                ldlm_namespace_dump(ns);
        }

        spin_unlock(&ldlm_namespace_lock);
}

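/* Dump a namespace header and each of its root resources. */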
void ldlm_namespace_dump(struct ldlm_namespace *ns)
{
        struct list_head *tmp;

        l_lock(&ns->ns_lock);
        CDEBUG(D_OTHER, "--- Namespace: %s (rc: %d, client: %d)\n", ns->ns_name,
               ns->ns_refcount, ns->ns_client);

        list_for_each(tmp, &ns->ns_root_list) {
                struct ldlm_resource *res;
                res = list_entry(tmp, struct ldlm_resource, lr_childof);

                /* Once we have resources with children, this should really dump
                 * them recursively. */
                ldlm_resource_dump(res);
        }
        l_unlock(&ns->ns_lock);
}

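/* Dump a resource's name, refcount and its granted, converting and waiting
 * lock lists to the debug log. */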
void ldlm_resource_dump(struct ldlm_resource *res)
{
        struct list_head *tmp;
        char name[256];

        if (RES_NAME_SIZE != 3)
                LBUG();

        snprintf(name, sizeof(name), "%Lx %Lx %Lx",
                 (unsigned long long)res->lr_name.name[0],
                 (unsigned long long)res->lr_name.name[1],
                 (unsigned long long)res->lr_name.name[2]);

        CDEBUG(D_OTHER, "--- Resource: %p (%s) (rc: %d)\n", res, name,
               atomic_read(&res->lr_refcount));
        CDEBUG(D_OTHER, "Namespace: %p (%s)\n", res->lr_namespace,
               res->lr_namespace->ns_name);
        CDEBUG(D_OTHER, "Parent: %p, root: %p\n", res->lr_parent, res->lr_root);

        CDEBUG(D_OTHER, "Granted locks:\n");
        list_for_each(tmp, &res->lr_granted) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                ldlm_lock_dump(D_OTHER, lock);
        }

        CDEBUG(D_OTHER, "Converting locks:\n");
        list_for_each(tmp, &res->lr_converting) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                ldlm_lock_dump(D_OTHER, lock);
        }

        CDEBUG(D_OTHER, "Waiting locks:\n");
        list_for_each(tmp, &res->lr_waiting) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                ldlm_lock_dump(D_OTHER, lock);
        }
}