lustre/ldlm/ldlm_resource.c (fs/lustre-release.git, commit 84fdecc499f6df30698276785af92b57b0a28f76)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Phil Schwan <phil@clusterfs.com>
 *   Author: Peter Braam <braam@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
#include <linux/lustre_dlm.h>
#else
#include <liblustre.h>
#endif

#include <linux/obd_class.h>

kmem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;

spinlock_t ldlm_namespace_lock = SPIN_LOCK_UNLOCKED;
struct list_head ldlm_namespace_list = LIST_HEAD_INIT(ldlm_namespace_list);
static struct proc_dir_entry *ldlm_ns_proc_dir = NULL;

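/* Attach the lprocfs tree for the LDLM obd device and remember its /proc
 * directory; the per-namespace counters registered by ldlm_proc_namespace()
 * are created underneath it.  ldlm_proc_cleanup() undoes this. */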
int ldlm_proc_setup(struct obd_device *obd)
{
        int rc;
        ENTRY;
        LASSERT(ldlm_ns_proc_dir == NULL);
        LASSERT(obd != NULL);
        rc = lprocfs_obd_attach(obd, 0);
        if (rc) {
                CERROR("LProcFS failed in ldlm-init\n");
                RETURN(rc);
        }
        ldlm_ns_proc_dir = obd->obd_proc_entry;
        RETURN(0);
}

void ldlm_proc_cleanup(struct obd_device *obd)
{
        if (ldlm_ns_proc_dir) {
                lprocfs_obd_detach(obd);
                ldlm_ns_proc_dir = NULL;
        }
}

#ifdef __KERNEL__
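/* /proc read handler for plain "unsigned int" counters (ns_nr_unused); the
 * __u64 counters below use the stock lprocfs_rd_u64 helper instead. */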
static int lprocfs_uint_rd(char *page, char **start, off_t off,
                           int count, int *eof, void *data)
{
        unsigned int *temp = (unsigned int *)data;
        return snprintf(page, count, "%u\n", *temp);
}


#define MAX_STRING_SIZE 128
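/* Register the per-namespace /proc counters (resource_count, lock_count,
 * lock_unused_count) under the directory set up by ldlm_proc_setup(). */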
void ldlm_proc_namespace(struct ldlm_namespace *ns)
{
        struct lprocfs_vars lock_vars[2];
        char lock_name[MAX_STRING_SIZE + 1];

        LASSERT(ns != NULL);
        LASSERT(ns->ns_name != NULL);

        lock_name[MAX_STRING_SIZE] = '\0';

        memset(lock_vars, 0, sizeof(lock_vars));
        lock_vars[0].read_fptr = lprocfs_rd_u64;

        lock_vars[0].name = lock_name;

        snprintf(lock_name, MAX_STRING_SIZE, "%s/resource_count", ns->ns_name);

        lock_vars[0].data = &ns->ns_resources;
        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

        snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_count", ns->ns_name);

        lock_vars[0].data = &ns->ns_locks;
        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

        snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_unused_count",
                 ns->ns_name);
        lock_vars[0].data = &ns->ns_nr_unused;
        lock_vars[0].read_fptr = lprocfs_uint_rd;
        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
}
#endif
#undef MAX_STRING_SIZE

#define LDLM_MAX_UNUSED 100
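/* Allocate a new namespace: vmalloc the resource hash table, copy the name,
 * initialise the locks, counters and LRU state, link it onto the global
 * namespace list and (in the kernel) register its /proc entries.  Returns
 * NULL if any allocation fails. */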
struct ldlm_namespace *ldlm_namespace_new(char *name, __u32 client)
{
        struct ldlm_namespace *ns = NULL;
        struct list_head *bucket;
        ENTRY;

        OBD_ALLOC(ns, sizeof(*ns));
        if (!ns)
                RETURN(NULL);

        ns->ns_hash = vmalloc(sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        if (!ns->ns_hash)
                GOTO(out_ns, NULL);

        atomic_add(sizeof(*ns->ns_hash) * RES_HASH_SIZE, &obd_memory);

        OBD_ALLOC(ns->ns_name, strlen(name) + 1);
        if (!ns->ns_name)
                GOTO(out_hash, NULL);

        strcpy(ns->ns_name, name);

        INIT_LIST_HEAD(&ns->ns_root_list);
        l_lock_init(&ns->ns_lock);
        ns->ns_refcount = 0;
        ns->ns_client = client;
        spin_lock_init(&ns->ns_counter_lock);
        ns->ns_locks = 0;
        ns->ns_resources = 0;

        for (bucket = ns->ns_hash + RES_HASH_SIZE - 1; bucket >= ns->ns_hash;
             bucket--)
                INIT_LIST_HEAD(bucket);

        INIT_LIST_HEAD(&ns->ns_unused_list);
        ns->ns_nr_unused = 0;
        ns->ns_max_unused = LDLM_MAX_UNUSED;

        spin_lock(&ldlm_namespace_lock);
        list_add(&ns->ns_list_chain, &ldlm_namespace_list);
        spin_unlock(&ldlm_namespace_lock);
#ifdef __KERNEL__
        ldlm_proc_namespace(ns);
#endif
        RETURN(ns);

out_hash:
        POISON(ns->ns_hash, 0x5a, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        vfree(ns->ns_hash);
        atomic_sub(sizeof(*ns->ns_hash) * RES_HASH_SIZE, &obd_memory);
out_ns:
        OBD_FREE(ns, sizeof(*ns));
        return NULL;
}

extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);

/* If 'local_only' is true, don't try to tell the server, just cleanup.
 * This is currently only used for recovery, and we make certain assumptions
 * as a result--notably, that we shouldn't cancel locks with refs. -phil
 *
 * Called with the ns_lock held. */
static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
                             int local_only)
{
        struct list_head *tmp, *pos;
        int rc = 0, client = res->lr_namespace->ns_client;
        ENTRY;

        list_for_each_safe(tmp, pos, q) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                LDLM_LOCK_GET(lock);

                if (local_only && (lock->l_readers || lock->l_writers)) {
                        /* This is a little bit gross, but much better than the
                         * alternative: pretend that we got a blocking AST from
                         * the server, so that when the lock is decref'd, it
                         * will go away ... */
                        lock->l_flags |= LDLM_FL_CBPENDING;
                        /* ... without sending a CANCEL message. */
                        lock->l_flags |= LDLM_FL_LOCAL_ONLY;
                        /* ... and without calling the cancellation callback */
                        lock->l_flags |= LDLM_FL_CANCEL;
                        LDLM_LOCK_PUT(lock);
                        continue;
                }

                /* At shutdown time, don't call the cancellation callback */
                lock->l_flags |= LDLM_FL_CANCEL;

                if (client) {
                        struct lustre_handle lockh;
                        ldlm_lock2handle(lock, &lockh);
                        if (!local_only) {
                                rc = ldlm_cli_cancel(&lockh);
                                if (rc)
                                        CERROR("ldlm_cli_cancel: %d\n", rc);
                        }
                        /* Force local cleanup on errors, too. */
                        if (local_only || rc != ELDLM_OK)
                                ldlm_lock_cancel(lock);
                } else {
                        LDLM_DEBUG(lock, "Freeing a lock still held by a "
                                   "client node");

                        ldlm_resource_unlink_lock(lock);
                        ldlm_lock_destroy(lock);
                }
                LDLM_LOCK_PUT(lock);
        }
        EXIT;
}

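/* Walk every hash bucket and cancel or destroy every lock on every resource
 * in the namespace; used at disconnect/shutdown and, with local_only set,
 * during recovery. */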
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int local_only)
{
        int i;

        if (ns == NULL) {
                CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
                return ELDLM_OK;
        }

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *pos;
                list_for_each_safe(tmp, pos, &(ns->ns_hash[i])) {
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        ldlm_resource_getref(res);

                        cleanup_resource(res, &res->lr_granted, local_only);
                        cleanup_resource(res, &res->lr_converting, local_only);
                        cleanup_resource(res, &res->lr_waiting, local_only);

                        /* XXX what a mess: don't force cleanup if we're
                         * local_only (which is only used by recovery).  In that
                         * case, we probably still have outstanding lock refs
                         * which reference these resources. -phil */
                        if (!ldlm_resource_putref(res) && !local_only) {
                                CERROR("Resource refcount nonzero (%d) after "
                                       "lock cleanup; forcing cleanup.\n",
                                       atomic_read(&res->lr_refcount));
                                ldlm_resource_dump(res);
                                atomic_set(&res->lr_refcount, 1);
                                ldlm_resource_putref(res);
                        }
                }
        }
        l_unlock(&ns->ns_lock);

        return ELDLM_OK;
}

/* Clean up the namespace and then free it. */
int ldlm_namespace_free(struct ldlm_namespace *ns)
{
        if (!ns)
                RETURN(ELDLM_OK);

        spin_lock(&ldlm_namespace_lock);
        list_del(&ns->ns_list_chain);

        spin_unlock(&ldlm_namespace_lock);

        ldlm_namespace_cleanup(ns, 0);

        POISON(ns->ns_hash, 0x5a, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        vfree(ns->ns_hash /* , sizeof(*ns->ns_hash) * RES_HASH_SIZE */);
        atomic_sub(sizeof(*ns->ns_hash) * RES_HASH_SIZE, &obd_memory);
        OBD_FREE(ns->ns_name, strlen(ns->ns_name) + 1);
        OBD_FREE(ns, sizeof(*ns));

        return ELDLM_OK;
}

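/* Simple hash: sum the resource name components, mix in the parent pointer
 * and mask the result down to the RES_HASH_SIZE bucket range. */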
static __u32 ldlm_hash_fn(struct ldlm_resource *parent, struct ldlm_res_id name)
{
        __u32 hash = 0;
        int i;

        for (i = 0; i < RES_NAME_SIZE; i++)
                hash += name.name[i];

        hash += (__u32)((unsigned long)parent >> 4);

        return (hash & RES_HASH_MASK);
}

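/* Allocate and zero an ldlm_resource from the slab cache and initialise its
 * list heads; the resource is returned with a single reference held. */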
static struct ldlm_resource *ldlm_resource_new(void)
{
        struct ldlm_resource *res;

        OBD_SLAB_ALLOC(res, ldlm_resource_slab, SLAB_KERNEL, sizeof *res);
        if (res == NULL) {
                LBUG();
                return NULL;
        }
        memset(res, 0, sizeof(*res));

        INIT_LIST_HEAD(&res->lr_children);
        INIT_LIST_HEAD(&res->lr_childof);
        INIT_LIST_HEAD(&res->lr_granted);
        INIT_LIST_HEAD(&res->lr_converting);
        INIT_LIST_HEAD(&res->lr_waiting);

        atomic_set(&res->lr_refcount, 1);

        return res;
}

/* Args: locked namespace
 * Returns: newly-allocated, referenced, unlocked resource */
static struct ldlm_resource *
ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                  struct ldlm_res_id name, __u32 type)
{
        struct list_head *bucket;
        struct ldlm_resource *res;
        ENTRY;

        if (type < LDLM_MIN_TYPE || type > LDLM_MAX_TYPE) {
                LBUG();
                RETURN(NULL);
        }

        res = ldlm_resource_new();
        if (!res) {
                LBUG();
                RETURN(NULL);
        }

        spin_lock(&ns->ns_counter_lock);
        ns->ns_resources++;
        spin_unlock(&ns->ns_counter_lock);

        l_lock(&ns->ns_lock);
        memcpy(&res->lr_name, &name, sizeof(res->lr_name));
        res->lr_namespace = ns;
        ns->ns_refcount++;

        res->lr_type = type;
        res->lr_most_restr = LCK_NL;

        bucket = ns->ns_hash + ldlm_hash_fn(parent, name);
        list_add(&res->lr_hash, bucket);

        if (parent == NULL) {
                list_add(&res->lr_childof, &ns->ns_root_list);
        } else {
                res->lr_parent = parent;
                list_add(&res->lr_childof, &parent->lr_children);
        }
        l_unlock(&ns->ns_lock);

        RETURN(res);
}

/* Args: unlocked namespace
 * Locks: takes and releases ns->ns_lock and res->lr_lock
 * Returns: referenced, unlocked ldlm_resource or NULL */
struct ldlm_resource *
ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                  struct ldlm_res_id name, __u32 type, int create)
{
        struct list_head *bucket, *tmp;
        struct ldlm_resource *res = NULL;
        ENTRY;

        LASSERT(ns != NULL);
        LASSERT(ns->ns_hash != NULL);

        l_lock(&ns->ns_lock);
        bucket = ns->ns_hash + ldlm_hash_fn(parent, name);

        list_for_each(tmp, bucket) {
                res = list_entry(tmp, struct ldlm_resource, lr_hash);

                if (memcmp(&res->lr_name, &name, sizeof(res->lr_name)) == 0) {
                        ldlm_resource_getref(res);
                        l_unlock(&ns->ns_lock);
                        RETURN(res);
                }
        }

        if (create)
                res = ldlm_resource_add(ns, parent, name, type);
        else
                res = NULL;

        l_unlock(&ns->ns_lock);

        RETURN(res);
}
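
/* Illustrative caller pattern (a sketch, not code from this file; 'ns' and
 * 'res_id' are assumed to come from the caller):
 *
 *      struct ldlm_resource *res;
 *
 *      res = ldlm_resource_get(ns, NULL, res_id, LDLM_PLAIN, 1);
 *      if (res == NULL)
 *              return -ENOMEM;
 *      ... enqueue or inspect locks on the resource ...
 *      ldlm_resource_putref(res);
 */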

struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
{
        atomic_inc(&res->lr_refcount);
        CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
               atomic_read(&res->lr_refcount));
        return res;
}

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref(struct ldlm_resource *res)
{
        int rc = 0;
        ENTRY;

        CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
               atomic_read(&res->lr_refcount) - 1);
        LASSERT(atomic_read(&res->lr_refcount) > 0);
        LASSERT(atomic_read(&res->lr_refcount) < 0x5a5a5a5a);

        if (atomic_dec_and_test(&res->lr_refcount)) {
                struct ldlm_namespace *ns = res->lr_namespace;
                ENTRY;

                l_lock(&ns->ns_lock);

                if (atomic_read(&res->lr_refcount) != 0) {
                        /* We lost the race. */
                        l_unlock(&ns->ns_lock);
                        RETURN(rc);
                }

                if (!list_empty(&res->lr_granted)) {
                        ldlm_resource_dump(res);
                        LBUG();
                }

                if (!list_empty(&res->lr_converting)) {
                        ldlm_resource_dump(res);
                        LBUG();
                }

                if (!list_empty(&res->lr_waiting)) {
                        ldlm_resource_dump(res);
                        LBUG();
                }

                if (!list_empty(&res->lr_children)) {
                        ldlm_resource_dump(res);
                        LBUG();
                }

                ns->ns_refcount--;
                list_del_init(&res->lr_hash);
                list_del_init(&res->lr_childof);

                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
                l_unlock(&ns->ns_lock);

                spin_lock(&ns->ns_counter_lock);
                ns->ns_resources--;
                spin_unlock(&ns->ns_counter_lock);

                rc = 1;
                EXIT;
        }

        RETURN(rc);
}

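/* Append the lock to the given queue (granted, converting or waiting) of the
 * resource, unless the lock has already been destroyed. */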
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                            struct ldlm_lock *lock)
{
        l_lock(&res->lr_namespace->ns_lock);

        ldlm_resource_dump(res);
        CDEBUG(D_OTHER, "About to add this lock:\n");
        ldlm_lock_dump(D_OTHER, lock);

        if (lock->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                /* Still need to drop ns_lock before returning. */
                goto out;
        }

        LASSERT(list_empty(&lock->l_res_link));

        list_add_tail(&lock->l_res_link, head);
 out:
        l_unlock(&res->lr_namespace->ns_lock);
}

void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        list_del_init(&lock->l_res_link);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}

void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
{
        desc->lr_type = res->lr_type;
        memcpy(&desc->lr_name, &res->lr_name, sizeof(desc->lr_name));
        memcpy(desc->lr_version, res->lr_version, sizeof(desc->lr_version));
}

void ldlm_dump_all_namespaces(void)
{
        struct list_head *tmp;

        spin_lock(&ldlm_namespace_lock);

        list_for_each(tmp, &ldlm_namespace_list) {
                struct ldlm_namespace *ns;
                ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
                ldlm_namespace_dump(ns);
        }

        spin_unlock(&ldlm_namespace_lock);
}

void ldlm_namespace_dump(struct ldlm_namespace *ns)
{
        struct list_head *tmp;

        l_lock(&ns->ns_lock);
        CDEBUG(D_OTHER, "--- Namespace: %s (rc: %d, client: %d)\n", ns->ns_name,
               ns->ns_refcount, ns->ns_client);

        list_for_each(tmp, &ns->ns_root_list) {
                struct ldlm_resource *res;
                res = list_entry(tmp, struct ldlm_resource, lr_childof);

                /* Once we have resources with children, this should really dump
                 * them recursively. */
                ldlm_resource_dump(res);
        }
        l_unlock(&ns->ns_lock);
}

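/* Dump the resource name and every lock on its granted, converting and
 * waiting queues to the debug log. */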
void ldlm_resource_dump(struct ldlm_resource *res)
{
        struct list_head *tmp;
        char name[256];

        if (RES_NAME_SIZE != 3)
                LBUG();

        snprintf(name, sizeof(name), "%Lx %Lx %Lx",
                 (unsigned long long)res->lr_name.name[0],
                 (unsigned long long)res->lr_name.name[1],
                 (unsigned long long)res->lr_name.name[2]);

        CDEBUG(D_OTHER, "--- Resource: %p (%s) (rc: %d)\n", res, name,
               atomic_read(&res->lr_refcount));
        CDEBUG(D_OTHER, "Namespace: %p (%s)\n", res->lr_namespace,
               res->lr_namespace->ns_name);
        CDEBUG(D_OTHER, "Parent: %p, root: %p\n", res->lr_parent, res->lr_root);

        CDEBUG(D_OTHER, "Granted locks:\n");
        list_for_each(tmp, &res->lr_granted) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                ldlm_lock_dump(D_OTHER, lock);
        }

        CDEBUG(D_OTHER, "Converting locks:\n");
        list_for_each(tmp, &res->lr_converting) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                ldlm_lock_dump(D_OTHER, lock);
        }

        CDEBUG(D_OTHER, "Waiting locks:\n");
        list_for_each(tmp, &res->lr_waiting) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                ldlm_lock_dump(D_OTHER, lock);
        }
}