lustre/ldlm/ldlm_resource.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
5  *   Author: Phil Schwan <phil@clusterfs.com>
6  *   Author: Peter Braam <braam@clusterfs.com>
7  *
8  *   This file is part of the Lustre file system, http://www.lustre.org
9  *   Lustre is a trademark of Cluster File Systems, Inc.
10  *
11  *   You may have signed or agreed to another license before downloading
12  *   this software.  If so, you are bound by the terms and conditions
13  *   of that agreement, and the following does not apply to you.  See the
14  *   LICENSE file included with this distribution for more information.
15  *
16  *   If you did not agree to a different license, then this copy of Lustre
17  *   is open source software; you can redistribute it and/or modify it
18  *   under the terms of version 2 of the GNU General Public License as
19  *   published by the Free Software Foundation.
20  *
21  *   In either case, Lustre is distributed in the hope that it will be
22  *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
23  *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
24  *   license text for more details.
25  */
26
27 #define DEBUG_SUBSYSTEM S_LDLM
28 #ifdef __KERNEL__
29 # include <lustre_dlm.h>
30 #else
31 # include <liblustre.h>
32 #endif
33
34 #include <obd_class.h>
35 #include "ldlm_internal.h"
36
37 cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
38
39 atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0);
40 atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0);
41
42 struct semaphore ldlm_srv_namespace_lock;
43 CFS_LIST_HEAD(ldlm_srv_namespace_list);
44
45 struct semaphore ldlm_cli_namespace_lock;
46 CFS_LIST_HEAD(ldlm_cli_namespace_list);
47
48 cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
49 cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
50 cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL;
51
52 #ifdef LPROCFS
53 static int ldlm_proc_dump_ns(struct file *file, const char *buffer,
54                              unsigned long count, void *data)
55 {
56         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
57         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
58         RETURN(count);
59 }
60
61 int ldlm_proc_setup(void)
62 {
63         int rc;
64         struct lprocfs_vars list[] = {
65                 { "dump_namespaces", NULL, ldlm_proc_dump_ns, NULL },
66                 { NULL }};
67         ENTRY;
68         LASSERT(ldlm_ns_proc_dir == NULL);
69
70         ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
71                                               proc_lustre_root,
72                                               NULL, NULL);
73         if (IS_ERR(ldlm_type_proc_dir)) {
74                 CERROR("LProcFS failed in ldlm-init\n");
75                 rc = PTR_ERR(ldlm_type_proc_dir);
76                 GOTO(err, rc);
77         }
78
79         ldlm_ns_proc_dir = lprocfs_register("namespaces",
80                                             ldlm_type_proc_dir,
81                                             NULL, NULL);
82         if (IS_ERR(ldlm_ns_proc_dir)) {
83                 CERROR("LProcFS failed in ldlm-init\n");
84                 rc = PTR_ERR(ldlm_ns_proc_dir);
85                 GOTO(err_type, rc);
86         }
87
88         ldlm_svc_proc_dir = lprocfs_register("services",
89                                             ldlm_type_proc_dir,
90                                             NULL, NULL);
91         if (IS_ERR(ldlm_svc_proc_dir)) {
92                 CERROR("LProcFS failed in ldlm-init\n");
93                 rc = PTR_ERR(ldlm_svc_proc_dir);
94                 GOTO(err_ns, rc);
95         }
96
97         rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
98
99         RETURN(0);
100
101 err_ns:
102         lprocfs_remove(&ldlm_ns_proc_dir);
103 err_type:
104         lprocfs_remove(&ldlm_type_proc_dir);
105 err:
106         ldlm_svc_proc_dir = NULL;
107         RETURN(rc);
108 }
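/*
 * For reference, a sketch of the proc layout that the registrations above are
 * expected to produce, assuming OBD_LDLM_DEVICENAME is "ldlm" and
 * proc_lustre_root corresponds to /proc/fs/lustre (both assumptions, not
 * spelled out in this file):
 *
 *   /proc/fs/lustre/ldlm/                  <- ldlm_type_proc_dir
 *   /proc/fs/lustre/ldlm/dump_namespaces   <- write trigger, ldlm_proc_dump_ns()
 *   /proc/fs/lustre/ldlm/namespaces/       <- ldlm_ns_proc_dir, populated by
 *                                             ldlm_proc_namespace()
 *   /proc/fs/lustre/ldlm/services/         <- ldlm_svc_proc_dir
 */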
109
110 void ldlm_proc_cleanup(void)
111 {
112         if (ldlm_svc_proc_dir)
113                 lprocfs_remove(&ldlm_svc_proc_dir);
114
115         if (ldlm_ns_proc_dir)
116                 lprocfs_remove(&ldlm_ns_proc_dir);
117
118         if (ldlm_type_proc_dir)
119                 lprocfs_remove(&ldlm_type_proc_dir);
120 }
121
122 static int lprocfs_rd_lru_size(char *page, char **start, off_t off,
123                                int count, int *eof, void *data)
124 {
125         struct ldlm_namespace *ns = data;
126         __u32 *nr = &ns->ns_max_unused;
127
128         if (ns_connect_lru_resize(ns))
129                 nr = &ns->ns_nr_unused;
130         return lprocfs_rd_uint(page, start, off, count, eof, nr);
131 }
132
133 static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
134                                unsigned long count, void *data)
135 {
136         struct ldlm_namespace *ns = data;
137         char dummy[MAX_STRING_SIZE + 1], *end;
138         unsigned long tmp;
139         int lru_resize;
140
141         dummy[MAX_STRING_SIZE] = '\0';
142         if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
143                 return -EFAULT;
144
145         if (count == 6 && memcmp(dummy, "clear", 5) == 0) {
146                 CDEBUG(D_DLMTRACE,
147                        "dropping all unused locks from namespace %s\n",
148                        ns->ns_name);
149                 if (ns_connect_lru_resize(ns)) {
150                         int canceled, unused  = ns->ns_nr_unused;
151                         
152                         /* Try to cancel all @ns_nr_unused locks. */
153                         canceled = ldlm_cancel_lru(ns, unused, LDLM_SYNC, 
154                                                    LDLM_CANCEL_PASSED);
155                         if (canceled < unused) {
156                                 CERROR("not all requested locks are canceled, "
157                                        "requested: %d, canceled: %d\n", unused, 
158                                        canceled);
159                                 return -EINVAL;
160                         }
161                 } else {
162                         tmp = ns->ns_max_unused;
163                         ns->ns_max_unused = 0;
164                         ldlm_cancel_lru(ns, 0, LDLM_SYNC, LDLM_CANCEL_PASSED);
165                         ns->ns_max_unused = tmp;
166                 }
167                 return count;
168         }
169
170         tmp = simple_strtoul(dummy, &end, 0);
171         if (dummy == end) {
172                 CERROR("invalid value written\n");
173                 return -EINVAL;
174         }
175         lru_resize = (tmp == 0);
176         
177         if (ns_connect_lru_resize(ns)) {
178                 if (!lru_resize)
179                         ns->ns_max_unused = (unsigned int)tmp;
180                         
181                 if (tmp > ns->ns_nr_unused)
182                         tmp = ns->ns_nr_unused;
183                 tmp = ns->ns_nr_unused - tmp;
184                 
185                 CDEBUG(D_DLMTRACE, "changing namespace %s unused locks from %u to %u\n", 
186                        ns->ns_name, ns->ns_nr_unused, (unsigned int)tmp);
187                 ldlm_cancel_lru(ns, (unsigned int)tmp, LDLM_ASYNC, LDLM_CANCEL_PASSED);
188                 
189                 if (!lru_resize) {
190                         CDEBUG(D_DLMTRACE, "disable lru_resize for namespace %s\n", 
191                                ns->ns_name);
192                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
193                 }
194         } else {
195                 CDEBUG(D_DLMTRACE, "changing namespace %s max_unused from %u to %u\n",
196                        ns->ns_name, ns->ns_max_unused, (unsigned int)tmp);
197                 ns->ns_max_unused = (unsigned int)tmp;
198                 ldlm_cancel_lru(ns, 0, LDLM_ASYNC, LDLM_CANCEL_PASSED);
199                 
200                 /* Make sure that LRU resize was originally supported (see
201                  * ns_orig_connect_flags) before turning it on here. */
202                 if (lru_resize && 
203                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
204                         CDEBUG(D_DLMTRACE, "enable lru_resize for namespace %s\n", 
205                                ns->ns_name);
206                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
207                 }
208         }
209
210         return count;
211 }
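/*
 * Illustrative summary of the lru_size write semantics implemented above
 * (an editorial sketch, not part of the original file):
 *
 *  - writing the string "clear" cancels all unused locks in the namespace;
 *  - writing a non-zero value N makes N the new ns_max_unused and, if LRU
 *    resize was active, turns it off;
 *  - writing 0 switches back to LRU-resize mode, provided the server
 *    originally advertised OBD_CONNECT_LRU_RESIZE (ns_orig_connect_flags).
 */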
212
213 void ldlm_proc_namespace(struct ldlm_namespace *ns)
214 {
215         struct lprocfs_vars lock_vars[2];
216         char lock_name[MAX_STRING_SIZE + 1];
217
218         LASSERT(ns != NULL);
219         LASSERT(ns->ns_name != NULL);
220
221         lock_name[MAX_STRING_SIZE] = '\0';
222
223         memset(lock_vars, 0, sizeof(lock_vars));
224         lock_vars[0].name = lock_name;
225
226         snprintf(lock_name, MAX_STRING_SIZE, "%s/resource_count", ns->ns_name);
227         lock_vars[0].data = &ns->ns_refcount;
228         lock_vars[0].read_fptr = lprocfs_rd_atomic;
229         lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
230
231         snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_count", ns->ns_name);
232         lock_vars[0].data = &ns->ns_locks;
233         lock_vars[0].read_fptr = lprocfs_rd_atomic;
234         lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
235
236         if (ns_is_client(ns)) {
237                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_unused_count",
238                          ns->ns_name);
239                 lock_vars[0].data = &ns->ns_nr_unused;
240                 lock_vars[0].read_fptr = lprocfs_rd_uint;
241                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
242
243                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_size",
244                          ns->ns_name);
245                 lock_vars[0].data = ns;
246                 lock_vars[0].read_fptr = lprocfs_rd_lru_size;
247                 lock_vars[0].write_fptr = lprocfs_wr_lru_size;
248                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
249
250                 snprintf(lock_name, MAX_STRING_SIZE, "%s/shrink_thumb",
251                          ns->ns_name);
252                 lock_vars[0].data = ns;
253                 lock_vars[0].read_fptr = lprocfs_rd_uint;
254                 lock_vars[0].write_fptr = lprocfs_wr_uint;
255                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
256
257                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_max_age",
258                          ns->ns_name);
259                 lock_vars[0].data = &ns->ns_max_age;
260                 lock_vars[0].read_fptr = lprocfs_rd_uint;
261                 lock_vars[0].write_fptr = lprocfs_wr_uint;
262                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
263         } else {
264                 snprintf(lock_name, MAX_STRING_SIZE, "%s/ctime_age_limit",
265                          ns->ns_name);
266                 lock_vars[0].data = &ns->ns_ctime_age_limit;
267                 lock_vars[0].read_fptr = lprocfs_rd_uint;
268                 lock_vars[0].write_fptr = lprocfs_wr_uint;
269                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
270
271                 snprintf(lock_name, MAX_STRING_SIZE, "%s/max_nolock_bytes",
272                          ns->ns_name);
273                 lock_vars[0].data = &ns->ns_max_nolock_size;
274                 lock_vars[0].read_fptr = lprocfs_rd_uint;
275                 lock_vars[0].write_fptr = lprocfs_wr_uint;
276                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
277
278                 snprintf(lock_name, MAX_STRING_SIZE, "%s/contention_seconds",
279                          ns->ns_name);
280                 lock_vars[0].data = &ns->ns_contention_time;
281                 lock_vars[0].read_fptr = lprocfs_rd_uint;
282                 lock_vars[0].write_fptr = lprocfs_wr_uint;
283                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
284
285                 snprintf(lock_name, MAX_STRING_SIZE, "%s/contended_locks",
286                          ns->ns_name);
287                 lock_vars[0].data = &ns->ns_contended_locks;
288                 lock_vars[0].read_fptr = lprocfs_rd_uint;
289                 lock_vars[0].write_fptr = lprocfs_wr_uint;
290                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
291         }
292 }
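/*
 * Sketch of the per-namespace proc entries created above under
 * ldlm_ns_proc_dir (illustrative only):
 *
 *   <ns_name>/resource_count, lock_count          all namespaces
 *   <ns_name>/lock_unused_count, lru_size,
 *             shrink_thumb, lru_max_age           client namespaces
 *   <ns_name>/ctime_age_limit, max_nolock_bytes,
 *             contention_seconds, contended_locks server namespaces
 */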
293 #undef MAX_STRING_SIZE
294 #else
295 #define ldlm_proc_namespace(ns) do {} while (0)
296 #endif /* LPROCFS */
297
298 struct ldlm_namespace *ldlm_namespace_new(char *name, ldlm_side_t client, 
299                                           ldlm_appetite_t apt)
300 {
301         struct ldlm_namespace *ns = NULL;
302         struct list_head *bucket;
303         int rc, idx, namelen;
304         ENTRY;
305
306         rc = ldlm_get_ref();
307         if (rc) {
308                 CERROR("ldlm_get_ref failed: %d\n", rc);
309                 RETURN(NULL);
310         }
311
312         OBD_ALLOC_PTR(ns);
313         if (!ns)
314                 GOTO(out_ref, NULL);
315
316         OBD_VMALLOC(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
317         if (!ns->ns_hash)
318                 GOTO(out_ns, NULL);
319
320         ns->ns_shrink_thumb = LDLM_LOCK_SHRINK_THUMB;
321         ns->ns_appetite = apt;
322         namelen = strlen(name);
323         OBD_ALLOC(ns->ns_name, namelen + 1);
324         if (!ns->ns_name)
325                 GOTO(out_hash, NULL);
326
327         strcpy(ns->ns_name, name);
328
329         CFS_INIT_LIST_HEAD(&ns->ns_root_list);
330         CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
331         ns->ns_refcount = 0;
332         ns->ns_client = client;
333         spin_lock_init(&ns->ns_hash_lock);
334         atomic_set(&ns->ns_locks, 0);
335         ns->ns_resources = 0;
336         cfs_waitq_init(&ns->ns_waitq);
337         ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
338         ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
339         ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;
340
341         for (bucket = ns->ns_hash + RES_HASH_SIZE - 1; bucket >= ns->ns_hash;
342              bucket--)
343                 CFS_INIT_LIST_HEAD(bucket);
344
345         CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
346         ns->ns_nr_unused = 0;
347         ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
348         ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
349         ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
350         spin_lock_init(&ns->ns_unused_lock);
351         ns->ns_orig_connect_flags = 0;
352         ns->ns_connect_flags = 0;
353         ldlm_proc_namespace(ns);
354
355         idx = atomic_read(ldlm_namespace_nr(client));
356         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
357         if (rc) {
358                 CERROR("Can't initialize lock pool, rc %d\n", rc);
359                 GOTO(out_proc, rc);
360         }
361
362         ldlm_namespace_register(ns, client);
363         RETURN(ns);
364 out_proc:
365         ldlm_namespace_cleanup(ns, 0);
366         OBD_FREE(ns->ns_name, namelen + 1);
367 out_hash:
368         OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
369 out_ns:
370         OBD_FREE_PTR(ns);
371 out_ref:
372         ldlm_put_ref();
373         RETURN(NULL);
374 }
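/*
 * Minimal usage sketch for the constructor above (illustrative; the namespace
 * name, the appetite constant LDLM_NAMESPACE_MODEST and the import pointer
 * passed to ldlm_namespace_free() are assumptions, not taken from this file):
 *
 *   struct ldlm_namespace *ns;
 *
 *   ns = ldlm_namespace_new("example-client-ns", LDLM_NAMESPACE_CLIENT,
 *                           LDLM_NAMESPACE_MODEST);
 *   if (ns == NULL)
 *           return -ENOMEM;
 *   ...
 *   ldlm_namespace_free(ns, imp, 0);
 */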
375
376 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
377
378 /* If flags contains LDLM_FL_LOCAL_ONLY, don't try to tell the server; just clean up.
379  * This is currently only used for recovery, and we make certain assumptions
380  * as a result--notably, that we shouldn't cancel locks with refs. -phil
381  *
382  * Called with the ns_lock held. */
383 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
384                              int flags)
385 {
386         struct list_head *tmp;
387         int rc = 0, client = ns_is_client(res->lr_namespace);
388         int local_only = (flags & LDLM_FL_LOCAL_ONLY);
389         ENTRY;
390
391
392         do {
393                 struct ldlm_lock *lock = NULL;
394
395                 /* First, look for a lock that has not been cleaned up yet;
396                  * all cleaned locks are marked with the LDLM_FL_CLEANED flag. */
397                 lock_res(res);
398                 list_for_each(tmp, q) {
399                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
400                         if (lock->l_flags & LDLM_FL_CLEANED) {
401                                 lock = NULL;
402                                 continue;
403                         }
404                         LDLM_LOCK_GET(lock);
405                         lock->l_flags |= LDLM_FL_CLEANED;
406                         break;
407                 }
408
409                 if (lock == NULL) {
410                         unlock_res(res);
411                         break;
412                 }
413
414                 /* Set CBPENDING so nothing in the cancellation path
415                  * can match this lock */
416                 lock->l_flags |= LDLM_FL_CBPENDING;
417                 lock->l_flags |= LDLM_FL_FAILED;
418                 lock->l_flags |= flags;
419
420                 /* ... without sending a CANCEL message for local_only. */
421                 if (local_only)
422                         lock->l_flags |= LDLM_FL_LOCAL_ONLY;
423
424                 if (local_only && (lock->l_readers || lock->l_writers)) {
425                         /* This is a little bit gross, but much better than the
426                          * alternative: pretend that we got a blocking AST from
427                          * the server, so that when the lock is decref'd, it
428                          * will go away ... */
429                         unlock_res(res);
430                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
431                         if (lock->l_completion_ast)
432                                 lock->l_completion_ast(lock, 0, NULL);
433                         LDLM_LOCK_PUT(lock);
434                         continue;
435                 }
436
437                 if (client) {
438                         struct lustre_handle lockh;
439
440                         unlock_res(res);
441                         ldlm_lock2handle(lock, &lockh);
442                         rc = ldlm_cli_cancel(&lockh);
443                         if (rc)
444                                 CERROR("ldlm_cli_cancel: %d\n", rc);
445                 } else {
446                         ldlm_resource_unlink_lock(lock);
447                         unlock_res(res);
448                         LDLM_DEBUG(lock, "Freeing a lock still held by a "
449                                    "client node");
450                         ldlm_lock_destroy(lock);
451                 }
452                 LDLM_LOCK_PUT(lock);
453         } while (1);
454
455         EXIT;
456 }
457
458 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
459 {
460         struct list_head *tmp;
461         int i;
462
463         if (ns == NULL) {
464                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
465                 return ELDLM_OK;
466         }
467
468         for (i = 0; i < RES_HASH_SIZE; i++) {
469                 spin_lock(&ns->ns_hash_lock);
470                 tmp = ns->ns_hash[i].next;
471                 while (tmp != &(ns->ns_hash[i])) {
472                         struct ldlm_resource *res;
473                         res = list_entry(tmp, struct ldlm_resource, lr_hash);
474                         ldlm_resource_getref(res);
475                         spin_unlock(&ns->ns_hash_lock);
476
477                         cleanup_resource(res, &res->lr_granted, flags);
478                         cleanup_resource(res, &res->lr_converting, flags);
479                         cleanup_resource(res, &res->lr_waiting, flags);
480
481                         spin_lock(&ns->ns_hash_lock);
482                         tmp  = tmp->next;
483
484                         /* XXX: the former code caused issues in case of a
485                          * race between ldlm_namespace_cleanup() and lockd()
486                          * when the client gets a blocking AST while the lock
487                          * is being destroyed by the server. This is the 1.4
488                          * branch solution; let's see how it behaves. */
489                         if (!ldlm_resource_putref_locked(res))
490                                 CDEBUG(D_INFO,
491                                        "Namespace %s resource refcount nonzero "
492                                        "(%d) after lock cleanup; forcing cleanup.\n",
493                                        ns->ns_name, atomic_read(&res->lr_refcount));
494                 }
495                 spin_unlock(&ns->ns_hash_lock);
496         }
497
498         return ELDLM_OK;
499 }
500
501 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
502 {
503         ENTRY;
504
505         /* At shutdown time, don't call the cancellation callback */
506         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
507
508         if (ns->ns_refcount > 0) {
509                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
510                 int rc;
511                 CDEBUG(D_DLMTRACE,
512                        "dlm namespace %s free waiting on refcount %d\n",
513                        ns->ns_name, ns->ns_refcount);
514 force_wait:
515                 if (force)
516                         lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
517
518                 rc = l_wait_event(ns->ns_waitq,
519                                   ns->ns_refcount == 0, &lwi);
520
521                 /* Forced cleanups should be able to reclaim all references,
522                  * so it's safe to wait forever... we can't leak locks... */
523                 if (force && rc == -ETIMEDOUT) {
524                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
525                                        "namespace with %d resources in use, "
526                                        "(rc=%d)\n", ns->ns_name,
527                                        ns->ns_refcount, rc);
528                         GOTO(force_wait, rc);
529                 }
530
531                 if (ns->ns_refcount) {
532                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
533                                        "with %d resources in use, (rc=%d)\n",
534                                        ns->ns_name,
535                                        ns->ns_refcount, rc);
536                         RETURN(ELDLM_NAMESPACE_EXISTS);
537                 }
538                 CDEBUG(D_DLMTRACE,
539                        "dlm namespace %s free done waiting\n", ns->ns_name);
540         }
541
542         RETURN(ELDLM_OK);
543 }
544
545 void ldlm_namespace_free_prior(struct ldlm_namespace *ns, 
546                                struct obd_import *imp, 
547                                int force)
548 {
549         int rc;
550         ENTRY;
551         if (!ns) {
552                 EXIT;
553                 return;
554         }
555
556         /* Remove @ns from list. */
557         ldlm_namespace_unregister(ns, ns->ns_client);
558
559         /* Can fail with -EINTR when force == 0, in which case try harder. */
560         rc = __ldlm_namespace_free(ns, force);
561         if (rc != ELDLM_OK) {
562                 if (imp) {
563                         ptlrpc_disconnect_import(imp, 0);
564                         ptlrpc_invalidate_import(imp);
565                 }
566
567                 /* With all requests dropped and the import inactive
568                  * we are guaranteed all references will be dropped. */
569                 rc = __ldlm_namespace_free(ns, 1);
570                 LASSERT(rc == 0);
571         }
572         EXIT;
573 }
574
575 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
576 {
577         ENTRY;
578         if (!ns) {
579                 EXIT;
580                 return;
581         }
582
583         /*
584          * Fini the pool _before_ the parent proc dir is removed. This is
585          * important, as ldlm_pool_fini() removes its own proc dir, which is a
586          * child of @dir. Removing it after @dir may cause an oops.
587          */
588         ldlm_pool_fini(&ns->ns_pool);
589 #ifdef LPROCFS
590         {
591                 struct proc_dir_entry *dir;
592                 dir = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
593                 if (dir == NULL) {
594                         CERROR("dlm namespace %s has no procfs dir?\n",
595                                ns->ns_name);
596                 } else {
597                         lprocfs_remove(&dir);
598                 }
599         }
600 #endif
601
602         OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
603         OBD_FREE(ns->ns_name, strlen(ns->ns_name) + 1);
604         /*
605          * @ns should not be on any list at this point, otherwise this will
606          * cause issues related to using a freed @ns in the pools thread.
607          */
608         LASSERT(list_empty(&ns->ns_list_chain));
609         OBD_FREE_PTR(ns);
610         ldlm_put_ref();
611         EXIT;
612 }
613
614
615 /* Clean up the resources and free the namespace.
616  * bug 12864:
617  * Deadlock issue:
618  * proc1: destroy import
619  *        class_disconnect_export(grab cl_sem) ->
620  *              -> ldlm_namespace_free ->
621  *              -> lprocfs_remove(grab _lprocfs_lock).
622  * proc2: read proc info
623  *        lprocfs_fops_read(grab _lprocfs_lock) ->
624  *              -> osc_rd_active, etc(grab cl_sem).
625  *
626  * To avoid this, ldlm_namespace_free is split into two parts: the first
627  * part, ldlm_namespace_free_prior, cleans up the resources that are still
628  * in use; the second part, ldlm_namespace_free_post, unregisters the
629  * lprocfs entries and then frees the memory. It is called without
630  * cli->cl_sem held.
631  */
632 void ldlm_namespace_free(struct ldlm_namespace *ns, 
633                          struct obd_import *imp,
634                          int force)
635 {
636         ldlm_namespace_free_prior(ns, imp, force);
637         ldlm_namespace_free_post(ns);
638 }
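/*
 * Illustrative call pattern for the two-part API described above (a sketch
 * based on the deadlock comment, not code from this file; the caller and the
 * cl_sem handling are assumptions):
 *
 *   down(&cli->cl_sem);
 *   ...
 *   ldlm_namespace_free_prior(ns, imp, force);
 *   up(&cli->cl_sem);
 *
 *   ldlm_namespace_free_post(ns);
 *
 * ldlm_namespace_free_prior() does not touch lprocfs, so it is safe under
 * cl_sem; ldlm_namespace_free_post() removes the lprocfs entries and must be
 * called after cl_sem has been released.
 */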
639
640
641 void ldlm_namespace_get_locked(struct ldlm_namespace *ns)
642 {
643         LASSERT(ns->ns_refcount >= 0);
644         ns->ns_refcount++;
645 }
646
647 void ldlm_namespace_get(struct ldlm_namespace *ns)
648 {
649         spin_lock(&ns->ns_hash_lock);
650         ldlm_namespace_get_locked(ns);
651         spin_unlock(&ns->ns_hash_lock);
652 }
653
654 void ldlm_namespace_put_locked(struct ldlm_namespace *ns, int wakeup)
655 {
656         LASSERT(ns->ns_refcount > 0);
657         ns->ns_refcount--;
658         if (ns->ns_refcount == 0 && wakeup)
659                 wake_up(&ns->ns_waitq);
660 }
661
662 void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup)
663 {
664         spin_lock(&ns->ns_hash_lock);
665         ldlm_namespace_put_locked(ns, wakeup);
666         spin_unlock(&ns->ns_hash_lock);
667 }
668
669 /* Register @ns in the list of namespaces */
670 void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
671 {
672         mutex_down(ldlm_namespace_lock(client));
673         LASSERT(list_empty(&ns->ns_list_chain));
674         list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
675         atomic_inc(ldlm_namespace_nr(client));
676         mutex_up(ldlm_namespace_lock(client));
677 }
678
679 /* Unregister @ns from the list of namespaces */
680 void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
681 {
682         mutex_down(ldlm_namespace_lock(client));
683         LASSERT(!list_empty(&ns->ns_list_chain));
684         /*
685          * Some asserts, and possibly other parts of the code, still check
686          * list_empty(&ns->ns_list_chain). This is why it is important
687          * to use list_del_init() here.
688          */
689         list_del_init(&ns->ns_list_chain);
690         atomic_dec(ldlm_namespace_nr(client));
691         mutex_up(ldlm_namespace_lock(client));
692 }
693
694 /* Should be called under ldlm_namespace_lock(client) taken */
695 void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client)
696 {
697         LASSERT(!list_empty(&ns->ns_list_chain));
698         LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
699         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
700 }
701
702 /* Should be called under ldlm_namespace_lock(client) taken */
703 struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
704 {
705         LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
706         LASSERT(!list_empty(ldlm_namespace_list(client)));
707         return container_of(ldlm_namespace_list(client)->next, 
708                 struct ldlm_namespace, ns_list_chain);
709 }
710 static __u32 ldlm_hash_fn(struct ldlm_resource *parent,
711                           const struct ldlm_res_id *name)
712 {
713         __u32 hash = 0;
714         int i;
715
716         for (i = 0; i < RES_NAME_SIZE; i++)
717                 hash += name->name[i];
718
719         hash += (__u32)((unsigned long)parent >> 4);
720
721         return (hash & RES_HASH_MASK);
722 }
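/*
 * Worked example of the hash above (illustrative values): for a top-level
 * resource (parent == NULL) with name { 0x2a, 3, 0, 0 }, the sum of the name
 * components is 0x2d and the parent contribution is 0, so the bucket index is
 * simply 0x2d & RES_HASH_MASK.
 */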
723
724 static struct ldlm_resource *ldlm_resource_new(void)
725 {
726         struct ldlm_resource *res;
727         int idx;
728
729         OBD_SLAB_ALLOC(res, ldlm_resource_slab, CFS_ALLOC_IO, sizeof *res);
730         if (res == NULL)
731                 return NULL;
732
733         memset(res, 0, sizeof(*res));
734
735         CFS_INIT_LIST_HEAD(&res->lr_children);
736         CFS_INIT_LIST_HEAD(&res->lr_childof);
737         CFS_INIT_LIST_HEAD(&res->lr_granted);
738         CFS_INIT_LIST_HEAD(&res->lr_converting);
739         CFS_INIT_LIST_HEAD(&res->lr_waiting);
740
741         /* Initialize interval trees for each lock mode. */
742         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
743                 res->lr_itree[idx].lit_size = 0;
744                 res->lr_itree[idx].lit_mode = 1 << idx;
745                 res->lr_itree[idx].lit_root = NULL;
746         }
747
748         atomic_set(&res->lr_refcount, 1);
749         spin_lock_init(&res->lr_lock);
750
751         /* The one who creates the resource must unlock
752          * the semaphore after LVB initialization. */
753         init_MUTEX_LOCKED(&res->lr_lvb_sem);
754
755         return res;
756 }
757
758 /* must be called with hash lock held */
759 static struct ldlm_resource *
760 ldlm_resource_find(struct ldlm_namespace *ns, const struct ldlm_res_id *name,
761                    __u32 hash)
762 {
763         struct list_head *bucket, *tmp;
764         struct ldlm_resource *res;
765
766         LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
767         bucket = ns->ns_hash + hash;
768
769         list_for_each(tmp, bucket) {
770                 res = list_entry(tmp, struct ldlm_resource, lr_hash);
771                 if (memcmp(&res->lr_name, name, sizeof(res->lr_name)) == 0)
772                         return res;
773         }
774
775         return NULL;
776 }
777
778 /* Args: locked namespace
779  * Returns: newly-allocated, referenced, unlocked resource */
780 static struct ldlm_resource *
781 ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
782                   const struct ldlm_res_id *name, __u32 hash, ldlm_type_t type)
783 {
784         struct list_head *bucket;
785         struct ldlm_resource *res, *old_res;
786         ENTRY;
787
788         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
789                  "type: %d\n", type);
790
791         res = ldlm_resource_new();
792         if (!res)
793                 RETURN(NULL);
794
795         res->lr_name = *name;
796         res->lr_namespace = ns;
797         res->lr_type = type;
798         res->lr_most_restr = LCK_NL;
799
800         spin_lock(&ns->ns_hash_lock);
801         old_res = ldlm_resource_find(ns, name, hash);
802         if (old_res) {
803                 /* someone won the race and added the resource before */
804                 ldlm_resource_getref(old_res);
805                 spin_unlock(&ns->ns_hash_lock);
806                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
807                 /* synchronize WRT resource creation */
808                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
809                         down(&old_res->lr_lvb_sem);
810                         up(&old_res->lr_lvb_sem);
811                 }
812                 RETURN(old_res);
813         }
814
815         /* we won! let's add the resource */
816         bucket = ns->ns_hash + hash;
817         list_add(&res->lr_hash, bucket);
818         ns->ns_resources++;
819         ldlm_namespace_get_locked(ns);
820
821         if (parent == NULL) {
822                 list_add(&res->lr_childof, &ns->ns_root_list);
823         } else {
824                 res->lr_parent = parent;
825                 list_add(&res->lr_childof, &parent->lr_children);
826         }
827         spin_unlock(&ns->ns_hash_lock);
828
829         if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
830                 int rc;
831
832                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
833                 rc = ns->ns_lvbo->lvbo_init(res);
834                 if (rc)
835                         CERROR("lvbo_init failed for resource "
836                                LPU64": rc %d\n", name->name[0], rc);
837                 /* The resource is created with lr_lvb_sem locked. */
838                 up(&res->lr_lvb_sem);
839         }
840
841         RETURN(res);
842 }
843
844 /* Args: unlocked namespace
845  * Locks: takes and releases ns->ns_lock and res->lr_lock
846  * Returns: referenced, unlocked ldlm_resource or NULL */
847 struct ldlm_resource *
848 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
849                   const struct ldlm_res_id *name, ldlm_type_t type, int create)
850 {
851         __u32 hash = ldlm_hash_fn(parent, name);
852         struct ldlm_resource *res = NULL;
853         ENTRY;
854
855         LASSERT(ns != NULL);
856         LASSERT(ns->ns_hash != NULL);
857         LASSERT(name->name[0] != 0);
858
859         spin_lock(&ns->ns_hash_lock);
860         res = ldlm_resource_find(ns, name, hash);
861         if (res) {
862                 ldlm_resource_getref(res);
863                 spin_unlock(&ns->ns_hash_lock);
864                 /* synchronize WRT resource creation */
865                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
866                         down(&res->lr_lvb_sem);
867                         up(&res->lr_lvb_sem);
868                 }
869                 RETURN(res);
870         }
871         spin_unlock(&ns->ns_hash_lock);
872
873         if (create == 0)
874                 RETURN(NULL);
875
876         res = ldlm_resource_add(ns, parent, name, hash, type);
877         RETURN(res);
878 }
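/*
 * Minimal usage sketch for the lookup above (illustrative; the namespace,
 * the object_id value and the caller context are assumptions):
 *
 *   struct ldlm_res_id res_id = { .name = { object_id, 0, 0, 0 } };
 *   struct ldlm_resource *res;
 *
 *   res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
 *   if (res == NULL)
 *           return -ENOMEM;
 *   ...
 *   ldlm_resource_putref(res);
 *
 * ldlm_resource_putref() drops the reference taken by ldlm_resource_get().
 */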
879
880 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
881 {
882         LASSERT(res != NULL);
883         LASSERT(res != LP_POISON);
884         atomic_inc(&res->lr_refcount);
885         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
886                atomic_read(&res->lr_refcount));
887         return res;
888 }
889
890 void __ldlm_resource_putref_final(struct ldlm_resource *res)
891 {
892         struct ldlm_namespace *ns = res->lr_namespace;
893
894         LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
895
896         if (!list_empty(&res->lr_granted)) {
897                 ldlm_resource_dump(D_ERROR, res);
898                 LBUG();
899         }
900
901         if (!list_empty(&res->lr_converting)) {
902                 ldlm_resource_dump(D_ERROR, res);
903                 LBUG();
904         }
905
906         if (!list_empty(&res->lr_waiting)) {
907                 ldlm_resource_dump(D_ERROR, res);
908                 LBUG();
909         }
910
911         if (!list_empty(&res->lr_children)) {
912                 ldlm_resource_dump(D_ERROR, res);
913                 LBUG();
914         }
915
916         /* Pass 0 here so as not to wake ->ns_waitq up yet; we will do it a
917          * few lines below when all children are freed. */
918         ldlm_namespace_put_locked(ns, 0);
919         list_del_init(&res->lr_hash);
920         list_del_init(&res->lr_childof);
921
922         ns->ns_resources--;
923         if (ns->ns_resources == 0)
924                 wake_up(&ns->ns_waitq);
925 }
926
927 /* Returns 1 if the resource was freed, 0 if it remains. */
928 int ldlm_resource_putref(struct ldlm_resource *res)
929 {
930         struct ldlm_namespace *ns = res->lr_namespace;
931         int rc = 0;
932         ENTRY;
933
934         CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
935                atomic_read(&res->lr_refcount) - 1);
936         LASSERTF(atomic_read(&res->lr_refcount) > 0, "%d",
937                  atomic_read(&res->lr_refcount));
938         LASSERTF(atomic_read(&res->lr_refcount) < LI_POISON, "%d",
939                  atomic_read(&res->lr_refcount));
940
941         if (atomic_dec_and_lock(&res->lr_refcount, &ns->ns_hash_lock)) {
942                 __ldlm_resource_putref_final(res);
943                 spin_unlock(&ns->ns_hash_lock);
944                 if (res->lr_lvb_data)
945                         OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
946                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
947                 rc = 1;
948         }
949
950         RETURN(rc);
951 }
952
953 /* Returns 1 if the resource was freed, 0 if it remains. */
954 int ldlm_resource_putref_locked(struct ldlm_resource *res)
955 {
956         int rc = 0;
957         ENTRY;
958
959         CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
960                atomic_read(&res->lr_refcount) - 1);
961         LASSERT(atomic_read(&res->lr_refcount) > 0);
962         LASSERT(atomic_read(&res->lr_refcount) < LI_POISON);
963
964         LASSERT(atomic_read(&res->lr_refcount) >= 0);
965         if (atomic_dec_and_test(&res->lr_refcount)) {
966                 __ldlm_resource_putref_final(res);
967                 if (res->lr_lvb_data)
968                         OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
969                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
970                 rc = 1;
971         }
972
973         RETURN(rc);
974 }
975
976 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
977                             struct ldlm_lock *lock)
978 {
979         check_res_locked(res);
980
981         ldlm_resource_dump(D_OTHER, res);
982         CDEBUG(D_OTHER, "About to add this lock:\n");
983         ldlm_lock_dump(D_OTHER, lock, 0);
984
985         if (lock->l_destroyed) {
986                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
987                 return;
988         }
989
990         LASSERT(list_empty(&lock->l_res_link));
991
992         list_add_tail(&lock->l_res_link, head);
993 }
994
995 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
996                                      struct ldlm_lock *new)
997 {
998         struct ldlm_resource *res = original->l_resource;
999
1000         check_res_locked(res);
1001
1002         ldlm_resource_dump(D_OTHER, res);
1003         CDEBUG(D_OTHER, "About to insert this lock after %p:\n", original);
1004         ldlm_lock_dump(D_OTHER, new, 0);
1005
1006         if (new->l_destroyed) {
1007                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1008                 goto out;
1009         }
1010
1011         LASSERT(list_empty(&new->l_res_link));
1012
1013         list_add(&new->l_res_link, &original->l_res_link);
1014  out:;
1015 }
1016
1017 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1018 {
1019         int type = lock->l_resource->lr_type;
1020
1021         check_res_locked(lock->l_resource);
1022         if (type == LDLM_IBITS || type == LDLM_PLAIN)
1023                 ldlm_unlink_lock_skiplist(lock);
1024         else if (type == LDLM_EXTENT)
1025                 ldlm_extent_unlink_lock(lock);
1026         list_del_init(&lock->l_res_link);
1027 }
1028
1029 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1030 {
1031         desc->lr_type = res->lr_type;
1032         desc->lr_name = res->lr_name;
1033 }
1034
1035 void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
1036 {
1037         struct list_head *tmp;
1038
1039         if (!((libcfs_debug | D_ERROR) & level))
1040                 return;
1041
1042         mutex_down(ldlm_namespace_lock(client));
1043
1044         list_for_each(tmp, ldlm_namespace_list(client)) {
1045                 struct ldlm_namespace *ns;
1046                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1047                 ldlm_namespace_dump(level, ns);
1048         }
1049
1050         mutex_up(ldlm_namespace_lock(client));
1051 }
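/*
 * Note (editorial, not from the original file): this is the helper behind the
 * "dump_namespaces" proc entry registered in ldlm_proc_setup(); the dump goes
 * to the debug log at the given level (D_DLMTRACE when triggered from proc),
 * so it is only visible when that debug mask is enabled.
 */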
1052
1053 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1054 {
1055         struct list_head *tmp;
1056
1057         if (!((libcfs_debug | D_ERROR) & level))
1058                 return;
1059
1060         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n", 
1061                ns->ns_name, ns->ns_refcount, 
1062                ns_is_client(ns) ? "client" : "server");
1063
1064         if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
1065                 return;
1066
1067         spin_lock(&ns->ns_hash_lock);
1068         tmp = ns->ns_root_list.next;
1069         while (tmp != &ns->ns_root_list) {
1070                 struct ldlm_resource *res;
1071                 res = list_entry(tmp, struct ldlm_resource, lr_childof);
1072
1073                 ldlm_resource_getref(res);
1074                 spin_unlock(&ns->ns_hash_lock);
1075
1076                 lock_res(res);
1077                 ldlm_resource_dump(level, res);
1078                 unlock_res(res);
1079
1080                 spin_lock(&ns->ns_hash_lock);
1081                 tmp = tmp->next;
1082                 ldlm_resource_putref_locked(res);
1083         }
1084         ns->ns_next_dump = cfs_time_shift(10);
1085         spin_unlock(&ns->ns_hash_lock);
1086 }
1087
1088 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1089 {
1090         struct list_head *tmp;
1091         int pos;
1092
1093         CLASSERT(RES_NAME_SIZE == 4);
1094
1095         if (!((libcfs_debug | D_ERROR) & level))
1096                 return;
1097
1098         CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
1099                ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
1100                res->lr_name.name[2], res->lr_name.name[3],
1101                atomic_read(&res->lr_refcount));
1102
1103         if (!list_empty(&res->lr_granted)) {
1104                 pos = 0;
1105                 CDEBUG(level, "Granted locks:\n");
1106                 list_for_each(tmp, &res->lr_granted) {
1107                         struct ldlm_lock *lock;
1108                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1109                         ldlm_lock_dump(level, lock, ++pos);
1110                 }
1111         }
1112         if (!list_empty(&res->lr_converting)) {
1113                 pos = 0;
1114                 CDEBUG(level, "Converting locks:\n");
1115                 list_for_each(tmp, &res->lr_converting) {
1116                         struct ldlm_lock *lock;
1117                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1118                         ldlm_lock_dump(level, lock, ++pos);
1119                 }
1120         }
1121         if (!list_empty(&res->lr_waiting)) {
1122                 pos = 0;
1123                 CDEBUG(level, "Waiting locks:\n");
1124                 list_for_each(tmp, &res->lr_waiting) {
1125                         struct ldlm_lock *lock;
1126                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1127                         ldlm_lock_dump(level, lock, ++pos);
1128                 }
1129         }
1130 }