1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
5  *   Author: Phil Schwan <phil@clusterfs.com>
6  *   Author: Peter Braam <braam@clusterfs.com>
7  *
8  *   This file is part of the Lustre file system, http://www.lustre.org
9  *   Lustre is a trademark of Cluster File Systems, Inc.
10  *
11  *   You may have signed or agreed to another license before downloading
12  *   this software.  If so, you are bound by the terms and conditions
13  *   of that agreement, and the following does not apply to you.  See the
14  *   LICENSE file included with this distribution for more information.
15  *
16  *   If you did not agree to a different license, then this copy of Lustre
17  *   is open source software; you can redistribute it and/or modify it
18  *   under the terms of version 2 of the GNU General Public License as
19  *   published by the Free Software Foundation.
20  *
21  *   In either case, Lustre is distributed in the hope that it will be
22  *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
23  *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
24  *   license text for more details.
25  */
26
27 #define DEBUG_SUBSYSTEM S_LDLM
28 #ifdef __KERNEL__
29 # include <lustre_dlm.h>
30 #else
31 # include <liblustre.h>
32 #endif
33
34 #include <obd_class.h>
35 #include "ldlm_internal.h"
36
37 cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
38
39 atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0);
40 atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0);
41
42 struct semaphore ldlm_srv_namespace_lock;
43 CFS_LIST_HEAD(ldlm_srv_namespace_list);
44
45 struct semaphore ldlm_cli_namespace_lock;
46 CFS_LIST_HEAD(ldlm_cli_namespace_list);
47
48 cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
49 cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
50 cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL;
51
52 #ifdef LPROCFS
53 static int ldlm_proc_dump_ns(struct file *file, const char *buffer,
54                              unsigned long count, void *data)
55 {
56         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
57         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
58         RETURN(count);
59 }
60
61 int ldlm_proc_setup(void)
62 {
63         int rc;
64         struct lprocfs_vars list[] = {
65                 { "dump_namespaces", NULL, ldlm_proc_dump_ns, NULL },
66                 { NULL }};
67         ENTRY;
68         LASSERT(ldlm_ns_proc_dir == NULL);
69
70         ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
71                                               proc_lustre_root,
72                                               NULL, NULL);
73         if (IS_ERR(ldlm_type_proc_dir)) {
74                 CERROR("LProcFS failed in ldlm-init\n");
75                 rc = PTR_ERR(ldlm_type_proc_dir);
76                 GOTO(err, rc);
77         }
78
79         ldlm_ns_proc_dir = lprocfs_register("namespaces",
80                                             ldlm_type_proc_dir,
81                                             NULL, NULL);
82         if (IS_ERR(ldlm_ns_proc_dir)) {
83                 CERROR("LProcFS failed in ldlm-init\n");
84                 rc = PTR_ERR(ldlm_ns_proc_dir);
85                 GOTO(err_type, rc);
86         }
87
88         ldlm_svc_proc_dir = lprocfs_register("services",
89                                             ldlm_type_proc_dir,
90                                             NULL, NULL);
91         if (IS_ERR(ldlm_svc_proc_dir)) {
92                 CERROR("LProcFS failed in ldlm-init\n");
93                 rc = PTR_ERR(ldlm_svc_proc_dir);
94                 GOTO(err_ns, rc);
95         }
96
97         rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
98
99         RETURN(0);
100
101 err_ns:
102         lprocfs_remove(&ldlm_ns_proc_dir);
103 err_type:
104         lprocfs_remove(&ldlm_type_proc_dir);
105 err:
106         ldlm_svc_proc_dir = NULL;
107         RETURN(rc);
108 }
109
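/*
 * Illustrative note (not part of the original file): assuming proc_lustre_root
 * corresponds to /proc/fs/lustre and OBD_LDLM_DEVICENAME expands to "ldlm",
 * a successful ldlm_proc_setup() leaves behind:
 *
 *      /proc/fs/lustre/ldlm/dump_namespaces
 *      /proc/fs/lustre/ldlm/namespaces/
 *      /proc/fs/lustre/ldlm/services/
 */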
110 void ldlm_proc_cleanup(void)
111 {
112         if (ldlm_svc_proc_dir)
113                 lprocfs_remove(&ldlm_svc_proc_dir);
114
115         if (ldlm_ns_proc_dir)
116                 lprocfs_remove(&ldlm_ns_proc_dir);
117
118         if (ldlm_type_proc_dir)
119                 lprocfs_remove(&ldlm_type_proc_dir);
120 }
121
122 static int lprocfs_rd_lru_size(char *page, char **start, off_t off,
123                                int count, int *eof, void *data)
124 {
125         struct ldlm_namespace *ns = data;
126         __u32 *nr = &ns->ns_max_unused;
127
128         if (ns_connect_lru_resize(ns))
129                 nr = &ns->ns_nr_unused;
130         return lprocfs_rd_uint(page, start, off, count, eof, nr);
131 }
132
133 static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
134                                unsigned long count, void *data)
135 {
136         struct ldlm_namespace *ns = data;
137         char dummy[MAX_STRING_SIZE + 1], *end;
138         unsigned long tmp;
139         int lru_resize;
140
141         dummy[MAX_STRING_SIZE] = '\0';
142         if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
143                 return -EFAULT;
144
145         if (count == 6 && memcmp(dummy, "clear", 5) == 0) {
146                 CDEBUG(D_DLMTRACE,
147                        "dropping all unused locks from namespace %s\n",
148                        ns->ns_name);
149                 if (ns_connect_lru_resize(ns)) {
150                         int canceled, unused  = ns->ns_nr_unused;
151                         
152                         /* Try to cancel all @ns_nr_unused locks. */
153                         canceled = ldlm_cancel_lru(ns, unused, LDLM_SYNC, 
154                                                    LDLM_CANCEL_PASSED);
155                         if (canceled < unused) {
156                                 CERROR("not all requested locks are canceled, "
157                                        "requested: %d, canceled: %d\n", unused, 
158                                        canceled);
159                                 return -EINVAL;
160                         }
161                 } else {
162                         tmp = ns->ns_max_unused;
163                         ns->ns_max_unused = 0;
164                         ldlm_cancel_lru(ns, 0, LDLM_SYNC, LDLM_CANCEL_PASSED);
165                         ns->ns_max_unused = tmp;
166                 }
167                 return count;
168         }
169
170         tmp = simple_strtoul(dummy, &end, 0);
171         if (dummy == end) {
172                 CERROR("invalid value written\n");
173                 return -EINVAL;
174         }
175         lru_resize = (tmp == 0);
176         
177         if (ns_connect_lru_resize(ns)) {
178                 if (!lru_resize)
179                         ns->ns_max_unused = (unsigned int)tmp;
180                         
181                 if (tmp > ns->ns_nr_unused)
182                         tmp = ns->ns_nr_unused;
183                 tmp = ns->ns_nr_unused - tmp;
184                 
185                 CDEBUG(D_DLMTRACE, "changing namespace %s unused locks from %u to %u\n", 
186                        ns->ns_name, ns->ns_nr_unused, (unsigned int)tmp);
187                 ldlm_cancel_lru(ns, (unsigned int)tmp, LDLM_ASYNC, LDLM_CANCEL_PASSED);
188                 
189                 if (!lru_resize) {
190                         CDEBUG(D_DLMTRACE, "disable lru_resize for namespace %s\n", 
191                                ns->ns_name);
192                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
193                 }
194         } else {
195                 CDEBUG(D_DLMTRACE, "changing namespace %s max_unused from %u to %u\n",
196                        ns->ns_name, ns->ns_max_unused, (unsigned int)tmp);
197                 ns->ns_max_unused = (unsigned int)tmp;
198                 ldlm_cancel_lru(ns, 0, LDLM_ASYNC, LDLM_CANCEL_PASSED);
199                 
200         /* Make sure that lru resize was originally supported before
201          * turning it on here. */
202                 if (lru_resize && 
203                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
204                         CDEBUG(D_DLMTRACE, "enable lru_resize for namespace %s\n", 
205                                ns->ns_name);
206                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
207                 }
208         }
209
210         return count;
211 }
212
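/*
 * Illustrative usage of the lru_size handlers above (paths assume the standard
 * lprocfs layout under /proc/fs/lustre and a hypothetical namespace name):
 *
 *      # drop all unused locks in the namespace
 *      echo clear > /proc/fs/lustre/ldlm/namespaces/<namespace>/lru_size
 *
 *      # pin the LRU to at most 400 unused locks (disables lru_resize)
 *      echo 400 > /proc/fs/lustre/ldlm/namespaces/<namespace>/lru_size
 *
 *      # re-enable lru_resize, if the connection originally supported it
 *      echo 0 > /proc/fs/lustre/ldlm/namespaces/<namespace>/lru_size
 */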
213 void ldlm_proc_namespace(struct ldlm_namespace *ns)
214 {
215         struct lprocfs_vars lock_vars[2];
216         char lock_name[MAX_STRING_SIZE + 1];
217
218         LASSERT(ns != NULL);
219         LASSERT(ns->ns_name != NULL);
220
221         lock_name[MAX_STRING_SIZE] = '\0';
222
223         memset(lock_vars, 0, sizeof(lock_vars));
224         lock_vars[0].name = lock_name;
225
226         snprintf(lock_name, MAX_STRING_SIZE, "%s/resource_count", ns->ns_name);
227         lock_vars[0].data = &ns->ns_refcount;
228         lock_vars[0].read_fptr = lprocfs_rd_atomic;
229         lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
230
231         snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_count", ns->ns_name);
232         lock_vars[0].data = &ns->ns_locks;
233         lock_vars[0].read_fptr = lprocfs_rd_atomic;
234         lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
235
236         if (ns_is_client(ns)) {
237                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_unused_count",
238                          ns->ns_name);
239                 lock_vars[0].data = &ns->ns_nr_unused;
240                 lock_vars[0].read_fptr = lprocfs_rd_uint;
241                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
242
243                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_size",
244                          ns->ns_name);
245                 lock_vars[0].data = ns;
246                 lock_vars[0].read_fptr = lprocfs_rd_lru_size;
247                 lock_vars[0].write_fptr = lprocfs_wr_lru_size;
248                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
249
250                 snprintf(lock_name, MAX_STRING_SIZE, "%s/shrink_thumb",
251                          ns->ns_name);
252                 lock_vars[0].data = ns;
253                 lock_vars[0].read_fptr = lprocfs_rd_uint;
254                 lock_vars[0].write_fptr = lprocfs_wr_uint;
255                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
256
257                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_max_age",
258                          ns->ns_name);
259                 lock_vars[0].data = &ns->ns_max_age;
260                 lock_vars[0].read_fptr = lprocfs_rd_uint;
261                 lock_vars[0].write_fptr = lprocfs_wr_uint;
262                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
263         } else {
264                 snprintf(lock_name, MAX_STRING_SIZE, "%s/ctime_age_limit",
265                          ns->ns_name);
266                 lock_vars[0].data = &ns->ns_ctime_age_limit;
267                 lock_vars[0].read_fptr = lprocfs_rd_uint;
268                 lock_vars[0].write_fptr = lprocfs_wr_uint;
269                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
270
271                 snprintf(lock_name, MAX_STRING_SIZE, "%s/max_nolock_bytes",
272                          ns->ns_name);
273                 lock_vars[0].data = &ns->ns_max_nolock_size;
274                 lock_vars[0].read_fptr = lprocfs_rd_uint;
275                 lock_vars[0].write_fptr = lprocfs_wr_uint;
276                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
277
278                 snprintf(lock_name, MAX_STRING_SIZE, "%s/contention_seconds",
279                          ns->ns_name);
280                 lock_vars[0].data = &ns->ns_contention_time;
281                 lock_vars[0].read_fptr = lprocfs_rd_uint;
282                 lock_vars[0].write_fptr = lprocfs_wr_uint;
283                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
284
285                 snprintf(lock_name, MAX_STRING_SIZE, "%s/contended_locks",
286                          ns->ns_name);
287                 lock_vars[0].data = &ns->ns_contended_locks;
288                 lock_vars[0].read_fptr = lprocfs_rd_uint;
289                 lock_vars[0].write_fptr = lprocfs_wr_uint;
290                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
291         }
292 }
293 #undef MAX_STRING_SIZE
294 #else
295 #define ldlm_proc_namespace(ns) do {} while (0)
296 #endif /* LPROCFS */
297
298 struct ldlm_namespace *ldlm_namespace_new(char *name, ldlm_side_t client, 
299                                           ldlm_appetite_t apt)
300 {
301         struct ldlm_namespace *ns = NULL;
302         struct list_head *bucket;
303         int rc, idx, namelen;
304         ENTRY;
305
306         rc = ldlm_get_ref();
307         if (rc) {
308                 CERROR("ldlm_get_ref failed: %d\n", rc);
309                 RETURN(NULL);
310         }
311
312         OBD_ALLOC_PTR(ns);
313         if (!ns)
314                 GOTO(out_ref, NULL);
315
316         OBD_VMALLOC(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
317         if (!ns->ns_hash)
318                 GOTO(out_ns, NULL);
319
320         ns->ns_shrink_thumb = LDLM_LOCK_SHRINK_THUMB;
321         ns->ns_appetite = apt;
322         namelen = strlen(name);
323         OBD_ALLOC(ns->ns_name, namelen + 1);
324         if (!ns->ns_name)
325                 GOTO(out_hash, NULL);
326
327         strcpy(ns->ns_name, name);
328
329         CFS_INIT_LIST_HEAD(&ns->ns_root_list);
330         ns->ns_refcount = 0;
331         ns->ns_client = client;
332         spin_lock_init(&ns->ns_hash_lock);
333         atomic_set(&ns->ns_locks, 0);
334         ns->ns_resources = 0;
335         cfs_waitq_init(&ns->ns_waitq);
336         ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
337         ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
338         ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;
339
340         for (bucket = ns->ns_hash + RES_HASH_SIZE - 1; bucket >= ns->ns_hash;
341              bucket--)
342                 CFS_INIT_LIST_HEAD(bucket);
343
344         CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
345         ns->ns_nr_unused = 0;
346         ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
347         ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
348         ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
349         spin_lock_init(&ns->ns_unused_lock);
350         ns->ns_orig_connect_flags = 0;
351         ns->ns_connect_flags = 0;
352         ldlm_proc_namespace(ns);
353
354         idx = atomic_read(ldlm_namespace_nr(client));
355         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
356         if (rc) {
357                 CERROR("Can't initialize lock pool, rc %d\n", rc);
358                 GOTO(out_proc, rc);
359         }
360
361         mutex_down(ldlm_namespace_lock(client));
362         list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
363         atomic_inc(ldlm_namespace_nr(client));
364         mutex_up(ldlm_namespace_lock(client));
365
366         RETURN(ns);
367 out_proc:
368         ldlm_namespace_cleanup(ns, 0);
369         OBD_FREE(ns->ns_name, namelen + 1);
370 out_hash:
371         OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
372 out_ns:
373         OBD_FREE_PTR(ns);
374 out_ref:
375         ldlm_put_ref(0);
376         RETURN(NULL);
377 }
378
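/*
 * Illustrative usage of ldlm_namespace_new() (hypothetical caller, not taken
 * from this file; LDLM_NAMESPACE_MODEST is assumed to be a valid
 * ldlm_appetite_t value):
 *
 *      struct ldlm_namespace *ns;
 *
 *      ns = ldlm_namespace_new("my-namespace", LDLM_NAMESPACE_CLIENT,
 *                              LDLM_NAMESPACE_MODEST);
 *      if (ns == NULL)
 *              return -ENOMEM;
 *      ...
 *      ldlm_namespace_free(ns, 0);
 */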
379 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
380
381 /* If flags contains FL_LOCAL_ONLY, don't try to tell the server, just clean up.
382  * This is currently only used for recovery, and we make certain assumptions
383  * as a result--notably, that we shouldn't cancel locks with refs. -phil
384  *
385  * Called with the ns_lock held. */
386 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
387                              int flags)
388 {
389         struct list_head *tmp;
390         int rc = 0, client = ns_is_client(res->lr_namespace);
391         int local_only = (flags & LDLM_FL_LOCAL_ONLY);
392         ENTRY;
393
394
395         do {
396                 struct ldlm_lock *lock = NULL;
397
398                 /* First, look for a lock that has not been cleaned up yet;
399                  * all cleaned locks are marked with the CLEANED flag. */
400                 lock_res(res);
401                 list_for_each(tmp, q) {
402                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
403                         if (lock->l_flags & LDLM_FL_CLEANED) {
404                                 lock = NULL;
405                                 continue;
406                         }
407                         LDLM_LOCK_GET(lock);
408                         lock->l_flags |= LDLM_FL_CLEANED;
409                         break;
410                 }
411
412                 if (lock == NULL) {
413                         unlock_res(res);
414                         break;
415                 }
416
417                 /* Set CBPENDING so nothing in the cancellation path
418                  * can match this lock */
419                 lock->l_flags |= LDLM_FL_CBPENDING;
420                 lock->l_flags |= LDLM_FL_FAILED;
421                 lock->l_flags |= flags;
422
423                 /* ... without sending a CANCEL message for local_only. */
424                 if (local_only)
425                         lock->l_flags |= LDLM_FL_LOCAL_ONLY;
426
427                 if (local_only && (lock->l_readers || lock->l_writers)) {
428                         /* This is a little bit gross, but much better than the
429                          * alternative: pretend that we got a blocking AST from
430                          * the server, so that when the lock is decref'd, it
431                          * will go away ... */
432                         unlock_res(res);
433                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
434                         if (lock->l_completion_ast)
435                                 lock->l_completion_ast(lock, 0, NULL);
436                         LDLM_LOCK_PUT(lock);
437                         continue;
438                 }
439
440                 if (client) {
441                         struct lustre_handle lockh;
442
443                         unlock_res(res);
444                         ldlm_lock2handle(lock, &lockh);
445                         rc = ldlm_cli_cancel(&lockh);
446                         if (rc)
447                                 CERROR("ldlm_cli_cancel: %d\n", rc);
448                 } else {
449                         ldlm_resource_unlink_lock(lock);
450                         unlock_res(res);
451                         LDLM_DEBUG(lock, "Freeing a lock still held by a "
452                                    "client node");
453                         ldlm_lock_destroy(lock);
454                 }
455                 LDLM_LOCK_PUT(lock);
456         } while (1);
457
458         EXIT;
459 }
460
461 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
462 {
463         struct list_head *tmp;
464         int i;
465
466         if (ns == NULL) {
467                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
468                 return ELDLM_OK;
469         }
470
471         for (i = 0; i < RES_HASH_SIZE; i++) {
472                 spin_lock(&ns->ns_hash_lock);
473                 tmp = ns->ns_hash[i].next;
474                 while (tmp != &(ns->ns_hash[i])) {
475                         struct ldlm_resource *res;
476                         res = list_entry(tmp, struct ldlm_resource, lr_hash);
477                         ldlm_resource_getref(res);
478                         spin_unlock(&ns->ns_hash_lock);
479
480                         cleanup_resource(res, &res->lr_granted, flags);
481                         cleanup_resource(res, &res->lr_converting, flags);
482                         cleanup_resource(res, &res->lr_waiting, flags);
483
484                         spin_lock(&ns->ns_hash_lock);
485                         tmp  = tmp->next;
486
487                         /* XXX: the former code caused issues in case of a race
488                          * between ldlm_namespace_cleanup() and lockd() when the
489                          * client gets a blocking AST while the lock is being
490                          * destroyed by the server. This is the 1_4 branch
491                          * solution; let's see how it behaves. */
492                         if (!ldlm_resource_putref_locked(res))
493                                 CDEBUG(D_INFO,
494                                        "Namespace %s resource refcount nonzero "
495                                        "(%d) after lock cleanup; forcing cleanup.\n",
496                                        ns->ns_name, atomic_read(&res->lr_refcount));
497                 }
498                 spin_unlock(&ns->ns_hash_lock);
499         }
500
501         return ELDLM_OK;
502 }
503
504 /* Cleanup, but also free, the namespace */
505 int ldlm_namespace_free_prior(struct ldlm_namespace *ns)
506 {
507         ENTRY;
508         if (!ns)
509                 RETURN(ELDLM_OK);
510
511         mutex_down(ldlm_namespace_lock(ns->ns_client));
512         /*
513          * Some asserts and possibly other parts of the code still use
514          * list_empty(&ns->ns_list_chain). This is why it is important
515          * to use list_del_init() here.
516          */
517         list_del_init(&ns->ns_list_chain);
518         atomic_dec(ldlm_namespace_nr(ns->ns_client));
519         ldlm_pool_fini(&ns->ns_pool);
520         mutex_up(ldlm_namespace_lock(ns->ns_client));
521
522         /* At shutdown time, don't call the cancellation callback */
523         ldlm_namespace_cleanup(ns, 0);
524
525         if (ns->ns_refcount > 0) {
526                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
527                 int rc;
528                 CDEBUG(D_DLMTRACE,
529                        "dlm namespace %s free waiting on refcount %d\n",
530                        ns->ns_name, ns->ns_refcount);
531                 rc = l_wait_event(ns->ns_waitq,
532                                   ns->ns_refcount == 0, &lwi);
533                 if (ns->ns_refcount)
534                         LCONSOLE_ERROR_MSG(0x139, "Lock manager: wait for %s "
535                                            "namespace cleanup aborted with %d "
536                                            "resources in use. (%d)\nI'm going "
537                                            "to try to clean up anyway, but I "
538                                            "might need a reboot of this node.\n",
539                                             ns->ns_name, (int) ns->ns_refcount, 
540                                             rc);
541                 CDEBUG(D_DLMTRACE,
542                        "dlm namespace %s free done waiting\n", ns->ns_name);
543         }
544
545         RETURN(ELDLM_OK);
546 }
547
548 int ldlm_namespace_free_post(struct ldlm_namespace *ns, int force)
549 {
550         ENTRY;
551         if (!ns)
552                 RETURN(ELDLM_OK);
553
554 #ifdef LPROCFS
555         {
556                 struct proc_dir_entry *dir;
557                 dir = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
558                 if (dir == NULL) {
559                         CERROR("dlm namespace %s has no procfs dir?\n",
560                                ns->ns_name);
561                 } else {
562                         lprocfs_remove(&dir);
563                 }
564         }
565 #endif
566
567         OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
568         OBD_FREE(ns->ns_name, strlen(ns->ns_name) + 1);
569         /* 
570          * @ns should not be on any list at this time, otherwise this will cause
571          * issues related to using a freed @ns in the pools thread.
572          */
573         LASSERT(list_empty(&ns->ns_list_chain));
574         OBD_FREE_PTR(ns);
575         ldlm_put_ref(force);
576         RETURN(ELDLM_OK);
577 }
578
579
580 /* Clean up the resources and free the namespace.
581  * bug 12864:
582  * Deadlock issue:
583  * proc1: destroy import
584  *        class_disconnect_export(grab cl_sem) ->
585  *              -> ldlm_namespace_free ->
586  *              -> lprocfs_remove(grab _lprocfs_lock).
587  * proc2: read proc info
588  *        lprocfs_fops_read(grab _lprocfs_lock) ->
589  *              -> osc_rd_active, etc(grab cl_sem).
590  *
591  * So ldlm_namespace_free() had to be split into two parts - the first part,
592  * ldlm_namespace_free_prior(), is used to clean up the resources which are
593  * still in use; the second part, ldlm_namespace_free_post(), is used to
594  * unregister the lprocfs entries and then free memory. It is called without
595  * cli->cl_sem held.
596  */
597 int ldlm_namespace_free(struct ldlm_namespace *ns, int force)
598 {
599         ldlm_namespace_free_prior(ns);
600         ldlm_namespace_free_post(ns, force);
601         return ELDLM_OK;
602 }
603
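/*
 * Illustrative call order for the split described above (hypothetical caller,
 * not taken from this file); a caller that holds cli->cl_sem should do:
 *
 *      ldlm_namespace_free_prior(ns);
 *      ... release cli->cl_sem ...
 *      ldlm_namespace_free_post(ns, force);
 *
 * so that lprocfs_remove() is never reached while cl_sem is held.
 */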
604
605 void ldlm_namespace_get_nolock(struct ldlm_namespace *ns)
606 {
607         LASSERT(ns->ns_refcount >= 0);
608         ns->ns_refcount++;
609 }
610
611 void ldlm_namespace_get(struct ldlm_namespace *ns)
612 {
613         spin_lock(&ns->ns_hash_lock);
614         ldlm_namespace_get_nolock(ns);
615         spin_unlock(&ns->ns_hash_lock);
616 }
617
618 void ldlm_namespace_put_nolock(struct ldlm_namespace *ns, int wakeup)
619 {
620         LASSERT(ns->ns_refcount > 0);
621         ns->ns_refcount--;
622         if (ns->ns_refcount == 0 && wakeup)
623                 wake_up(&ns->ns_waitq);
624 }
625
626 void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup)
627 {
628         spin_lock(&ns->ns_hash_lock);
629         ldlm_namespace_put_nolock(ns, wakeup);
630         spin_unlock(&ns->ns_hash_lock);
631 }
632
633 /* Should be called with ldlm_namespace_lock(client) held */
634 void ldlm_namespace_move(struct ldlm_namespace *ns, ldlm_side_t client)
635 {
636         LASSERT(!list_empty(&ns->ns_list_chain));
637         LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
638         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
639 }
640
641 /* Should be called with ldlm_namespace_lock(client) held */
642 struct ldlm_namespace *ldlm_namespace_first(ldlm_side_t client)
643 {
644         LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
645         LASSERT(!list_empty(ldlm_namespace_list(client)));
646         return container_of(ldlm_namespace_list(client)->next, 
647                 struct ldlm_namespace, ns_list_chain);
648 }
649 static __u32 ldlm_hash_fn(struct ldlm_resource *parent,
650                           const struct ldlm_res_id *name)
651 {
652         __u32 hash = 0;
653         int i;
654
655         for (i = 0; i < RES_NAME_SIZE; i++)
656                 hash += name->name[i];
657
658         hash += (__u32)((unsigned long)parent >> 4);
659
660         return (hash & RES_HASH_MASK);
661 }
662
663 static struct ldlm_resource *ldlm_resource_new(void)
664 {
665         struct ldlm_resource *res;
666         int idx;
667
668         OBD_SLAB_ALLOC(res, ldlm_resource_slab, CFS_ALLOC_IO, sizeof *res);
669         if (res == NULL)
670                 return NULL;
671
672         memset(res, 0, sizeof(*res));
673
674         CFS_INIT_LIST_HEAD(&res->lr_children);
675         CFS_INIT_LIST_HEAD(&res->lr_childof);
676         CFS_INIT_LIST_HEAD(&res->lr_granted);
677         CFS_INIT_LIST_HEAD(&res->lr_converting);
678         CFS_INIT_LIST_HEAD(&res->lr_waiting);
679
680         /* initialize interval trees for each lock mode */
681         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
682                 res->lr_itree[idx].lit_size = 0;
683                 res->lr_itree[idx].lit_mode = 1 << idx;
684                 res->lr_itree[idx].lit_root = NULL;
685         }
686
687         atomic_set(&res->lr_refcount, 1);
688         spin_lock_init(&res->lr_lock);
689
690         /* The one who creates the resource must unlock
691          * the lr_lvb_sem semaphore after LVB initialization. */
692         init_MUTEX_LOCKED(&res->lr_lvb_sem);
693
694         return res;
695 }
696
697 /* must be called with hash lock held */
698 static struct ldlm_resource *
699 ldlm_resource_find(struct ldlm_namespace *ns, const struct ldlm_res_id *name,
700                    __u32 hash)
701 {
702         struct list_head *bucket, *tmp;
703         struct ldlm_resource *res;
704
705         LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
706         bucket = ns->ns_hash + hash;
707
708         list_for_each(tmp, bucket) {
709                 res = list_entry(tmp, struct ldlm_resource, lr_hash);
710                 if (memcmp(&res->lr_name, name, sizeof(res->lr_name)) == 0)
711                         return res;
712         }
713
714         return NULL;
715 }
716
717 /* Args: locked namespace
718  * Returns: newly-allocated, referenced, unlocked resource */
719 static struct ldlm_resource *
720 ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
721                   const struct ldlm_res_id *name, __u32 hash, ldlm_type_t type)
722 {
723         struct list_head *bucket;
724         struct ldlm_resource *res, *old_res;
725         ENTRY;
726
727         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
728                  "type: %d\n", type);
729
730         res = ldlm_resource_new();
731         if (!res)
732                 RETURN(NULL);
733
734         res->lr_name = *name;
735         res->lr_namespace = ns;
736         res->lr_type = type;
737         res->lr_most_restr = LCK_NL;
738
739         spin_lock(&ns->ns_hash_lock);
740         old_res = ldlm_resource_find(ns, name, hash);
741         if (old_res) {
742                 /* someone won the race and added the resource before */
743                 ldlm_resource_getref(old_res);
744                 spin_unlock(&ns->ns_hash_lock);
745                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
746                 /* synchronize WRT resource creation */
747                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
748                         down(&old_res->lr_lvb_sem);
749                         up(&old_res->lr_lvb_sem);
750                 }
751                 RETURN(old_res);
752         }
753
754         /* we won! let's add the resource */
755         bucket = ns->ns_hash + hash;
756         list_add(&res->lr_hash, bucket);
757         ns->ns_resources++;
758         ldlm_namespace_get_nolock(ns);
759
760         if (parent == NULL) {
761                 list_add(&res->lr_childof, &ns->ns_root_list);
762         } else {
763                 res->lr_parent = parent;
764                 list_add(&res->lr_childof, &parent->lr_children);
765         }
766         spin_unlock(&ns->ns_hash_lock);
767
768         if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
769                 int rc;
770
771                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
772                 rc = ns->ns_lvbo->lvbo_init(res);
773                 if (rc)
774                         CERROR("lvbo_init failed for resource "
775                                LPU64": rc %d\n", name->name[0], rc);
776                 /* we create resource with locked lr_lvb_sem */
777                 up(&res->lr_lvb_sem);
778         }
779
780         RETURN(res);
781 }
782
783 /* Args: unlocked namespace
784  * Locks: takes and releases ns->ns_lock and res->lr_lock
785  * Returns: referenced, unlocked ldlm_resource or NULL */
786 struct ldlm_resource *
787 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
788                   const struct ldlm_res_id *name, ldlm_type_t type, int create)
789 {
790         __u32 hash = ldlm_hash_fn(parent, name);
791         struct ldlm_resource *res = NULL;
792         ENTRY;
793
794         LASSERT(ns != NULL);
795         LASSERT(ns->ns_hash != NULL);
796         LASSERT(name->name[0] != 0);
797
798         spin_lock(&ns->ns_hash_lock);
799         res = ldlm_resource_find(ns, name, hash);
800         if (res) {
801                 ldlm_resource_getref(res);
802                 spin_unlock(&ns->ns_hash_lock);
803                 /* synchronize WRT resource creation */
804                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
805                         down(&res->lr_lvb_sem);
806                         up(&res->lr_lvb_sem);
807                 }
808                 RETURN(res);
809         }
810         spin_unlock(&ns->ns_hash_lock);
811
812         if (create == 0)
813                 RETURN(NULL);
814
815         res = ldlm_resource_add(ns, parent, name, hash, type);
816         RETURN(res);
817 }
818
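/*
 * Illustrative usage of ldlm_resource_get() above together with
 * ldlm_resource_putref(), which is defined later in this file (hypothetical
 * caller; res_id is an assumed, pre-filled struct ldlm_res_id):
 *
 *      struct ldlm_resource *res;
 *
 *      res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
 *      if (res != NULL) {
 *              ... use the referenced, unlocked resource ...
 *              ldlm_resource_putref(res);
 *      }
 */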
819 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
820 {
821         LASSERT(res != NULL);
822         LASSERT(res != LP_POISON);
823         atomic_inc(&res->lr_refcount);
824         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
825                atomic_read(&res->lr_refcount));
826         return res;
827 }
828
829 void __ldlm_resource_putref_final(struct ldlm_resource *res)
830 {
831         struct ldlm_namespace *ns = res->lr_namespace;
832
833         LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
834
835         if (!list_empty(&res->lr_granted)) {
836                 ldlm_resource_dump(D_ERROR, res);
837                 LBUG();
838         }
839
840         if (!list_empty(&res->lr_converting)) {
841                 ldlm_resource_dump(D_ERROR, res);
842                 LBUG();
843         }
844
845         if (!list_empty(&res->lr_waiting)) {
846                 ldlm_resource_dump(D_ERROR, res);
847                 LBUG();
848         }
849
850         if (!list_empty(&res->lr_children)) {
851                 ldlm_resource_dump(D_ERROR, res);
852                 LBUG();
853         }
854
855         /* Pass 0 here so as not to wake up ->ns_waitq yet; we will do it a few
856          * lines below when all children are freed. */
857         ldlm_namespace_put_nolock(ns, 0);
858         list_del_init(&res->lr_hash);
859         list_del_init(&res->lr_childof);
860
861         ns->ns_resources--;
862         if (ns->ns_resources == 0)
863                 wake_up(&ns->ns_waitq);
864 }
865
866 /* Returns 1 if the resource was freed, 0 if it remains. */
867 int ldlm_resource_putref(struct ldlm_resource *res)
868 {
869         struct ldlm_namespace *ns = res->lr_namespace;
870         int rc = 0;
871         ENTRY;
872
873         CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
874                atomic_read(&res->lr_refcount) - 1);
875         LASSERTF(atomic_read(&res->lr_refcount) > 0, "%d",
876                  atomic_read(&res->lr_refcount));
877         LASSERTF(atomic_read(&res->lr_refcount) < LI_POISON, "%d",
878                  atomic_read(&res->lr_refcount));
879
880         if (atomic_dec_and_lock(&res->lr_refcount, &ns->ns_hash_lock)) {
881                 __ldlm_resource_putref_final(res);
882                 spin_unlock(&ns->ns_hash_lock);
883                 if (res->lr_lvb_data)
884                         OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
885                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
886                 rc = 1;
887         }
888
889         RETURN(rc);
890 }
891
892 /* Returns 1 if the resource was freed, 0 if it remains. */
893 int ldlm_resource_putref_locked(struct ldlm_resource *res)
894 {
895         int rc = 0;
896         ENTRY;
897
898         CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
899                atomic_read(&res->lr_refcount) - 1);
900         LASSERT(atomic_read(&res->lr_refcount) > 0);
901         LASSERT(atomic_read(&res->lr_refcount) < LI_POISON);
902
903         LASSERT(atomic_read(&res->lr_refcount) >= 0);
904         if (atomic_dec_and_test(&res->lr_refcount)) {
905                 __ldlm_resource_putref_final(res);
906                 if (res->lr_lvb_data)
907                         OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
908                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
909                 rc = 1;
910         }
911
912         RETURN(rc);
913 }
914
915 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
916                             struct ldlm_lock *lock)
917 {
918         check_res_locked(res);
919
920         ldlm_resource_dump(D_OTHER, res);
921         CDEBUG(D_OTHER, "About to add this lock:\n");
922         ldlm_lock_dump(D_OTHER, lock, 0);
923
924         if (lock->l_destroyed) {
925                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
926                 return;
927         }
928
929         LASSERT(list_empty(&lock->l_res_link));
930
931         list_add_tail(&lock->l_res_link, head);
932 }
933
934 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
935                                      struct ldlm_lock *new)
936 {
937         struct ldlm_resource *res = original->l_resource;
938
939         check_res_locked(res);
940
941         ldlm_resource_dump(D_OTHER, res);
942         CDEBUG(D_OTHER, "About to insert this lock after %p:\n", original);
943         ldlm_lock_dump(D_OTHER, new, 0);
944
945         if (new->l_destroyed) {
946                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
947                 goto out;
948         }
949
950         LASSERT(list_empty(&new->l_res_link));
951
952         list_add(&new->l_res_link, &original->l_res_link);
953  out:;
954 }
955
956 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
957 {
958         int type = lock->l_resource->lr_type;
959
960         check_res_locked(lock->l_resource);
961         if (type == LDLM_IBITS || type == LDLM_PLAIN)
962                 ldlm_unlink_lock_skiplist(lock);
963         else if (type == LDLM_EXTENT)
964                 ldlm_extent_unlink_lock(lock);
965         list_del_init(&lock->l_res_link);
966 }
967
968 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
969 {
970         desc->lr_type = res->lr_type;
971         desc->lr_name = res->lr_name;
972 }
973
974 void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
975 {
976         struct list_head *tmp;
977
978         if (!((libcfs_debug | D_ERROR) & level))
979                 return;
980
981         mutex_down(ldlm_namespace_lock(client));
982
983         list_for_each(tmp, ldlm_namespace_list(client)) {
984                 struct ldlm_namespace *ns;
985                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
986                 ldlm_namespace_dump(level, ns);
987         }
988
989         mutex_up(ldlm_namespace_lock(client));
990 }
991
992 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
993 {
994         struct list_head *tmp;
995
996         if (!((libcfs_debug | D_ERROR) & level))
997                 return;
998
999         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n", 
1000                ns->ns_name, ns->ns_refcount, 
1001                ns_is_client(ns) ? "client" : "server");
1002
1003         if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
1004                 return;
1005
1006         spin_lock(&ns->ns_hash_lock);
1007         tmp = ns->ns_root_list.next;
1008         while (tmp != &ns->ns_root_list) {
1009                 struct ldlm_resource *res;
1010                 res = list_entry(tmp, struct ldlm_resource, lr_childof);
1011
1012                 ldlm_resource_getref(res);
1013                 spin_unlock(&ns->ns_hash_lock);
1014
1015                 lock_res(res);
1016                 ldlm_resource_dump(level, res);
1017                 unlock_res(res);
1018
1019                 spin_lock(&ns->ns_hash_lock);
1020                 tmp = tmp->next;
1021                 ldlm_resource_putref_locked(res);
1022         }
1023         ns->ns_next_dump = cfs_time_shift(10);
1024         spin_unlock(&ns->ns_hash_lock);
1025 }
1026
1027 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1028 {
1029         struct list_head *tmp;
1030         int pos;
1031
1032         CLASSERT(RES_NAME_SIZE == 4);
1033
1034         if (!((libcfs_debug | D_ERROR) & level))
1035                 return;
1036
1037         CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
1038                ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
1039                res->lr_name.name[2], res->lr_name.name[3],
1040                atomic_read(&res->lr_refcount));
1041
1042         if (!list_empty(&res->lr_granted)) {
1043                 pos = 0;
1044                 CDEBUG(level, "Granted locks:\n");
1045                 list_for_each(tmp, &res->lr_granted) {
1046                         struct ldlm_lock *lock;
1047                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1048                         ldlm_lock_dump(level, lock, ++pos);
1049                 }
1050         }
1051         if (!list_empty(&res->lr_converting)) {
1052                 pos = 0;
1053                 CDEBUG(level, "Converting locks:\n");
1054                 list_for_each(tmp, &res->lr_converting) {
1055                         struct ldlm_lock *lock;
1056                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1057                         ldlm_lock_dump(level, lock, ++pos);
1058                 }
1059         }
1060         if (!list_empty(&res->lr_waiting)) {
1061                 pos = 0;
1062                 CDEBUG(level, "Waiting locks:\n");
1063                 list_for_each(tmp, &res->lr_waiting) {
1064                         struct ldlm_lock *lock;
1065                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1066                         ldlm_lock_dump(level, lock, ++pos);
1067                 }
1068         }
1069 }