lustre/ldlm/ldlm_resource.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
5  *   Author: Phil Schwan <phil@clusterfs.com>
6  *   Author: Peter Braam <braam@clusterfs.com>
7  *
8  *   This file is part of the Lustre file system, http://www.lustre.org
9  *   Lustre is a trademark of Cluster File Systems, Inc.
10  *
11  *   You may have signed or agreed to another license before downloading
12  *   this software.  If so, you are bound by the terms and conditions
13  *   of that agreement, and the following does not apply to you.  See the
14  *   LICENSE file included with this distribution for more information.
15  *
16  *   If you did not agree to a different license, then this copy of Lustre
17  *   is open source software; you can redistribute it and/or modify it
18  *   under the terms of version 2 of the GNU General Public License as
19  *   published by the Free Software Foundation.
20  *
21  *   In either case, Lustre is distributed in the hope that it will be
22  *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
23  *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
24  *   license text for more details.
25  */
26
27 #define DEBUG_SUBSYSTEM S_LDLM
28 #ifdef __KERNEL__
29 # include <lustre_dlm.h>
30 #else
31 # include <liblustre.h>
32 #endif
33
34 #include <obd_class.h>
35 #include "ldlm_internal.h"
36
37 cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
38
39 atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0);
40 atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0);
41
42 struct semaphore ldlm_srv_namespace_lock;
43 struct list_head ldlm_srv_namespace_list = 
44         CFS_LIST_HEAD_INIT(ldlm_srv_namespace_list);
45
46 struct semaphore ldlm_cli_namespace_lock;
47 struct list_head ldlm_cli_namespace_list = 
48         CFS_LIST_HEAD_INIT(ldlm_cli_namespace_list);
49
50 cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
51 cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
52 cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL;
53
54 #ifdef LPROCFS
55 static int ldlm_proc_dump_ns(struct file *file, const char *buffer,
56                              unsigned long count, void *data)
57 {
58         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
59         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
60         RETURN(count);
61 }
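/*
 * Usage sketch: ldlm_proc_dump_ns() is wired up as the write handler of the
 * "dump_namespaces" entry registered in ldlm_proc_setup() below, so a dump
 * can be triggered from user space, e.g. (the path is an assumption based on
 * proc_lustre_root and OBD_LDLM_DEVICENAME):
 *
 *      echo 1 > /proc/fs/lustre/ldlm/dump_namespaces
 *
 * The output goes to the Lustre debug log at the D_DLMTRACE level.
 */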
62
63 int ldlm_proc_setup(void)
64 {
65         int rc;
66         struct lprocfs_vars list[] = {
67                 { "dump_namespaces", NULL, ldlm_proc_dump_ns, NULL },
68                 { NULL }};
69         ENTRY;
70         LASSERT(ldlm_ns_proc_dir == NULL);
71
72         ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
73                                               proc_lustre_root,
74                                               NULL, NULL);
75         if (IS_ERR(ldlm_type_proc_dir)) {
76                 CERROR("LProcFS failed in ldlm-init\n");
77                 rc = PTR_ERR(ldlm_type_proc_dir);
78                 GOTO(err, rc);
79         }
80
81         ldlm_ns_proc_dir = lprocfs_register("namespaces",
82                                             ldlm_type_proc_dir,
83                                             NULL, NULL);
84         if (IS_ERR(ldlm_ns_proc_dir)) {
85                 CERROR("LProcFS failed in ldlm-init\n");
86                 rc = PTR_ERR(ldlm_ns_proc_dir);
87                 GOTO(err_type, rc);
88         }
89
90         ldlm_svc_proc_dir = lprocfs_register("services",
91                                             ldlm_type_proc_dir,
92                                             NULL, NULL);
93         if (IS_ERR(ldlm_svc_proc_dir)) {
94                 CERROR("LProcFS failed in ldlm-init\n");
95                 rc = PTR_ERR(ldlm_svc_proc_dir);
96                 GOTO(err_ns, rc);
97         }
98
99         rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
100
101         RETURN(0);
102
103 err_ns:
104         lprocfs_remove(&ldlm_ns_proc_dir);
105 err_type:
106         lprocfs_remove(&ldlm_type_proc_dir);
107 err:
108         ldlm_svc_proc_dir = NULL;
109         RETURN(rc);
110 }
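/*
 * For reference, a successful ldlm_proc_setup() leaves roughly the following
 * layout behind (assuming proc_lustre_root is /proc/fs/lustre and
 * OBD_LDLM_DEVICENAME expands to "ldlm"):
 *
 *      /proc/fs/lustre/ldlm/dump_namespaces
 *      /proc/fs/lustre/ldlm/namespaces/   <- filled by ldlm_proc_namespace()
 *      /proc/fs/lustre/ldlm/services/     <- filled by the LDLM services
 */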
111
112 void ldlm_proc_cleanup(void)
113 {
114         if (ldlm_svc_proc_dir)
115                 lprocfs_remove(&ldlm_svc_proc_dir);
116
117         if (ldlm_ns_proc_dir)
118                 lprocfs_remove(&ldlm_ns_proc_dir);
119
120         if (ldlm_type_proc_dir)
121                 lprocfs_remove(&ldlm_type_proc_dir);
122 }
123
124 static int lprocfs_rd_lru_size(char *page, char **start, off_t off,
125                                int count, int *eof, void *data)
126 {
127         struct ldlm_namespace *ns = data;
128         __u32 *nr = &ns->ns_max_unused;
129
130         if (ns_connect_lru_resize(ns))
131                 nr = &ns->ns_nr_unused;
132         return lprocfs_rd_uint(page, start, off, count, eof, nr);
133 }
134
135 static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
136                                unsigned long count, void *data)
137 {
138         struct ldlm_namespace *ns = data;
139         char dummy[MAX_STRING_SIZE + 1], *end;
140         unsigned long tmp;
141         int lru_resize;
142
143         dummy[MAX_STRING_SIZE] = '\0';
144         if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
145                 return -EFAULT;
146
147         if (count == 6 && memcmp(dummy, "clear", 5) == 0) {
148                 CDEBUG(D_DLMTRACE,
149                        "dropping all unused locks from namespace %s\n",
150                        ns->ns_name);
151                 if (ns_connect_lru_resize(ns)) {
152                         int canceled, unused  = ns->ns_nr_unused;
153                         
154                         /* Try to cancel all @ns_nr_unused locks. */
155                         canceled = ldlm_cancel_lru(ns, unused, LDLM_SYNC, 
156                                                    LDLM_CANCEL_PASSED);
157                         if (canceled < unused) {
158                                 CERROR("not all requested locks are canceled, "
159                                        "requested: %d, canceled: %d\n", unused, 
160                                        canceled);
161                                 return -EINVAL;
162                         }
163                 } else {
164                         tmp = ns->ns_max_unused;
165                         ns->ns_max_unused = 0;
166                         ldlm_cancel_lru(ns, 0, LDLM_SYNC, LDLM_CANCEL_PASSED);
167                         ns->ns_max_unused = tmp;
168                 }
169                 return count;
170         }
171
172         tmp = simple_strtoul(dummy, &end, 0);
173         if (dummy == end) {
174                 CERROR("invalid value written\n");
175                 return -EINVAL;
176         }
177         lru_resize = (tmp == 0);
178         
179         if (ns_connect_lru_resize(ns)) {
180                 if (!lru_resize)
181                         ns->ns_max_unused = (unsigned int)tmp;
182                         
183                 if (tmp > ns->ns_nr_unused)
184                         tmp = ns->ns_nr_unused;
185                 tmp = ns->ns_nr_unused - tmp;
186                 
187                 CDEBUG(D_DLMTRACE, "changing namespace %s unused locks from %u to %u\n", 
188                        ns->ns_name, ns->ns_nr_unused, (unsigned int)tmp);
189                 ldlm_cancel_lru(ns, (unsigned int)tmp, LDLM_ASYNC, LDLM_CANCEL_PASSED);
190                 
191                 if (!lru_resize) {
192                         CDEBUG(D_DLMTRACE, "disable lru_resize for namespace %s\n", 
193                                ns->ns_name);
194                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
195                 }
196         } else {
197                 CDEBUG(D_DLMTRACE, "changing namespace %s max_unused from %u to %u\n",
198                        ns->ns_name, ns->ns_max_unused, (unsigned int)tmp);
199                 ns->ns_max_unused = (unsigned int)tmp;
200                 ldlm_cancel_lru(ns, 0, LDLM_ASYNC, LDLM_CANCEL_PASSED);
201                 
202                 /* Make sure that LRU resize was originally supported by the
203                  * server before turning it on here. */
204                 if (lru_resize && 
205                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
206                         CDEBUG(D_DLMTRACE, "enable lru_resize for namespace %s\n", 
207                                ns->ns_name);
208                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
209                 }
210         }
211
212         return count;
213 }
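/*
 * Usage sketch for the per-namespace lru_size entry created in
 * ldlm_proc_namespace() below (client namespaces only; the path is an
 * assumption based on the proc registration above):
 *
 *      # cancel all unused locks in the namespace right away
 *      echo clear > /proc/fs/lustre/ldlm/namespaces/<ns>/lru_size
 *
 *      # pin the LRU at 400 locks; this disables lru_resize if it was active
 *      echo 400 > /proc/fs/lustre/ldlm/namespaces/<ns>/lru_size
 *
 *      # writing 0 re-enables lru_resize, provided the server originally
 *      # advertised OBD_CONNECT_LRU_RESIZE
 *      echo 0 > /proc/fs/lustre/ldlm/namespaces/<ns>/lru_size
 */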
214
215 void ldlm_proc_namespace(struct ldlm_namespace *ns)
216 {
217         struct lprocfs_vars lock_vars[2];
218         char lock_name[MAX_STRING_SIZE + 1];
219
220         LASSERT(ns != NULL);
221         LASSERT(ns->ns_name != NULL);
222
223         lock_name[MAX_STRING_SIZE] = '\0';
224
225         memset(lock_vars, 0, sizeof(lock_vars));
226         lock_vars[0].name = lock_name;
227
228         snprintf(lock_name, MAX_STRING_SIZE, "%s/resource_count", ns->ns_name);
229         lock_vars[0].data = &ns->ns_refcount;
230         lock_vars[0].read_fptr = lprocfs_rd_atomic;
231         lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
232
233         snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_count", ns->ns_name);
234         lock_vars[0].data = &ns->ns_locks;
235         lock_vars[0].read_fptr = lprocfs_rd_atomic;
236         lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
237
238         if (ns_is_client(ns)) {
239                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_unused_count",
240                          ns->ns_name);
241                 lock_vars[0].data = &ns->ns_nr_unused;
242                 lock_vars[0].read_fptr = lprocfs_rd_uint;
243                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
244
245                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_size",
246                          ns->ns_name);
247                 lock_vars[0].data = ns;
248                 lock_vars[0].read_fptr = lprocfs_rd_lru_size;
249                 lock_vars[0].write_fptr = lprocfs_wr_lru_size;
250                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
251
252                 snprintf(lock_name, MAX_STRING_SIZE, "%s/shrink_thumb",
253                          ns->ns_name);
254                 lock_vars[0].data = ns;
255                 lock_vars[0].read_fptr = lprocfs_rd_uint;
256                 lock_vars[0].write_fptr = lprocfs_wr_uint;
257                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
258
259                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_max_age",
260                          ns->ns_name);
261                 lock_vars[0].data = &ns->ns_max_age;
262                 lock_vars[0].read_fptr = lprocfs_rd_uint;
263                 lock_vars[0].write_fptr = lprocfs_wr_uint;
264                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
265         } else {
266                 snprintf(lock_name, MAX_STRING_SIZE, "%s/ctime_age_limit",
267                          ns->ns_name);
268                 lock_vars[0].data = &ns->ns_ctime_age_limit;
269                 lock_vars[0].read_fptr = lprocfs_rd_uint;
270                 lock_vars[0].write_fptr = lprocfs_wr_uint;
271                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
272         }
273 }
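/*
 * Summary of the per-namespace entries created above, under
 * namespaces/<ns_name>/ in the LDLM proc directory:
 *
 *      resource_count, lock_count                      all namespaces
 *      lock_unused_count, lru_size, shrink_thumb,
 *      lru_max_age                                     client namespaces
 *      ctime_age_limit                                 server namespaces
 */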
274 #undef MAX_STRING_SIZE
275 #else
276 #define ldlm_proc_namespace(ns) do {} while (0)
277 #endif /* LPROCFS */
278
279 struct ldlm_namespace *ldlm_namespace_new(char *name, ldlm_side_t client, 
280                                           ldlm_appetite_t apt)
281 {
282         struct ldlm_namespace *ns = NULL;
283         struct list_head *bucket;
284         int rc, idx, namelen;
285         ENTRY;
286
287         rc = ldlm_get_ref();
288         if (rc) {
289                 CERROR("ldlm_get_ref failed: %d\n", rc);
290                 RETURN(NULL);
291         }
292
293         OBD_ALLOC_PTR(ns);
294         if (!ns)
295                 GOTO(out_ref, NULL);
296
297         OBD_VMALLOC(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
298         if (!ns->ns_hash)
299                 GOTO(out_ns, NULL);
300
301         ns->ns_shrink_thumb = LDLM_LOCK_SHRINK_THUMB;
302         ns->ns_appetite = apt;
303         namelen = strlen(name);
304         OBD_ALLOC(ns->ns_name, namelen + 1);
305         if (!ns->ns_name)
306                 GOTO(out_hash, NULL);
307
308         strcpy(ns->ns_name, name);
309
310         CFS_INIT_LIST_HEAD(&ns->ns_root_list);
311         ns->ns_refcount = 0;
312         ns->ns_client = client;
313         spin_lock_init(&ns->ns_hash_lock);
314         atomic_set(&ns->ns_locks, 0);
315         ns->ns_resources = 0;
316         cfs_waitq_init(&ns->ns_waitq);
317
318         for (bucket = ns->ns_hash + RES_HASH_SIZE - 1; bucket >= ns->ns_hash;
319              bucket--)
320                 CFS_INIT_LIST_HEAD(bucket);
321
322         CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
323         ns->ns_nr_unused = 0;
324         ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
325         ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
326         ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
327         spin_lock_init(&ns->ns_unused_lock);
328         ns->ns_orig_connect_flags = 0;
329         ns->ns_connect_flags = 0;
330         ldlm_proc_namespace(ns);
331
332         idx = atomic_read(ldlm_namespace_nr(client));
333         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
334         if (rc) {
335                 CERROR("Can't initialize lock pool, rc %d\n", rc);
336                 GOTO(out_proc, rc);
337         }
338
339         mutex_down(ldlm_namespace_lock(client));
340         list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
341         atomic_inc(ldlm_namespace_nr(client));
342         mutex_up(ldlm_namespace_lock(client));
343
344         RETURN(ns);
345 out_proc:
346         ldlm_namespace_cleanup(ns, 0);
347         OBD_FREE(ns->ns_name, namelen + 1);
348 out_hash:
349         OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
350 out_ns:
351         OBD_FREE_PTR(ns);
352 out_ref:
353         ldlm_put_ref(0);
354         RETURN(NULL);
355 }
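/*
 * Minimal creation/teardown sketch (illustrative only: the namespace name is
 * made up and LDLM_NAMESPACE_MODEST is assumed to be a valid ldlm_appetite_t
 * value):
 *
 *      struct ldlm_namespace *ns;
 *
 *      ns = ldlm_namespace_new("example-ns", LDLM_NAMESPACE_CLIENT,
 *                              LDLM_NAMESPACE_MODEST);
 *      if (ns == NULL)
 *              return -ENOMEM;
 *      ...
 *      ldlm_namespace_free(ns, 0);
 */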
356
357 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
358
359 /* If flags contains FL_LOCAL_ONLY, don't try to tell the server; just clean up.
360  * This is currently only used for recovery, and we make certain assumptions
361  * as a result--notably, that we shouldn't cancel locks with refs. -phil
362  *
363  * Called with the ns_lock held. */
364 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
365                              int flags)
366 {
367         struct list_head *tmp;
368         int rc = 0, client = ns_is_client(res->lr_namespace);
369         int local_only = (flags & LDLM_FL_LOCAL_ONLY);
370         ENTRY;
371
372
373         do {
374                 struct ldlm_lock *lock = NULL;
375
376                 /* First, look for a lock that has not been cleaned up yet;
377                  * all cleaned locks are marked with the CLEANED flag. */
378                 lock_res(res);
379                 list_for_each(tmp, q) {
380                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
381                         if (lock->l_flags & LDLM_FL_CLEANED) {
382                                 lock = NULL;
383                                 continue;
384                         }
385                         LDLM_LOCK_GET(lock);
386                         lock->l_flags |= LDLM_FL_CLEANED;
387                         break;
388                 }
389
390                 if (lock == NULL) {
391                         unlock_res(res);
392                         break;
393                 }
394
395                 /* Set CBPENDING so nothing in the cancellation path
396                  * can match this lock */
397                 lock->l_flags |= LDLM_FL_CBPENDING;
398                 lock->l_flags |= LDLM_FL_FAILED;
399                 lock->l_flags |= flags;
400
401                 /* ... without sending a CANCEL message for local_only. */
402                 if (local_only)
403                         lock->l_flags |= LDLM_FL_LOCAL_ONLY;
404
405                 if (local_only && (lock->l_readers || lock->l_writers)) {
406                         /* This is a little bit gross, but much better than the
407                          * alternative: pretend that we got a blocking AST from
408                          * the server, so that when the lock is decref'd, it
409                          * will go away ... */
410                         unlock_res(res);
411                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
412                         if (lock->l_completion_ast)
413                                 lock->l_completion_ast(lock, 0, NULL);
414                         LDLM_LOCK_PUT(lock);
415                         continue;
416                 }
417
418                 if (client) {
419                         struct lustre_handle lockh;
420
421                         unlock_res(res);
422                         ldlm_lock2handle(lock, &lockh);
423                         rc = ldlm_cli_cancel(&lockh);
424                         if (rc)
425                                 CERROR("ldlm_cli_cancel: %d\n", rc);
426                 } else {
427                         ldlm_resource_unlink_lock(lock);
428                         unlock_res(res);
429                         LDLM_DEBUG(lock, "Freeing a lock still held by a "
430                                    "client node");
431                         ldlm_lock_destroy(lock);
432                 }
433                 LDLM_LOCK_PUT(lock);
434         } while (1);
435
436         EXIT;
437 }
438
439 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
440 {
441         struct list_head *tmp;
442         int i;
443
444         if (ns == NULL) {
445                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
446                 return ELDLM_OK;
447         }
448
449         for (i = 0; i < RES_HASH_SIZE; i++) {
450                 spin_lock(&ns->ns_hash_lock);
451                 tmp = ns->ns_hash[i].next;
452                 while (tmp != &(ns->ns_hash[i])) {
453                         struct ldlm_resource *res;
454                         res = list_entry(tmp, struct ldlm_resource, lr_hash);
455                         ldlm_resource_getref(res);
456                         spin_unlock(&ns->ns_hash_lock);
457
458                         cleanup_resource(res, &res->lr_granted, flags);
459                         cleanup_resource(res, &res->lr_converting, flags);
460                         cleanup_resource(res, &res->lr_waiting, flags);
461
462                         spin_lock(&ns->ns_hash_lock);
463                         tmp  = tmp->next;
464
465                         /* XXX: the former code caused problems when a race
466                          * between ldlm_namespace_cleanup() and lockd() let the
467                          * client get a blocking AST while the lock was being
468                          * destroyed by the server. This is the 1_4 branch
469                          * solution; let's see how it behaves. */
470                         if (!ldlm_resource_putref_locked(res))
471                                 CDEBUG(D_INFO,
472                                        "Namespace %s resource refcount nonzero "
473                                        "(%d) after lock cleanup; forcing cleanup.\n",
474                                        ns->ns_name, atomic_read(&res->lr_refcount));
475                 }
476                 spin_unlock(&ns->ns_hash_lock);
477         }
478
479         return ELDLM_OK;
480 }
481
482 /* Clean up the namespace; it is freed in ldlm_namespace_free_post(). */
483 int ldlm_namespace_free_prior(struct ldlm_namespace *ns)
484 {
485         ENTRY;
486         if (!ns)
487                 RETURN(ELDLM_OK);
488
489         mutex_down(ldlm_namespace_lock(ns->ns_client));
490         /*
491          * Some asserts and possibly other parts of the code still check
492          * list_empty(&ns->ns_list_chain). This is why it is important
493          * to use list_del_init() here.
494          */
495         list_del_init(&ns->ns_list_chain);
496         atomic_dec(ldlm_namespace_nr(ns->ns_client));
497         ldlm_pool_fini(&ns->ns_pool);
498         mutex_up(ldlm_namespace_lock(ns->ns_client));
499
500         /* At shutdown time, don't call the cancellation callback */
501         ldlm_namespace_cleanup(ns, 0);
502
503         if (ns->ns_refcount > 0) {
504                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
505                 int rc;
506                 CDEBUG(D_DLMTRACE,
507                        "dlm namespace %s free waiting on refcount %d\n",
508                        ns->ns_name, ns->ns_refcount);
509                 rc = l_wait_event(ns->ns_waitq,
510                                   ns->ns_refcount == 0, &lwi);
511                 if (ns->ns_refcount)
512                         LCONSOLE_ERROR_MSG(0x139, "Lock manager: wait for %s "
513                                            "namespace cleanup aborted with %d "
514                                            "resources in use. (%d)\nI'm going "
515                                            "to try to clean up anyway, but I "
516                                            "might need a reboot of this node.\n",
517                                             ns->ns_name, (int) ns->ns_refcount, 
518                                             rc);
519                 CDEBUG(D_DLMTRACE,
520                        "dlm namespace %s free done waiting\n", ns->ns_name);
521         }
522
523         RETURN(ELDLM_OK);
524 }
525
526 int ldlm_namespace_free_post(struct ldlm_namespace *ns, int force)
527 {
528         ENTRY;
529         if (!ns)
530                 RETURN(ELDLM_OK);
531
532 #ifdef LPROCFS
533         {
534                 struct proc_dir_entry *dir;
535                 dir = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
536                 if (dir == NULL) {
537                         CERROR("dlm namespace %s has no procfs dir?\n",
538                                ns->ns_name);
539                 } else {
540                         lprocfs_remove(&dir);
541                 }
542         }
543 #endif
544
545         OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
546         OBD_FREE(ns->ns_name, strlen(ns->ns_name) + 1);
547         /* 
548          * @ns must not be on any namespace list at this point; otherwise the
549          * pools thread could end up using the already freed @ns.
550          */
551         LASSERT(list_empty(&ns->ns_list_chain));
552         OBD_FREE_PTR(ns);
553         ldlm_put_ref(force);
554         RETURN(ELDLM_OK);
555 }
556
557
558 /* Clean up the resources and free the namespace.
559  * bug 12864:
560  * Deadlock issue:
561  * proc1: destroy import
562  *        class_disconnect_export(grab cl_sem) ->
563  *              -> ldlm_namespace_free ->
564  *              -> lprocfs_remove(grab _lprocfs_lock).
565  * proc2: read proc info
566  *        lprocfs_fops_read(grab _lprocfs_lock) ->
567  *              -> osc_rd_active, etc(grab cl_sem).
568  *
569  * To avoid this, ldlm_namespace_free is split into two parts: the first
570  * part, ldlm_namespace_free_prior, cleans up the resources that are still
571  * in use; the second part, ldlm_namespace_free_post, unregisters the
572  * lprocfs entries and then frees the memory. The second part is called
573  * without cli->cl_sem held.
574  */
575 int ldlm_namespace_free(struct ldlm_namespace *ns, int force)
576 {
577         ldlm_namespace_free_prior(ns);
578         ldlm_namespace_free_post(ns, force);
579         return ELDLM_OK;
580 }
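/*
 * Caller-side sketch of the split described above (hypothetical caller; the
 * point is only the ordering with respect to cli->cl_sem):
 *
 *      take cli->cl_sem
 *      ...
 *      ldlm_namespace_free_prior(ns);        <- cancels locks, may block
 *      ...
 *      release cli->cl_sem
 *      ldlm_namespace_free_post(ns, force);  <- drops lprocfs entries, frees ns
 */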
581
582
583 void ldlm_namespace_get_nolock(struct ldlm_namespace *ns)
584 {
585         LASSERT(ns->ns_refcount >= 0);
586         ns->ns_refcount++;
587 }
588
589 void ldlm_namespace_get(struct ldlm_namespace *ns)
590 {
591         spin_lock(&ns->ns_hash_lock);
592         ldlm_namespace_get_nolock(ns);
593         spin_unlock(&ns->ns_hash_lock);
594 }
595
596 void ldlm_namespace_put_nolock(struct ldlm_namespace *ns, int wakeup)
597 {
598         LASSERT(ns->ns_refcount > 0);
599         ns->ns_refcount--;
600         if (ns->ns_refcount == 0 && wakeup)
601                 wake_up(&ns->ns_waitq);
602 }
603
604 void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup)
605 {
606         spin_lock(&ns->ns_hash_lock);
607         ldlm_namespace_put_nolock(ns, wakeup);
608         spin_unlock(&ns->ns_hash_lock);
609 }
610
611 /* Should be called with ldlm_namespace_lock(client) held. */
612 void ldlm_namespace_move(struct ldlm_namespace *ns, ldlm_side_t client)
613 {
614         LASSERT(!list_empty(&ns->ns_list_chain));
615         LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
616         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
617 }
618
619 /* Should be called with ldlm_namespace_lock(client) held. */
620 struct ldlm_namespace *ldlm_namespace_first(ldlm_side_t client)
621 {
622         LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
623         LASSERT(!list_empty(ldlm_namespace_list(client)));
624         return container_of(ldlm_namespace_list(client)->next, 
625                 struct ldlm_namespace, ns_list_chain);
626 }
627 static __u32 ldlm_hash_fn(struct ldlm_resource *parent,
628                           const struct ldlm_res_id *name)
629 {
630         __u32 hash = 0;
631         int i;
632
633         for (i = 0; i < RES_NAME_SIZE; i++)
634                 hash += name->name[i];
635
636         hash += (__u32)((unsigned long)parent >> 4);
637
638         return (hash & RES_HASH_MASK);
639 }
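/*
 * Worked example (values are illustrative): a resource named
 * {0x100, 0, 0, 0} with no parent hashes to (0x100 + 0 + 0 + 0 + 0),
 * i.e. bucket 0x100 & RES_HASH_MASK.
 */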
640
641 static struct ldlm_resource *ldlm_resource_new(void)
642 {
643         struct ldlm_resource *res;
644         int idx;
645
646         OBD_SLAB_ALLOC(res, ldlm_resource_slab, CFS_ALLOC_IO, sizeof *res);
647         if (res == NULL)
648                 return NULL;
649
650         memset(res, 0, sizeof(*res));
651
652         CFS_INIT_LIST_HEAD(&res->lr_children);
653         CFS_INIT_LIST_HEAD(&res->lr_childof);
654         CFS_INIT_LIST_HEAD(&res->lr_granted);
655         CFS_INIT_LIST_HEAD(&res->lr_converting);
656         CFS_INIT_LIST_HEAD(&res->lr_waiting);
657
658         /* Initialize interval trees for each lock mode. */
659         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
660                 res->lr_itree[idx].lit_size = 0;
661                 res->lr_itree[idx].lit_mode = 1 << idx;
662                 res->lr_itree[idx].lit_root = NULL;
663         }
664
665         atomic_set(&res->lr_refcount, 1);
666         spin_lock_init(&res->lr_lock);
667
668         /* Whoever creates the resource must unlock
669          * the semaphore after LVB initialization. */
670         init_MUTEX_LOCKED(&res->lr_lvb_sem);
671
672         return res;
673 }
674
675 /* must be called with hash lock held */
676 static struct ldlm_resource *
677 ldlm_resource_find(struct ldlm_namespace *ns, const struct ldlm_res_id *name,
678                    __u32 hash)
679 {
680         struct list_head *bucket, *tmp;
681         struct ldlm_resource *res;
682
683         LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
684         bucket = ns->ns_hash + hash;
685
686         list_for_each(tmp, bucket) {
687                 res = list_entry(tmp, struct ldlm_resource, lr_hash);
688                 if (memcmp(&res->lr_name, name, sizeof(res->lr_name)) == 0)
689                         return res;
690         }
691
692         return NULL;
693 }
694
695 /* Args: locked namespace
696  * Returns: newly-allocated, referenced, unlocked resource */
697 static struct ldlm_resource *
698 ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
699                   const struct ldlm_res_id *name, __u32 hash, ldlm_type_t type)
700 {
701         struct list_head *bucket;
702         struct ldlm_resource *res, *old_res;
703         ENTRY;
704
705         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
706                  "type: %d\n", type);
707
708         res = ldlm_resource_new();
709         if (!res)
710                 RETURN(NULL);
711
712         res->lr_name = *name;
713         res->lr_namespace = ns;
714         res->lr_type = type;
715         res->lr_most_restr = LCK_NL;
716
717         spin_lock(&ns->ns_hash_lock);
718         old_res = ldlm_resource_find(ns, name, hash);
719         if (old_res) {
720                 /* Someone else won the race and added the resource first. */
721                 ldlm_resource_getref(old_res);
722                 spin_unlock(&ns->ns_hash_lock);
723                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
724                 /* synchronize WRT resource creation */
725                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
726                         down(&old_res->lr_lvb_sem);
727                         up(&old_res->lr_lvb_sem);
728                 }
729                 RETURN(old_res);
730         }
731
732         /* we won! let's add the resource */
733         bucket = ns->ns_hash + hash;
734         list_add(&res->lr_hash, bucket);
735         ns->ns_resources++;
736         ldlm_namespace_get_nolock(ns);
737
738         if (parent == NULL) {
739                 list_add(&res->lr_childof, &ns->ns_root_list);
740         } else {
741                 res->lr_parent = parent;
742                 list_add(&res->lr_childof, &parent->lr_children);
743         }
744         spin_unlock(&ns->ns_hash_lock);
745
746         if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
747                 int rc;
748
749                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
750                 rc = ns->ns_lvbo->lvbo_init(res);
751                 if (rc)
752                         CERROR("lvbo_init failed for resource "
753                                LPU64": rc %d\n", name->name[0], rc);
754                 /* We created the resource with lr_lvb_sem held; release it. */
755                 up(&res->lr_lvb_sem);
756         }
757
758         RETURN(res);
759 }
760
761 /* Args: unlocked namespace
762  * Locks: takes and releases ns->ns_lock and res->lr_lock
763  * Returns: referenced, unlocked ldlm_resource or NULL */
764 struct ldlm_resource *
765 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
766                   const struct ldlm_res_id *name, ldlm_type_t type, int create)
767 {
768         __u32 hash = ldlm_hash_fn(parent, name);
769         struct ldlm_resource *res = NULL;
770         ENTRY;
771
772         LASSERT(ns != NULL);
773         LASSERT(ns->ns_hash != NULL);
774         LASSERT(name->name[0] != 0);
775
776         spin_lock(&ns->ns_hash_lock);
777         res = ldlm_resource_find(ns, name, hash);
778         if (res) {
779                 ldlm_resource_getref(res);
780                 spin_unlock(&ns->ns_hash_lock);
781                 /* synchronize WRT resource creation */
782                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
783                         down(&res->lr_lvb_sem);
784                         up(&res->lr_lvb_sem);
785                 }
786                 RETURN(res);
787         }
788         spin_unlock(&ns->ns_hash_lock);
789
790         if (create == 0)
791                 RETURN(NULL);
792
793         res = ldlm_resource_add(ns, parent, name, hash, type);
794         RETURN(res);
795 }
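/*
 * Lookup/creation sketch (the res_id value is illustrative and error
 * handling is elided):
 *
 *      struct ldlm_res_id res_id = { .name = { 0x2001, 0, 0, 0 } };
 *      struct ldlm_resource *res;
 *
 *      res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
 *      if (res == NULL)
 *              return -ENOMEM;
 *      ...
 *      ldlm_resource_putref(res);
 */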
796
797 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
798 {
799         LASSERT(res != NULL);
800         LASSERT(res != LP_POISON);
801         atomic_inc(&res->lr_refcount);
802         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
803                atomic_read(&res->lr_refcount));
804         return res;
805 }
806
807 void __ldlm_resource_putref_final(struct ldlm_resource *res)
808 {
809         struct ldlm_namespace *ns = res->lr_namespace;
810
811         LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
812
813         if (!list_empty(&res->lr_granted)) {
814                 ldlm_resource_dump(D_ERROR, res);
815                 LBUG();
816         }
817
818         if (!list_empty(&res->lr_converting)) {
819                 ldlm_resource_dump(D_ERROR, res);
820                 LBUG();
821         }
822
823         if (!list_empty(&res->lr_waiting)) {
824                 ldlm_resource_dump(D_ERROR, res);
825                 LBUG();
826         }
827
828         if (!list_empty(&res->lr_children)) {
829                 ldlm_resource_dump(D_ERROR, res);
830                 LBUG();
831         }
832
833         /* Pass 0 here so as not to wake up ->ns_waitq yet; we will do that a
834          * few lines below, once all children are freed. */
835         ldlm_namespace_put_nolock(ns, 0);
836         list_del_init(&res->lr_hash);
837         list_del_init(&res->lr_childof);
838
839         ns->ns_resources--;
840         if (ns->ns_resources == 0)
841                 wake_up(&ns->ns_waitq);
842 }
843
844 /* Returns 1 if the resource was freed, 0 if it remains. */
845 int ldlm_resource_putref(struct ldlm_resource *res)
846 {
847         struct ldlm_namespace *ns = res->lr_namespace;
848         int rc = 0;
849         ENTRY;
850
851         CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
852                atomic_read(&res->lr_refcount) - 1);
853         LASSERTF(atomic_read(&res->lr_refcount) > 0, "%d",
854                  atomic_read(&res->lr_refcount));
855         LASSERTF(atomic_read(&res->lr_refcount) < LI_POISON, "%d",
856                  atomic_read(&res->lr_refcount));
857
858         if (atomic_dec_and_lock(&res->lr_refcount, &ns->ns_hash_lock)) {
859                 __ldlm_resource_putref_final(res);
860                 spin_unlock(&ns->ns_hash_lock);
861                 if (res->lr_lvb_data)
862                         OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
863                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
864                 rc = 1;
865         }
866
867         RETURN(rc);
868 }
869
870 /* Returns 1 if the resource was freed, 0 if it remains. */
871 int ldlm_resource_putref_locked(struct ldlm_resource *res)
872 {
873         int rc = 0;
874         ENTRY;
875
876         CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
877                atomic_read(&res->lr_refcount) - 1);
878         LASSERT(atomic_read(&res->lr_refcount) > 0);
879         LASSERT(atomic_read(&res->lr_refcount) < LI_POISON);
880
881         LASSERT(atomic_read(&res->lr_refcount) >= 0);
882         if (atomic_dec_and_test(&res->lr_refcount)) {
883                 __ldlm_resource_putref_final(res);
884                 if (res->lr_lvb_data)
885                         OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
886                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
887                 rc = 1;
888         }
889
890         RETURN(rc);
891 }
892
893 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
894                             struct ldlm_lock *lock)
895 {
896         check_res_locked(res);
897
898         ldlm_resource_dump(D_OTHER, res);
899         CDEBUG(D_OTHER, "About to add this lock:\n");
900         ldlm_lock_dump(D_OTHER, lock, 0);
901
902         if (lock->l_destroyed) {
903                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
904                 return;
905         }
906
907         LASSERT(list_empty(&lock->l_res_link));
908
909         list_add_tail(&lock->l_res_link, head);
910 }
911
912 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
913                                      struct ldlm_lock *new)
914 {
915         struct ldlm_resource *res = original->l_resource;
916
917         check_res_locked(res);
918
919         ldlm_resource_dump(D_OTHER, res);
920         CDEBUG(D_OTHER, "About to insert this lock after %p:\n", original);
921         ldlm_lock_dump(D_OTHER, new, 0);
922
923         if (new->l_destroyed) {
924                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
925                 goto out;
926         }
927
928         LASSERT(list_empty(&new->l_res_link));
929
930         list_add(&new->l_res_link, &original->l_res_link);
931  out:;
932 }
933
934 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
935 {
936         int type = lock->l_resource->lr_type;
937
938         check_res_locked(lock->l_resource);
939         if (type == LDLM_IBITS || type == LDLM_PLAIN)
940                 ldlm_unlink_lock_skiplist(lock);
941         else if (type == LDLM_EXTENT)
942                 ldlm_extent_unlink_lock(lock);
943         list_del_init(&lock->l_res_link);
944 }
945
946 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
947 {
948         desc->lr_type = res->lr_type;
949         desc->lr_name = res->lr_name;
950 }
951
952 void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
953 {
954         struct list_head *tmp;
955
956         if (!((libcfs_debug | D_ERROR) & level))
957                 return;
958
959         mutex_down(ldlm_namespace_lock(client));
960
961         list_for_each(tmp, ldlm_namespace_list(client)) {
962                 struct ldlm_namespace *ns;
963                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
964                 ldlm_namespace_dump(level, ns);
965         }
966
967         mutex_up(ldlm_namespace_lock(client));
968 }
969
970 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
971 {
972         struct list_head *tmp;
973
974         if (!((libcfs_debug | D_ERROR) & level))
975                 return;
976
977         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n", 
978                ns->ns_name, ns->ns_refcount, 
979                ns_is_client(ns) ? "client" : "server");
980
981         if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
982                 return;
983
984         spin_lock(&ns->ns_hash_lock);
985         tmp = ns->ns_root_list.next;
986         while (tmp != &ns->ns_root_list) {
987                 struct ldlm_resource *res;
988                 res = list_entry(tmp, struct ldlm_resource, lr_childof);
989
990                 ldlm_resource_getref(res);
991                 spin_unlock(&ns->ns_hash_lock);
992
993                 lock_res(res);
994                 ldlm_resource_dump(level, res);
995                 unlock_res(res);
996
997                 spin_lock(&ns->ns_hash_lock);
998                 tmp = tmp->next;
999                 ldlm_resource_putref_locked(res);
1000         }
1001         ns->ns_next_dump = cfs_time_shift(10);
1002         spin_unlock(&ns->ns_hash_lock);
1003 }
1004
1005 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1006 {
1007         struct list_head *tmp;
1008         int pos;
1009
1010         CLASSERT(RES_NAME_SIZE == 4);
1011
1012         if (!((libcfs_debug | D_ERROR) & level))
1013                 return;
1014
1015         CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
1016                ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
1017                res->lr_name.name[2], res->lr_name.name[3],
1018                atomic_read(&res->lr_refcount));
1019
1020         if (!list_empty(&res->lr_granted)) {
1021                 pos = 0;
1022                 CDEBUG(level, "Granted locks:\n");
1023                 list_for_each(tmp, &res->lr_granted) {
1024                         struct ldlm_lock *lock;
1025                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1026                         ldlm_lock_dump(level, lock, ++pos);
1027                 }
1028         }
1029         if (!list_empty(&res->lr_converting)) {
1030                 pos = 0;
1031                 CDEBUG(level, "Converting locks:\n");
1032                 list_for_each(tmp, &res->lr_converting) {
1033                         struct ldlm_lock *lock;
1034                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1035                         ldlm_lock_dump(level, lock, ++pos);
1036                 }
1037         }
1038         if (!list_empty(&res->lr_waiting)) {
1039                 pos = 0;
1040                 CDEBUG(level, "Waiting locks:\n");
1041                 list_for_each(tmp, &res->lr_waiting) {
1042                         struct ldlm_lock *lock;
1043                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1044                         ldlm_lock_dump(level, lock, ++pos);
1045                 }
1046         }
1047 }