/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Phil Schwan <phil@clusterfs.com>
 *   Author: Peter Braam <braam@clusterfs.com>
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
# include <lustre_dlm.h>
#else
# include <liblustre.h>
#endif

#include <obd_class.h>
#include "ldlm_internal.h"

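/*
 * Slab caches for ldlm_resource and ldlm_lock structures (set up when the
 * LDLM module initializes), plus the per-side (server/client) namespace
 * registries: a counter, a semaphore protecting the list, and the list of
 * active namespaces itself.  The ldlm_*_proc_dir pointers are the proc
 * directories created by ldlm_proc_setup() below.
 */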
cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;

atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0);
atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0);

struct semaphore ldlm_srv_namespace_lock;
struct list_head ldlm_srv_namespace_list =
        CFS_LIST_HEAD_INIT(ldlm_srv_namespace_list);

struct semaphore ldlm_cli_namespace_lock;
struct list_head ldlm_cli_namespace_list =
        CFS_LIST_HEAD_INIT(ldlm_cli_namespace_list);

cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL;

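/*
 * lprocfs interface.  ldlm_proc_dump_ns() is the write handler behind the
 * "dump_namespaces" entry registered in ldlm_proc_setup(); writing anything
 * to it dumps every server- and client-side namespace to the debug log at
 * the D_DLMTRACE level.
 */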
#ifdef LPROCFS
static int ldlm_proc_dump_ns(struct file *file, const char *buffer,
                             unsigned long count, void *data)
{
        ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
        ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
        RETURN(count);
}

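/*
 * Register the top-level LDLM proc directory (named after
 * OBD_LDLM_DEVICENAME) under proc_lustre_root, with the "namespaces/" and
 * "services/" subdirectories and the "dump_namespaces" control file.
 * On failure, everything registered so far is torn down again.
 */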
int ldlm_proc_setup(void)
{
        int rc;
        struct lprocfs_vars list[] = {
                { "dump_namespaces", NULL, ldlm_proc_dump_ns, NULL },
                { NULL }};
        ENTRY;
        LASSERT(ldlm_ns_proc_dir == NULL);

        ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
                                              proc_lustre_root,
                                              NULL, NULL);
        if (IS_ERR(ldlm_type_proc_dir)) {
                CERROR("LProcFS failed in ldlm-init\n");
                rc = PTR_ERR(ldlm_type_proc_dir);
                GOTO(err, rc);
        }

        ldlm_ns_proc_dir = lprocfs_register("namespaces",
                                            ldlm_type_proc_dir,
                                            NULL, NULL);
        if (IS_ERR(ldlm_ns_proc_dir)) {
                CERROR("LProcFS failed in ldlm-init\n");
                rc = PTR_ERR(ldlm_ns_proc_dir);
                GOTO(err_type, rc);
        }

        ldlm_svc_proc_dir = lprocfs_register("services",
                                            ldlm_type_proc_dir,
                                            NULL, NULL);
        if (IS_ERR(ldlm_svc_proc_dir)) {
                CERROR("LProcFS failed in ldlm-init\n");
                rc = PTR_ERR(ldlm_svc_proc_dir);
                GOTO(err_ns, rc);
        }

        rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);

        RETURN(0);

err_ns:
        lprocfs_remove(&ldlm_ns_proc_dir);
err_type:
        lprocfs_remove(&ldlm_type_proc_dir);
err:
        /* Reset any pointers still holding ERR_PTR() values so that
         * ldlm_proc_cleanup() does not try to lprocfs_remove() them. */
        ldlm_svc_proc_dir = NULL;
        ldlm_ns_proc_dir = NULL;
        ldlm_type_proc_dir = NULL;
        RETURN(rc);
}

void ldlm_proc_cleanup(void)
{
        if (ldlm_svc_proc_dir)
                lprocfs_remove(&ldlm_svc_proc_dir);

        if (ldlm_ns_proc_dir)
                lprocfs_remove(&ldlm_ns_proc_dir);

        if (ldlm_type_proc_dir)
                lprocfs_remove(&ldlm_type_proc_dir);
}

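/*
 * Read handler for the per-namespace "lru_size" proc file: with LRU
 * resizing enabled it reports the current number of unused locks,
 * otherwise it reports the configured maximum LRU size.
 */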
static int lprocfs_rd_lru_size(char *page, char **start, off_t off,
                               int count, int *eof, void *data)
{
        struct ldlm_namespace *ns = data;
        __u32 *nr = &ns->ns_max_unused;

        if (ns_connect_lru_resize(ns))
                nr = &ns->ns_nr_unused;
        return lprocfs_rd_uint(page, start, off, count, eof, nr);
}

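/*
 * Write handler for the per-namespace "lru_size" proc file.  Writing the
 * string "clear" cancels all unused locks in the namespace.  Writing a
 * nonzero number sets a fixed LRU size (ns_max_unused) and disables dynamic
 * LRU resizing; writing 0 re-enables LRU resizing when the server advertised
 * OBD_CONNECT_LRU_RESIZE at connect time.  Typical usage (the proc path may
 * differ between setups):
 *
 *      echo clear > /proc/fs/lustre/ldlm/namespaces/<namespace>/lru_size
 */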
static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
                               unsigned long count, void *data)
{
        struct ldlm_namespace *ns = data;
        char dummy[MAX_STRING_SIZE + 1], *end;
        unsigned long tmp;
        int lru_resize;

        /* Copy at most what the caller actually wrote and NUL-terminate it;
         * copying a fixed MAX_STRING_SIZE could read past the user buffer. */
        if (count > MAX_STRING_SIZE)
                count = MAX_STRING_SIZE;
        if (copy_from_user(dummy, buffer, count))
                return -EFAULT;
        dummy[count] = '\0';

        if (count == 6 && memcmp(dummy, "clear", 5) == 0) {
                CDEBUG(D_DLMTRACE,
                       "dropping all unused locks from namespace %s\n",
                       ns->ns_name);
                if (ns_connect_lru_resize(ns)) {
                        int canceled, unused = ns->ns_nr_unused;

                        /* Try to cancel all @ns_nr_unused locks. */
                        canceled = ldlm_cancel_lru(ns, unused, LDLM_SYNC,
                                                   LDLM_CANCEL_PASSED);
                        if (canceled < unused) {
                                CERROR("not all requested locks are canceled, "
                                       "requested: %d, canceled: %d\n", unused,
                                       canceled);
                                return -EINVAL;
                        }
                } else {
                        tmp = ns->ns_max_unused;
                        ns->ns_max_unused = 0;
                        ldlm_cancel_lru(ns, 0, LDLM_SYNC, LDLM_CANCEL_PASSED);
                        ns->ns_max_unused = tmp;
                }
                return count;
        }

        tmp = simple_strtoul(dummy, &end, 0);
        if (dummy == end) {
                CERROR("invalid value written\n");
                return -EINVAL;
        }
        lru_resize = (tmp == 0);

        if (ns_connect_lru_resize(ns)) {
                if (!lru_resize)
                        ns->ns_max_unused = (unsigned int)tmp;

                if (tmp > ns->ns_nr_unused)
                        tmp = ns->ns_nr_unused;
                tmp = ns->ns_nr_unused - tmp;

                CDEBUG(D_DLMTRACE, "changing namespace %s unused locks "
                       "from %u to %u\n", ns->ns_name, ns->ns_nr_unused,
                       (unsigned int)tmp);
                ldlm_cancel_lru(ns, (unsigned int)tmp, LDLM_ASYNC,
                                LDLM_CANCEL_PASSED);

                if (!lru_resize) {
                        CDEBUG(D_DLMTRACE, "disable lru_resize for "
                               "namespace %s\n", ns->ns_name);
                        ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
                }
        } else {
                CDEBUG(D_DLMTRACE, "changing namespace %s max_unused "
                       "from %u to %u\n", ns->ns_name, ns->ns_max_unused,
                       (unsigned int)tmp);
                ns->ns_max_unused = (unsigned int)tmp;
                ldlm_cancel_lru(ns, 0, LDLM_ASYNC, LDLM_CANCEL_PASSED);

                /* Make sure that originally lru resize was supported before
                 * turning it on here. */
                if (lru_resize &&
                    (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
                        CDEBUG(D_DLMTRACE, "enable lru_resize for "
                               "namespace %s\n", ns->ns_name);
                        ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
                }
        }

        return count;
}

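/*
 * Register the per-namespace proc entries under "namespaces/": resource and
 * lock counters for every namespace, plus the LRU tunables
 * (lock_unused_count, lru_size, shrink_thumb, lru_max_age) for client-side
 * namespaces.
 */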
void ldlm_proc_namespace(struct ldlm_namespace *ns)
{
        struct lprocfs_vars lock_vars[2];
        char lock_name[MAX_STRING_SIZE + 1];

        LASSERT(ns != NULL);
        LASSERT(ns->ns_name != NULL);

        lock_name[MAX_STRING_SIZE] = '\0';

        memset(lock_vars, 0, sizeof(lock_vars));
        lock_vars[0].name = lock_name;

        snprintf(lock_name, MAX_STRING_SIZE, "%s/resource_count", ns->ns_name);
        lock_vars[0].data = &ns->ns_refcount;
        lock_vars[0].read_fptr = lprocfs_rd_atomic;
        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

        snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_count", ns->ns_name);
        lock_vars[0].data = &ns->ns_locks;
        lock_vars[0].read_fptr = lprocfs_rd_atomic;
        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

        if (ns_is_client(ns)) {
                snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_unused_count",
                         ns->ns_name);
                lock_vars[0].data = &ns->ns_nr_unused;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_size",
                         ns->ns_name);
                lock_vars[0].data = ns;
                lock_vars[0].read_fptr = lprocfs_rd_lru_size;
                lock_vars[0].write_fptr = lprocfs_wr_lru_size;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/shrink_thumb",
                         ns->ns_name);
                /* Point at the ns_shrink_thumb field itself; the generic
                 * uint handlers expect a pointer to the value, not to the
                 * namespace. */
                lock_vars[0].data = &ns->ns_shrink_thumb;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lock_vars[0].write_fptr = lprocfs_wr_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_max_age",
                         ns->ns_name);
                lock_vars[0].data = &ns->ns_max_age;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lock_vars[0].write_fptr = lprocfs_wr_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
        }
}
#undef MAX_STRING_SIZE
#else
#define ldlm_proc_namespace(ns) do {} while (0)
#endif /* LPROCFS */

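/*
 * Allocate and initialize a new namespace: the resource hash table, name,
 * LRU list, proc entries and the per-namespace lock pool.  The namespace is
 * added to the global server- or client-side namespace list according to
 * @client.  Returns NULL on failure.
 */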
struct ldlm_namespace *ldlm_namespace_new(char *name, ldlm_side_t client,
                                          ldlm_appetite_t apt)
{
        struct ldlm_namespace *ns = NULL;
        struct list_head *bucket;
        int rc, idx, namelen;
        ENTRY;

        rc = ldlm_get_ref();
        if (rc) {
                CERROR("ldlm_get_ref failed: %d\n", rc);
                RETURN(NULL);
        }

        OBD_ALLOC_PTR(ns);
        if (!ns)
                GOTO(out_ref, NULL);

        OBD_VMALLOC(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        if (!ns->ns_hash)
                GOTO(out_ns, NULL);

        ns->ns_shrink_thumb = LDLM_LOCK_SHRINK_THUMB;
        ns->ns_appetite = apt;
        namelen = strlen(name);
        OBD_ALLOC(ns->ns_name, namelen + 1);
        if (!ns->ns_name)
                GOTO(out_hash, NULL);

        strcpy(ns->ns_name, name);

        CFS_INIT_LIST_HEAD(&ns->ns_root_list);
        ns->ns_refcount = 0;
        ns->ns_client = client;
        spin_lock_init(&ns->ns_hash_lock);
        atomic_set(&ns->ns_locks, 0);
        ns->ns_resources = 0;
        cfs_waitq_init(&ns->ns_waitq);

        for (bucket = ns->ns_hash + RES_HASH_SIZE - 1; bucket >= ns->ns_hash;
             bucket--)
                CFS_INIT_LIST_HEAD(bucket);

        CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
        ns->ns_nr_unused = 0;
        ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
        ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
        spin_lock_init(&ns->ns_unused_lock);
        ns->ns_orig_connect_flags = 0;
        ns->ns_connect_flags = 0;
        ldlm_proc_namespace(ns);

        idx = atomic_read(ldlm_namespace_nr(client));
        rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
        if (rc) {
                CERROR("Can't initialize lock pool, rc %d\n", rc);
                GOTO(out_proc, rc);
        }

        mutex_down(ldlm_namespace_lock(client));
        list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
        atomic_inc(ldlm_namespace_nr(client));
        mutex_up(ldlm_namespace_lock(client));

        RETURN(ns);
out_proc:
        ldlm_namespace_cleanup(ns, 0);
        OBD_FREE(ns->ns_name, namelen + 1);
out_hash:
        OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
out_ns:
        OBD_FREE_PTR(ns);
out_ref:
        ldlm_put_ref(0);
        RETURN(NULL);
}

extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);

/* If flags contains FL_LOCAL_ONLY, don't try to tell the server, just cleanup.
 * This is currently only used for recovery, and we make certain assumptions
 * as a result--notably, that we shouldn't cancel locks with refs. -phil
 *
 * Called with a reference on @res but with no locks held; takes and releases
 * res->lr_lock internally. */
static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
                             int flags)
{
        struct list_head *tmp;
        int rc = 0, client = ns_is_client(res->lr_namespace);
        int local_only = (flags & LDLM_FL_LOCAL_ONLY);
        ENTRY;

        do {
                struct ldlm_lock *lock = NULL;

                /* First, look for a lock that has not been cleaned up yet;
                 * all cleaned locks carry the LDLM_FL_CLEANED flag. */
                lock_res(res);
                list_for_each(tmp, q) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        if (lock->l_flags & LDLM_FL_CLEANED) {
                                lock = NULL;
                                continue;
                        }
                        LDLM_LOCK_GET(lock);
                        lock->l_flags |= LDLM_FL_CLEANED;
                        break;
                }

                if (lock == NULL) {
                        unlock_res(res);
                        break;
                }

                /* Set CBPENDING so nothing in the cancellation path
                 * can match this lock */
                lock->l_flags |= LDLM_FL_CBPENDING;
                lock->l_flags |= LDLM_FL_FAILED;
                lock->l_flags |= flags;

                /* ... without sending a CANCEL message for local_only. */
                if (local_only)
                        lock->l_flags |= LDLM_FL_LOCAL_ONLY;

                if (local_only && (lock->l_readers || lock->l_writers)) {
                        /* This is a little bit gross, but much better than the
                         * alternative: pretend that we got a blocking AST from
                         * the server, so that when the lock is decref'd, it
                         * will go away ... */
                        unlock_res(res);
                        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);
                        LDLM_LOCK_PUT(lock);
                        continue;
                }

                if (client) {
                        struct lustre_handle lockh;

                        unlock_res(res);
                        ldlm_lock2handle(lock, &lockh);
                        rc = ldlm_cli_cancel(&lockh);
                        if (rc)
                                CERROR("ldlm_cli_cancel: %d\n", rc);
                } else {
                        ldlm_resource_unlink_lock(lock);
                        unlock_res(res);
                        LDLM_DEBUG(lock, "Freeing a lock still held by a "
                                   "client node");
                        ldlm_lock_destroy(lock);
                }
                LDLM_LOCK_PUT(lock);
        } while (1);

        EXIT;
}

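/*
 * Walk every resource in the namespace hash and cancel or destroy all locks
 * on the granted, converting and waiting lists.  @flags is passed through to
 * cleanup_resource(); LDLM_FL_LOCAL_ONLY skips sending cancel RPCs and is
 * used during forced cleanup and recovery.
 */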
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
{
        struct list_head *tmp;
        int i;

        if (ns == NULL) {
                CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
                return ELDLM_OK;
        }

        for (i = 0; i < RES_HASH_SIZE; i++) {
                spin_lock(&ns->ns_hash_lock);
                tmp = ns->ns_hash[i].next;
                while (tmp != &(ns->ns_hash[i])) {
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        ldlm_resource_getref(res);
                        spin_unlock(&ns->ns_hash_lock);

                        cleanup_resource(res, &res->lr_granted, flags);
                        cleanup_resource(res, &res->lr_converting, flags);
                        cleanup_resource(res, &res->lr_waiting, flags);

                        spin_lock(&ns->ns_hash_lock);
                        tmp = tmp->next;

                        /* XXX: the former code had problems when
                         * ldlm_namespace_cleanup() raced with lockd() and the
                         * client received a blocking AST while the lock was
                         * being destroyed by the server.  This is the 1.4
                         * branch solution; let's see how it behaves. */
                        if (!ldlm_resource_putref_locked(res))
                                CDEBUG(D_INFO,
                                       "Namespace %s resource refcount nonzero "
                                       "(%d) after lock cleanup; forcing cleanup.\n",
                                       ns->ns_name, atomic_read(&res->lr_refcount));
                }
                spin_unlock(&ns->ns_hash_lock);
        }

        return ELDLM_OK;
}

/* Part one of freeing a namespace: cancel all remaining locks and wait for
 * the resource refcount to drop to zero.  See the comment above
 * ldlm_namespace_free() below for why freeing is split in two. */
int ldlm_namespace_free_prior(struct ldlm_namespace *ns)
{
        ENTRY;
        if (!ns)
                RETURN(ELDLM_OK);

        mutex_down(ldlm_namespace_lock(ns->ns_client));
        list_del(&ns->ns_list_chain);
        atomic_dec(ldlm_namespace_nr(ns->ns_client));
        ldlm_pool_fini(&ns->ns_pool);
        mutex_up(ldlm_namespace_lock(ns->ns_client));

        /* At shutdown time, don't call the cancellation callback */
        ldlm_namespace_cleanup(ns, 0);

        if (ns->ns_refcount > 0) {
                struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
                int rc;
                CDEBUG(D_DLMTRACE,
                       "dlm namespace %s free waiting on refcount %d\n",
                       ns->ns_name, ns->ns_refcount);
                rc = l_wait_event(ns->ns_waitq,
                                  ns->ns_refcount == 0, &lwi);
                if (ns->ns_refcount)
                        LCONSOLE_ERROR_MSG(0x139, "Lock manager: wait for %s "
                                           "namespace cleanup aborted with %d "
                                           "resources in use. (%d)\nI'm going "
                                           "to try to clean up anyway, but I "
                                           "might need a reboot of this node.\n",
                                           ns->ns_name, (int) ns->ns_refcount,
                                           rc);
                CDEBUG(D_DLMTRACE,
                       "dlm namespace %s free done waiting\n", ns->ns_name);
        }

        RETURN(ELDLM_OK);
}

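/*
 * Part two of freeing a namespace: remove the per-namespace lprocfs
 * directory, free the resource hash table, the name and the namespace
 * structure itself, then drop the module reference taken in
 * ldlm_namespace_new().
 */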
int ldlm_namespace_free_post(struct ldlm_namespace *ns, int force)
{
        ENTRY;
        if (!ns)
                RETURN(ELDLM_OK);

#ifdef LPROCFS
        {
                struct proc_dir_entry *dir;
                dir = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
                if (dir == NULL) {
                        CERROR("dlm namespace %s has no procfs dir?\n",
                               ns->ns_name);
                } else {
                        lprocfs_remove(&dir);
                }
        }
#endif

        OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
        OBD_FREE(ns->ns_name, strlen(ns->ns_name) + 1);
        OBD_FREE_PTR(ns);
        ldlm_put_ref(force);
        RETURN(ELDLM_OK);
}

/* Clean up the resources and free the namespace.
 * bug 12864:
 * Deadlock issue:
 * proc1: destroy import
 *        class_disconnect_export(grab cl_sem) ->
 *              -> ldlm_namespace_free ->
 *              -> lprocfs_remove(grab _lprocfs_lock).
 * proc2: read proc info
 *        lprocfs_fops_read(grab _lprocfs_lock) ->
 *              -> osc_rd_active, etc(grab cl_sem).
 *
 * To avoid this, ldlm_namespace_free() is split into two parts: the first,
 * ldlm_namespace_free_prior(), cleans up the resources that are still in
 * use; the second, ldlm_namespace_free_post(), unregisters the lprocfs
 * entries and then frees the memory.  The second part must be called
 * without cli->cl_sem held.
 */
int ldlm_namespace_free(struct ldlm_namespace *ns, int force)
{
        ldlm_namespace_free_prior(ns);
        ldlm_namespace_free_post(ns, force);
        return ELDLM_OK;
}

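
/*
 * Namespace reference counting.  ns_refcount counts the resources currently
 * allocated in the namespace and is protected by ns_hash_lock; the *_nolock
 * variants assume the caller already holds it.  When the count drops to
 * zero, ldlm_namespace_put() can wake up waiters on ns_waitq (see
 * ldlm_namespace_free_prior()).
 */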
void ldlm_namespace_get_nolock(struct ldlm_namespace *ns)
{
        LASSERT(ns->ns_refcount >= 0);
        ns->ns_refcount++;
}

void ldlm_namespace_get(struct ldlm_namespace *ns)
{
        spin_lock(&ns->ns_hash_lock);
        ldlm_namespace_get_nolock(ns);
        spin_unlock(&ns->ns_hash_lock);
}

void ldlm_namespace_put_nolock(struct ldlm_namespace *ns, int wakeup)
{
        LASSERT(ns->ns_refcount > 0);
        ns->ns_refcount--;
        if (ns->ns_refcount == 0 && wakeup)
                wake_up(&ns->ns_waitq);
}

void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup)
{
        spin_lock(&ns->ns_hash_lock);
        ldlm_namespace_put_nolock(ns, wakeup);
        spin_unlock(&ns->ns_hash_lock);
}

/* Should be called with ldlm_namespace_lock(client) held */
void ldlm_namespace_move(struct ldlm_namespace *ns, ldlm_side_t client)
{
        LASSERT(!list_empty(&ns->ns_list_chain));
        LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
        list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
}

/* Should be called with ldlm_namespace_lock(client) held */
struct ldlm_namespace *ldlm_namespace_first(ldlm_side_t client)
{
        LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
        LASSERT(!list_empty(ldlm_namespace_list(client)));
        return container_of(ldlm_namespace_list(client)->next,
                            struct ldlm_namespace, ns_list_chain);
}
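
/*
 * Hash a resource name (plus the parent resource pointer) into a bucket
 * index for the namespace resource hash table.  The hash is a simple sum of
 * the name components, masked with RES_HASH_MASK.
 */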
static __u32 ldlm_hash_fn(struct ldlm_resource *parent,
                          const struct ldlm_res_id *name)
{
        __u32 hash = 0;
        int i;

        for (i = 0; i < RES_NAME_SIZE; i++)
                hash += name->name[i];

        hash += (__u32)((unsigned long)parent >> 4);

        return (hash & RES_HASH_MASK);
}

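/*
 * Allocate a resource from the slab cache and initialize it: empty lock
 * queues, one interval tree per lock mode, a reference count of 1, and
 * lr_lvb_sem left locked so that readers block until the creator has
 * initialized the LVB (see ldlm_resource_add()).
 */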
static struct ldlm_resource *ldlm_resource_new(void)
{
        struct ldlm_resource *res;
        int idx;

        OBD_SLAB_ALLOC(res, ldlm_resource_slab, CFS_ALLOC_IO, sizeof *res);
        if (res == NULL)
                return NULL;

        memset(res, 0, sizeof(*res));

        CFS_INIT_LIST_HEAD(&res->lr_children);
        CFS_INIT_LIST_HEAD(&res->lr_childof);
        CFS_INIT_LIST_HEAD(&res->lr_granted);
        CFS_INIT_LIST_HEAD(&res->lr_converting);
        CFS_INIT_LIST_HEAD(&res->lr_waiting);

        /* initialize interval trees for each lock mode */
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                res->lr_itree[idx].lit_size = 0;
                res->lr_itree[idx].lit_mode = 1 << idx;
                res->lr_itree[idx].lit_root = NULL;
        }

        atomic_set(&res->lr_refcount, 1);
        spin_lock_init(&res->lr_lock);

        /* The thread that creates the resource must unlock this semaphore
         * after LVB initialization; see ldlm_resource_add(). */
        init_MUTEX_LOCKED(&res->lr_lvb_sem);

        return res;
}

/* must be called with hash lock held */
static struct ldlm_resource *
ldlm_resource_find(struct ldlm_namespace *ns, const struct ldlm_res_id *name,
                   __u32 hash)
{
        struct list_head *bucket, *tmp;
        struct ldlm_resource *res;

        LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
        bucket = ns->ns_hash + hash;

        list_for_each(tmp, bucket) {
                res = list_entry(tmp, struct ldlm_resource, lr_hash);
                if (memcmp(&res->lr_name, name, sizeof(res->lr_name)) == 0)
                        return res;
        }

        return NULL;
}

/* Args: unlocked namespace (takes and releases ns->ns_hash_lock internally)
 * Returns: newly-allocated, referenced, unlocked resource, or the existing
 * resource (with a new reference) if another thread added it first */
static struct ldlm_resource *
ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                  const struct ldlm_res_id *name, __u32 hash, ldlm_type_t type)
{
        struct list_head *bucket;
        struct ldlm_resource *res, *old_res;
        ENTRY;

        LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
                 "type: %d\n", type);

        res = ldlm_resource_new();
        if (!res)
                RETURN(NULL);

        res->lr_name = *name;
        res->lr_namespace = ns;
        res->lr_type = type;
        res->lr_most_restr = LCK_NL;

        spin_lock(&ns->ns_hash_lock);
        old_res = ldlm_resource_find(ns, name, hash);
        if (old_res) {
                /* someone won the race and added the resource before */
                ldlm_resource_getref(old_res);
                spin_unlock(&ns->ns_hash_lock);
                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
                /* synchronize WRT resource creation */
                if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
                        down(&old_res->lr_lvb_sem);
                        up(&old_res->lr_lvb_sem);
                }
                RETURN(old_res);
        }

        /* we won! let's add the resource */
        bucket = ns->ns_hash + hash;
        list_add(&res->lr_hash, bucket);
        ns->ns_resources++;
        ldlm_namespace_get_nolock(ns);

        if (parent == NULL) {
                list_add(&res->lr_childof, &ns->ns_root_list);
        } else {
                res->lr_parent = parent;
                list_add(&res->lr_childof, &parent->lr_children);
        }
        spin_unlock(&ns->ns_hash_lock);

        if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
                int rc;

                OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
                rc = ns->ns_lvbo->lvbo_init(res);
                if (rc)
                        CERROR("lvbo_init failed for resource "
                               LPU64": rc %d\n", name->name[0], rc);
                /* we create resource with locked lr_lvb_sem */
                up(&res->lr_lvb_sem);
        }

        RETURN(res);
}

/* Args: unlocked namespace
 * Locks: takes and releases ns->ns_hash_lock
 * Returns: referenced, unlocked ldlm_resource or NULL */
struct ldlm_resource *
ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                  const struct ldlm_res_id *name, ldlm_type_t type, int create)
{
        __u32 hash = ldlm_hash_fn(parent, name);
        struct ldlm_resource *res = NULL;
        ENTRY;

        LASSERT(ns != NULL);
        LASSERT(ns->ns_hash != NULL);
        LASSERT(name->name[0] != 0);

        spin_lock(&ns->ns_hash_lock);
        res = ldlm_resource_find(ns, name, hash);
        if (res) {
                ldlm_resource_getref(res);
                spin_unlock(&ns->ns_hash_lock);
                /* synchronize WRT resource creation */
                if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
                        down(&res->lr_lvb_sem);
                        up(&res->lr_lvb_sem);
                }
                RETURN(res);
        }
        spin_unlock(&ns->ns_hash_lock);

        if (create == 0)
                RETURN(NULL);

        res = ldlm_resource_add(ns, parent, name, hash, type);
        RETURN(res);
}

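/*
 * Take an extra reference on a resource.  Typical usage of the lookup/ref
 * API in this file (an illustrative sketch only; see the real callers
 * elsewhere in the LDLM code):
 *
 *      struct ldlm_resource *res;
 *
 *      res = ldlm_resource_get(ns, NULL, &name, LDLM_PLAIN, 1);
 *      if (res == NULL)
 *              return -ENOMEM;
 *      lock_res(res);
 *      ...inspect or modify the lock queues...
 *      unlock_res(res);
 *      ldlm_resource_putref(res);
 */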
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
{
        LASSERT(res != NULL);
        LASSERT(res != LP_POISON);
        atomic_inc(&res->lr_refcount);
        CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
               atomic_read(&res->lr_refcount));
        return res;
}

void __ldlm_resource_putref_final(struct ldlm_resource *res)
{
        struct ldlm_namespace *ns = res->lr_namespace;

        LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);

        if (!list_empty(&res->lr_granted)) {
                ldlm_resource_dump(D_ERROR, res);
                LBUG();
        }

        if (!list_empty(&res->lr_converting)) {
                ldlm_resource_dump(D_ERROR, res);
                LBUG();
        }

        if (!list_empty(&res->lr_waiting)) {
                ldlm_resource_dump(D_ERROR, res);
                LBUG();
        }

        if (!list_empty(&res->lr_children)) {
                ldlm_resource_dump(D_ERROR, res);
                LBUG();
        }

        /* Pass 0 here so as not to wake up ->ns_waitq yet; it is woken up a
         * few lines below once ns_resources drops to zero. */
        ldlm_namespace_put_nolock(ns, 0);
        list_del_init(&res->lr_hash);
        list_del_init(&res->lr_childof);

        ns->ns_resources--;
        if (ns->ns_resources == 0)
                wake_up(&ns->ns_waitq);
}

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref(struct ldlm_resource *res)
{
        struct ldlm_namespace *ns = res->lr_namespace;
        int rc = 0;
        ENTRY;

        CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
               atomic_read(&res->lr_refcount) - 1);
        LASSERTF(atomic_read(&res->lr_refcount) > 0, "%d",
                 atomic_read(&res->lr_refcount));
        LASSERTF(atomic_read(&res->lr_refcount) < LI_POISON, "%d",
                 atomic_read(&res->lr_refcount));

        if (atomic_dec_and_lock(&res->lr_refcount, &ns->ns_hash_lock)) {
                __ldlm_resource_putref_final(res);
                spin_unlock(&ns->ns_hash_lock);
                if (res->lr_lvb_data)
                        OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
                rc = 1;
        }

        RETURN(rc);
}

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref_locked(struct ldlm_resource *res)
{
        int rc = 0;
        ENTRY;

        CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
               atomic_read(&res->lr_refcount) - 1);
        LASSERT(atomic_read(&res->lr_refcount) > 0);
        LASSERT(atomic_read(&res->lr_refcount) < LI_POISON);

        LASSERT(atomic_read(&res->lr_refcount) >= 0);
        if (atomic_dec_and_test(&res->lr_refcount)) {
                __ldlm_resource_putref_final(res);
                if (res->lr_lvb_data)
                        OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
                rc = 1;
        }

        RETURN(rc);
}

void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                            struct ldlm_lock *lock)
{
        check_res_locked(res);

        ldlm_resource_dump(D_OTHER, res);
        CDEBUG(D_OTHER, "About to add this lock:\n");
        ldlm_lock_dump(D_OTHER, lock, 0);

        if (lock->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                return;
        }

        LASSERT(list_empty(&lock->l_res_link));

        list_add_tail(&lock->l_res_link, head);
}

void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
                                     struct ldlm_lock *new)
{
        struct ldlm_resource *res = original->l_resource;

        check_res_locked(res);

        ldlm_resource_dump(D_OTHER, res);
        CDEBUG(D_OTHER, "About to insert this lock after %p:\n", original);
        ldlm_lock_dump(D_OTHER, new, 0);

        if (new->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                goto out;
        }

        LASSERT(list_empty(&new->l_res_link));

        list_add(&new->l_res_link, &original->l_res_link);
 out:;
}

void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
{
        int type = lock->l_resource->lr_type;

        check_res_locked(lock->l_resource);
        if (type == LDLM_IBITS || type == LDLM_PLAIN)
                ldlm_unlink_lock_skiplist(lock);
        else if (type == LDLM_EXTENT)
                ldlm_extent_unlink_lock(lock);
        list_del_init(&lock->l_res_link);
}

void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
{
        desc->lr_type = res->lr_type;
        desc->lr_name = res->lr_name;
}

void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
{
        struct list_head *tmp;

        if (!((libcfs_debug | D_ERROR) & level))
                return;

        mutex_down(ldlm_namespace_lock(client));

        list_for_each(tmp, ldlm_namespace_list(client)) {
                struct ldlm_namespace *ns;
                ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
                ldlm_namespace_dump(level, ns);
        }

        mutex_up(ldlm_namespace_lock(client));
}

void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
{
        struct list_head *tmp;

        if (!((libcfs_debug | D_ERROR) & level))
                return;

        CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
               ns->ns_name, ns->ns_refcount,
               ns_is_client(ns) ? "client" : "server");

        if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
                return;

        spin_lock(&ns->ns_hash_lock);
        tmp = ns->ns_root_list.next;
        while (tmp != &ns->ns_root_list) {
                struct ldlm_resource *res;
                res = list_entry(tmp, struct ldlm_resource, lr_childof);

                ldlm_resource_getref(res);
                spin_unlock(&ns->ns_hash_lock);

                lock_res(res);
                ldlm_resource_dump(level, res);
                unlock_res(res);

                spin_lock(&ns->ns_hash_lock);
                tmp = tmp->next;
                ldlm_resource_putref_locked(res);
        }
        ns->ns_next_dump = cfs_time_shift(10);
        spin_unlock(&ns->ns_hash_lock);
}

void ldlm_resource_dump(int level, struct ldlm_resource *res)
{
        struct list_head *tmp;
        int pos;

        CLASSERT(RES_NAME_SIZE == 4);

        if (!((libcfs_debug | D_ERROR) & level))
                return;

        CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
               ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
               res->lr_name.name[2], res->lr_name.name[3],
               atomic_read(&res->lr_refcount));

        if (!list_empty(&res->lr_granted)) {
                pos = 0;
                CDEBUG(level, "Granted locks:\n");
                list_for_each(tmp, &res->lr_granted) {
                        struct ldlm_lock *lock;
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        ldlm_lock_dump(level, lock, ++pos);
                }
        }
        if (!list_empty(&res->lr_converting)) {
                pos = 0;
                CDEBUG(level, "Converting locks:\n");
                list_for_each(tmp, &res->lr_converting) {
                        struct ldlm_lock *lock;
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        ldlm_lock_dump(level, lock, ++pos);
                }
        }
        if (!list_empty(&res->lr_waiting)) {
                pos = 0;
                CDEBUG(level, "Waiting locks:\n");
                list_for_each(tmp, &res->lr_waiting) {
                        struct ldlm_lock *lock;
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        ldlm_lock_dump(level, lock, ++pos);
                }
        }
}