1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see [sun.com URL with a
20  * copy of GPLv2].
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_resource.c
37  *
38  * Author: Phil Schwan <phil@clusterfs.com>
39  * Author: Peter Braam <braam@clusterfs.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43 #ifdef __KERNEL__
44 # include <lustre_dlm.h>
45 #else
46 # include <liblustre.h>
47 #endif
48
49 #include <obd_class.h>
50 #include "ldlm_internal.h"
51
52 cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
53
54 atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0);
55 atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0);
56
57 struct semaphore ldlm_srv_namespace_lock;
58 CFS_LIST_HEAD(ldlm_srv_namespace_list);
59
60 struct semaphore ldlm_cli_namespace_lock;
61 CFS_LIST_HEAD(ldlm_cli_namespace_list);
62
63 cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
64 cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
65 cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL;
66
67 #ifdef LPROCFS
68 static int ldlm_proc_dump_ns(struct file *file, const char *buffer,
69                              unsigned long count, void *data)
70 {
71         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
72         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
73         RETURN(count);
74 }
75
76 int ldlm_proc_setup(void)
77 {
78         int rc;
79         struct lprocfs_vars list[] = {
80                 { "dump_namespaces", NULL, ldlm_proc_dump_ns, NULL },
81                 { NULL }};
82         ENTRY;
83         LASSERT(ldlm_ns_proc_dir == NULL);
84
85         ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
86                                               proc_lustre_root,
87                                               NULL, NULL);
88         if (IS_ERR(ldlm_type_proc_dir)) {
89                 CERROR("LProcFS failed in ldlm-init\n");
90                 rc = PTR_ERR(ldlm_type_proc_dir);
91                 GOTO(err, rc);
92         }
93
94         ldlm_ns_proc_dir = lprocfs_register("namespaces",
95                                             ldlm_type_proc_dir,
96                                             NULL, NULL);
97         if (IS_ERR(ldlm_ns_proc_dir)) {
98                 CERROR("LProcFS failed in ldlm-init\n");
99                 rc = PTR_ERR(ldlm_ns_proc_dir);
100                 GOTO(err_type, rc);
101         }
102
103         ldlm_svc_proc_dir = lprocfs_register("services",
104                                             ldlm_type_proc_dir,
105                                             NULL, NULL);
106         if (IS_ERR(ldlm_svc_proc_dir)) {
107                 CERROR("LProcFS failed in ldlm-init\n");
108                 rc = PTR_ERR(ldlm_svc_proc_dir);
109                 GOTO(err_ns, rc);
110         }
111
112         rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
113
114         RETURN(0);
115
116 err_ns:
117         lprocfs_remove(&ldlm_ns_proc_dir);
118 err_type:
119         lprocfs_remove(&ldlm_type_proc_dir);
120 err:
121         ldlm_svc_proc_dir = ldlm_ns_proc_dir = ldlm_type_proc_dir = NULL;
122         RETURN(rc);
123 }
124
125 void ldlm_proc_cleanup(void)
126 {
127         if (ldlm_svc_proc_dir)
128                 lprocfs_remove(&ldlm_svc_proc_dir);
129
130         if (ldlm_ns_proc_dir)
131                 lprocfs_remove(&ldlm_ns_proc_dir);
132
133         if (ldlm_type_proc_dir)
134                 lprocfs_remove(&ldlm_type_proc_dir);
135 }
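
/*
 * Editor's note (not part of the original file): with the registrations in
 * ldlm_proc_setup() above, the resulting proc tree is expected to look
 * roughly as sketched below.  The "/proc/fs/lustre" root depends on
 * proc_lustre_root and OBD_LDLM_DEVICENAME and is an assumption here.
 *
 *   /proc/fs/lustre/ldlm/
 *       dump_namespaces    write-only trigger, handled by ldlm_proc_dump_ns()
 *       namespaces/        per-namespace entries, see ldlm_proc_namespace()
 *       services/          populated by the LDLM service setup code
 */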
136
137 static int lprocfs_rd_lru_size(char *page, char **start, off_t off,
138                                int count, int *eof, void *data)
139 {
140         struct ldlm_namespace *ns = data;
141         __u32 *nr = &ns->ns_max_unused;
142
143         if (ns_connect_lru_resize(ns))
144                 nr = &ns->ns_nr_unused;
145         return lprocfs_rd_uint(page, start, off, count, eof, nr);
146 }
147
148 static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
149                                unsigned long count, void *data)
150 {
151         struct ldlm_namespace *ns = data;
152         char dummy[MAX_STRING_SIZE + 1], *end;
153         unsigned long tmp;
154         int lru_resize;
155
156         dummy[MAX_STRING_SIZE] = '\0';
157         if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
158                 return -EFAULT;
159
160         if (strncmp(dummy, "clear", 5) == 0) {
161                 CDEBUG(D_DLMTRACE,
162                        "dropping all unused locks from namespace %s\n",
163                        ns->ns_name);
164                 if (ns_connect_lru_resize(ns)) {
165                         int canceled, unused  = ns->ns_nr_unused;
166
167                         /* Try to cancel all @ns_nr_unused locks. */
168                         canceled = ldlm_cancel_lru(ns, unused, LDLM_SYNC,
169                                                    LDLM_CANCEL_PASSED);
170                         if (canceled < unused) {
171                                 CERROR("not all requested locks are canceled, "
172                                        "requested: %d, canceled: %d\n", unused,
173                                        canceled);
174                                 return -EINVAL;
175                         }
176                 } else {
177                         tmp = ns->ns_max_unused;
178                         ns->ns_max_unused = 0;
179                         ldlm_cancel_lru(ns, 0, LDLM_SYNC, LDLM_CANCEL_PASSED);
180                         ns->ns_max_unused = tmp;
181                 }
182                 return count;
183         }
184
185         tmp = simple_strtoul(dummy, &end, 0);
186         if (dummy == end) {
187                 CERROR("invalid value written\n");
188                 return -EINVAL;
189         }
190         lru_resize = (tmp == 0);
191
192         if (ns_connect_lru_resize(ns)) {
193                 if (!lru_resize)
194                         ns->ns_max_unused = (unsigned int)tmp;
195
196                 if (tmp > ns->ns_nr_unused)
197                         tmp = ns->ns_nr_unused;
198                 tmp = ns->ns_nr_unused - tmp;
199
200                 CDEBUG(D_DLMTRACE,
201                        "changing namespace %s unused locks from %u to %u\n",
202                        ns->ns_name, ns->ns_nr_unused, (unsigned int)tmp);
203                 ldlm_cancel_lru(ns, tmp, LDLM_ASYNC, LDLM_CANCEL_PASSED);
204
205                 if (!lru_resize) {
206                         CDEBUG(D_DLMTRACE,
207                                "disable lru_resize for namespace %s\n",
208                                ns->ns_name);
209                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
210                 }
211         } else {
212                 CDEBUG(D_DLMTRACE,
213                        "changing namespace %s max_unused from %u to %u\n",
214                        ns->ns_name, ns->ns_max_unused, (unsigned int)tmp);
215                 ns->ns_max_unused = (unsigned int)tmp;
216                 ldlm_cancel_lru(ns, 0, LDLM_ASYNC, LDLM_CANCEL_PASSED);
217
218                 /* Make sure LRU resize was originally supported by the
219                  * connection before turning it on here. */
220                 if (lru_resize &&
221                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
222                         CDEBUG(D_DLMTRACE,
223                                "enable lru_resize for namespace %s\n",
224                                ns->ns_name);
225                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
226                 }
227         }
228
229         return count;
230 }
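
/*
 * Illustrative sketch (not part of the original file): how the lru_size
 * handler above is typically driven from userspace.  The proc path is an
 * assumption based on the "namespaces" directory registered in
 * ldlm_proc_setup(); writing "clear" drops all unused locks, writing a
 * number sets ns_max_unused, and writing 0 re-enables LRU resize when the
 * server advertised OBD_CONNECT_LRU_RESIZE.
 */
#if 0   /* example only, never built as part of this file */
#include <stdio.h>

static int example_set_lru_size(const char *ns_name, const char *value)
{
        char path[256];
        FILE *f;
        int rc = 0;

        snprintf(path, sizeof(path),
                 "/proc/fs/lustre/ldlm/namespaces/%s/lru_size", ns_name);
        f = fopen(path, "w");
        if (f == NULL)
                return -1;
        if (fputs(value, f) == EOF)     /* e.g. "clear", "0" or "1600" */
                rc = -1;
        fclose(f);
        return rc;
}
#endif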
231
232 void ldlm_proc_namespace(struct ldlm_namespace *ns)
233 {
234         struct lprocfs_vars lock_vars[2];
235         char lock_name[MAX_STRING_SIZE + 1];
236
237         LASSERT(ns != NULL);
238         LASSERT(ns->ns_name != NULL);
239
240         lock_name[MAX_STRING_SIZE] = '\0';
241
242         memset(lock_vars, 0, sizeof(lock_vars));
243         lock_vars[0].name = lock_name;
244
245         snprintf(lock_name, MAX_STRING_SIZE, "%s/resource_count", ns->ns_name);
246         lock_vars[0].data = &ns->ns_refcount;
247         lock_vars[0].read_fptr = lprocfs_rd_atomic;
248         lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
249
250         snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_count", ns->ns_name);
251         lock_vars[0].data = &ns->ns_locks;
252         lock_vars[0].read_fptr = lprocfs_rd_atomic;
253         lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
254
255         if (ns_is_client(ns)) {
256                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_unused_count",
257                          ns->ns_name);
258                 lock_vars[0].data = &ns->ns_nr_unused;
259                 lock_vars[0].read_fptr = lprocfs_rd_uint;
260                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
261
262                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_size",
263                          ns->ns_name);
264                 lock_vars[0].data = ns;
265                 lock_vars[0].read_fptr = lprocfs_rd_lru_size;
266                 lock_vars[0].write_fptr = lprocfs_wr_lru_size;
267                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
268
269                 snprintf(lock_name, MAX_STRING_SIZE, "%s/shrink_thumb",
270                          ns->ns_name);
271                 lock_vars[0].data = ns;
272                 lock_vars[0].read_fptr = lprocfs_rd_uint;
273                 lock_vars[0].write_fptr = lprocfs_wr_uint;
274                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
275
276                 snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_max_age",
277                          ns->ns_name);
278                 lock_vars[0].data = &ns->ns_max_age;
279                 lock_vars[0].read_fptr = lprocfs_rd_uint;
280                 lock_vars[0].write_fptr = lprocfs_wr_uint;
281                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
282         } else {
283                 snprintf(lock_name, MAX_STRING_SIZE, "%s/ctime_age_limit",
284                          ns->ns_name);
285                 lock_vars[0].data = &ns->ns_ctime_age_limit;
286                 lock_vars[0].read_fptr = lprocfs_rd_uint;
287                 lock_vars[0].write_fptr = lprocfs_wr_uint;
288                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
289
290                 snprintf(lock_name, MAX_STRING_SIZE, "%s/max_nolock_bytes",
291                          ns->ns_name);
292                 lock_vars[0].data = &ns->ns_max_nolock_size;
293                 lock_vars[0].read_fptr = lprocfs_rd_uint;
294                 lock_vars[0].write_fptr = lprocfs_wr_uint;
295                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
296
297                 snprintf(lock_name, MAX_STRING_SIZE, "%s/contention_seconds",
298                          ns->ns_name);
299                 lock_vars[0].data = &ns->ns_contention_time;
300                 lock_vars[0].read_fptr = lprocfs_rd_uint;
301                 lock_vars[0].write_fptr = lprocfs_wr_uint;
302                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
303
304                 snprintf(lock_name, MAX_STRING_SIZE, "%s/contended_locks",
305                          ns->ns_name);
306                 lock_vars[0].data = &ns->ns_contended_locks;
307                 lock_vars[0].read_fptr = lprocfs_rd_uint;
308                 lock_vars[0].write_fptr = lprocfs_wr_uint;
309                 lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
310         }
311 }
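
/*
 * Editor's note (not part of the original file): per namespace, the code
 * above creates roughly the following entries under
 * /proc/fs/lustre/ldlm/namespaces/<name>/ (the root path is an assumption):
 *
 *   resource_count, lock_count                        both sides
 *   lock_unused_count, lru_size, shrink_thumb,
 *   lru_max_age                                       client namespaces only
 *   ctime_age_limit, max_nolock_bytes,
 *   contention_seconds, contended_locks               server namespaces only
 */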
312 #undef MAX_STRING_SIZE
313 #else
314 #define ldlm_proc_namespace(ns) do {} while (0)
315 #endif /* LPROCFS */
316
317 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
318                                           ldlm_side_t client, ldlm_appetite_t apt)
319 {
320         struct ldlm_namespace *ns = NULL;
321         struct list_head *bucket;
322         int rc, idx, namelen;
323         ENTRY;
324
325         rc = ldlm_get_ref();
326         if (rc) {
327                 CERROR("ldlm_get_ref failed: %d\n", rc);
328                 RETURN(NULL);
329         }
330
331         OBD_ALLOC_PTR(ns);
332         if (!ns)
333                 GOTO(out_ref, NULL);
334
335         OBD_VMALLOC(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
336         if (!ns->ns_hash)
337                 GOTO(out_ns, NULL);
338
339         ns->ns_shrink_thumb = LDLM_LOCK_SHRINK_THUMB;
340         ns->ns_appetite = apt;
341
342         LASSERT(obd != NULL);
343         ns->ns_obd = obd;
344
345         namelen = strlen(name);
346         OBD_ALLOC(ns->ns_name, namelen + 1);
347         if (!ns->ns_name)
348                 GOTO(out_hash, NULL);
349
350         strcpy(ns->ns_name, name);
351
352         CFS_INIT_LIST_HEAD(&ns->ns_root_list);
353         CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
354         ns->ns_refcount = 0;
355         ns->ns_client = client;
356         spin_lock_init(&ns->ns_hash_lock);
357         atomic_set(&ns->ns_locks, 0);
358         ns->ns_resources = 0;
359         cfs_waitq_init(&ns->ns_waitq);
360         ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
361         ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
362         ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;
363
364         for (bucket = ns->ns_hash + RES_HASH_SIZE - 1; bucket >= ns->ns_hash;
365              bucket--)
366                 CFS_INIT_LIST_HEAD(bucket);
367
368         CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
369         ns->ns_nr_unused = 0;
370         ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
371         ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
372         ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
373         spin_lock_init(&ns->ns_unused_lock);
374         ns->ns_orig_connect_flags = 0;
375         ns->ns_connect_flags = 0;
376         ldlm_proc_namespace(ns);
377
378         idx = atomic_read(ldlm_namespace_nr(client));
379         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
380         if (rc) {
381                 CERROR("Can't initialize lock pool, rc %d\n", rc);
382                 GOTO(out_proc, rc);
383         }
384
385         at_init(&ns->ns_at_estimate, ldlm_enqueue_min, 0);
386
387         ldlm_namespace_register(ns, client);
388         RETURN(ns);
389 out_proc:
390         ldlm_namespace_cleanup(ns, 0);
391         OBD_FREE(ns->ns_name, namelen + 1);
392 out_hash:
393         OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
394 out_ns:
395         OBD_FREE_PTR(ns);
396 out_ref:
397         ldlm_put_ref();
398         RETURN(NULL);
399 }
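
/*
 * Illustrative sketch (not part of the original file): how a client-side
 * obd might create its namespace with ldlm_namespace_new().  The namespace
 * name and the GREEDY appetite are example values only; teardown goes
 * through ldlm_namespace_free(), sketched further below.
 */
#if 0   /* example only, never built as part of this file */
static int example_setup_namespace(struct obd_device *obd)
{
        obd->obd_namespace = ldlm_namespace_new(obd, "example-client-ns",
                                                LDLM_NAMESPACE_CLIENT,
                                                LDLM_NAMESPACE_GREEDY);
        if (obd->obd_namespace == NULL)
                return -ENOMEM;
        return 0;
}
#endif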
400
401 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
402
403 /* If flags contains FL_LOCAL_ONLY, don't try to tell the server; just clean up.
404  * This is currently only used for recovery, and we make certain assumptions
405  * as a result -- notably, that we shouldn't cancel locks with refs. -phil
406  *
407  * Called with the ns_lock held. */
408 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
409                              int flags)
410 {
411         struct list_head *tmp;
412         int rc = 0, client = ns_is_client(res->lr_namespace);
413         int local_only = (flags & LDLM_FL_LOCAL_ONLY);
414         ENTRY;
415
416
417         do {
418                 struct ldlm_lock *lock = NULL;
419
420                 /* first, look for a lock that has not been cleaned up yet;
421                  * all cleaned locks are marked with the CLEANED flag */
422                 lock_res(res);
423                 list_for_each(tmp, q) {
424                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
425                         if (lock->l_flags & LDLM_FL_CLEANED) {
426                                 lock = NULL;
427                                 continue;
428                         }
429                         LDLM_LOCK_GET(lock);
430                         lock->l_flags |= LDLM_FL_CLEANED;
431                         break;
432                 }
433
434                 if (lock == NULL) {
435                         unlock_res(res);
436                         break;
437                 }
438
439                 /* Set CBPENDING so nothing in the cancellation path
440                  * can match this lock */
441                 lock->l_flags |= LDLM_FL_CBPENDING;
442                 lock->l_flags |= LDLM_FL_FAILED;
443                 lock->l_flags |= flags;
444
445                 /* ... without sending a CANCEL message for local_only. */
446                 if (local_only)
447                         lock->l_flags |= LDLM_FL_LOCAL_ONLY;
448
449                 if (local_only && (lock->l_readers || lock->l_writers)) {
450                         /* This is a little bit gross, but much better than the
451                          * alternative: pretend that we got a blocking AST from
452                          * the server, so that when the lock is decref'd, it
453                          * will go away ... */
454                         unlock_res(res);
455                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
456                         if (lock->l_completion_ast)
457                                 lock->l_completion_ast(lock, 0, NULL);
458                         LDLM_LOCK_PUT(lock);
459                         continue;
460                 }
461
462                 if (client) {
463                         struct lustre_handle lockh;
464
465                         unlock_res(res);
466                         ldlm_lock2handle(lock, &lockh);
467                         rc = ldlm_cli_cancel(&lockh);
468                         if (rc)
469                                 CERROR("ldlm_cli_cancel: %d\n", rc);
470                 } else {
471                         ldlm_resource_unlink_lock(lock);
472                         unlock_res(res);
473                         LDLM_DEBUG(lock, "Freeing a lock still held by a "
474                                    "client node");
475                         ldlm_lock_destroy(lock);
476                 }
477                 LDLM_LOCK_PUT(lock);
478         } while (1);
479
480         EXIT;
481 }
482
483 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
484 {
485         struct list_head *tmp;
486         int i;
487
488         if (ns == NULL) {
489                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
490                 return ELDLM_OK;
491         }
492
493         for (i = 0; i < RES_HASH_SIZE; i++) {
494                 spin_lock(&ns->ns_hash_lock);
495                 tmp = ns->ns_hash[i].next;
496                 while (tmp != &(ns->ns_hash[i])) {
497                         struct ldlm_resource *res;
498                         res = list_entry(tmp, struct ldlm_resource, lr_hash);
499                         ldlm_resource_getref(res);
500                         spin_unlock(&ns->ns_hash_lock);
501
502                         cleanup_resource(res, &res->lr_granted, flags);
503                         cleanup_resource(res, &res->lr_converting, flags);
504                         cleanup_resource(res, &res->lr_waiting, flags);
505
506                         spin_lock(&ns->ns_hash_lock);
507                         tmp  = tmp->next;
508
509                         /* XXX: the former code caused problems when
510                          * ldlm_namespace_cleanup() raced with lockd() and a
511                          * client received a blocking AST for a lock the
512                          * server was already tearing down. This is the 1.4
513                          * branch solution; let's see how it behaves. */
514                         if (!ldlm_resource_putref_locked(res))
515                                 CDEBUG(D_INFO,
516                                        "Namespace %s resource refcount nonzero "
517                                        "(%d) after lock cleanup; forcing cleanup.\n",
518                                        ns->ns_name, atomic_read(&res->lr_refcount));
519                 }
520                 spin_unlock(&ns->ns_hash_lock);
521         }
522
523         return ELDLM_OK;
524 }
525
526 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
527 {
528         ENTRY;
529
530         /* At shutdown time, don't call the cancellation callback */
531         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
532
533         if (ns->ns_refcount > 0) {
534                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
535                 int rc;
536                 CDEBUG(D_DLMTRACE,
537                        "dlm namespace %s free waiting on refcount %d\n",
538                        ns->ns_name, ns->ns_refcount);
539 force_wait:
540                 if (force)
541                         lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
542
543                 rc = l_wait_event(ns->ns_waitq,
544                                   ns->ns_refcount == 0, &lwi);
545
546                 /* Forced cleanups should be able to reclaim all references,
547                  * so it's safe to wait forever... we can't leak locks... */
548                 if (force && rc == -ETIMEDOUT) {
549                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
550                                        "namespace with %d resources in use, "
551                                        "(rc=%d)\n", ns->ns_name,
552                                        ns->ns_refcount, rc);
553                         GOTO(force_wait, rc);
554                 }
555
556                 if (ns->ns_refcount) {
557                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
558                                        "with %d resources in use, (rc=%d)\n",
559                                        ns->ns_name,
560                                        ns->ns_refcount, rc);
561                         RETURN(ELDLM_NAMESPACE_EXISTS);
562                 }
563                 CDEBUG(D_DLMTRACE,
564                        "dlm namespace %s free done waiting\n", ns->ns_name);
565         }
566
567         RETURN(ELDLM_OK);
568 }
569
570 /**
571  * Performs various cleanups on the passed \a ns so that it drops its refcount
572  * and becomes ready for freeing. Waits for refcount == 0.
573  *
574  * The following is done:
575  * (0) Unregister \a ns from its list, making it inaccessible to potential
576  * users such as the pools thread;
577  * (1) Clear all locks in \a ns.
578  */
579 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
580                                struct obd_import *imp,
581                                int force)
582 {
583         int rc;
584         ENTRY;
585         if (!ns) {
586                 EXIT;
587                 return;
588         }
589
590         /*
591          * Make sure that nobody can find this ns in its list.
592          */
593         ldlm_namespace_unregister(ns, ns->ns_client);
594
595         /*
596          * Can fail with -EINTR when force == 0 in which case try harder.
597          */
598         rc = __ldlm_namespace_free(ns, force);
599         if (rc != ELDLM_OK) {
600                 if (imp) {
601                         ptlrpc_disconnect_import(imp, 0);
602                         ptlrpc_invalidate_import(imp);
603                 }
604
605                 /*
606                  * With all requests dropped and the import inactive
607                  * we are guaranteed that all references will be dropped.
608                  */
609                 rc = __ldlm_namespace_free(ns, 1);
610                 LASSERT(rc == 0);
611         }
612         EXIT;
613 }
614
615 /**
616  * Frees memory structures related to \a ns. This is only done once
617  * ldlm_namespace_free_prior() has successfully removed all resources
618  * referencing \a ns and its refcount has reached 0.
619  */
620 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
621 {
622         ENTRY;
623         if (!ns) {
624                 EXIT;
625                 return;
626         }
627
628         /*
629          * Fini the pool _before_ the parent proc dir is removed. This is
630          * important because ldlm_pool_fini() removes its own proc dir, which
631          * is a child of @dir; removing it after @dir may cause an oops.
632          */
633         ldlm_pool_fini(&ns->ns_pool);
634
635 #ifdef LPROCFS
636         {
637                 struct proc_dir_entry *dir;
638                 dir = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
639                 if (dir == NULL) {
640                         CERROR("dlm namespace %s has no procfs dir?\n",
641                                ns->ns_name);
642                 } else {
643                         lprocfs_remove(&dir);
644                 }
645         }
646 #endif
647
648         OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
649         OBD_FREE(ns->ns_name, strlen(ns->ns_name) + 1);
650
651         /*
652          * Namespace \a ns should not be on any list at this point; otherwise
653          * the pools thread could end up using the freed \a ns.
654          */
655         LASSERT(list_empty(&ns->ns_list_chain));
656         OBD_FREE_PTR(ns);
657         ldlm_put_ref();
658         EXIT;
659 }
660
661
662 /* Clean up the resources and free the namespace.
663  * bug 12864:
664  * Deadlock issue:
665  * proc1: destroy import
666  *        class_disconnect_export(grab cl_sem) ->
667  *              -> ldlm_namespace_free ->
668  *              -> lprocfs_remove(grab _lprocfs_lock).
669  * proc2: read proc info
670  *        lprocfs_fops_read(grab _lprocfs_lock) ->
671  *              -> osc_rd_active, etc(grab cl_sem).
672  *
673  * Therefore ldlm_namespace_free is split into two parts: the first part,
674  * ldlm_namespace_free_prior, cleans up the resources that are still in
675  * use; the second part, ldlm_namespace_free_post, unregisters the lprocfs
676  * entries and then frees the memory. The latter is called without
677  * cli->cl_sem held.
678  */
679 void ldlm_namespace_free(struct ldlm_namespace *ns,
680                          struct obd_import *imp,
681                          int force)
682 {
683         ldlm_namespace_free_prior(ns, imp, force);
684         ldlm_namespace_free_post(ns);
685 }
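
/*
 * Illustrative sketch (not part of the original file): the two-phase
 * calling pattern motivated by the bug 12864 note above.  A hypothetical
 * client teardown path runs ldlm_namespace_free_prior() while it may
 * still hold cl_sem, then drops the semaphore before calling
 * ldlm_namespace_free_post(), so that lprocfs_remove() cannot deadlock
 * against a concurrent proc read that takes cl_sem.
 */
#if 0   /* example only, never built as part of this file */
static void example_two_phase_teardown(struct obd_device *obd,
                                       struct obd_import *imp)
{
        /* phase 1: cancel and clean up all locks in the namespace */
        ldlm_namespace_free_prior(obd->obd_namespace, imp, 1 /* force */);

        /* ... cl_sem is released by the caller here ... */

        /* phase 2: remove lprocfs entries and free the memory */
        ldlm_namespace_free_post(obd->obd_namespace);
        obd->obd_namespace = NULL;
}
#endif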
686
687
688 void ldlm_namespace_get_locked(struct ldlm_namespace *ns)
689 {
690         LASSERT(ns->ns_refcount >= 0);
691         ns->ns_refcount++;
692 }
693
694 void ldlm_namespace_get(struct ldlm_namespace *ns)
695 {
696         spin_lock(&ns->ns_hash_lock);
697         ldlm_namespace_get_locked(ns);
698         spin_unlock(&ns->ns_hash_lock);
699 }
700
701 void ldlm_namespace_put_locked(struct ldlm_namespace *ns, int wakeup)
702 {
703         LASSERT(ns->ns_refcount > 0);
704         ns->ns_refcount--;
705         if (ns->ns_refcount == 0 && wakeup)
706                 wake_up(&ns->ns_waitq);
707 }
708
709 void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup)
710 {
711         spin_lock(&ns->ns_hash_lock);
712         ldlm_namespace_put_locked(ns, wakeup);
713         spin_unlock(&ns->ns_hash_lock);
714 }
715
716 /* Register @ns in the list of namespaces */
717 void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
718 {
719         mutex_down(ldlm_namespace_lock(client));
720         LASSERT(list_empty(&ns->ns_list_chain));
721         list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
722         atomic_inc(ldlm_namespace_nr(client));
723         mutex_up(ldlm_namespace_lock(client));
724 }
725
726 /* Unregister @ns from the list of namespaces */
727 void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
728 {
729         mutex_down(ldlm_namespace_lock(client));
730         LASSERT(!list_empty(&ns->ns_list_chain));
731         /*
732          * Some asserts and possibly other parts of code still using
733          * list_empty(&ns->ns_list_chain). This is why it is important
734          * to use list_del_init() here.
735          */
736         list_del_init(&ns->ns_list_chain);
737         atomic_dec(ldlm_namespace_nr(client));
738         mutex_up(ldlm_namespace_lock(client));
739 }
740
741 /* Must be called with ldlm_namespace_lock(client) held */
742 void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client)
743 {
744         LASSERT(!list_empty(&ns->ns_list_chain));
745         LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
746         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
747 }
748
749 /* Must be called with ldlm_namespace_lock(client) held */
750 struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
751 {
752         LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
753         LASSERT(!list_empty(ldlm_namespace_list(client)));
754         return container_of(ldlm_namespace_list(client)->next,
755                 struct ldlm_namespace, ns_list_chain);
756 }
757 static __u32 ldlm_hash_fn(struct ldlm_resource *parent,
758                           const struct ldlm_res_id *name)
759 {
760         __u32 hash = 0;
761         int i;
762
763         for (i = 0; i < RES_NAME_SIZE; i++)
764                 hash += name->name[i];
765
766         hash += (__u32)((unsigned long)parent >> 4);
767
768         return (hash & RES_HASH_MASK);
769 }
770
771 static struct ldlm_resource *ldlm_resource_new(void)
772 {
773         struct ldlm_resource *res;
774         int idx;
775
776         OBD_SLAB_ALLOC(res, ldlm_resource_slab, CFS_ALLOC_IO, sizeof *res);
777         if (res == NULL)
778                 return NULL;
779
780         memset(res, 0, sizeof(*res));
781
782         CFS_INIT_LIST_HEAD(&res->lr_children);
783         CFS_INIT_LIST_HEAD(&res->lr_childof);
784         CFS_INIT_LIST_HEAD(&res->lr_granted);
785         CFS_INIT_LIST_HEAD(&res->lr_converting);
786         CFS_INIT_LIST_HEAD(&res->lr_waiting);
787
788         /* initialize interval trees for each lock mode */
789         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
790                 res->lr_itree[idx].lit_size = 0;
791                 res->lr_itree[idx].lit_mode = 1 << idx;
792                 res->lr_itree[idx].lit_root = NULL;
793         }
794
795         atomic_set(&res->lr_refcount, 1);
796         spin_lock_init(&res->lr_lock);
797
798         /* whoever creates the resource must unlock
799          * the semaphore after LVB initialization */
800         init_MUTEX_LOCKED(&res->lr_lvb_sem);
801
802         return res;
803 }
804
805 /* must be called with hash lock held */
806 static struct ldlm_resource *
807 ldlm_resource_find(struct ldlm_namespace *ns, const struct ldlm_res_id *name,
808                    __u32 hash)
809 {
810         struct list_head *bucket, *tmp;
811         struct ldlm_resource *res;
812
813         LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
814         bucket = ns->ns_hash + hash;
815
816         list_for_each(tmp, bucket) {
817                 res = list_entry(tmp, struct ldlm_resource, lr_hash);
818                 if (memcmp(&res->lr_name, name, sizeof(res->lr_name)) == 0)
819                         return res;
820         }
821
822         return NULL;
823 }
824
825 /* Args: locked namespace
826  * Returns: newly-allocated, referenced, unlocked resource */
827 static struct ldlm_resource *
828 ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
829                   const struct ldlm_res_id *name, __u32 hash, ldlm_type_t type)
830 {
831         struct list_head *bucket;
832         struct ldlm_resource *res, *old_res;
833         ENTRY;
834
835         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
836                  "type: %d\n", type);
837
838         res = ldlm_resource_new();
839         if (!res)
840                 RETURN(NULL);
841
842         res->lr_name = *name;
843         res->lr_namespace = ns;
844         res->lr_type = type;
845         res->lr_most_restr = LCK_NL;
846
847         spin_lock(&ns->ns_hash_lock);
848         old_res = ldlm_resource_find(ns, name, hash);
849         if (old_res) {
850                 /* someone won the race and added the resource before */
851                 ldlm_resource_getref(old_res);
852                 spin_unlock(&ns->ns_hash_lock);
853                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
854                 /* synchronize WRT resource creation */
855                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
856                         down(&old_res->lr_lvb_sem);
857                         up(&old_res->lr_lvb_sem);
858                 }
859                 RETURN(old_res);
860         }
861
862         /* we won! let's add the resource */
863         bucket = ns->ns_hash + hash;
864         list_add(&res->lr_hash, bucket);
865         ns->ns_resources++;
866         ldlm_namespace_get_locked(ns);
867
868         if (parent == NULL) {
869                 list_add(&res->lr_childof, &ns->ns_root_list);
870         } else {
871                 res->lr_parent = parent;
872                 list_add(&res->lr_childof, &parent->lr_children);
873         }
874         spin_unlock(&ns->ns_hash_lock);
875
876         if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
877                 int rc;
878
879                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
880                 rc = ns->ns_lvbo->lvbo_init(res);
881                 if (rc)
882                         CERROR("lvbo_init failed for resource "
883                                LPU64": rc %d\n", name->name[0], rc);
884                 /* we create resource with locked lr_lvb_sem */
885                 up(&res->lr_lvb_sem);
886         }
887
888         RETURN(res);
889 }
890
891 /* Args: unlocked namespace
892  * Locks: takes and releases ns->ns_lock and res->lr_lock
893  * Returns: referenced, unlocked ldlm_resource or NULL */
894 struct ldlm_resource *
895 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
896                   const struct ldlm_res_id *name, ldlm_type_t type, int create)
897 {
898         __u32 hash = ldlm_hash_fn(parent, name);
899         struct ldlm_resource *res = NULL;
900         ENTRY;
901
902         LASSERT(ns != NULL);
903         LASSERT(ns->ns_hash != NULL);
904         LASSERT(name->name[0] != 0);
905
906         spin_lock(&ns->ns_hash_lock);
907         res = ldlm_resource_find(ns, name, hash);
908         if (res) {
909                 ldlm_resource_getref(res);
910                 spin_unlock(&ns->ns_hash_lock);
911                 /* synchronize WRT resource creation */
912                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
913                         down(&res->lr_lvb_sem);
914                         up(&res->lr_lvb_sem);
915                 }
916                 RETURN(res);
917         }
918         spin_unlock(&ns->ns_hash_lock);
919
920         if (create == 0)
921                 RETURN(NULL);
922
923         res = ldlm_resource_add(ns, parent, name, hash, type);
924         RETURN(res);
925 }
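
/*
 * Illustrative sketch (not part of the original file): a typical
 * lookup/putref pairing around ldlm_resource_get().  The resource id
 * value and the LDLM_PLAIN type are example choices only.
 */
#if 0   /* example only, never built as part of this file */
static void example_resource_lookup(struct ldlm_namespace *ns)
{
        struct ldlm_res_id res_id = { .name = { 42, 0, 0, 0 } };
        struct ldlm_resource *res;

        /* create == 1: allocate and hash the resource if it is not found */
        res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
        if (res == NULL)
                return;

        /* ... use the resource under lock_res()/unlock_res() here ... */

        ldlm_resource_putref(res);
}
#endif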
926
927 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
928 {
929         LASSERT(res != NULL);
930         LASSERT(res != LP_POISON);
931         atomic_inc(&res->lr_refcount);
932         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
933                atomic_read(&res->lr_refcount));
934         return res;
935 }
936
937 void __ldlm_resource_putref_final(struct ldlm_resource *res)
938 {
939         struct ldlm_namespace *ns = res->lr_namespace;
940
941         LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
942
943         if (!list_empty(&res->lr_granted)) {
944                 ldlm_resource_dump(D_ERROR, res);
945                 LBUG();
946         }
947
948         if (!list_empty(&res->lr_converting)) {
949                 ldlm_resource_dump(D_ERROR, res);
950                 LBUG();
951         }
952
953         if (!list_empty(&res->lr_waiting)) {
954                 ldlm_resource_dump(D_ERROR, res);
955                 LBUG();
956         }
957
958         if (!list_empty(&res->lr_children)) {
959                 ldlm_resource_dump(D_ERROR, res);
960                 LBUG();
961         }
962
963         /* Pass 0 here so as not to wake ->ns_waitq yet; we will do it a
964          * few lines below when all children are freed. */
965         ldlm_namespace_put_locked(ns, 0);
966         list_del_init(&res->lr_hash);
967         list_del_init(&res->lr_childof);
968
969         ns->ns_resources--;
970         if (ns->ns_resources == 0)
971                 wake_up(&ns->ns_waitq);
972 }
973
974 /* Returns 1 if the resource was freed, 0 if it remains. */
975 int ldlm_resource_putref(struct ldlm_resource *res)
976 {
977         struct ldlm_namespace *ns = res->lr_namespace;
978         int rc = 0;
979         ENTRY;
980
981         CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
982                atomic_read(&res->lr_refcount) - 1);
983         LASSERTF(atomic_read(&res->lr_refcount) > 0, "%d",
984                  atomic_read(&res->lr_refcount));
985         LASSERTF(atomic_read(&res->lr_refcount) < LI_POISON, "%d",
986                  atomic_read(&res->lr_refcount));
987
988         if (atomic_dec_and_lock(&res->lr_refcount, &ns->ns_hash_lock)) {
989                 __ldlm_resource_putref_final(res);
990                 spin_unlock(&ns->ns_hash_lock);
991                 if (res->lr_lvb_data)
992                         OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
993                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
994                 rc = 1;
995         }
996
997         RETURN(rc);
998 }
999
1000 /* Returns 1 if the resource was freed, 0 if it remains. */
1001 int ldlm_resource_putref_locked(struct ldlm_resource *res)
1002 {
1003         int rc = 0;
1004         ENTRY;
1005
1006         CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
1007                atomic_read(&res->lr_refcount) - 1);
1008         LASSERT(atomic_read(&res->lr_refcount) > 0);
1009         LASSERT(atomic_read(&res->lr_refcount) < LI_POISON);
1010
1011         LASSERT(atomic_read(&res->lr_refcount) >= 0);
1012         if (atomic_dec_and_test(&res->lr_refcount)) {
1013                 __ldlm_resource_putref_final(res);
1014                 if (res->lr_lvb_data)
1015                         OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
1016                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1017                 rc = 1;
1018         }
1019
1020         RETURN(rc);
1021 }
1022
1023 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1024                             struct ldlm_lock *lock)
1025 {
1026         check_res_locked(res);
1027
1028         ldlm_resource_dump(D_INFO, res);
1029         CDEBUG(D_INFO, "About to add this lock:\n");
1030         ldlm_lock_dump(D_INFO, lock, 0);
1031
1032         if (lock->l_destroyed) {
1033                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1034                 return;
1035         }
1036
1037         LASSERT(list_empty(&lock->l_res_link));
1038
1039         list_add_tail(&lock->l_res_link, head);
1040 }
1041
1042 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1043                                      struct ldlm_lock *new)
1044 {
1045         struct ldlm_resource *res = original->l_resource;
1046
1047         check_res_locked(res);
1048
1049         ldlm_resource_dump(D_OTHER, res);
1050         CDEBUG(D_OTHER, "About to insert this lock after %p:\n", original);
1051         ldlm_lock_dump(D_OTHER, new, 0);
1052
1053         if (new->l_destroyed) {
1054                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1055                 goto out;
1056         }
1057
1058         LASSERT(list_empty(&new->l_res_link));
1059
1060         list_add(&new->l_res_link, &original->l_res_link);
1061  out:;
1062 }
1063
1064 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1065 {
1066         int type = lock->l_resource->lr_type;
1067
1068         check_res_locked(lock->l_resource);
1069         if (type == LDLM_IBITS || type == LDLM_PLAIN)
1070                 ldlm_unlink_lock_skiplist(lock);
1071         else if (type == LDLM_EXTENT)
1072                 ldlm_extent_unlink_lock(lock);
1073         list_del_init(&lock->l_res_link);
1074 }
1075
1076 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1077 {
1078         desc->lr_type = res->lr_type;
1079         desc->lr_name = res->lr_name;
1080 }
1081
1082 void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
1083 {
1084         struct list_head *tmp;
1085
1086         if (!((libcfs_debug | D_ERROR) & level))
1087                 return;
1088
1089         mutex_down(ldlm_namespace_lock(client));
1090
1091         list_for_each(tmp, ldlm_namespace_list(client)) {
1092                 struct ldlm_namespace *ns;
1093                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1094                 ldlm_namespace_dump(level, ns);
1095         }
1096
1097         mutex_up(ldlm_namespace_lock(client));
1098 }
1099
1100 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1101 {
1102         struct list_head *tmp;
1103
1104         if (!((libcfs_debug | D_ERROR) & level))
1105                 return;
1106
1107         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1108                ns->ns_name, ns->ns_refcount,
1109                ns_is_client(ns) ? "client" : "server");
1110
1111         if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
1112                 return;
1113
1114         spin_lock(&ns->ns_hash_lock);
1115         tmp = ns->ns_root_list.next;
1116         while (tmp != &ns->ns_root_list) {
1117                 struct ldlm_resource *res;
1118                 res = list_entry(tmp, struct ldlm_resource, lr_childof);
1119
1120                 ldlm_resource_getref(res);
1121                 spin_unlock(&ns->ns_hash_lock);
1122
1123                 lock_res(res);
1124                 ldlm_resource_dump(level, res);
1125                 unlock_res(res);
1126
1127                 spin_lock(&ns->ns_hash_lock);
1128                 tmp = tmp->next;
1129                 ldlm_resource_putref_locked(res);
1130         }
1131         ns->ns_next_dump = cfs_time_shift(10);
1132         spin_unlock(&ns->ns_hash_lock);
1133 }
1134
1135 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1136 {
1137         struct list_head *tmp;
1138         int pos;
1139
1140         CLASSERT(RES_NAME_SIZE == 4);
1141
1142         if (!((libcfs_debug | D_ERROR) & level))
1143                 return;
1144
1145         CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
1146                ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
1147                res->lr_name.name[2], res->lr_name.name[3],
1148                atomic_read(&res->lr_refcount));
1149
1150         if (!list_empty(&res->lr_granted)) {
1151                 pos = 0;
1152                 CDEBUG(level, "Granted locks:\n");
1153                 list_for_each(tmp, &res->lr_granted) {
1154                         struct ldlm_lock *lock;
1155                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1156                         ldlm_lock_dump(level, lock, ++pos);
1157                 }
1158         }
1159         if (!list_empty(&res->lr_converting)) {
1160                 pos = 0;
1161                 CDEBUG(level, "Converting locks:\n");
1162                 list_for_each(tmp, &res->lr_converting) {
1163                         struct ldlm_lock *lock;
1164                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1165                         ldlm_lock_dump(level, lock, ++pos);
1166                 }
1167         }
1168         if (!list_empty(&res->lr_waiting)) {
1169                 pos = 0;
1170                 CDEBUG(level, "Waiting locks:\n");
1171                 list_for_each(tmp, &res->lr_waiting) {
1172                         struct ldlm_lock *lock;
1173                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1174                         ldlm_lock_dump(level, lock, ++pos);
1175                 }
1176         }
1177 }