LU-6304 ldlm: crash on umount in cleanup_resource
lustre/ldlm/ldlm_resource.c (fs/lustre-release.git)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2015, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_resource.c
37  *
38  * Author: Phil Schwan <phil@clusterfs.com>
39  * Author: Peter Braam <braam@clusterfs.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43 #include <lustre_dlm.h>
44 #include <lustre_fid.h>
45 #include <obd_class.h>
46 #include "ldlm_internal.h"
47
48 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
49 struct kmem_cache *ldlm_interval_tree_slab;
50
51 int ldlm_srv_namespace_nr = 0;
52 int ldlm_cli_namespace_nr = 0;
53
54 struct mutex ldlm_srv_namespace_lock;
55 struct list_head ldlm_srv_namespace_list;
56
57 struct mutex ldlm_cli_namespace_lock;
58 /* Client Namespaces that have active resources in them.
59  * Once all resources go away, ldlm_poold moves such namespaces to the
60  * inactive list */
61 struct list_head ldlm_cli_active_namespace_list;
62 /* Client namespaces that don't have any locks in them */
63 struct list_head ldlm_cli_inactive_namespace_list;
64
65 static struct proc_dir_entry *ldlm_type_proc_dir;
66 static struct proc_dir_entry *ldlm_ns_proc_dir;
67 struct proc_dir_entry *ldlm_svc_proc_dir;
68
69 /* Dump at most this many granted locks for one resource during a debug dump,
70  * to avoid flooding the debug log. */
71 static unsigned int ldlm_dump_granted_max = 256;
72
73 #ifdef CONFIG_PROC_FS
74 static ssize_t
75 lprocfs_dump_ns_seq_write(struct file *file, const char __user *buffer,
76                           size_t count, loff_t *off)
77 {
78         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
79         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
80         RETURN(count);
81 }
82 LPROC_SEQ_FOPS_WO_TYPE(ldlm, dump_ns);
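/*
 * Editor's note (not part of the original source): dump_namespaces is a
 * write-only proc file.  Writing any value to it dumps every server- and
 * client-side namespace to the Lustre debug log at D_DLMTRACE, so the
 * dlmtrace debug flag must be enabled for the output to be recorded.
 * Assuming the conventional /proc/fs/lustre layout, a typical invocation
 * would be:
 *
 *     lctl set_param debug=+dlmtrace
 *     echo 1 > /proc/fs/lustre/ldlm/dump_namespaces
 *     lctl dk /tmp/ldlm_namespaces.log
 */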
83
84 LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
85 LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint);
86
87 #ifdef HAVE_SERVER_SUPPORT
88
89 static int seq_watermark_show(struct seq_file *m, void *data)
90 {
91         return seq_printf(m, LPU64"\n", *(__u64 *)m->private);
92 }
93
94 static ssize_t seq_watermark_write(struct file *file,
95                                    const char __user *buffer, size_t count,
96                                    loff_t *off)
97 {
98         __u64 watermark;
99         __u64 *data = ((struct seq_file *)file->private_data)->private;
100         bool wm_low = (data == &ldlm_reclaim_threshold_mb) ? true : false;
101         int rc;
102
103         rc = lprocfs_write_frac_u64_helper(buffer, count, &watermark, 1 << 20);
104         if (rc) {
105                 CERROR("Failed to set %s, rc = %d.\n",
106                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb",
107                        rc);
108                 return rc;
109         } else if (watermark != 0 && watermark < (1 << 20)) {
110                 CERROR("%s should be greater than 1MB.\n",
111                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb");
112                 return -EINVAL;
113         }
114         watermark >>= 20;
115
116         if (wm_low) {
117                 if (ldlm_lock_limit_mb != 0 && watermark > ldlm_lock_limit_mb) {
118                         CERROR("lock_reclaim_threshold_mb must be smaller than "
119                                "lock_limit_mb.\n");
120                         return -EINVAL;
121                 }
122
123                 *data = watermark;
124                 if (watermark != 0) {
125                         watermark <<= 20;
126                         do_div(watermark, sizeof(struct ldlm_lock));
127                 }
128                 ldlm_reclaim_threshold = watermark;
129         } else {
130                 if (ldlm_reclaim_threshold_mb != 0 &&
131                     watermark < ldlm_reclaim_threshold_mb) {
132                         CERROR("lock_limit_mb must be greater than "
133                                "lock_reclaim_threshold_mb.\n");
134                         return -EINVAL;
135                 }
136
137                 *data = watermark;
138                 if (watermark != 0) {
139                         watermark <<= 20;
140                         do_div(watermark, sizeof(struct ldlm_lock));
141                 }
142                 ldlm_lock_limit = watermark;
143         }
144
145         return count;
146 }
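/*
 * Editor's note (not part of the original source): a worked example of the
 * conversion above.  The value written to lock_limit_mb (or
 * lock_reclaim_threshold_mb) is parsed in megabytes, stored back in MB in
 * *data, and turned into a lock count by dividing the byte value by
 * sizeof(struct ldlm_lock).  Assuming, purely for illustration, a lock
 * structure of 512 bytes:
 *
 *     echo 1024 > lock_limit_mb
 *     ldlm_lock_limit = (1024 << 20) / 512 = 2097152 locks
 */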
147
148 static int seq_watermark_open(struct inode *inode, struct file *file)
149 {
150         return single_open(file, seq_watermark_show, PDE_DATA(inode));
151 }
152
153 static const struct file_operations ldlm_watermark_fops = {
154         .owner          = THIS_MODULE,
155         .open           = seq_watermark_open,
156         .read           = seq_read,
157         .write          = seq_watermark_write,
158         .llseek         = seq_lseek,
159         .release        = lprocfs_single_release,
160 };
161
162 static int seq_granted_show(struct seq_file *m, void *data)
163 {
164         return seq_printf(m, LPU64"\n", percpu_counter_sum_positive(
165                                 (struct percpu_counter *)m->private));
166 }
167
168 static int seq_granted_open(struct inode *inode, struct file *file)
169 {
170         return single_open(file, seq_granted_show, PDE_DATA(inode));
171 }
172
173 static const struct file_operations ldlm_granted_fops = {
174         .owner  = THIS_MODULE,
175         .open   = seq_granted_open,
176         .read   = seq_read,
177         .llseek = seq_lseek,
178         .release = seq_release,
179 };
180
181 #endif /* HAVE_SERVER_SUPPORT */
182
183 int ldlm_proc_setup(void)
184 {
185         int rc;
186         struct lprocfs_vars list[] = {
187                 { .name =       "dump_namespaces",
188                   .fops =       &ldlm_dump_ns_fops,
189                   .proc_mode =  0222 },
190                 { .name =       "dump_granted_max",
191                   .fops =       &ldlm_rw_uint_fops,
192                   .data =       &ldlm_dump_granted_max },
193                 { .name =       "cancel_unused_locks_before_replay",
194                   .fops =       &ldlm_rw_uint_fops,
195                   .data =       &ldlm_cancel_unused_locks_before_replay },
196 #ifdef HAVE_SERVER_SUPPORT
197                 { .name =       "lock_reclaim_threshold_mb",
198                   .fops =       &ldlm_watermark_fops,
199                   .data =       &ldlm_reclaim_threshold_mb },
200                 { .name =       "lock_limit_mb",
201                   .fops =       &ldlm_watermark_fops,
202                   .data =       &ldlm_lock_limit_mb },
203                 { .name =       "lock_granted_count",
204                   .fops =       &ldlm_granted_fops,
205                   .data =       &ldlm_granted_total },
206 #endif
207                 { NULL }};
208         ENTRY;
209         LASSERT(ldlm_ns_proc_dir == NULL);
210
211         ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
212                                               proc_lustre_root,
213                                               NULL, NULL);
214         if (IS_ERR(ldlm_type_proc_dir)) {
215                 CERROR("LProcFS failed in ldlm-init\n");
216                 rc = PTR_ERR(ldlm_type_proc_dir);
217                 GOTO(err, rc);
218         }
219
220         ldlm_ns_proc_dir = lprocfs_register("namespaces",
221                                             ldlm_type_proc_dir,
222                                             NULL, NULL);
223         if (IS_ERR(ldlm_ns_proc_dir)) {
224                 CERROR("LProcFS failed in ldlm-init\n");
225                 rc = PTR_ERR(ldlm_ns_proc_dir);
226                 GOTO(err_type, rc);
227         }
228
229         ldlm_svc_proc_dir = lprocfs_register("services",
230                                              ldlm_type_proc_dir,
231                                              NULL, NULL);
232         if (IS_ERR(ldlm_svc_proc_dir)) {
233                 CERROR("LProcFS failed in ldlm-init\n");
234                 rc = PTR_ERR(ldlm_svc_proc_dir);
235                 GOTO(err_ns, rc);
236         }
237
238         rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
239         if (rc != 0) {
240                 CERROR("LProcFS failed in ldlm-init\n");
241                 GOTO(err_svc, rc);
242         }
243
244         RETURN(0);
245
246 err_svc:
247         lprocfs_remove(&ldlm_svc_proc_dir);
248 err_ns:
249         lprocfs_remove(&ldlm_ns_proc_dir);
250 err_type:
251         lprocfs_remove(&ldlm_type_proc_dir);
252 err:
253         ldlm_svc_proc_dir = NULL;
254         RETURN(rc);
255 }
256
257 void ldlm_proc_cleanup(void)
258 {
259         if (ldlm_svc_proc_dir)
260                 lprocfs_remove(&ldlm_svc_proc_dir);
261
262         if (ldlm_ns_proc_dir)
263                 lprocfs_remove(&ldlm_ns_proc_dir);
264
265         if (ldlm_type_proc_dir)
266                 lprocfs_remove(&ldlm_type_proc_dir);
267 }
268
269 static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
270 {
271         struct ldlm_namespace   *ns  = m->private;
272         __u64                   res = 0;
273         struct cfs_hash_bd              bd;
274         int                     i;
275
276         /* the result is not strictly consistent */
277         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
278                 res += cfs_hash_bd_count_get(&bd);
279         return lprocfs_u64_seq_show(m, &res);
280 }
281 LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);
282
283 static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
284 {
285         struct ldlm_namespace   *ns = m->private;
286         __u64                   locks;
287
288         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
289                                         LPROCFS_FIELDS_FLAGS_SUM);
290         return lprocfs_u64_seq_show(m, &locks);
291 }
292 LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);
293
294 static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
295 {
296         struct ldlm_namespace *ns = m->private;
297         __u32 *nr = &ns->ns_max_unused;
298
299         if (ns_connect_lru_resize(ns))
300                 nr = &ns->ns_nr_unused;
301         return lprocfs_uint_seq_show(m, nr);
302 }
303
304 static ssize_t lprocfs_lru_size_seq_write(struct file *file,
305                                           const char __user *buffer,
306                                           size_t count, loff_t *off)
307 {
308         struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
309         char dummy[MAX_STRING_SIZE + 1], *end;
310         unsigned long tmp;
311         int lru_resize;
312
313         dummy[MAX_STRING_SIZE] = '\0';
314         if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
315                 return -EFAULT;
316
317         if (strncmp(dummy, "clear", 5) == 0) {
318                 CDEBUG(D_DLMTRACE,
319                        "dropping all unused locks from namespace %s\n",
320                        ldlm_ns_name(ns));
321                 if (ns_connect_lru_resize(ns)) {
322                         int canceled, unused  = ns->ns_nr_unused;
323
324                         /* Try to cancel all @ns_nr_unused locks. */
325                         canceled = ldlm_cancel_lru(ns, unused, 0,
326                                                    LDLM_LRU_FLAG_PASSED);
327                         if (canceled < unused) {
328                                 CDEBUG(D_DLMTRACE,
329                                        "not all requested locks are canceled, "
330                                        "requested: %d, canceled: %d\n", unused,
331                                        canceled);
332                                 return -EINVAL;
333                         }
334                 } else {
335                         tmp = ns->ns_max_unused;
336                         ns->ns_max_unused = 0;
337                         ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
338                         ns->ns_max_unused = tmp;
339                 }
340                 return count;
341         }
342
343         tmp = simple_strtoul(dummy, &end, 0);
344         if (dummy == end) {
345                 CERROR("invalid value written\n");
346                 return -EINVAL;
347         }
348         lru_resize = (tmp == 0);
349
350         if (ns_connect_lru_resize(ns)) {
351                 if (!lru_resize)
352                         ns->ns_max_unused = tmp;
353
354                 if (tmp > ns->ns_nr_unused)
355                         tmp = ns->ns_nr_unused;
356                 tmp = ns->ns_nr_unused - tmp;
357
358                 CDEBUG(D_DLMTRACE,
359                        "changing namespace %s unused locks from %u to %u\n",
360                        ldlm_ns_name(ns), ns->ns_nr_unused,
361                        (unsigned int)tmp);
362                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
363
364                 if (!lru_resize) {
365                         CDEBUG(D_DLMTRACE,
366                                "disable lru_resize for namespace %s\n",
367                                ldlm_ns_name(ns));
368                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
369                 }
370         } else {
371                 CDEBUG(D_DLMTRACE,
372                        "changing namespace %s max_unused from %u to %u\n",
373                        ldlm_ns_name(ns), ns->ns_max_unused,
374                        (unsigned int)tmp);
375                 ns->ns_max_unused = (unsigned int)tmp;
376                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
377
378                 /* Make sure that LRU resize was originally supported before
379                  * turning it on here. */
380                 if (lru_resize &&
381                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
382                         CDEBUG(D_DLMTRACE,
383                                "enable lru_resize for namespace %s\n",
384                                ldlm_ns_name(ns));
385                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
386                 }
387         }
388
389         return count;
390 }
391 LPROC_SEQ_FOPS(lprocfs_lru_size);
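/*
 * Editor's note (not part of the original source): lru_size accepts either
 * the literal string "clear", which cancels the unused locks currently
 * cached in the namespace, or a number.  A non-zero number pins the LRU to
 * that size and disables lru_resize; writing 0 re-enables lru_resize when
 * the server originally advertised OBD_CONNECT_LRU_RESIZE.  From userspace
 * this is normally driven through lctl, e.g.:
 *
 *     lctl set_param ldlm.namespaces.*.lru_size=clear
 *     lctl set_param ldlm.namespaces.<namespace>.lru_size=400
 */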
392
393 static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
394 {
395         struct ldlm_namespace *ns = m->private;
396         unsigned int supp = ns_connect_cancelset(ns);
397
398         return lprocfs_uint_seq_show(m, &supp);
399 }
400
401 static ssize_t lprocfs_elc_seq_write(struct file *file,
402                                      const char __user *buffer,
403                                      size_t count, loff_t *off)
404 {
405         struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
406         unsigned int supp = -1;
407         int rc;
408
409         rc = lprocfs_wr_uint(file, buffer, count, &supp);
410         if (rc < 0)
411                 return rc;
412
413         if (supp == 0)
414                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
415         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
416                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
417         return count;
418 }
419 LPROC_SEQ_FOPS(lprocfs_elc);
420
421 static void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
422 {
423         if (ns->ns_proc_dir_entry == NULL)
424                 CERROR("dlm namespace %s has no procfs dir?\n",
425                        ldlm_ns_name(ns));
426         else
427                 lprocfs_remove(&ns->ns_proc_dir_entry);
428
429         if (ns->ns_stats != NULL)
430                 lprocfs_free_stats(&ns->ns_stats);
431 }
432
433 static int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
434 {
435         struct lprocfs_vars lock_vars[2];
436         char lock_name[MAX_STRING_SIZE + 1];
437         struct proc_dir_entry *ns_pde;
438
439         LASSERT(ns != NULL);
440         LASSERT(ns->ns_rs_hash != NULL);
441
442         if (ns->ns_proc_dir_entry != NULL) {
443                 ns_pde = ns->ns_proc_dir_entry;
444         } else {
445                 ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir);
446                 if (ns_pde == NULL)
447                         return -ENOMEM;
448                 ns->ns_proc_dir_entry = ns_pde;
449         }
450
451         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
452         if (ns->ns_stats == NULL)
453                 return -ENOMEM;
454
455         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
456                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
457
458         lock_name[MAX_STRING_SIZE] = '\0';
459
460         memset(lock_vars, 0, sizeof(lock_vars));
461         lock_vars[0].name = lock_name;
462
463         ldlm_add_var(&lock_vars[0], ns_pde, "resource_count", ns,
464                      &lprocfs_ns_resources_fops);
465         ldlm_add_var(&lock_vars[0], ns_pde, "lock_count", ns,
466                      &lprocfs_ns_locks_fops);
467
468         if (ns_is_client(ns)) {
469                 ldlm_add_var(&lock_vars[0], ns_pde, "lock_unused_count",
470                              &ns->ns_nr_unused, &ldlm_uint_fops);
471                 ldlm_add_var(&lock_vars[0], ns_pde, "lru_size", ns,
472                              &lprocfs_lru_size_fops);
473                 ldlm_add_var(&lock_vars[0], ns_pde, "lru_max_age",
474                              &ns->ns_max_age, &ldlm_rw_uint_fops);
475                 ldlm_add_var(&lock_vars[0], ns_pde, "early_lock_cancel",
476                              ns, &lprocfs_elc_fops);
477         } else {
478                 ldlm_add_var(&lock_vars[0], ns_pde, "ctime_age_limit",
479                              &ns->ns_ctime_age_limit, &ldlm_rw_uint_fops);
480                 ldlm_add_var(&lock_vars[0], ns_pde, "lock_timeouts",
481                              &ns->ns_timeouts, &ldlm_uint_fops);
482                 ldlm_add_var(&lock_vars[0], ns_pde, "max_nolock_bytes",
483                              &ns->ns_max_nolock_size, &ldlm_rw_uint_fops);
484                 ldlm_add_var(&lock_vars[0], ns_pde, "contention_seconds",
485                              &ns->ns_contention_time, &ldlm_rw_uint_fops);
486                 ldlm_add_var(&lock_vars[0], ns_pde, "contended_locks",
487                              &ns->ns_contended_locks, &ldlm_rw_uint_fops);
488                 ldlm_add_var(&lock_vars[0], ns_pde, "max_parallel_ast",
489                              &ns->ns_max_parallel_ast, &ldlm_rw_uint_fops);
490         }
491         return 0;
492 }
493 #undef MAX_STRING_SIZE
494 #else /* CONFIG_PROC_FS */
495
496 #define ldlm_namespace_proc_unregister(ns)      ({;})
497 #define ldlm_namespace_proc_register(ns)        ({0;})
498
499 #endif /* CONFIG_PROC_FS */
500
501 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
502                                   const void *key, unsigned mask)
503 {
504         const struct ldlm_res_id     *id  = key;
505         unsigned                val = 0;
506         unsigned                i;
507
508         for (i = 0; i < RES_NAME_SIZE; i++)
509                 val += id->name[i];
510         return val & mask;
511 }
512
513 static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
514                                       const void *key, unsigned mask)
515 {
516         const struct ldlm_res_id *id = key;
517         struct lu_fid       fid;
518         __u32               hash;
519         __u32               val;
520
521         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
522         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
523         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
524
525         hash = fid_flatten32(&fid);
526         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
527         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
528                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
529                 hash += (val >> 5) + (val << 11);
530         } else {
531                 val = fid_oid(&fid);
532         }
533         hash = hash_long(hash, hs->hs_bkt_bits);
534         /* give me another random factor */
535         hash -= hash_long((unsigned long)hs, val % 11 + 3);
536
537         hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
538         hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
539
540         return hash & mask;
541 }
542
543 static void *ldlm_res_hop_key(struct hlist_node *hnode)
544 {
545         struct ldlm_resource   *res;
546
547         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
548         return &res->lr_name;
549 }
550
551 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
552 {
553         struct ldlm_resource   *res;
554
555         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
556         return ldlm_res_eq((const struct ldlm_res_id *)key,
557                            (const struct ldlm_res_id *)&res->lr_name);
558 }
559
560 static void *ldlm_res_hop_object(struct hlist_node *hnode)
561 {
562         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
563 }
564
565 static void
566 ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
567 {
568         struct ldlm_resource *res;
569
570         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
571         ldlm_resource_getref(res);
572 }
573
574 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
575 {
576         struct ldlm_resource *res;
577
578         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
579         ldlm_resource_putref(res);
580 }
581
582 static struct cfs_hash_ops ldlm_ns_hash_ops = {
583         .hs_hash        = ldlm_res_hop_hash,
584         .hs_key         = ldlm_res_hop_key,
585         .hs_keycmp      = ldlm_res_hop_keycmp,
586         .hs_keycpy      = NULL,
587         .hs_object      = ldlm_res_hop_object,
588         .hs_get         = ldlm_res_hop_get_locked,
589         .hs_put         = ldlm_res_hop_put
590 };
591
592 static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
593         .hs_hash        = ldlm_res_hop_fid_hash,
594         .hs_key         = ldlm_res_hop_key,
595         .hs_keycmp      = ldlm_res_hop_keycmp,
596         .hs_keycpy      = NULL,
597         .hs_object      = ldlm_res_hop_object,
598         .hs_get         = ldlm_res_hop_get_locked,
599         .hs_put         = ldlm_res_hop_put
600 };
601
602 typedef struct ldlm_ns_hash_def {
603         enum ldlm_ns_type       nsd_type;
604         /** hash bucket bits */
605         unsigned                nsd_bkt_bits;
606         /** hash bits */
607         unsigned                nsd_all_bits;
608         /** hash operations */
609         struct cfs_hash_ops *nsd_hops;
610 } ldlm_ns_hash_def_t;
611
612 static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] =
613 {
614         {
615                 .nsd_type       = LDLM_NS_TYPE_MDC,
616                 .nsd_bkt_bits   = 11,
617                 .nsd_all_bits   = 16,
618                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
619         },
620         {
621                 .nsd_type       = LDLM_NS_TYPE_MDT,
622                 .nsd_bkt_bits   = 14,
623                 .nsd_all_bits   = 21,
624                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
625         },
626         {
627                 .nsd_type       = LDLM_NS_TYPE_OSC,
628                 .nsd_bkt_bits   = 8,
629                 .nsd_all_bits   = 12,
630                 .nsd_hops       = &ldlm_ns_hash_ops,
631         },
632         {
633                 .nsd_type       = LDLM_NS_TYPE_OST,
634                 .nsd_bkt_bits   = 11,
635                 .nsd_all_bits   = 17,
636                 .nsd_hops       = &ldlm_ns_hash_ops,
637         },
638         {
639                 .nsd_type       = LDLM_NS_TYPE_MGC,
640                 .nsd_bkt_bits   = 4,
641                 .nsd_all_bits   = 4,
642                 .nsd_hops       = &ldlm_ns_hash_ops,
643         },
644         {
645                 .nsd_type       = LDLM_NS_TYPE_MGT,
646                 .nsd_bkt_bits   = 4,
647                 .nsd_all_bits   = 4,
648                 .nsd_hops       = &ldlm_ns_hash_ops,
649         },
650         {
651                 .nsd_type       = LDLM_NS_TYPE_UNKNOWN,
652         },
653 };
654
655 /**
656  * Create and initialize new empty namespace.
657  */
658 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
659                                           enum ldlm_side client,
660                                           enum ldlm_appetite apt,
661                                           enum ldlm_ns_type ns_type)
662 {
663         struct ldlm_namespace *ns = NULL;
664         struct ldlm_ns_bucket *nsb;
665         struct ldlm_ns_hash_def *nsd;
666         struct cfs_hash_bd bd;
667         int idx;
668         int rc;
669         ENTRY;
670
671         LASSERT(obd != NULL);
672
673         rc = ldlm_get_ref();
674         if (rc) {
675                 CERROR("ldlm_get_ref failed: %d\n", rc);
676                 RETURN(NULL);
677         }
678
679         for (idx = 0;;idx++) {
680                 nsd = &ldlm_ns_hash_defs[idx];
681                 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
682                         CERROR("Unknown type %d for ns %s\n", ns_type, name);
683                         GOTO(out_ref, NULL);
684                 }
685
686                 if (nsd->nsd_type == ns_type)
687                         break;
688         }
689
690         OBD_ALLOC_PTR(ns);
691         if (!ns)
692                 GOTO(out_ref, NULL);
693
694         ns->ns_rs_hash = cfs_hash_create(name,
695                                          nsd->nsd_all_bits, nsd->nsd_all_bits,
696                                          nsd->nsd_bkt_bits, sizeof(*nsb),
697                                          CFS_HASH_MIN_THETA,
698                                          CFS_HASH_MAX_THETA,
699                                          nsd->nsd_hops,
700                                          CFS_HASH_DEPTH |
701                                          CFS_HASH_BIGNAME |
702                                          CFS_HASH_SPIN_BKTLOCK |
703                                          CFS_HASH_NO_ITEMREF);
704         if (ns->ns_rs_hash == NULL)
705                 GOTO(out_ns, NULL);
706
707         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
708                 nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
709                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
710                 nsb->nsb_namespace = ns;
711                 nsb->nsb_reclaim_start = 0;
712         }
713
714         ns->ns_obd      = obd;
715         ns->ns_appetite = apt;
716         ns->ns_client   = client;
717
718         INIT_LIST_HEAD(&ns->ns_list_chain);
719         INIT_LIST_HEAD(&ns->ns_unused_list);
720         spin_lock_init(&ns->ns_lock);
721         atomic_set(&ns->ns_bref, 0);
722         init_waitqueue_head(&ns->ns_waitq);
723
724         ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
725         ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
726         ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
727
728         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
729         ns->ns_nr_unused          = 0;
730         ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
731         ns->ns_max_age            = LDLM_DEFAULT_MAX_ALIVE;
732         ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
733         ns->ns_timeouts           = 0;
734         ns->ns_orig_connect_flags = 0;
735         ns->ns_connect_flags      = 0;
736         ns->ns_stopping           = 0;
737         ns->ns_reclaim_start      = 0;
738         rc = ldlm_namespace_proc_register(ns);
739         if (rc != 0) {
740                 CERROR("Can't initialize ns proc, rc %d\n", rc);
741                 GOTO(out_hash, rc);
742         }
743
744         idx = ldlm_namespace_nr_read(client);
745         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
746         if (rc) {
747                 CERROR("Can't initialize lock pool, rc %d\n", rc);
748                 GOTO(out_proc, rc);
749         }
750
751         ldlm_namespace_register(ns, client);
752         RETURN(ns);
753 out_proc:
754         ldlm_namespace_proc_unregister(ns);
755         ldlm_namespace_cleanup(ns, 0);
756 out_hash:
757         cfs_hash_putref(ns->ns_rs_hash);
758 out_ns:
759         OBD_FREE_PTR(ns);
760 out_ref:
761         ldlm_put_ref();
762         RETURN(NULL);
763 }
764 EXPORT_SYMBOL(ldlm_namespace_new);
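/*
 * Editor's note (not part of the original source): a minimal sketch of how
 * an obd might create and later release a client-side namespace with the
 * API above.  The obd/imp variables, the "example-osc" name and the chosen
 * appetite/type values are illustrative only:
 *
 *     struct ldlm_namespace *ns;
 *
 *     ns = ldlm_namespace_new(obd, "example-osc", LDLM_NAMESPACE_CLIENT,
 *                             LDLM_NAMESPACE_GREEDY, LDLM_NS_TYPE_OSC);
 *     if (ns == NULL)
 *             return -ENOMEM;
 *     ...
 *     ldlm_namespace_free(ns, imp, obd->obd_force);
 */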
765
766 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
767
768 /**
769  * Cancel and destroy all locks on a resource.
770  *
771  * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
772  * clean up.  This is currently only used for recovery, and we make
773  * certain assumptions as a result--notably, that we shouldn't cancel
774  * locks with refs.
775  */
776 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
777                              __u64 flags)
778 {
779         struct list_head *tmp;
780         int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
781         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
782
783         do {
784                 struct ldlm_lock *lock = NULL;
785
786                 /* First, look for a lock that has not been cleaned up yet;
787                  * all cleaned locks are marked with the CLEANED flag. */
788                 lock_res(res);
789                 list_for_each(tmp, q) {
790                         lock = list_entry(tmp, struct ldlm_lock,
791                                           l_res_link);
792                         if (ldlm_is_cleaned(lock)) {
793                                 lock = NULL;
794                                 continue;
795                         }
796                         LDLM_LOCK_GET(lock);
797                         ldlm_set_cleaned(lock);
798                         break;
799                 }
800
801                 if (lock == NULL) {
802                         unlock_res(res);
803                         break;
804                 }
805
806                 /* Set CBPENDING so nothing in the cancellation path
807                  * can match this lock. */
808                 ldlm_set_cbpending(lock);
809                 ldlm_set_failed(lock);
810                 lock->l_flags |= flags;
811
812                 /* ... without sending a CANCEL message for local_only. */
813                 if (local_only)
814                         ldlm_set_local_only(lock);
815
816                 if (local_only && (lock->l_readers || lock->l_writers)) {
817                         /* This is a little bit gross, but much better than the
818                          * alternative: pretend that we got a blocking AST from
819                          * the server, so that when the lock is decref'd, it
820                          * will go away ... */
821                         unlock_res(res);
822                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
823                         if (lock->l_flags & LDLM_FL_FAIL_LOC) {
824                                 set_current_state(TASK_UNINTERRUPTIBLE);
825                                 schedule_timeout(cfs_time_seconds(4));
826                                 set_current_state(TASK_RUNNING);
827                         }
828                         if (lock->l_completion_ast)
829                                 lock->l_completion_ast(lock,
830                                                        LDLM_FL_FAILED, NULL);
831                         LDLM_LOCK_RELEASE(lock);
832                         continue;
833                 }
834
835                 if (client) {
836                         struct lustre_handle lockh;
837
838                         unlock_res(res);
839                         ldlm_lock2handle(lock, &lockh);
840                         rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
841                         if (rc)
842                                 CERROR("ldlm_cli_cancel: %d\n", rc);
843                 } else {
844                         unlock_res(res);
845                         LDLM_DEBUG(lock, "Freeing a lock still held by a "
846                                    "client node");
847                         ldlm_lock_cancel(lock);
848                 }
849                 LDLM_LOCK_RELEASE(lock);
850         } while (1);
851 }
852
853 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
854                                struct hlist_node *hnode, void *arg)
855 {
856         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
857         __u64 flags = *(__u64 *)arg;
858
859         cleanup_resource(res, &res->lr_granted, flags);
860         cleanup_resource(res, &res->lr_converting, flags);
861         cleanup_resource(res, &res->lr_waiting, flags);
862
863         return 0;
864 }
865
866 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
867                                   struct hlist_node *hnode, void *arg)
868 {
869         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
870
871         lock_res(res);
872         CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
873                "(%d) after lock cleanup; forcing cleanup.\n",
874                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
875                atomic_read(&res->lr_refcount) - 1);
876
877         ldlm_resource_dump(D_ERROR, res);
878         unlock_res(res);
879         return 0;
880 }
881
882 /**
883  * Cancel and destroy all locks in the namespace.
884  *
885  * Typically used during eviction, when the server has notified the client
886  * that it was evicted and all of its state needs to be destroyed.
887  * Also used during shutdown.
888  */
889 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
890 {
891         if (ns == NULL) {
892                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
893                 return ELDLM_OK;
894         }
895
896         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
897                                  &flags, 0);
898         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
899                                  NULL, 0);
900         return ELDLM_OK;
901 }
902 EXPORT_SYMBOL(ldlm_namespace_cleanup);
903
904 /**
905  * Attempts to free the namespace.
906  *
907  * Only used when the namespace goes away, e.g. during an unmount.
908  */
909 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
910 {
911         ENTRY;
912
913         /* At shutdown time, don't call the cancellation callback */
914         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
915
916         if (atomic_read(&ns->ns_bref) > 0) {
917                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
918                 int rc;
919                 CDEBUG(D_DLMTRACE,
920                        "dlm namespace %s free waiting on refcount %d\n",
921                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
922 force_wait:
923                 if (force)
924                         lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
925                                           MSEC_PER_SEC) / 4, NULL, NULL);
926
927                 rc = l_wait_event(ns->ns_waitq,
928                                   atomic_read(&ns->ns_bref) == 0, &lwi);
929
930                 /* Forced cleanups should be able to reclaim all references,
931                  * so it's safe to wait forever... we can't leak locks... */
932                 if (force && rc == -ETIMEDOUT) {
933                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
934                                        "namespace with %d resources in use, "
935                                        "(rc=%d)\n", ldlm_ns_name(ns),
936                                        atomic_read(&ns->ns_bref), rc);
937                         GOTO(force_wait, rc);
938                 }
939
940                 if (atomic_read(&ns->ns_bref)) {
941                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
942                                        "with %d resources in use, (rc=%d)\n",
943                                        ldlm_ns_name(ns),
944                                        atomic_read(&ns->ns_bref), rc);
945                         RETURN(ELDLM_NAMESPACE_EXISTS);
946                 }
947                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
948                        ldlm_ns_name(ns));
949         }
950
951         RETURN(ELDLM_OK);
952 }
953
954 /**
955  * Performs various cleanups on the passed \a ns to make it drop its refc and
956  * become ready for freeing. Waits for refc == 0.
957  *
958  * The following is done:
959  * (0) Unregister \a ns from its list to make it inaccessible to potential
960  * users such as the pools thread;
961  * (1) Clear all locks in \a ns.
962  */
963 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
964                                struct obd_import *imp,
965                                int force)
966 {
967         int rc;
968         ENTRY;
969         if (!ns) {
970                 EXIT;
971                 return;
972         }
973
974         spin_lock(&ns->ns_lock);
975         ns->ns_stopping = 1;
976         spin_unlock(&ns->ns_lock);
977
978         /*
979          * Can fail with -EINTR when force == 0 in which case try harder.
980          */
981         rc = __ldlm_namespace_free(ns, force);
982         if (rc != ELDLM_OK) {
983                 if (imp) {
984                         ptlrpc_disconnect_import(imp, 0);
985                         ptlrpc_invalidate_import(imp);
986                 }
987
988                 /*
989                  * With all requests dropped and the import inactive
990                  * we are guaranteed that all references will be dropped.
991                  */
992                 rc = __ldlm_namespace_free(ns, 1);
993                 LASSERT(rc == 0);
994         }
995         EXIT;
996 }
997 EXPORT_SYMBOL(ldlm_namespace_free_prior);
998
999 /**
1000  * Frees the memory structures related to \a ns. This is only done once
1001  * ldlm_namespace_free_prior() has successfully removed all resources
1002  * referencing \a ns and its refc == 0.
1003  */
1004 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
1005 {
1006         ENTRY;
1007         if (!ns) {
1008                 EXIT;
1009                 return;
1010         }
1011
1012         /* Make sure that nobody can find this ns in its list. */
1013         ldlm_namespace_unregister(ns, ns->ns_client);
1014         /* Fini the pool _before_ the parent proc dir is removed. This is
1015          * important because ldlm_pool_fini() removes its own proc dir, which is
1016          * a child of @dir. Removing it after @dir may cause an oops. */
1017         ldlm_pool_fini(&ns->ns_pool);
1018
1019         ldlm_namespace_proc_unregister(ns);
1020         cfs_hash_putref(ns->ns_rs_hash);
1021         /* Namespace \a ns should not be on the list at this point; otherwise
1022          * this could lead to issues with the poold thread using the freed
1023          * \a ns. */
1024         LASSERT(list_empty(&ns->ns_list_chain));
1025         OBD_FREE_PTR(ns);
1026         ldlm_put_ref();
1027         EXIT;
1028 }
1029 EXPORT_SYMBOL(ldlm_namespace_free_post);
1030
1031 /**
1032  * Clean up the resources and free the namespace.
1033  * bug 12864:
1034  * Deadlock issue:
1035  * proc1: destroy import
1036  *        class_disconnect_export(grab cl_sem) ->
1037  *              -> ldlm_namespace_free ->
1038  *              -> lprocfs_remove(grab _lprocfs_lock).
1039  * proc2: read proc info
1040  *        lprocfs_fops_read(grab _lprocfs_lock) ->
1041  *              -> osc_rd_active, etc(grab cl_sem).
1042  *
1043  * To avoid this, ldlm_namespace_free is split into two parts: the first part,
1044  * ldlm_namespace_free_prior, cleans up the resources that are still in use;
1045  * the second part, ldlm_namespace_free_post, unregisters the lprocfs entries
1046  * and then frees the memory. The latter is called without cli->cl_sem
1047  * held.
1048  */
1049 void ldlm_namespace_free(struct ldlm_namespace *ns,
1050                          struct obd_import *imp,
1051                          int force)
1052 {
1053         ldlm_namespace_free_prior(ns, imp, force);
1054         ldlm_namespace_free_post(ns);
1055 }
1056 EXPORT_SYMBOL(ldlm_namespace_free);
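/*
 * Editor's note (not part of the original source): callers that need to
 * avoid the cl_sem/_lprocfs_lock deadlock described above do not use
 * ldlm_namespace_free() directly but split the teardown, roughly:
 *
 *     // while cli->cl_sem may still be held
 *     ldlm_namespace_free_prior(obd->obd_namespace, imp, obd->obd_force);
 *     ...
 *     // later, once cl_sem has been released
 *     ldlm_namespace_free_post(obd->obd_namespace);
 *     obd->obd_namespace = NULL;
 *
 * The obd/imp variables are illustrative; the real call sites live in the
 * obd disconnect/cleanup paths outside this file.
 */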
1057
1058 void ldlm_namespace_get(struct ldlm_namespace *ns)
1059 {
1060         atomic_inc(&ns->ns_bref);
1061 }
1062
1063 /* This is only for callers that care about refcount */
1064 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1065 {
1066         return atomic_inc_return(&ns->ns_bref);
1067 }
1068
1069 void ldlm_namespace_put(struct ldlm_namespace *ns)
1070 {
1071         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1072                 wake_up(&ns->ns_waitq);
1073                 spin_unlock(&ns->ns_lock);
1074         }
1075 }
1076
1077 /** Register \a ns in the list of namespaces */
1078 void ldlm_namespace_register(struct ldlm_namespace *ns, enum ldlm_side client)
1079 {
1080         mutex_lock(ldlm_namespace_lock(client));
1081         LASSERT(list_empty(&ns->ns_list_chain));
1082         list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
1083         ldlm_namespace_nr_inc(client);
1084         mutex_unlock(ldlm_namespace_lock(client));
1085 }
1086
1087 /** Unregister \a ns from the list of namespaces. */
1088 void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client)
1089 {
1090         mutex_lock(ldlm_namespace_lock(client));
1091         LASSERT(!list_empty(&ns->ns_list_chain));
1092         /* Some asserts and possibly other parts of the code are still
1093          * using list_empty(&ns->ns_list_chain). This is why it is
1094          * important to use list_del_init() here. */
1095         list_del_init(&ns->ns_list_chain);
1096         ldlm_namespace_nr_dec(client);
1097         mutex_unlock(ldlm_namespace_lock(client));
1098 }
1099
1100 /** Should be called with ldlm_namespace_lock(client) taken. */
1101 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1102                                           enum ldlm_side client)
1103 {
1104         LASSERT(!list_empty(&ns->ns_list_chain));
1105         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1106         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1107 }
1108
1109 /** Should be called with ldlm_namespace_lock(client) taken. */
1110 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1111                                             enum ldlm_side client)
1112 {
1113         LASSERT(!list_empty(&ns->ns_list_chain));
1114         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1115         list_move_tail(&ns->ns_list_chain,
1116                        ldlm_namespace_inactive_list(client));
1117 }
1118
1119 /** Should be called with ldlm_namespace_lock(client) taken. */
1120 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
1121 {
1122         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1123         LASSERT(!list_empty(ldlm_namespace_list(client)));
1124         return container_of(ldlm_namespace_list(client)->next,
1125                             struct ldlm_namespace, ns_list_chain);
1126 }
1127
1128 /** Create and initialize new resource. */
1129 static struct ldlm_resource *ldlm_resource_new(enum ldlm_type ldlm_type)
1130 {
1131         struct ldlm_resource *res;
1132         int idx;
1133
1134         OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1135         if (res == NULL)
1136                 return NULL;
1137
1138         if (ldlm_type == LDLM_EXTENT) {
1139                 OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
1140                                sizeof(*res->lr_itree) * LCK_MODE_NUM);
1141                 if (res->lr_itree == NULL) {
1142                         OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1143                         return NULL;
1144                 }
1145                 /* Initialize interval trees for each lock mode. */
1146                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1147                         res->lr_itree[idx].lit_size = 0;
1148                         res->lr_itree[idx].lit_mode = 1 << idx;
1149                         res->lr_itree[idx].lit_root = NULL;
1150                 }
1151         }
1152
1153         INIT_LIST_HEAD(&res->lr_granted);
1154         INIT_LIST_HEAD(&res->lr_converting);
1155         INIT_LIST_HEAD(&res->lr_waiting);
1156
1157         atomic_set(&res->lr_refcount, 1);
1158         spin_lock_init(&res->lr_lock);
1159         lu_ref_init(&res->lr_reference);
1160
1161         /* Since LVB init can now be delayed, there is no longer a need to
1162          * acquire the mutex here immediately. */
1163         mutex_init(&res->lr_lvb_mutex);
1164         res->lr_lvb_initialized = false;
1165
1166         return res;
1167 }
1168
1169 /**
1170  * Return a reference to resource with given name, creating it if necessary.
1171  * Args: namespace with ns_lock unlocked
1172  * Locks: takes and releases NS hash-lock and res->lr_lock
1173  * Returns: referenced, unlocked ldlm_resource, or ERR_PTR() on failure
1174  */
1175 struct ldlm_resource *
1176 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1177                   const struct ldlm_res_id *name, enum ldlm_type type,
1178                   int create)
1179 {
1180         struct hlist_node       *hnode;
1181         struct ldlm_resource    *res = NULL;
1182         struct cfs_hash_bd              bd;
1183         __u64                   version;
1184         int                     ns_refcount = 0;
1185
1186         LASSERT(ns != NULL);
1187         LASSERT(parent == NULL);
1188         LASSERT(ns->ns_rs_hash != NULL);
1189         LASSERT(name->name[0] != 0);
1190
1191         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1192         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1193         if (hnode != NULL) {
1194                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1195                 GOTO(found, res);
1196         }
1197
1198         version = cfs_hash_bd_version_get(&bd);
1199         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1200
1201         if (create == 0)
1202                 return ERR_PTR(-ENOENT);
1203
1204         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1205                  "type: %d\n", type);
1206         res = ldlm_resource_new(type);
1207         if (res == NULL)
1208                 return ERR_PTR(-ENOMEM);
1209
1210         res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
1211         res->lr_name = *name;
1212         res->lr_type = type;
1213
1214         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1215         hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1216                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1217
1218         if (hnode != NULL) {
1219                 /* Someone won the race and already added the resource. */
1220                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1221                 /* Clean lu_ref for failed resource. */
1222                 lu_ref_fini(&res->lr_reference);
1223                 if (res->lr_itree != NULL)
1224                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1225                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1226                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1227 found:
1228                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1229                 return res;
1230         }
1231         /* We won! Let's add the resource. */
1232         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1233         if (cfs_hash_bd_count_get(&bd) == 1)
1234                 ns_refcount = ldlm_namespace_get_return(ns);
1235
1236         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1237
1238         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1239
1240         /* Let's see if we happened to be the very first resource in this
1241          * namespace. If so, and this is a client namespace, we need to move
1242          * the namespace into the active namespaces list to be patrolled by
1243          * the ldlm_poold. */
1244         if (ns_is_client(ns) && ns_refcount == 1) {
1245                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1246                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1247                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1248         }
1249
1250         return res;
1251 }
1252 EXPORT_SYMBOL(ldlm_resource_get);
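/*
 * Editor's note (not part of the original source): a minimal lookup/create
 * sketch for ldlm_resource_get().  The resource name below is made up;
 * note that the parent argument must be NULL and that failure is reported
 * via ERR_PTR(), not NULL:
 *
 *     struct ldlm_res_id name = { .name = { 0x1234, 0, 0, 0 } };
 *     struct ldlm_resource *res;
 *
 *     res = ldlm_resource_get(ns, NULL, &name, LDLM_PLAIN, 1);
 *     if (IS_ERR(res))
 *             return PTR_ERR(res);
 *     ...
 *     ldlm_resource_putref(res);
 */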
1253
1254 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1255 {
1256         LASSERT(res != NULL);
1257         LASSERT(res != LP_POISON);
1258         atomic_inc(&res->lr_refcount);
1259         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1260                atomic_read(&res->lr_refcount));
1261         return res;
1262 }
1263
1264 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1265                                          struct ldlm_resource *res)
1266 {
1267         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1268
1269         if (!list_empty(&res->lr_granted)) {
1270                 ldlm_resource_dump(D_ERROR, res);
1271                 LBUG();
1272         }
1273
1274         if (!list_empty(&res->lr_converting)) {
1275                 ldlm_resource_dump(D_ERROR, res);
1276                 LBUG();
1277         }
1278
1279         if (!list_empty(&res->lr_waiting)) {
1280                 ldlm_resource_dump(D_ERROR, res);
1281                 LBUG();
1282         }
1283
1284         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1285                                bd, &res->lr_hash);
1286         lu_ref_fini(&res->lr_reference);
1287         if (cfs_hash_bd_count_get(bd) == 0)
1288                 ldlm_namespace_put(nsb->nsb_namespace);
1289 }
1290
1291 /* Returns 1 if the resource was freed, 0 if it remains. */
1292 int ldlm_resource_putref(struct ldlm_resource *res)
1293 {
1294         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1295         struct cfs_hash_bd   bd;
1296
1297         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1298         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1299                res, atomic_read(&res->lr_refcount) - 1);
1300
1301         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1302         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1303                 __ldlm_resource_putref_final(&bd, res);
1304                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1305                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1306                         ns->ns_lvbo->lvbo_free(res);
1307                 if (res->lr_itree != NULL)
1308                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1309                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1310                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1311                 return 1;
1312         }
1313         return 0;
1314 }
1315 EXPORT_SYMBOL(ldlm_resource_putref);
1316
1317 /**
1318  * Add a lock into a given resource into specified lock list.
1319  */
1320 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1321                             struct ldlm_lock *lock)
1322 {
1323         check_res_locked(res);
1324
1325         LDLM_DEBUG(lock, "About to add this lock:\n");
1326
1327         if (ldlm_is_destroyed(lock)) {
1328                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1329                 return;
1330         }
1331
1332         LASSERT(list_empty(&lock->l_res_link));
1333
1334         list_add_tail(&lock->l_res_link, head);
1335 }
1336
1337 /**
1338  * Insert a lock into resource after specified lock.
1339  *
1340  * Obtain resource description from the lock we are inserting after.
1341  */
1342 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1343                                      struct ldlm_lock *new)
1344 {
1345         struct ldlm_resource *res = original->l_resource;
1346
1347         check_res_locked(res);
1348
1349         ldlm_resource_dump(D_INFO, res);
1350         LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
1351
1352         if (ldlm_is_destroyed(new)) {
1353                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1354                 goto out;
1355         }
1356
1357         LASSERT(list_empty(&new->l_res_link));
1358
1359         list_add(&new->l_res_link, &original->l_res_link);
1360  out:;
1361 }
1362
1363 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1364 {
1365         int type = lock->l_resource->lr_type;
1366
1367         check_res_locked(lock->l_resource);
1368         if (type == LDLM_IBITS || type == LDLM_PLAIN)
1369                 ldlm_unlink_lock_skiplist(lock);
1370         else if (type == LDLM_EXTENT)
1371                 ldlm_extent_unlink_lock(lock);
1372         list_del_init(&lock->l_res_link);
1373 }
1374 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1375
1376 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1377 {
1378         desc->lr_type = res->lr_type;
1379         desc->lr_name = res->lr_name;
1380 }
1381
1382 /**
1383  * Print information about all locks in all namespaces on this node to debug
1384  * log.
1385  */
1386 void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
1387 {
1388         struct list_head *tmp;
1389
1390         if (!((libcfs_debug | D_ERROR) & level))
1391                 return;
1392
1393         mutex_lock(ldlm_namespace_lock(client));
1394
1395         list_for_each(tmp, ldlm_namespace_list(client)) {
1396                 struct ldlm_namespace *ns;
1397
1398                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1399                 ldlm_namespace_dump(level, ns);
1400         }
1401
1402         mutex_unlock(ldlm_namespace_lock(client));
1403 }
1404
1405 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1406                               struct hlist_node *hnode, void *arg)
1407 {
1408         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1409         int    level = (int)(unsigned long)arg;
1410
1411         lock_res(res);
1412         ldlm_resource_dump(level, res);
1413         unlock_res(res);
1414
1415         return 0;
1416 }
1417
1418 /**
1419  * Print information about all locks in this namespace on this node to debug
1420  * log.
1421  */
1422 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1423 {
1424         if (!((libcfs_debug | D_ERROR) & level))
1425                 return;
1426
1427         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1428                ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1429                ns_is_client(ns) ? "client" : "server");
1430
1431         if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
1432                 return;
1433
1434         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1435                                  ldlm_res_hash_dump,
1436                                  (void *)(unsigned long)level, 0);
1437         spin_lock(&ns->ns_lock);
1438         ns->ns_next_dump = cfs_time_shift(10);
1439         spin_unlock(&ns->ns_lock);
1440 }
1441
1442 /**
1443  * Print information about all locks in this resource to debug log.
1444  */
1445 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1446 {
1447         struct ldlm_lock *lock;
1448         unsigned int granted = 0;
1449
1450         CLASSERT(RES_NAME_SIZE == 4);
1451
1452         if (!((libcfs_debug | D_ERROR) & level))
1453                 return;
1454
1455         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1456                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1457
1458         if (!list_empty(&res->lr_granted)) {
1459                 CDEBUG(level, "Granted locks (in reverse order):\n");
1460                 list_for_each_entry_reverse(lock, &res->lr_granted,
1461                                                 l_res_link) {
1462                         LDLM_DEBUG_LIMIT(level, lock, "###");
1463                         if (!(level & D_CANTMASK) &&
1464                             ++granted > ldlm_dump_granted_max) {
1465                                 CDEBUG(level, "only dump %d granted locks to "
1466                                        "avoid DDOS.\n", granted);
1467                                 break;
1468                         }
1469                 }
1470         }
1471         if (!list_empty(&res->lr_converting)) {
1472                 CDEBUG(level, "Converting locks:\n");
1473                 list_for_each_entry(lock, &res->lr_converting, l_res_link)
1474                         LDLM_DEBUG_LIMIT(level, lock, "###");
1475         }
1476         if (!list_empty(&res->lr_waiting)) {
1477                 CDEBUG(level, "Waiting locks:\n");
1478                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1479                         LDLM_DEBUG_LIMIT(level, lock, "###");
1480         }
1481 }
1482 EXPORT_SYMBOL(ldlm_resource_dump);