LU-6142 lustre: unwrap some ldebugfs_register() calls
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_resource.c
33  *
34  * Author: Phil Schwan <phil@clusterfs.com>
35  * Author: Peter Braam <braam@clusterfs.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LDLM
39 #include <lustre_dlm.h>
40 #include <lustre_fid.h>
41 #include <obd_class.h>
42 #include "ldlm_internal.h"
43
44 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
45 struct kmem_cache *ldlm_interval_tree_slab;
46 struct kmem_cache *ldlm_inodebits_slab;
47
48 int ldlm_srv_namespace_nr = 0;
49 int ldlm_cli_namespace_nr = 0;
50
51 DEFINE_MUTEX(ldlm_srv_namespace_lock);
52 LIST_HEAD(ldlm_srv_namespace_list);
53
54 DEFINE_MUTEX(ldlm_cli_namespace_lock);
55 /* Client namespaces that have active resources in them.
56  * Once all resources go away, ldlm_poold moves such namespaces to the
57  * inactive list. */
58 LIST_HEAD(ldlm_cli_active_namespace_list);
59 /* Client namespaces that don't have any locks in them */
60 LIST_HEAD(ldlm_cli_inactive_namespace_list);
61
62 static struct dentry *ldlm_debugfs_dir;
63 static struct dentry *ldlm_ns_debugfs_dir;
64 struct dentry *ldlm_svc_debugfs_dir;
65
66 /* Cap the number of granted locks dumped for a single resource during a
67  * debug dump, to keep one huge resource from flooding the log (DoS). */
68 static unsigned int ldlm_dump_granted_max = 256;
69
70 static ssize_t ldebugfs_dump_ns_seq_write(struct file *file,
71                                           const char __user *buffer,
72                                           size_t count, loff_t *off)
73 {
74         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
75         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
76         RETURN(count);
77 }
78
79 LDEBUGFS_FOPS_WR_ONLY(ldlm, dump_ns);
80
81 static int ldlm_rw_uint_seq_show(struct seq_file *m, void *v)
82 {
83         seq_printf(m, "%u\n", *(unsigned int *)m->private);
84         return 0;
85 }
86
87 static ssize_t
88 ldlm_rw_uint_seq_write(struct file *file, const char __user *buffer,
89                        size_t count, loff_t *off)
90 {
91         struct seq_file *seq = file->private_data;
92
93         if (!count)
94                 return 0;
95
96         return kstrtouint_from_user(buffer, count, 0,
97                                     (unsigned int *)seq->private);
98 }
99
100 LDEBUGFS_SEQ_FOPS(ldlm_rw_uint);
101
102 #ifdef HAVE_SERVER_SUPPORT
103
104 static int seq_watermark_show(struct seq_file *m, void *data)
105 {
106         seq_printf(m, "%llu\n", *(__u64 *)m->private);
107         return 0;
108 }
109
110 static ssize_t seq_watermark_write(struct file *file,
111                                    const char __user *buffer, size_t count,
112                                    loff_t *off)
113 {
114         struct seq_file *m = file->private_data;
115         u64 value;
116         __u64 watermark;
117         __u64 *data = m->private;
118         bool wm_low = data == &ldlm_reclaim_threshold_mb;
119         char kernbuf[22] = "";
120         int rc;
121
122         if (count >= sizeof(kernbuf))
123                 return -EINVAL;
124
125         if (copy_from_user(kernbuf, buffer, count))
126                 return -EFAULT;
127         kernbuf[count] = 0;
128
129         rc = sysfs_memparse(kernbuf, count, &value, "MiB");
130         if (rc < 0) {
131                 CERROR("Failed to set %s, rc = %d.\n",
132                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb",
133                        rc);
134                 return rc;
135         } else if (value != 0 && value < (1 << 20)) {
136                 CERROR("%s must be at least 1MB.\n",
137                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb");
138                 return -EINVAL;
139         }
140         watermark = value >> 20;
141
142         if (wm_low) {
143                 if (ldlm_lock_limit_mb != 0 && watermark > ldlm_lock_limit_mb) {
144                         CERROR("lock_reclaim_threshold_mb must be smaller than "
145                                "lock_limit_mb.\n");
146                         return -EINVAL;
147                 }
148
149                 *data = watermark;
150                 if (watermark != 0) {
151                         watermark <<= 20;
152                         do_div(watermark, sizeof(struct ldlm_lock));
153                 }
154                 ldlm_reclaim_threshold = watermark;
155         } else {
156                 if (ldlm_reclaim_threshold_mb != 0 &&
157                     watermark < ldlm_reclaim_threshold_mb) {
158                         CERROR("lock_limit_mb must be greater than "
159                                "lock_reclaim_threshold_mb.\n");
160                         return -EINVAL;
161                 }
162
163                 *data = watermark;
164                 if (watermark != 0) {
165                         watermark <<= 20;
166                         do_div(watermark, sizeof(struct ldlm_lock));
167                 }
168                 ldlm_lock_limit = watermark;
169         }
170
171         return count;
172 }
173
174 static int seq_watermark_open(struct inode *inode, struct file *file)
175 {
176         return single_open(file, seq_watermark_show, inode->i_private);
177 }
178
179 static const struct file_operations ldlm_watermark_fops = {
180         .owner          = THIS_MODULE,
181         .open           = seq_watermark_open,
182         .read           = seq_read,
183         .write          = seq_watermark_write,
184         .llseek         = seq_lseek,
185         .release        = lprocfs_single_release,
186 };
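/*
 * Note on the two watermark files below ("lock_reclaim_threshold_mb" and
 * "lock_limit_mb"): the written value is parsed as a size with a MiB default
 * unit and stored in MB in *data, then converted to a lock count
 * (bytes / sizeof(struct ldlm_lock)) kept in ldlm_reclaim_threshold or
 * ldlm_lock_limit. Rough sketch, assuming a 1024-byte struct ldlm_lock purely
 * for illustration:
 *
 *      echo 100M > lock_limit_mb
 *              ldlm_lock_limit_mb = 100
 *              ldlm_lock_limit    = (100 << 20) / 1024 = 102400 locks
 *
 * Writing 0 clears the corresponding limit.
 */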
187
188 static int seq_granted_show(struct seq_file *m, void *data)
189 {
190         seq_printf(m, "%llu\n", percpu_counter_sum_positive(
191                    (struct percpu_counter *)m->private));
192         return 0;
193 }
194
195 static int seq_granted_open(struct inode *inode, struct file *file)
196 {
197         return single_open(file, seq_granted_show, inode->i_private);
198 }
199
200 static const struct file_operations ldlm_granted_fops = {
201         .owner  = THIS_MODULE,
202         .open   = seq_granted_open,
203         .read   = seq_read,
204         .llseek = seq_lseek,
205         .release = seq_release,
206 };
207
208 #endif /* HAVE_SERVER_SUPPORT */
209
210 static struct lprocfs_vars ldlm_debugfs_list[] = {
211         { .name =       "dump_namespaces",
212           .fops =       &ldlm_dump_ns_fops,
213           .proc_mode =  0222 },
214         { .name =       "dump_granted_max",
215           .fops =       &ldlm_rw_uint_fops,
216           .data =       &ldlm_dump_granted_max },
217 #ifdef HAVE_SERVER_SUPPORT
218         { .name =       "lock_reclaim_threshold_mb",
219           .fops =       &ldlm_watermark_fops,
220           .data =       &ldlm_reclaim_threshold_mb },
221         { .name =       "lock_limit_mb",
222           .fops =       &ldlm_watermark_fops,
223           .data =       &ldlm_lock_limit_mb },
224         { .name =       "lock_granted_count",
225           .fops =       &ldlm_granted_fops,
226           .data =       &ldlm_granted_total },
227 #endif
228         { NULL }
229 };
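/*
 * The table above is hooked up by ldlm_debugfs_setup() below. Assuming
 * OBD_LDLM_DEVICENAME is "ldlm" and debugfs is mounted in the usual place,
 * this results in roughly the following control files:
 *
 *      /sys/kernel/debug/lustre/ldlm/dump_namespaces      (write-only, 0222)
 *      /sys/kernel/debug/lustre/ldlm/dump_granted_max
 *      /sys/kernel/debug/lustre/ldlm/lock_reclaim_threshold_mb  (server only)
 *      /sys/kernel/debug/lustre/ldlm/lock_limit_mb              (server only)
 *      /sys/kernel/debug/lustre/ldlm/lock_granted_count         (server only)
 *
 * Writing anything to dump_namespaces dumps all server and client namespaces
 * to the Lustre debug log at the D_DLMTRACE level.
 */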
230
231 int ldlm_debugfs_setup(void)
232 {
233         int rc;
234
235         ENTRY;
236         ldlm_debugfs_dir = debugfs_create_dir(OBD_LDLM_DEVICENAME,
237                                              debugfs_lustre_root);
238         ldlm_ns_debugfs_dir = debugfs_create_dir("namespaces",
239                                                  ldlm_debugfs_dir);
240         ldlm_svc_debugfs_dir = debugfs_create_dir("services",
241                                                   ldlm_debugfs_dir);
242
243         rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
244         if (rc != 0) {
245                 CERROR("debugfs setup failed in ldlm-init: rc = %d\n", rc);
246                 GOTO(err, rc);
247         }
248
249         RETURN(0);
250
251 err:
252         debugfs_remove_recursive(ldlm_debugfs_dir);
253         ldlm_svc_debugfs_dir = NULL;
254         ldlm_ns_debugfs_dir = NULL;
255         ldlm_debugfs_dir = NULL;
256         RETURN(rc);
257 }
258
259 void ldlm_debugfs_cleanup(void)
260 {
261         debugfs_remove_recursive(ldlm_debugfs_dir);
262
263         ldlm_svc_debugfs_dir = NULL;
264         ldlm_ns_debugfs_dir = NULL;
265         ldlm_debugfs_dir = NULL;
266 }
267
268 static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr,
269                                    char *buf)
270 {
271         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
272                                                  ns_kobj);
273         __u64                   res = 0;
274         int                     i;
275
276         /* the result is not strictly consistent */
277         for (i = 0; i < (1 << ns->ns_bucket_bits); i++)
278                 res += atomic_read(&ns->ns_rs_buckets[i].nsb_count);
279         return sprintf(buf, "%lld\n", res);
280 }
281 LUSTRE_RO_ATTR(resource_count);
282
283 static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr,
284                                char *buf)
285 {
286         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
287                                                  ns_kobj);
288         __u64                   locks;
289
290         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
291                                         LPROCFS_FIELDS_FLAGS_SUM);
292         return sprintf(buf, "%lld\n", locks);
293 }
294 LUSTRE_RO_ATTR(lock_count);
295
296 static ssize_t lock_unused_count_show(struct kobject *kobj,
297                                       struct attribute *attr,
298                                       char *buf)
299 {
300         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
301                                                  ns_kobj);
302
303         return sprintf(buf, "%d\n", ns->ns_nr_unused);
304 }
305 LUSTRE_RO_ATTR(lock_unused_count);
306
307 static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr,
308                              char *buf)
309 {
310         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
311                                                  ns_kobj);
312         __u32 *nr = &ns->ns_max_unused;
313
314         if (ns_connect_lru_resize(ns))
315                 nr = &ns->ns_nr_unused;
316         return sprintf(buf, "%u\n", *nr);
317 }
318
319 static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
320                               const char *buffer, size_t count)
321 {
322         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
323                                                  ns_kobj);
324         unsigned long tmp;
325         int lru_resize;
326         int err;
327
328         if (strncmp(buffer, "clear", 5) == 0) {
329                 CDEBUG(D_DLMTRACE,
330                        "dropping all unused locks from namespace %s\n",
331                        ldlm_ns_name(ns));
332                 if (ns_connect_lru_resize(ns)) {
333                         /* Try to cancel all @ns_nr_unused locks. */
334                         ldlm_cancel_lru(ns, ns->ns_nr_unused, 0,
335                                         LDLM_LRU_FLAG_PASSED |
336                                         LDLM_LRU_FLAG_CLEANUP);
337                 } else {
338                         tmp = ns->ns_max_unused;
339                         ns->ns_max_unused = 0;
340                         ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED |
341                                         LDLM_LRU_FLAG_CLEANUP);
342                         ns->ns_max_unused = tmp;
343                 }
344                 return count;
345         }
346
347         err = kstrtoul(buffer, 10, &tmp);
348         if (err != 0) {
349                 CERROR("lru_size: invalid value written\n");
350                 return -EINVAL;
351         }
352         lru_resize = (tmp == 0);
353
354         if (ns_connect_lru_resize(ns)) {
355                 if (!lru_resize)
356                         ns->ns_max_unused = (unsigned int)tmp;
357
358                 if (tmp > ns->ns_nr_unused)
359                         tmp = ns->ns_nr_unused;
360                 tmp = ns->ns_nr_unused - tmp;
361
362                 CDEBUG(D_DLMTRACE,
363                        "changing namespace %s unused locks from %u to %u\n",
364                        ldlm_ns_name(ns), ns->ns_nr_unused,
365                        (unsigned int)tmp);
366                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
367
368                 if (!lru_resize) {
369                         CDEBUG(D_DLMTRACE,
370                                "disable lru_resize for namespace %s\n",
371                                ldlm_ns_name(ns));
372                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
373                 }
374         } else {
375                 CDEBUG(D_DLMTRACE,
376                        "changing namespace %s max_unused from %u to %u\n",
377                        ldlm_ns_name(ns), ns->ns_max_unused,
378                        (unsigned int)tmp);
379                 ns->ns_max_unused = (unsigned int)tmp;
380                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
381
382                 /* Make sure that LRU resize was originally supported before
383                  * turning it on here.
384                  */
385                 if (lru_resize &&
386                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
387                         CDEBUG(D_DLMTRACE,
388                                "enable lru_resize for namespace %s\n",
389                                ldlm_ns_name(ns));
390                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
391                 }
392         }
393
394         return count;
395 }
396 LUSTRE_RW_ATTR(lru_size);
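/*
 * Sketch of how lru_size is typically driven from user space (the sysfs path
 * layout is assumed from the "namespaces" comment further below):
 *
 *      # drop every unused lock in this namespace right away
 *      echo clear > /sys/fs/lustre/ldlm/namespaces/<name>/lru_size
 *
 *      # pin the LRU to at most 400 unused locks and disable lru_resize
 *      echo 400 > /sys/fs/lustre/ldlm/namespaces/<name>/lru_size
 *
 *      # back to dynamic LRU resize, if the server originally granted it
 *      echo 0 > /sys/fs/lustre/ldlm/namespaces/<name>/lru_size
 */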
397
398 static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
399                                 char *buf)
400 {
401         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
402                                                  ns_kobj);
403
404         return sprintf(buf, "%lld\n", ktime_to_ms(ns->ns_max_age));
405 }
406
407 static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr,
408                                  const char *buffer, size_t count)
409 {
410         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
411                                                  ns_kobj);
412         int scale = NSEC_PER_MSEC;
413         unsigned long long tmp;
414         char *buf;
415
416         /* Did the user ask in seconds or milliseconds? Default is ms. */
417         buf = strstr(buffer, "ms");
418         if (!buf) {
419                 buf = strchr(buffer, 's');
420                 if (buf)
421                         scale = NSEC_PER_SEC;
422         }
423
424         if (buf)
425                 *buf = '\0';
426
427         if (kstrtoull(buffer, 10, &tmp))
428                 return -EINVAL;
429
430         ns->ns_max_age = ktime_set(0, tmp * scale);
431
432         return count;
433 }
434 LUSTRE_RW_ATTR(lru_max_age);
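/*
 * lru_max_age accepts a bare number of milliseconds or a value suffixed with
 * "ms" or "s" (see lru_max_age_store() above); reads always report
 * milliseconds. For example "3900000", "3900000ms" and "3900s" all set the
 * same 65-minute limit.
 */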
435
436 static ssize_t early_lock_cancel_show(struct kobject *kobj,
437                                       struct attribute *attr,
438                                       char *buf)
439 {
440         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
441                                                  ns_kobj);
442
443         return sprintf(buf, "%d\n", ns_connect_cancelset(ns));
444 }
445
446 static ssize_t early_lock_cancel_store(struct kobject *kobj,
447                                        struct attribute *attr,
448                                        const char *buffer,
449                                        size_t count)
450 {
451         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
452                                                  ns_kobj);
453         unsigned long supp = -1;
454         int rc;
455
456         rc = kstrtoul(buffer, 10, &supp);
457         if (rc < 0)
458                 return rc;
459
460         if (supp == 0)
461                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
462         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
463                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
464         return count;
465 }
466 LUSTRE_RW_ATTR(early_lock_cancel);
467
468 static ssize_t dirty_age_limit_show(struct kobject *kobj,
469                                     struct attribute *attr, char *buf)
470 {
471         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
472                                                  ns_kobj);
473
474         return sprintf(buf, "%llu\n", ns->ns_dirty_age_limit);
475 }
476
477 static ssize_t dirty_age_limit_store(struct kobject *kobj,
478                                      struct attribute *attr,
479                                      const char *buffer, size_t count)
480 {
481         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
482                                                  ns_kobj);
483         unsigned long long tmp;
484
485         if (kstrtoull(buffer, 10, &tmp))
486                 return -EINVAL;
487
488         ns->ns_dirty_age_limit = tmp;
489
490         return count;
491 }
492 LUSTRE_RW_ATTR(dirty_age_limit);
493
494 #ifdef HAVE_SERVER_SUPPORT
495 static ssize_t ctime_age_limit_show(struct kobject *kobj,
496                                     struct attribute *attr, char *buf)
497 {
498         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
499                                                  ns_kobj);
500
501         return sprintf(buf, "%llu\n", ns->ns_ctime_age_limit);
502 }
503
504 static ssize_t ctime_age_limit_store(struct kobject *kobj,
505                                      struct attribute *attr,
506                                      const char *buffer, size_t count)
507 {
508         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
509                                                  ns_kobj);
510         unsigned long long tmp;
511
512         if (kstrtoull(buffer, 10, &tmp))
513                 return -EINVAL;
514
515         ns->ns_ctime_age_limit = tmp;
516
517         return count;
518 }
519 LUSTRE_RW_ATTR(ctime_age_limit);
520
521 static ssize_t lock_timeouts_show(struct kobject *kobj, struct attribute *attr,
522                                   char *buf)
523 {
524         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
525                                                  ns_kobj);
526
527         return sprintf(buf, "%d\n", ns->ns_timeouts);
528 }
529 LUSTRE_RO_ATTR(lock_timeouts);
530
531 static ssize_t max_nolock_bytes_show(struct kobject *kobj,
532                                      struct attribute *attr, char *buf)
533 {
534         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
535                                                  ns_kobj);
536
537         return sprintf(buf, "%u\n", ns->ns_max_nolock_size);
538 }
539
540 static ssize_t max_nolock_bytes_store(struct kobject *kobj,
541                                       struct attribute *attr,
542                                       const char *buffer, size_t count)
543 {
544         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
545                                                  ns_kobj);
546         unsigned long tmp;
547         int err;
548
549         err = kstrtoul(buffer, 10, &tmp);
550         if (err != 0)
551                 return -EINVAL;
552
553         ns->ns_max_nolock_size = tmp;
554
555         return count;
556 }
557 LUSTRE_RW_ATTR(max_nolock_bytes);
558
559 static ssize_t contention_seconds_show(struct kobject *kobj,
560                                        struct attribute *attr, char *buf)
561 {
562         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
563                                                  ns_kobj);
564
565         return sprintf(buf, "%llu\n", ns->ns_contention_time);
566 }
567
568 static ssize_t contention_seconds_store(struct kobject *kobj,
569                                         struct attribute *attr,
570                                         const char *buffer, size_t count)
571 {
572         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
573                                                  ns_kobj);
574         unsigned long long tmp;
575
576         if (kstrtoull(buffer, 10, &tmp))
577                 return -EINVAL;
578
579         ns->ns_contention_time = tmp;
580
581         return count;
582 }
583 LUSTRE_RW_ATTR(contention_seconds);
584
585 static ssize_t contended_locks_show(struct kobject *kobj,
586                                     struct attribute *attr, char *buf)
587 {
588         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
589                                                  ns_kobj);
590
591         return sprintf(buf, "%u\n", ns->ns_contended_locks);
592 }
593
594 static ssize_t contended_locks_store(struct kobject *kobj,
595                                      struct attribute *attr,
596                                      const char *buffer, size_t count)
597 {
598         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
599                                                  ns_kobj);
600         unsigned long tmp;
601         int err;
602
603         err = kstrtoul(buffer, 10, &tmp);
604         if (err != 0)
605                 return -EINVAL;
606
607         ns->ns_contended_locks = tmp;
608
609         return count;
610 }
611 LUSTRE_RW_ATTR(contended_locks);
612
613 static ssize_t max_parallel_ast_show(struct kobject *kobj,
614                                      struct attribute *attr, char *buf)
615 {
616         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
617                                                  ns_kobj);
618
619         return sprintf(buf, "%u\n", ns->ns_max_parallel_ast);
620 }
621
622 static ssize_t max_parallel_ast_store(struct kobject *kobj,
623                                       struct attribute *attr,
624                                       const char *buffer, size_t count)
625 {
626         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
627                                                  ns_kobj);
628         unsigned long tmp;
629         int err;
630
631         err = kstrtoul(buffer, 10, &tmp);
632         if (err != 0)
633                 return -EINVAL;
634
635         ns->ns_max_parallel_ast = tmp;
636
637         return count;
638 }
639 LUSTRE_RW_ATTR(max_parallel_ast);
640
641 #endif /* HAVE_SERVER_SUPPORT */
642
643 /* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */
644 static struct attribute *ldlm_ns_attrs[] = {
645         &lustre_attr_resource_count.attr,
646         &lustre_attr_lock_count.attr,
647         &lustre_attr_lock_unused_count.attr,
648         &lustre_attr_lru_size.attr,
649         &lustre_attr_lru_max_age.attr,
650         &lustre_attr_early_lock_cancel.attr,
651         &lustre_attr_dirty_age_limit.attr,
652 #ifdef HAVE_SERVER_SUPPORT
653         &lustre_attr_ctime_age_limit.attr,
654         &lustre_attr_lock_timeouts.attr,
655         &lustre_attr_max_nolock_bytes.attr,
656         &lustre_attr_contention_seconds.attr,
657         &lustre_attr_contended_locks.attr,
658         &lustre_attr_max_parallel_ast.attr,
659 #endif
660         NULL,
661 };
662
663 static void ldlm_ns_release(struct kobject *kobj)
664 {
665         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
666                                                  ns_kobj);
667         complete(&ns->ns_kobj_unregister);
668 }
669
670 static struct kobj_type ldlm_ns_ktype = {
671         .default_attrs  = ldlm_ns_attrs,
672         .sysfs_ops      = &lustre_sysfs_ops,
673         .release        = ldlm_ns_release,
674 };
675
676 static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns)
677 {
678         if (IS_ERR_OR_NULL(ns->ns_debugfs_entry))
679                 CERROR("dlm namespace %s has no debugfs dir?\n",
680                        ldlm_ns_name(ns));
681         else
682                 debugfs_remove_recursive(ns->ns_debugfs_entry);
683
684         if (ns->ns_stats != NULL)
685                 lprocfs_free_stats(&ns->ns_stats);
686 }
687
688 void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns)
689 {
690         kobject_put(&ns->ns_kobj);
691         wait_for_completion(&ns->ns_kobj_unregister);
692 }
693
694 int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
695 {
696         int err;
697
698         ns->ns_kobj.kset = ldlm_ns_kset;
699         init_completion(&ns->ns_kobj_unregister);
700         err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL,
701                                    "%s", ldlm_ns_name(ns));
702
703         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
704         if (!ns->ns_stats) {
705                 kobject_put(&ns->ns_kobj);
706                 return -ENOMEM;
707         }
708
709         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
710                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
711
712         return err;
713 }
714
715 static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
716 {
717         struct dentry *ns_entry;
718
719         if (!IS_ERR_OR_NULL(ns->ns_debugfs_entry)) {
720                 ns_entry = ns->ns_debugfs_entry;
721         } else {
722                 ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
723                                               ldlm_ns_debugfs_dir);
724                 if (!ns_entry)
725                         return -ENOMEM;
726                 ns->ns_debugfs_entry = ns_entry;
727         }
728
729         return 0;
730 }
731 #undef MAX_STRING_SIZE
732
733 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
734                                   const void *key, unsigned int mask)
735 {
736         const struct ldlm_res_id *id = key;
737         unsigned int val = 0;
738         unsigned int i;
739
740         for (i = 0; i < RES_NAME_SIZE; i++)
741                 val += id->name[i];
742         return val & mask;
743 }
744
745 static unsigned int ldlm_res_hop_fid_hash(const struct ldlm_res_id *id, unsigned int bits)
746 {
747         struct lu_fid       fid;
748         __u32               hash;
749         __u32               val;
750
751         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
752         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
753         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
754
755         hash = fid_flatten32(&fid);
756         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
757         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
758                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
759         } else {
760                 val = fid_oid(&fid);
761         }
762         hash += (val >> 5) + (val << 11);
763         return cfs_hash_32(hash, bits);
764 }
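/*
 * ldlm_res_hop_fid_hash() expects a FID-based resource name:
 * name[LUSTRE_RES_ID_SEQ_OFF] carries the sequence, and
 * name[LUSTRE_RES_ID_VER_OID_OFF] packs the OID in the low 32 bits and the
 * version in the high 32 bits. The flattened FID is mixed with
 * name[LUSTRE_RES_ID_HSH_OFF] (or with the OID again when that word is zero)
 * and folded down to @bits bits to select a namespace bucket.
 */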
765
766 static void *ldlm_res_hop_key(struct hlist_node *hnode)
767 {
768         struct ldlm_resource   *res;
769
770         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
771         return &res->lr_name;
772 }
773
774 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
775 {
776         struct ldlm_resource   *res;
777
778         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
779         return ldlm_res_eq((const struct ldlm_res_id *)key,
780                            (const struct ldlm_res_id *)&res->lr_name);
781 }
782
783 static void *ldlm_res_hop_object(struct hlist_node *hnode)
784 {
785         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
786 }
787
788 static void
789 ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
790 {
791         struct ldlm_resource *res;
792
793         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
794         ldlm_resource_getref(res);
795 }
796
797 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
798 {
799         struct ldlm_resource *res;
800
801         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
802         ldlm_resource_putref(res);
803 }
804
805 static struct cfs_hash_ops ldlm_ns_hash_ops = {
806         .hs_hash        = ldlm_res_hop_hash,
807         .hs_key         = ldlm_res_hop_key,
808         .hs_keycmp      = ldlm_res_hop_keycmp,
809         .hs_keycpy      = NULL,
810         .hs_object      = ldlm_res_hop_object,
811         .hs_get         = ldlm_res_hop_get_locked,
812         .hs_put         = ldlm_res_hop_put
813 };
814
815 static struct {
816         /** hash bucket bits */
817         unsigned                nsd_bkt_bits;
818         /** hash bits */
819         unsigned                nsd_all_bits;
820 } ldlm_ns_hash_defs[] = {
821         [LDLM_NS_TYPE_MDC] = {
822                 .nsd_bkt_bits   = 11,
823                 .nsd_all_bits   = 16,
824         },
825         [LDLM_NS_TYPE_MDT] = {
826                 .nsd_bkt_bits   = 14,
827                 .nsd_all_bits   = 21,
828         },
829         [LDLM_NS_TYPE_OSC] = {
830                 .nsd_bkt_bits   = 8,
831                 .nsd_all_bits   = 12,
832         },
833         [LDLM_NS_TYPE_OST] = {
834                 .nsd_bkt_bits   = 11,
835                 .nsd_all_bits   = 17,
836         },
837         [LDLM_NS_TYPE_MGC] = {
838                 .nsd_bkt_bits   = 3,
839                 .nsd_all_bits   = 4,
840         },
841         [LDLM_NS_TYPE_MGT] = {
842                 .nsd_bkt_bits   = 3,
843                 .nsd_all_bits   = 4,
844         },
845 };
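/*
 * Worked example: for LDLM_NS_TYPE_MDC the table gives nsd_all_bits = 16 and
 * nsd_bkt_bits = 11, so ldlm_namespace_new() below creates the resource hash
 * with 16 hash bits and ns_bucket_bits = 16 - 11 = 5, i.e. 32 struct
 * ldlm_ns_bucket slots, each with its own AT estimate and resource counter.
 */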
846
847 /**
848  * Create and initialize new empty namespace.
849  */
850 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
851                                           enum ldlm_side client,
852                                           enum ldlm_appetite apt,
853                                           enum ldlm_ns_type ns_type)
854 {
855         struct ldlm_namespace *ns = NULL;
856         int idx;
857         int rc;
858
859         ENTRY;
860         LASSERT(obd != NULL);
861
862         rc = ldlm_get_ref();
863         if (rc) {
864                 CERROR("ldlm_get_ref failed: %d\n", rc);
865                 RETURN(NULL);
866         }
867
868         if (ns_type >= ARRAY_SIZE(ldlm_ns_hash_defs) ||
869             ldlm_ns_hash_defs[ns_type].nsd_bkt_bits == 0) {
870                 CERROR("Unknown type %d for ns %s\n", ns_type, name);
871                 GOTO(out_ref, NULL);
872         }
873
874         OBD_ALLOC_PTR(ns);
875         if (!ns)
876                 GOTO(out_ref, NULL);
877
878         ns->ns_rs_hash = cfs_hash_create(name,
879                                          ldlm_ns_hash_defs[ns_type].nsd_all_bits,
880                                          ldlm_ns_hash_defs[ns_type].nsd_all_bits,
881                                          ldlm_ns_hash_defs[ns_type].nsd_bkt_bits,
882                                          0,
883                                          CFS_HASH_MIN_THETA,
884                                          CFS_HASH_MAX_THETA,
885                                          &ldlm_ns_hash_ops,
886                                          CFS_HASH_DEPTH |
887                                          CFS_HASH_BIGNAME |
888                                          CFS_HASH_SPIN_BKTLOCK |
889                                          CFS_HASH_NO_ITEMREF);
890         if (ns->ns_rs_hash == NULL)
891                 GOTO(out_ns, NULL);
892
893         ns->ns_bucket_bits = ldlm_ns_hash_defs[ns_type].nsd_all_bits -
894                              ldlm_ns_hash_defs[ns_type].nsd_bkt_bits;
895
896         OBD_ALLOC_LARGE(ns->ns_rs_buckets,
897                         BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
898         if (!ns->ns_rs_buckets)
899                 goto out_hash;
900
901         for (idx = 0; idx < (1 << ns->ns_bucket_bits); idx++) {
902                 struct ldlm_ns_bucket *nsb = &ns->ns_rs_buckets[idx];
903
904                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
905                 nsb->nsb_namespace = ns;
906                 nsb->nsb_reclaim_start = 0;
907                 atomic_set(&nsb->nsb_count, 0);
908         }
909
910         ns->ns_obd = obd;
911         ns->ns_appetite = apt;
912         ns->ns_client = client;
913         ns->ns_name = kstrdup(name, GFP_KERNEL);
914         if (!ns->ns_name)
915                 goto out_hash;
916
917         INIT_LIST_HEAD(&ns->ns_list_chain);
918         INIT_LIST_HEAD(&ns->ns_unused_list);
919         spin_lock_init(&ns->ns_lock);
920         atomic_set(&ns->ns_bref, 0);
921         init_waitqueue_head(&ns->ns_waitq);
922
923         ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
924         ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
925         ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
926
927         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
928         ns->ns_nr_unused          = 0;
929         ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
930         ns->ns_max_age            = ktime_set(LDLM_DEFAULT_MAX_ALIVE, 0);
931         ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
932         ns->ns_dirty_age_limit    = LDLM_DIRTY_AGE_LIMIT;
933         ns->ns_timeouts           = 0;
934         ns->ns_orig_connect_flags = 0;
935         ns->ns_connect_flags      = 0;
936         ns->ns_stopping           = 0;
937         ns->ns_reclaim_start      = 0;
938         ns->ns_last_pos           = &ns->ns_unused_list;
939
940         rc = ldlm_namespace_sysfs_register(ns);
941         if (rc) {
942                 CERROR("Can't initialize ns sysfs, rc %d\n", rc);
943                 GOTO(out_hash, rc);
944         }
945
946         rc = ldlm_namespace_debugfs_register(ns);
947         if (rc) {
948                 CERROR("Can't initialize ns debugfs, rc %d\n", rc);
949                 GOTO(out_sysfs, rc);
950         }
951
952         idx = ldlm_namespace_nr_read(client);
953         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
954         if (rc) {
955                 CERROR("Can't initialize lock pool, rc %d\n", rc);
956                 GOTO(out_proc, rc);
957         }
958
959         ldlm_namespace_register(ns, client);
960         RETURN(ns);
961 out_proc:
962         ldlm_namespace_debugfs_unregister(ns);
963 out_sysfs:
964         ldlm_namespace_sysfs_unregister(ns);
965         ldlm_namespace_cleanup(ns, 0);
966 out_hash:
967         OBD_FREE_LARGE(ns->ns_rs_buckets,
968                        BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
969         kfree(ns->ns_name);
970         cfs_hash_putref(ns->ns_rs_hash);
971 out_ns:
972         OBD_FREE_PTR(ns);
973 out_ref:
974         ldlm_put_ref();
975         RETURN(NULL);
976 }
977 EXPORT_SYMBOL(ldlm_namespace_new);
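/*
 * Rough usage sketch. The namespace name and the LDLM_NAMESPACE_GREEDY
 * appetite are illustrative assumptions; real callers derive both from the
 * obd device being set up:
 *
 *      struct ldlm_namespace *ns;
 *
 *      ns = ldlm_namespace_new(obd, "example-osc", LDLM_NAMESPACE_CLIENT,
 *                              LDLM_NAMESPACE_GREEDY, LDLM_NS_TYPE_OSC);
 *      if (ns == NULL)
 *              return -ENOMEM;
 *      ...
 *      ldlm_namespace_free(ns, imp, 0);
 */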
978
979 /**
980  * Cancel and destroy all locks on a resource.
981  *
982  * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
983  * clean up.  This is currently only used for recovery, and we make
984  * certain assumptions as a result--notably, that we shouldn't cancel
985  * locks with refs.
986  */
987 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
988                              __u64 flags)
989 {
990         struct list_head *tmp;
991         int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
992         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
993
994         do {
995                 struct ldlm_lock *lock = NULL;
996
997                 /* First, look for a lock that has not been cleaned yet;
998                  * all cleaned locks are marked with the CLEANED flag. */
999                 lock_res(res);
1000                 list_for_each(tmp, q) {
1001                         lock = list_entry(tmp, struct ldlm_lock,
1002                                           l_res_link);
1003                         if (ldlm_is_cleaned(lock)) {
1004                                 lock = NULL;
1005                                 continue;
1006                         }
1007                         LDLM_LOCK_GET(lock);
1008                         ldlm_set_cleaned(lock);
1009                         break;
1010                 }
1011
1012                 if (lock == NULL) {
1013                         unlock_res(res);
1014                         break;
1015                 }
1016
1017                 /* Set CBPENDING so nothing in the cancellation path
1018                  * can match this lock. */
1019                 ldlm_set_cbpending(lock);
1020                 ldlm_set_failed(lock);
1021                 lock->l_flags |= flags;
1022
1023                 /* ... without sending a CANCEL message for local_only. */
1024                 if (local_only)
1025                         ldlm_set_local_only(lock);
1026
1027                 if (local_only && (lock->l_readers || lock->l_writers)) {
1028                         /*
1029                          * This is a little bit gross, but much better than the
1030                          * alternative: pretend that we got a blocking AST from
1031                          * the server, so that when the lock is decref'd, it
1032                          * will go away ...
1033                          */
1034                         unlock_res(res);
1035                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
1036                         if (lock->l_flags & LDLM_FL_FAIL_LOC)
1037                                 schedule_timeout_uninterruptible(
1038                                         cfs_time_seconds(4));
1039
1040                         if (lock->l_completion_ast)
1041                                 lock->l_completion_ast(lock,
1042                                                        LDLM_FL_FAILED, NULL);
1043                         LDLM_LOCK_RELEASE(lock);
1044                         continue;
1045                 }
1046
1047                 if (client) {
1048                         struct lustre_handle lockh;
1049
1050                         unlock_res(res);
1051                         ldlm_lock2handle(lock, &lockh);
1052                         rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
1053                         if (rc)
1054                                 CERROR("ldlm_cli_cancel: %d\n", rc);
1055                 } else {
1056                         unlock_res(res);
1057                         LDLM_DEBUG(lock,
1058                                    "Freeing a lock still held by a client node");
1059                         ldlm_lock_cancel(lock);
1060                 }
1061                 LDLM_LOCK_RELEASE(lock);
1062         } while (1);
1063 }
1064
1065 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1066                                struct hlist_node *hnode, void *arg)
1067 {
1068         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1069         __u64 flags = *(__u64 *)arg;
1070
1071         cleanup_resource(res, &res->lr_granted, flags);
1072         cleanup_resource(res, &res->lr_waiting, flags);
1073
1074         return 0;
1075 }
1076
1077 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1078                                   struct hlist_node *hnode, void *arg)
1079 {
1080         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
1081
1082         lock_res(res);
1083         CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
1084                "(%d) after lock cleanup; forcing cleanup.\n",
1085                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
1086                atomic_read(&res->lr_refcount) - 1);
1087
1088         /* Use D_NETERROR since it is in the default mask */
1089         ldlm_resource_dump(D_NETERROR, res);
1090         unlock_res(res);
1091         return 0;
1092 }
1093
1094 /**
1095  * Cancel and destroy all locks in the namespace.
1096  *
1097  * Typically used during eviction, when the server has notified the client
1098  * that it was evicted and all of its state needs to be destroyed.
1099  * Also used during shutdown.
1100  */
1101 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
1102 {
1103         if (ns == NULL) {
1104                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
1105                 return ELDLM_OK;
1106         }
1107
1108         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
1109                                  &flags, 0);
1110         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
1111                                  NULL, 0);
1112         return ELDLM_OK;
1113 }
1114 EXPORT_SYMBOL(ldlm_namespace_cleanup);
1115
1116 /**
1117  * Attempts to free namespace.
1118  *
1119  * Only used when namespace goes away, like during an unmount.
1120  */
1121 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
1122 {
1123         ENTRY;
1124
1125         /* At shutdown time, don't call the cancellation callback */
1126         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
1127
1128         if (atomic_read(&ns->ns_bref) > 0) {
1129                 int rc;
1130                 CDEBUG(D_DLMTRACE,
1131                        "dlm namespace %s free waiting on refcount %d\n",
1132                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
1133 force_wait:
1134                 if (force)
1135                         rc = wait_event_idle_timeout(
1136                                 ns->ns_waitq,
1137                                 atomic_read(&ns->ns_bref) == 0,
1138                                 cfs_time_seconds(1) / 4);
1139                 else
1140                         rc = l_wait_event_abortable(
1141                                 ns->ns_waitq, atomic_read(&ns->ns_bref) == 0);
1142
1143                 /* Forced cleanups should be able to reclaim all references,
1144                  * so it's safe to wait forever... we can't leak locks... */
1145                 if (force && rc == 0) {
1146                         rc = -ETIMEDOUT;
1147                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
1148                                        "namespace with %d resources in use, "
1149                                        "(rc=%d)\n", ldlm_ns_name(ns),
1150                                        atomic_read(&ns->ns_bref), rc);
1151                         GOTO(force_wait, rc);
1152                 }
1153
1154                 if (atomic_read(&ns->ns_bref)) {
1155                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
1156                                        "with %d resources in use, (rc=%d)\n",
1157                                        ldlm_ns_name(ns),
1158                                        atomic_read(&ns->ns_bref), rc);
1159                         RETURN(ELDLM_NAMESPACE_EXISTS);
1160                 }
1161                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
1162                        ldlm_ns_name(ns));
1163         }
1164
1165         RETURN(ELDLM_OK);
1166 }
1167
1168 /**
1169  * Performs various cleanups for passed \a ns to make it drop refc and be
1170  * ready for freeing. Waits for refc == 0.
1171  *
1172  * The following is done:
1173  * (0) Unregister \a ns from its list to make inaccessible for potential
1174  * users like pools thread and others;
1175  * (1) Clear all locks in \a ns.
1176  */
1177 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
1178                                struct obd_import *imp,
1179                                int force)
1180 {
1181         int rc;
1182
1183         ENTRY;
1184         if (!ns) {
1185                 EXIT;
1186                 return;
1187         }
1188
1189         spin_lock(&ns->ns_lock);
1190         ns->ns_stopping = 1;
1191         spin_unlock(&ns->ns_lock);
1192
1193         /*
1194          * Can fail with -EINTR when force == 0 in which case try harder.
1195          */
1196         rc = __ldlm_namespace_free(ns, force);
1197         if (rc != ELDLM_OK) {
1198                 if (imp) {
1199                         ptlrpc_disconnect_import(imp, 0);
1200                         ptlrpc_invalidate_import(imp);
1201                 }
1202
1203                 /*
1204                  * With all requests dropped and the import inactive
1205          * we are guaranteed that all references will be dropped.
1206                  */
1207                 rc = __ldlm_namespace_free(ns, 1);
1208                 LASSERT(rc == 0);
1209         }
1210         EXIT;
1211 }
1212 EXPORT_SYMBOL(ldlm_namespace_free_prior);
1213
1214 /**
1215  * Frees memory structures related to \a ns. This is only done when
1216  * ldlm_namespace_free_prior() has successfully removed all resources
1217  * referencing \a ns and its refc == 0.
1218  */
1219 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
1220 {
1221         ENTRY;
1222         if (!ns) {
1223                 EXIT;
1224                 return;
1225         }
1226
1227         /* Make sure that nobody can find this ns in its list. */
1228         ldlm_namespace_unregister(ns, ns->ns_client);
1229         /* Fini the pool _before_ the parent debugfs dir is removed, because
1230          * ldlm_pool_fini() removes its own debugfs dir, which is a child of
1231          * the namespace dir. Removing it after the parent may cause an oops. */
1232         ldlm_pool_fini(&ns->ns_pool);
1233
1234         ldlm_namespace_debugfs_unregister(ns);
1235         ldlm_namespace_sysfs_unregister(ns);
1236         cfs_hash_putref(ns->ns_rs_hash);
1237         OBD_FREE_LARGE(ns->ns_rs_buckets,
1238                        BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
1239         kfree(ns->ns_name);
1240         /* Namespace \a ns must not be on any list at this time;
1241          * otherwise the poold thread could end up using the freed
1242          * \a ns.
1243          */
1244         LASSERT(list_empty(&ns->ns_list_chain));
1245         OBD_FREE_PTR(ns);
1246         ldlm_put_ref();
1247         EXIT;
1248 }
1249 EXPORT_SYMBOL(ldlm_namespace_free_post);
1250
1251 /**
1252  * Cleanup the resource, and free namespace.
1253  * bug 12864:
1254  * Deadlock issue:
1255  * proc1: destroy import
1256  *        class_disconnect_export(grab cl_sem) ->
1257  *              -> ldlm_namespace_free ->
1258  *              -> lprocfs_remove(grab _lprocfs_lock).
1259  * proc2: read proc info
1260  *        lprocfs_fops_read(grab _lprocfs_lock) ->
1261  *              -> osc_rd_active, etc(grab cl_sem).
1262  *
1263  * To avoid this deadlock, ldlm_namespace_free is split into two parts: the
1264  * first, ldlm_namespace_free_prior, cleans up the resources that are still
1265  * in use; the second, ldlm_namespace_free_post, unregisters the lprocfs
1266  * entries and then frees the memory. The latter is called without
1267  * cli->cl_sem held.
1268  */
1269 void ldlm_namespace_free(struct ldlm_namespace *ns,
1270                          struct obd_import *imp,
1271                          int force)
1272 {
1273         ldlm_namespace_free_prior(ns, imp, force);
1274         ldlm_namespace_free_post(ns);
1275 }
1276 EXPORT_SYMBOL(ldlm_namespace_free);
1277
1278 void ldlm_namespace_get(struct ldlm_namespace *ns)
1279 {
1280         atomic_inc(&ns->ns_bref);
1281 }
1282
1283 /* This is only for callers that care about refcount */
1284 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1285 {
1286         return atomic_inc_return(&ns->ns_bref);
1287 }
1288
1289 void ldlm_namespace_put(struct ldlm_namespace *ns)
1290 {
1291         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1292                 wake_up(&ns->ns_waitq);
1293                 spin_unlock(&ns->ns_lock);
1294         }
1295 }
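/*
 * ns_bref lifecycle: ldlm_resource_get() takes a reference (via
 * ldlm_namespace_get_return()) when the first resource lands in a hash
 * bucket, and __ldlm_resource_putref_final() drops it when a bucket's last
 * resource goes away. The wake_up() above is what lets __ldlm_namespace_free()
 * finish once the count reaches zero, after which ldlm_poold can move the
 * namespace to the inactive list (see the comment near the top of this file).
 */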
1296
1297 /** Register \a ns in the list of namespaces */
1298 void ldlm_namespace_register(struct ldlm_namespace *ns, enum ldlm_side client)
1299 {
1300         mutex_lock(ldlm_namespace_lock(client));
1301         LASSERT(list_empty(&ns->ns_list_chain));
1302         list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
1303         ldlm_namespace_nr_inc(client);
1304         mutex_unlock(ldlm_namespace_lock(client));
1305 }
1306
1307 /** Unregister \a ns from the list of namespaces. */
1308 void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client)
1309 {
1310         mutex_lock(ldlm_namespace_lock(client));
1311         LASSERT(!list_empty(&ns->ns_list_chain));
1312         /* Some asserts and possibly other parts of the code are still
1313          * using list_empty(&ns->ns_list_chain). This is why it is
1314          * important to use list_del_init() here. */
1315         list_del_init(&ns->ns_list_chain);
1316         ldlm_namespace_nr_dec(client);
1317         mutex_unlock(ldlm_namespace_lock(client));
1318 }
1319
1320 /** Should be called with ldlm_namespace_lock(client) taken. */
1321 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1322                                           enum ldlm_side client)
1323 {
1324         LASSERT(!list_empty(&ns->ns_list_chain));
1325         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1326         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1327 }
1328
1329 /** Should be called with ldlm_namespace_lock(client) taken. */
1330 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1331                                             enum ldlm_side client)
1332 {
1333         LASSERT(!list_empty(&ns->ns_list_chain));
1334         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1335         list_move_tail(&ns->ns_list_chain,
1336                        ldlm_namespace_inactive_list(client));
1337 }
1338
1339 /** Should be called with ldlm_namespace_lock(client) taken. */
1340 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
1341 {
1342         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1343         LASSERT(!list_empty(ldlm_namespace_list(client)));
1344         return container_of(ldlm_namespace_list(client)->next,
1345                             struct ldlm_namespace, ns_list_chain);
1346 }
1347
1348 static bool ldlm_resource_extent_new(struct ldlm_resource *res)
1349 {
1350         int idx;
1351
1352         OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
1353                        sizeof(*res->lr_itree) * LCK_MODE_NUM);
1354         if (res->lr_itree == NULL)
1355                 return false;
1356         /* Initialize interval trees for each lock mode. */
1357         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1358                 res->lr_itree[idx].lit_size = 0;
1359                 res->lr_itree[idx].lit_mode = 1 << idx;
1360                 res->lr_itree[idx].lit_root = NULL;
1361         }
1362         return true;
1363 }
1364
1365 static bool ldlm_resource_inodebits_new(struct ldlm_resource *res)
1366 {
1367         int i;
1368
1369         OBD_ALLOC_PTR(res->lr_ibits_queues);
1370         if (res->lr_ibits_queues == NULL)
1371                 return false;
1372         for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
1373                 INIT_LIST_HEAD(&res->lr_ibits_queues->liq_waiting[i]);
1374         return true;
1375 }
1376
1377 /** Create and initialize new resource. */
1378 static struct ldlm_resource *ldlm_resource_new(enum ldlm_type ldlm_type)
1379 {
1380         struct ldlm_resource *res;
1381         bool rc;
1382
1383         OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1384         if (res == NULL)
1385                 return NULL;
1386
1387         switch (ldlm_type) {
1388         case LDLM_EXTENT:
1389                 rc = ldlm_resource_extent_new(res);
1390                 break;
1391         case LDLM_IBITS:
1392                 rc = ldlm_resource_inodebits_new(res);
1393                 break;
1394         default:
1395                 rc = true;
1396                 break;
1397         }
1398         if (!rc) {
1399                 OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1400                 return NULL;
1401         }
1402
1403         INIT_LIST_HEAD(&res->lr_granted);
1404         INIT_LIST_HEAD(&res->lr_waiting);
1405
1406         atomic_set(&res->lr_refcount, 1);
1407         spin_lock_init(&res->lr_lock);
1408         lu_ref_init(&res->lr_reference);
1409
1410         /* Since LVB init can be delayed now, there is no longer a need to
1411          * acquire the mutex here immediately. */
1412         mutex_init(&res->lr_lvb_mutex);
1413         res->lr_lvb_initialized = false;
1414
1415         return res;
1416 }
1417
1418 static void ldlm_resource_free(struct ldlm_resource *res)
1419 {
1420         if (res->lr_type == LDLM_EXTENT) {
1421                 if (res->lr_itree != NULL)
1422                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1423                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1424         } else if (res->lr_type == LDLM_IBITS) {
1425                 if (res->lr_ibits_queues != NULL)
1426                         OBD_FREE_PTR(res->lr_ibits_queues);
1427         }
1428
1429         OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
1430 }
1431
1432 /**
1433  * Return a reference to resource with given name, creating it if necessary.
1434  * Args: namespace with ns_lock unlocked
1435  * Locks: takes and releases NS hash-lock and res->lr_lock
1436  * Returns: referenced, unlocked ldlm_resource or an ERR_PTR on failure
1437  */
1438 struct ldlm_resource *
1439 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1440                   const struct ldlm_res_id *name, enum ldlm_type type,
1441                   int create)
1442 {
1443         struct hlist_node       *hnode;
1444         struct ldlm_resource    *res = NULL;
1445         struct cfs_hash_bd              bd;
1446         __u64                   version;
1447         int                     ns_refcount = 0;
1448         int hash;
1449
1450         LASSERT(ns != NULL);
1451         LASSERT(parent == NULL);
1452         LASSERT(ns->ns_rs_hash != NULL);
1453         LASSERT(name->name[0] != 0);
1454
1455         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1456         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1457         if (hnode != NULL) {
1458                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1459                 GOTO(found, res);
1460         }
1461
1462         version = cfs_hash_bd_version_get(&bd);
1463         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1464
1465         if (create == 0)
1466                 return ERR_PTR(-ENOENT);
1467
1468         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1469                  "type: %d\n", type);
1470         res = ldlm_resource_new(type);
1471         if (res == NULL)
1472                 return ERR_PTR(-ENOMEM);
1473
1474         hash = ldlm_res_hop_fid_hash(name, ns->ns_bucket_bits);
1475         res->lr_ns_bucket = &ns->ns_rs_buckets[hash];
1476         res->lr_name = *name;
1477         res->lr_type = type;
1478
1479         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1480         hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1481                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1482
1483         if (hnode != NULL) {
1484                 /* Someone won the race and already added the resource. */
1485                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1486                 /* Clean up lu_ref for the resource that lost the race. */
1487                 lu_ref_fini(&res->lr_reference);
1488                 ldlm_resource_free(res);
1489 found:
1490                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1491                 return res;
1492         }
1493         /* We won! Let's add the resource. */
1494         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1495         if (atomic_inc_return(&res->lr_ns_bucket->nsb_count) == 1)
1496                 ns_refcount = ldlm_namespace_get_return(ns);
1497
1498         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1499
1500         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1501
1502         /* Let's see if we happened to be the very first resource in this
1503          * namespace. If so, and this is a client namespace, we need to move
1504          * the namespace into the active namespaces list to be patrolled by
1505          * the ldlm_poold. */
1506         if (ns_is_client(ns) && ns_refcount == 1) {
1507                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1508                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1509                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1510         }
1511
1512         return res;
1513 }
1514 EXPORT_SYMBOL(ldlm_resource_get);
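
/*
 * Usage sketch (illustrative only, not from the original source): a typical
 * caller looks a resource up by ID and drops its reference when done.  The
 * resource ID value and the namespace pointer "ns" are hypothetical
 * placeholders; the error handling mirrors the ERR_PTR returns above.
 *
 *	struct ldlm_res_id res_id = { .name = { 0x2badbeef } };
 *	struct ldlm_resource *res;
 *
 *	res = ldlm_resource_get(ns, NULL, &res_id, LDLM_IBITS, 1);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	... use the resource, then ...
 *	ldlm_resource_putref(res);
 */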
1515
1516 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1517 {
1518         LASSERT(res != NULL);
1519         LASSERT(res != LP_POISON);
1520         atomic_inc(&res->lr_refcount);
1521         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1522                atomic_read(&res->lr_refcount));
1523         return res;
1524 }
1525
1526 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1527                                          struct ldlm_resource *res)
1528 {
1529         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1530
1531         if (!list_empty(&res->lr_granted)) {
1532                 ldlm_resource_dump(D_ERROR, res);
1533                 LBUG();
1534         }
1535
1536         if (!list_empty(&res->lr_waiting)) {
1537                 ldlm_resource_dump(D_ERROR, res);
1538                 LBUG();
1539         }
1540
1541         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1542                                bd, &res->lr_hash);
1543         lu_ref_fini(&res->lr_reference);
1544         if (atomic_dec_and_test(&nsb->nsb_count))
1545                 ldlm_namespace_put(nsb->nsb_namespace);
1546 }
1547
1548 /* Returns 1 if the resource was freed, 0 if it remains. */
1549 int ldlm_resource_putref(struct ldlm_resource *res)
1550 {
1551         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1552         struct cfs_hash_bd   bd;
1553
1554         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1555         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1556                res, atomic_read(&res->lr_refcount) - 1);
1557
1558         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1559         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1560                 __ldlm_resource_putref_final(&bd, res);
1561                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1562                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1563                         ns->ns_lvbo->lvbo_free(res);
1564                 ldlm_resource_free(res);
1565                 return 1;
1566         }
1567         return 0;
1568 }
1569 EXPORT_SYMBOL(ldlm_resource_putref);
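
/*
 * Reference-counting sketch (illustrative only, not from the original
 * source): every ldlm_resource_getref() must be balanced by an
 * ldlm_resource_putref(); the final putref deletes the resource from the
 * namespace hash and frees it, as implemented above.  "res" is a
 * hypothetical, already-referenced resource.
 *
 *	struct ldlm_resource *extra = ldlm_resource_getref(res);
 *	... hand "extra" to another context ...
 *	if (ldlm_resource_putref(extra))
 *		CDEBUG(D_INFO, "last reference dropped, resource freed\n");
 */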
1570
1571 /**
1572  * Add a lock to a given resource, onto the specified lock list.
1573  */
1574 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1575                             struct ldlm_lock *lock)
1576 {
1577         check_res_locked(res);
1578
1579         LDLM_DEBUG(lock, "About to add this lock");
1580
1581         if (ldlm_is_destroyed(lock)) {
1582                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1583                 return;
1584         }
1585
1586         LASSERT(list_empty(&lock->l_res_link));
1587
1588         list_add_tail(&lock->l_res_link, head);
1589
1590         if (res->lr_type == LDLM_IBITS)
1591                 ldlm_inodebits_add_lock(res, head, lock);
1592 }
1593
1594 /**
1595  * Insert a lock into a resource after the specified lock.
1596  *
1597  * The resource is obtained from the lock we are inserting after.
1598  */
1599 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1600                                      struct ldlm_lock *new)
1601 {
1602         struct ldlm_resource *res = original->l_resource;
1603
1604         check_res_locked(res);
1605
1606         ldlm_resource_dump(D_INFO, res);
1607         LDLM_DEBUG(new, "About to insert this lock after %p: ", original);
1608
1609         if (ldlm_is_destroyed(new)) {
1610                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1611                 goto out;
1612         }
1613
1614         LASSERT(list_empty(&new->l_res_link));
1615
1616         list_add(&new->l_res_link, &original->l_res_link);
1617  out:;
1618 }
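
/*
 * Illustrative sketch (not from the original source): with the resource
 * locked, a caller can splice a new lock directly behind an existing one on
 * the same queue.  "original" and "new" are hypothetical locks that already
 * belong to the same resource.
 *
 *	lock_res(original->l_resource);
 *	ldlm_resource_insert_lock_after(original, new);
 *	unlock_res(original->l_resource);
 */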
1619
1620 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1621 {
1622         int type = lock->l_resource->lr_type;
1623
1624         check_res_locked(lock->l_resource);
1625         switch (type) {
1626         case LDLM_PLAIN:
1627                 ldlm_unlink_lock_skiplist(lock);
1628                 break;
1629         case LDLM_EXTENT:
1630                 ldlm_extent_unlink_lock(lock);
1631                 break;
1632         case LDLM_IBITS:
1633                 ldlm_inodebits_unlink_lock(lock);
1634                 break;
1635         }
1636         list_del_init(&lock->l_res_link);
1637 }
1638 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
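
/*
 * Illustrative sketch (not from the original source):
 * ldlm_resource_add_lock() and ldlm_resource_unlink_lock() both call
 * check_res_locked(), so a caller moving a hypothetical lock "lock" between
 * the waiting and granted queues brackets the calls with
 * lock_res()/unlock_res().
 *
 *	lock_res(res);
 *	ldlm_resource_unlink_lock(lock);
 *	ldlm_resource_add_lock(res, &res->lr_granted, lock);
 *	unlock_res(res);
 */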
1639
1640 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1641 {
1642         desc->lr_type = res->lr_type;
1643         desc->lr_name = res->lr_name;
1644 }
1645
1646 /**
1647  * Print information about all locks in all namespaces on this node to
1648  * the debug log.
1649  */
1650 void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
1651 {
1652         struct list_head *tmp;
1653
1654         if (!((libcfs_debug | D_ERROR) & level))
1655                 return;
1656
1657         mutex_lock(ldlm_namespace_lock(client));
1658
1659         list_for_each(tmp, ldlm_namespace_list(client)) {
1660                 struct ldlm_namespace *ns;
1661
1662                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1663                 ldlm_namespace_dump(level, ns);
1664         }
1665
1666         mutex_unlock(ldlm_namespace_lock(client));
1667 }
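
/*
 * Illustrative note (not from the original source): the level-mask check
 * above means the dump runs only when "level" is enabled in libcfs_debug or
 * is D_ERROR, so callers may request verbose levels unconditionally, e.g.:
 *
 *	ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_OTHER);
 */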
1668
1669 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1670                               struct hlist_node *hnode, void *arg)
1671 {
1672         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1673         int    level = (int)(unsigned long)arg;
1674
1675         lock_res(res);
1676         ldlm_resource_dump(level, res);
1677         unlock_res(res);
1678
1679         return 0;
1680 }
1681
1682 /**
1683  * Print information about all locks in this namespace on this node to
1684  * the debug log.
1685  */
1686 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1687 {
1688         if (!((libcfs_debug | D_ERROR) & level))
1689                 return;
1690
1691         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1692                ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1693                ns_is_client(ns) ? "client" : "server");
1694
1695         if (ktime_get_seconds() < ns->ns_next_dump)
1696                 return;
1697
1698         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1699                                  ldlm_res_hash_dump,
1700                                  (void *)(unsigned long)level, 0);
1701         spin_lock(&ns->ns_lock);
1702         ns->ns_next_dump = ktime_get_seconds() + 10;
1703         spin_unlock(&ns->ns_lock);
1704 }
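
/*
 * Illustrative note (not from the original source): namespace dumps are
 * throttled via ns_next_dump, so a second call within the 10-second window
 * prints only the namespace header line.
 *
 *	ldlm_namespace_dump(D_OTHER, ns);   full dump (if D_OTHER is enabled)
 *	ldlm_namespace_dump(D_OTHER, ns);   header only, inside the window
 */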
1705
1706 /**
1707  * Print information about all locks in this resource to the debug log.
1708  */
1709 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1710 {
1711         struct ldlm_lock *lock;
1712         unsigned int granted = 0;
1713
1714         BUILD_BUG_ON(RES_NAME_SIZE != 4);
1715
1716         if (!((libcfs_debug | D_ERROR) & level))
1717                 return;
1718
1719         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1720                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1721
1722         if (!list_empty(&res->lr_granted)) {
1723                 CDEBUG(level, "Granted locks (in reverse order):\n");
1724                 list_for_each_entry_reverse(lock, &res->lr_granted,
1725                                                 l_res_link) {
1726                         LDLM_DEBUG_LIMIT(level, lock, "###");
1727                         if (!(level & D_CANTMASK) &&
1728                             ++granted > ldlm_dump_granted_max) {
1729                                 CDEBUG(level,
1730                                        "only dumping %u granted locks to avoid DDOS.\n",
1731                                        granted);
1732                                 break;
1733                         }
1734                 }
1735         }
1736
1737         if (!list_empty(&res->lr_waiting)) {
1738                 CDEBUG(level, "Waiting locks:\n");
1739                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1740                         LDLM_DEBUG_LIMIT(level, lock, "###");
1741         }
1742 }
1743 EXPORT_SYMBOL(ldlm_resource_dump);
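
/*
 * Illustrative sketch (not from the original source): ldlm_resource_dump()
 * takes no locks itself; callers such as ldlm_res_hash_dump() above hold the
 * resource lock so the granted/waiting lists cannot change while being
 * walked.
 *
 *	lock_res(res);
 *	ldlm_resource_dump(D_INFO, res);
 *	unlock_res(res);
 */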