1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/ldlm/ldlm_resource.c
32  *
33  * Author: Phil Schwan <phil@clusterfs.com>
34  * Author: Peter Braam <braam@clusterfs.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_LDLM
38 #include <lustre_dlm.h>
39 #include <lustre_fid.h>
40 #include <obd_class.h>
41 #include <libcfs/linux/linux-hash.h>
42 #include "ldlm_internal.h"
43
44 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
45 struct kmem_cache *ldlm_interval_tree_slab;
46 struct kmem_cache *ldlm_inodebits_slab;
47
48 int ldlm_srv_namespace_nr = 0;
49 int ldlm_cli_namespace_nr = 0;
50
51 DEFINE_MUTEX(ldlm_srv_namespace_lock);
52 LIST_HEAD(ldlm_srv_namespace_list);
53
54 DEFINE_MUTEX(ldlm_cli_namespace_lock);
55 /* Client namespaces that have active resources in them.
56  * Once all resources go away, ldlm_poold moves such namespaces to the
57  * inactive list. */
58 LIST_HEAD(ldlm_cli_active_namespace_list);
59 /* Client namespaces that don't have any locks in them */
60 LIST_HEAD(ldlm_cli_inactive_namespace_list);
61
62 static struct dentry *ldlm_debugfs_dir;
63 static struct dentry *ldlm_ns_debugfs_dir;
64 struct dentry *ldlm_svc_debugfs_dir;
65
66 /* During a debug dump, limit the number of granted locks printed for a
67  * single resource, to avoid flooding the logs. */
68 static unsigned int ldlm_dump_granted_max = 256;
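/*
 * Illustrative usage (not part of this file): the limit is exposed through the
 * "dump_granted_max" entry registered from ldlm_debugfs_list below, so it can
 * typically be adjusted at run time, e.g.:
 *
 *	echo 512 > /sys/kernel/debug/lustre/ldlm/dump_granted_max
 *
 * The exact debugfs mount point and path depend on the installation.
 */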
69
70 static ssize_t ldebugfs_dump_ns_seq_write(struct file *file,
71                                           const char __user *buffer,
72                                           size_t count, loff_t *off)
73 {
74         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
75         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
76         RETURN(count);
77 }
78
79 LDEBUGFS_FOPS_WR_ONLY(ldlm, dump_ns);
80
81 static int ldlm_rw_uint_seq_show(struct seq_file *m, void *v)
82 {
83         seq_printf(m, "%u\n", *(unsigned int *)m->private);
84         return 0;
85 }
86
87 static ssize_t
88 ldlm_rw_uint_seq_write(struct file *file, const char __user *buffer,
89                        size_t count, loff_t *off)
90 {
91         struct seq_file *seq = file->private_data;
92
93         if (!count)
94                 return 0;
95
96         return kstrtouint_from_user(buffer, count, 0,
97                                     (unsigned int *)seq->private);
98 }
99
100 LDEBUGFS_SEQ_FOPS(ldlm_rw_uint);
101
102 #ifdef HAVE_SERVER_SUPPORT
103
104 static int seq_watermark_show(struct seq_file *m, void *data)
105 {
106         seq_printf(m, "%llu\n", *(__u64 *)m->private);
107         return 0;
108 }
109
110 static ssize_t seq_watermark_write(struct file *file,
111                                    const char __user *buffer, size_t count,
112                                    loff_t *off)
113 {
114         struct seq_file *m = file->private_data;
115         u64 value;
116         __u64 watermark;
117         __u64 *data = m->private;
118         bool wm_low = (data == &ldlm_reclaim_threshold_mb);
119         char kernbuf[22] = "";
120         int rc;
121
122         if (count >= sizeof(kernbuf))
123                 return -EINVAL;
124
125         if (copy_from_user(kernbuf, buffer, count))
126                 return -EFAULT;
127         kernbuf[count] = 0;
128
129         rc = sysfs_memparse(kernbuf, count, &value, "MiB");
130         if (rc < 0) {
131                 CERROR("Failed to set %s, rc = %d.\n",
132                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb",
133                        rc);
134                 return rc;
135         } else if (value != 0 && value < (1 << 20)) {
136                 CERROR("%s must be 0 or at least 1MB.\n",
137                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb");
138                 return -EINVAL;
139         }
140         watermark = value >> 20;
141
142         if (wm_low) {
143                 if (ldlm_lock_limit_mb != 0 && watermark > ldlm_lock_limit_mb) {
144                         CERROR("lock_reclaim_threshold_mb must be smaller than "
145                                "lock_limit_mb.\n");
146                         return -EINVAL;
147                 }
148
149                 *data = watermark;
150                 if (watermark != 0) {
151                         watermark <<= 20;
152                         do_div(watermark, sizeof(struct ldlm_lock));
153                 }
154                 ldlm_reclaim_threshold = watermark;
155         } else {
156                 if (ldlm_reclaim_threshold_mb != 0 &&
157                     watermark < ldlm_reclaim_threshold_mb) {
158                         CERROR("lock_limit_mb must be greater than "
159                                "lock_reclaim_threshold_mb.\n");
160                         return -EINVAL;
161                 }
162
163                 *data = watermark;
164                 if (watermark != 0) {
165                         watermark <<= 20;
166                         do_div(watermark, sizeof(struct ldlm_lock));
167                 }
168                 ldlm_lock_limit = watermark;
169         }
170
171         return count;
172 }
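/*
 * Worked example (illustrative; sizeof(struct ldlm_lock) varies by build):
 * writing "1024" to lock_limit_mb parses to value = 1024 MiB, so
 * watermark = 1024 is stored in *data and the internal limit becomes
 * (1024 << 20) / sizeof(struct ldlm_lock) locks, e.g.
 * 1073741824 / 512 = 2097152 locks if the lock structure is 512 bytes.
 */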
173
174 static int seq_watermark_open(struct inode *inode, struct file *file)
175 {
176         return single_open(file, seq_watermark_show, inode->i_private);
177 }
178
179 static const struct file_operations ldlm_watermark_fops = {
180         .owner          = THIS_MODULE,
181         .open           = seq_watermark_open,
182         .read           = seq_read,
183         .write          = seq_watermark_write,
184         .llseek         = seq_lseek,
185         .release        = lprocfs_single_release,
186 };
187
188 static int seq_granted_show(struct seq_file *m, void *data)
189 {
190         seq_printf(m, "%llu\n", percpu_counter_sum_positive(
191                    (struct percpu_counter *)m->private));
192         return 0;
193 }
194
195 static int seq_granted_open(struct inode *inode, struct file *file)
196 {
197         return single_open(file, seq_granted_show, inode->i_private);
198 }
199
200 static const struct file_operations ldlm_granted_fops = {
201         .owner  = THIS_MODULE,
202         .open   = seq_granted_open,
203         .read   = seq_read,
204         .llseek = seq_lseek,
205         .release = seq_release,
206 };
207
208 #endif /* HAVE_SERVER_SUPPORT */
209
210 static struct ldebugfs_vars ldlm_debugfs_list[] = {
211         { .name =       "dump_namespaces",
212           .fops =       &ldlm_dump_ns_fops,
213           .proc_mode =  0222 },
214         { .name =       "dump_granted_max",
215           .fops =       &ldlm_rw_uint_fops,
216           .data =       &ldlm_dump_granted_max },
217 #ifdef HAVE_SERVER_SUPPORT
218         { .name =       "lock_reclaim_threshold_mb",
219           .fops =       &ldlm_watermark_fops,
220           .data =       &ldlm_reclaim_threshold_mb },
221         { .name =       "lock_limit_mb",
222           .fops =       &ldlm_watermark_fops,
223           .data =       &ldlm_lock_limit_mb },
224         { .name =       "lock_granted_count",
225           .fops =       &ldlm_granted_fops,
226           .data =       &ldlm_granted_total },
227 #endif
228         { NULL }
229 };
230
231 int ldlm_debugfs_setup(void)
232 {
233         ENTRY;
234         ldlm_debugfs_dir = debugfs_create_dir(OBD_LDLM_DEVICENAME,
235                                              debugfs_lustre_root);
236         ldlm_ns_debugfs_dir = debugfs_create_dir("namespaces",
237                                                  ldlm_debugfs_dir);
238         ldlm_svc_debugfs_dir = debugfs_create_dir("services",
239                                                   ldlm_debugfs_dir);
240
241         ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
242
243         RETURN(0);
244 }
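/*
 * Resulting layout sketch (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug and debugfs_lustre_root is the "lustre" directory):
 *
 *	/sys/kernel/debug/lustre/ldlm/dump_namespaces
 *	/sys/kernel/debug/lustre/ldlm/dump_granted_max
 *	/sys/kernel/debug/lustre/ldlm/namespaces/<ns name>/...
 *	/sys/kernel/debug/lustre/ldlm/services/...
 *
 * plus the server-only entries from ldlm_debugfs_list when
 * HAVE_SERVER_SUPPORT is defined.
 */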
245
246 void ldlm_debugfs_cleanup(void)
247 {
248         debugfs_remove_recursive(ldlm_debugfs_dir);
249
250         ldlm_svc_debugfs_dir = NULL;
251         ldlm_ns_debugfs_dir = NULL;
252         ldlm_debugfs_dir = NULL;
253 }
254
255 static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr,
256                                    char *buf)
257 {
258         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
259                                                  ns_kobj);
260         __u64                   res = 0;
261         int                     i;
262
263         /* the result is not strictly consistent */
264         for (i = 0; i < (1 << ns->ns_bucket_bits); i++)
265                 res += atomic_read(&ns->ns_rs_buckets[i].nsb_count);
266         return sprintf(buf, "%lld\n", res);
267 }
268 LUSTRE_RO_ATTR(resource_count);
269
270 static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr,
271                                char *buf)
272 {
273         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
274                                                  ns_kobj);
275         __u64                   locks;
276
277         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
278                                         LPROCFS_FIELDS_FLAGS_SUM);
279         return sprintf(buf, "%lld\n", locks);
280 }
281 LUSTRE_RO_ATTR(lock_count);
282
283 static ssize_t lock_unused_count_show(struct kobject *kobj,
284                                       struct attribute *attr,
285                                       char *buf)
286 {
287         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
288                                                  ns_kobj);
289
290         return sprintf(buf, "%d\n", ns->ns_nr_unused);
291 }
292 LUSTRE_RO_ATTR(lock_unused_count);
293
294 static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr,
295                              char *buf)
296 {
297         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
298                                                  ns_kobj);
299         __u32 *nr = &ns->ns_max_unused;
300
301         if (ns_connect_lru_resize(ns))
302                 nr = &ns->ns_nr_unused;
303         return sprintf(buf, "%u\n", *nr);
304 }
305
306 static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
307                               const char *buffer, size_t count)
308 {
309         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
310                                                  ns_kobj);
311         unsigned long tmp;
312         int lru_resize;
313         int err;
314
315         if (strncmp(buffer, "clear", 5) == 0) {
316                 CDEBUG(D_DLMTRACE,
317                        "dropping all unused locks from namespace %s\n",
318                        ldlm_ns_name(ns));
319                 /* Try to cancel all @ns_nr_unused locks. */
320                 ldlm_cancel_lru(ns, INT_MAX, 0, LDLM_LRU_FLAG_CLEANUP);
321                 return count;
322         }
323
324         err = kstrtoul(buffer, 10, &tmp);
325         if (err != 0) {
326                 CERROR("lru_size: invalid value written\n");
327                 return -EINVAL;
328         }
329         lru_resize = (tmp == 0);
330
331         if (ns_connect_lru_resize(ns)) {
332                 if (!lru_resize)
333                         ns->ns_max_unused = (unsigned int)tmp;
334
335                 if (tmp > ns->ns_nr_unused)
336                         tmp = ns->ns_nr_unused;
337                 tmp = ns->ns_nr_unused - tmp;
338
339                 CDEBUG(D_DLMTRACE,
340                        "changing namespace %s unused locks from %u to %u\n",
341                        ldlm_ns_name(ns), ns->ns_nr_unused,
342                        (unsigned int)tmp);
343
344                 if (!lru_resize) {
345                         CDEBUG(D_DLMTRACE,
346                                "disable lru_resize for namespace %s\n",
347                                ldlm_ns_name(ns));
348                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
349                 }
350                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, 0);
351         } else {
352                 CDEBUG(D_DLMTRACE,
353                        "changing namespace %s max_unused from %u to %u\n",
354                        ldlm_ns_name(ns), ns->ns_max_unused,
355                        (unsigned int)tmp);
356
357                 /* Make sure that LRU resize was originally supported before
358                  * turning it on here.
359                  */
360                 if (lru_resize &&
361                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
362                         CDEBUG(D_DLMTRACE,
363                                "enable lru_resize for namespace %s\n",
364                                ldlm_ns_name(ns));
365                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
366                 }
367                 ns->ns_max_unused = (unsigned int)tmp;
368                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
369         }
370
371         return count;
372 }
373 LUSTRE_RW_ATTR(lru_size);
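/*
 * Illustrative usage (not part of this file): lru_size is exported per
 * namespace and is commonly driven via lctl, e.g.:
 *
 *	lctl set_param ldlm.namespaces.*.lru_size=clear   # drop unused locks
 *	lctl set_param ldlm.namespaces.*.lru_size=0       # enable LRU resize
 *	lctl set_param ldlm.namespaces.*.lru_size=1600    # fixed LRU, resize off
 *
 * The exact parameter paths follow the usual Lustre conventions and may
 * differ between releases.
 */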
374
375 static ssize_t lru_cancel_batch_show(struct kobject *kobj,
376                                  struct attribute *attr, char *buf)
377 {
378         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
379                                                  ns_kobj);
380
381         return scnprintf(buf, PAGE_SIZE, "%u\n", ns->ns_cancel_batch);
382 }
383
384 static ssize_t lru_cancel_batch_store(struct kobject *kobj,
385                                   struct attribute *attr,
386                                   const char *buffer, size_t count)
387 {
388         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
389                                                  ns_kobj);
390         unsigned long tmp;
391
392         if (kstrtoul(buffer, 10, &tmp))
393                 return -EINVAL;
394
395         ns->ns_cancel_batch = (unsigned int)tmp;
396
397         return count;
398 }
399 LUSTRE_RW_ATTR(lru_cancel_batch);
400
401 static ssize_t ns_recalc_pct_show(struct kobject *kobj,
402                                   struct attribute *attr, char *buf)
403 {
404         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
405                                                  ns_kobj);
406
407         return scnprintf(buf, PAGE_SIZE, "%u\n", ns->ns_recalc_pct);
408 }
409
410 static ssize_t ns_recalc_pct_store(struct kobject *kobj,
411                                    struct attribute *attr,
412                                    const char *buffer, size_t count)
413 {
414         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
415                                                  ns_kobj);
416         unsigned long tmp;
417
418         if (kstrtoul(buffer, 10, &tmp))
419                 return -EINVAL;
420
421         if (tmp > 100)
422                 return -ERANGE;
423
424         ns->ns_recalc_pct = (unsigned int)tmp;
425
426         return count;
427 }
428 LUSTRE_RW_ATTR(ns_recalc_pct);
429
430 static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
431                                 char *buf)
432 {
433         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
434                                                  ns_kobj);
435
436         return sprintf(buf, "%lld\n", ktime_to_ms(ns->ns_max_age));
437 }
438
439 static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr,
440                                  const char *buffer, size_t count)
441 {
442         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
443                                                  ns_kobj);
444         int scale = NSEC_PER_MSEC;
445         unsigned long long tmp;
446         char *buf;
447
448         /* Did the user ask in seconds or milliseconds? The default is ms. */
449         buf = strstr(buffer, "ms");
450         if (!buf) {
451                 buf = strchr(buffer, 's');
452                 if (buf)
453                         scale = NSEC_PER_SEC;
454         }
455
456         if (buf)
457                 *buf = '\0';
458
459         if (kstrtoull(buffer, 10, &tmp))
460                 return -EINVAL;
461
462         ns->ns_max_age = ktime_set(0, tmp * scale);
463
464         return count;
465 }
466 LUSTRE_RW_ATTR(lru_max_age);
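/*
 * Input format sketch (derived from the parser above): a bare number is taken
 * as milliseconds, a "ms" suffix means explicit milliseconds, and an "s"
 * suffix means seconds; e.g. "3900000", "3900000ms" and "3900s" all set the
 * same 65-minute maximum age.
 */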
467
468 static ssize_t early_lock_cancel_show(struct kobject *kobj,
469                                       struct attribute *attr,
470                                       char *buf)
471 {
472         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
473                                                  ns_kobj);
474
475         return sprintf(buf, "%d\n", ns_connect_cancelset(ns));
476 }
477
478 static ssize_t early_lock_cancel_store(struct kobject *kobj,
479                                        struct attribute *attr,
480                                        const char *buffer,
481                                        size_t count)
482 {
483         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
484                                                  ns_kobj);
485         unsigned long supp = -1;
486         int rc;
487
488         rc = kstrtoul(buffer, 10, &supp);
489         if (rc < 0)
490                 return rc;
491
492         if (supp == 0)
493                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
494         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
495                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
496         return count;
497 }
498 LUSTRE_RW_ATTR(early_lock_cancel);
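/*
 * Behaviour sketch: writing 0 clears OBD_CONNECT_CANCELSET and disables early
 * lock cancellation; writing a non-zero value re-enables it, but only if the
 * server originally negotiated OBD_CONNECT_CANCELSET at connect time
 * (recorded in ns_orig_connect_flags).
 */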
499
500 static ssize_t dirty_age_limit_show(struct kobject *kobj,
501                                     struct attribute *attr, char *buf)
502 {
503         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
504                                                  ns_kobj);
505
506         return scnprintf(buf, PAGE_SIZE, "%llu\n",
507                          ktime_divns(ns->ns_dirty_age_limit, NSEC_PER_SEC));
508 }
509
510 static ssize_t dirty_age_limit_store(struct kobject *kobj,
511                                      struct attribute *attr,
512                                      const char *buffer, size_t count)
513 {
514         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
515                                                  ns_kobj);
516         unsigned long long tmp;
517
518         if (kstrtoull(buffer, 10, &tmp))
519                 return -EINVAL;
520
521         ns->ns_dirty_age_limit = ktime_set(tmp, 0);
522
523         return count;
524 }
525 LUSTRE_RW_ATTR(dirty_age_limit);
526
527 #ifdef HAVE_SERVER_SUPPORT
528 static ssize_t ctime_age_limit_show(struct kobject *kobj,
529                                     struct attribute *attr, char *buf)
530 {
531         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
532                                                  ns_kobj);
533
534         return scnprintf(buf, PAGE_SIZE, "%u\n", ns->ns_ctime_age_limit);
535 }
536
537 static ssize_t ctime_age_limit_store(struct kobject *kobj,
538                                      struct attribute *attr,
539                                      const char *buffer, size_t count)
540 {
541         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
542                                                  ns_kobj);
543         unsigned long tmp;
544
545         if (kstrtoul(buffer, 10, &tmp))
546                 return -EINVAL;
547
548         ns->ns_ctime_age_limit = tmp;
549
550         return count;
551 }
552 LUSTRE_RW_ATTR(ctime_age_limit);
553
554 static ssize_t lock_timeouts_show(struct kobject *kobj, struct attribute *attr,
555                                   char *buf)
556 {
557         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
558                                                  ns_kobj);
559
560         return sprintf(buf, "%d\n", ns->ns_timeouts);
561 }
562 LUSTRE_RO_ATTR(lock_timeouts);
563
564 static ssize_t max_nolock_bytes_show(struct kobject *kobj,
565                                      struct attribute *attr, char *buf)
566 {
567         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
568                                                  ns_kobj);
569
570         return sprintf(buf, "%u\n", ns->ns_max_nolock_size);
571 }
572
573 static ssize_t max_nolock_bytes_store(struct kobject *kobj,
574                                       struct attribute *attr,
575                                       const char *buffer, size_t count)
576 {
577         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
578                                                  ns_kobj);
579         unsigned long tmp;
580         int err;
581
582         err = kstrtoul(buffer, 10, &tmp);
583         if (err != 0)
584                 return -EINVAL;
585
586         ns->ns_max_nolock_size = tmp;
587
588         return count;
589 }
590 LUSTRE_RW_ATTR(max_nolock_bytes);
591
592 static ssize_t contention_seconds_show(struct kobject *kobj,
593                                        struct attribute *attr, char *buf)
594 {
595         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
596                                                  ns_kobj);
597
598         return scnprintf(buf, PAGE_SIZE, "%d\n", ns->ns_contention_time);
599 }
600
601 static ssize_t contention_seconds_store(struct kobject *kobj,
602                                         struct attribute *attr,
603                                         const char *buffer, size_t count)
604 {
605         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
606                                                  ns_kobj);
607         unsigned int tmp;
608
609         if (kstrtouint(buffer, 10, &tmp))
610                 return -EINVAL;
611
612         ns->ns_contention_time = tmp;
613
614         return count;
615 }
616 LUSTRE_RW_ATTR(contention_seconds);
617
618 static ssize_t contended_locks_show(struct kobject *kobj,
619                                     struct attribute *attr, char *buf)
620 {
621         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
622                                                  ns_kobj);
623
624         return sprintf(buf, "%u\n", ns->ns_contended_locks);
625 }
626
627 static ssize_t contended_locks_store(struct kobject *kobj,
628                                      struct attribute *attr,
629                                      const char *buffer, size_t count)
630 {
631         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
632                                                  ns_kobj);
633         unsigned long tmp;
634         int err;
635
636         err = kstrtoul(buffer, 10, &tmp);
637         if (err != 0)
638                 return -EINVAL;
639
640         ns->ns_contended_locks = tmp;
641
642         return count;
643 }
644 LUSTRE_RW_ATTR(contended_locks);
645
646 static ssize_t max_parallel_ast_show(struct kobject *kobj,
647                                      struct attribute *attr, char *buf)
648 {
649         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
650                                                  ns_kobj);
651
652         return sprintf(buf, "%u\n", ns->ns_max_parallel_ast);
653 }
654
655 static ssize_t max_parallel_ast_store(struct kobject *kobj,
656                                       struct attribute *attr,
657                                       const char *buffer, size_t count)
658 {
659         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
660                                                  ns_kobj);
661         unsigned long tmp;
662         int err;
663
664         err = kstrtoul(buffer, 10, &tmp);
665         if (err != 0)
666                 return -EINVAL;
667
668         ns->ns_max_parallel_ast = tmp;
669
670         return count;
671 }
672 LUSTRE_RW_ATTR(max_parallel_ast);
673
674 #endif /* HAVE_SERVER_SUPPORT */
675
676 /* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */
677 static struct attribute *ldlm_ns_attrs[] = {
678         &lustre_attr_resource_count.attr,
679         &lustre_attr_lock_count.attr,
680         &lustre_attr_lock_unused_count.attr,
681         &lustre_attr_ns_recalc_pct.attr,
682         &lustre_attr_lru_size.attr,
683         &lustre_attr_lru_cancel_batch.attr,
684         &lustre_attr_lru_max_age.attr,
685         &lustre_attr_early_lock_cancel.attr,
686         &lustre_attr_dirty_age_limit.attr,
687 #ifdef HAVE_SERVER_SUPPORT
688         &lustre_attr_ctime_age_limit.attr,
689         &lustre_attr_lock_timeouts.attr,
690         &lustre_attr_max_nolock_bytes.attr,
691         &lustre_attr_contention_seconds.attr,
692         &lustre_attr_contended_locks.attr,
693         &lustre_attr_max_parallel_ast.attr,
694 #endif
695         NULL,
696 };
697
698 static void ldlm_ns_release(struct kobject *kobj)
699 {
700         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
701                                                  ns_kobj);
702         complete(&ns->ns_kobj_unregister);
703 }
704
705 KOBJ_ATTRIBUTE_GROUPS(ldlm_ns);
706
707 static struct kobj_type ldlm_ns_ktype = {
708         .default_groups = KOBJ_ATTR_GROUPS(ldlm_ns),
709         .sysfs_ops      = &lustre_sysfs_ops,
710         .release        = ldlm_ns_release,
711 };
712
713 static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns)
714 {
715         if (IS_ERR_OR_NULL(ns->ns_debugfs_entry))
716                 CERROR("dlm namespace %s has no debugfs dir?\n",
717                        ldlm_ns_name(ns));
718         else
719                 debugfs_remove_recursive(ns->ns_debugfs_entry);
720
721         if (ns->ns_stats != NULL)
722                 lprocfs_stats_free(&ns->ns_stats);
723 }
724
725 void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns)
726 {
727         kobject_put(&ns->ns_kobj);
728         wait_for_completion(&ns->ns_kobj_unregister);
729 }
730
731 int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
732 {
733         int err;
734
735         ns->ns_kobj.kset = ldlm_ns_kset;
736         init_completion(&ns->ns_kobj_unregister);
737         err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL,
738                                    "%s", ldlm_ns_name(ns));
739
740         ns->ns_stats = lprocfs_stats_alloc(LDLM_NSS_LAST, 0);
741         if (!ns->ns_stats) {
742                 kobject_put(&ns->ns_kobj);
743                 return -ENOMEM;
744         }
745
746         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
747                              LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
748                              "locks");
749
750         return err;
751 }
752
753 static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
754 {
755         struct dentry *ns_entry;
756
757         if (!IS_ERR_OR_NULL(ns->ns_debugfs_entry)) {
758                 ns_entry = ns->ns_debugfs_entry;
759         } else {
760                 ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
761                                               ldlm_ns_debugfs_dir);
762                 if (!ns_entry)
763                         return -ENOMEM;
764                 ns->ns_debugfs_entry = ns_entry;
765         }
766
767         return 0;
768 }
769 #undef MAX_STRING_SIZE
770
771 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
772                                   const void *key, unsigned int mask)
773 {
774         const struct ldlm_res_id *id = key;
775         unsigned int val = 0;
776         unsigned int i;
777
778         for (i = 0; i < RES_NAME_SIZE; i++)
779                 val += id->name[i];
780         return val & mask;
781 }
782
783 static unsigned int ldlm_res_hop_fid_hash(const struct ldlm_res_id *id, unsigned int bits)
784 {
785         struct lu_fid       fid;
786         __u32               hash;
787         __u32               val;
788
789         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
790         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
791         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
792
793         hash = fid_flatten32(&fid);
794         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
795         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
796                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
797         } else {
798                 val = fid_oid(&fid);
799         }
800         hash += (val >> 5) + (val << 11);
801         return cfs_hash_32(hash, bits);
802 }
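/*
 * Layout note (derived from the code above): for FID-named resources the
 * res_id packs fid_seq into name[LUSTRE_RES_ID_SEQ_OFF], and packs the oid in
 * the low 32 bits and the version in the high 32 bits of
 * name[LUSTRE_RES_ID_VER_OID_OFF]. When name[LUSTRE_RES_ID_HSH_OFF] is
 * non-zero it supplies the extra value mixed into the hash instead of the oid.
 */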
803
804 static void *ldlm_res_hop_key(struct hlist_node *hnode)
805 {
806         struct ldlm_resource   *res;
807
808         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
809         return &res->lr_name;
810 }
811
812 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
813 {
814         struct ldlm_resource   *res;
815
816         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
817         return ldlm_res_eq((const struct ldlm_res_id *)key,
818                            (const struct ldlm_res_id *)&res->lr_name);
819 }
820
821 static void *ldlm_res_hop_object(struct hlist_node *hnode)
822 {
823         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
824 }
825
826 static void
827 ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
828 {
829         struct ldlm_resource *res;
830
831         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
832         ldlm_resource_getref(res);
833 }
834
835 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
836 {
837         struct ldlm_resource *res;
838
839         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
840         ldlm_resource_putref(res);
841 }
842
843 static struct cfs_hash_ops ldlm_ns_hash_ops = {
844         .hs_hash        = ldlm_res_hop_hash,
845         .hs_key         = ldlm_res_hop_key,
846         .hs_keycmp      = ldlm_res_hop_keycmp,
847         .hs_keycpy      = NULL,
848         .hs_object      = ldlm_res_hop_object,
849         .hs_get         = ldlm_res_hop_get_locked,
850         .hs_put         = ldlm_res_hop_put
851 };
852
853 static struct {
854         /** hash bucket bits */
855         unsigned                nsd_bkt_bits;
856         /** hash bits */
857         unsigned                nsd_all_bits;
858 } ldlm_ns_hash_defs[] = {
859         [LDLM_NS_TYPE_MDC] = {
860                 .nsd_bkt_bits   = 11,
861                 .nsd_all_bits   = 16,
862         },
863         [LDLM_NS_TYPE_MDT] = {
864                 .nsd_bkt_bits   = 14,
865                 .nsd_all_bits   = 21,
866         },
867         [LDLM_NS_TYPE_OSC] = {
868                 .nsd_bkt_bits   = 8,
869                 .nsd_all_bits   = 12,
870         },
871         [LDLM_NS_TYPE_OST] = {
872                 .nsd_bkt_bits   = 11,
873                 .nsd_all_bits   = 17,
874         },
875         [LDLM_NS_TYPE_MGC] = {
876                 .nsd_bkt_bits   = 3,
877                 .nsd_all_bits   = 4,
878         },
879         [LDLM_NS_TYPE_MGT] = {
880                 .nsd_bkt_bits   = 3,
881                 .nsd_all_bits   = 4,
882         },
883 };
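/*
 * Sizing example (see ldlm_namespace_new() below): a namespace keeps
 * 1 << (nsd_all_bits - nsd_bkt_bits) ldlm_ns_bucket entries, so an MDC
 * namespace gets 1 << (16 - 11) = 32 buckets, while its cfs_hash table is
 * created with nsd_all_bits, i.e. 2^16 = 65536 hash slots.
 */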
884
885 /**
886  * Create and initialize new empty namespace.
887  */
888 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
889                                           enum ldlm_side client,
890                                           enum ldlm_appetite apt,
891                                           enum ldlm_ns_type ns_type)
892 {
893         struct ldlm_namespace *ns = NULL;
894         int idx;
895         int rc;
896
897         ENTRY;
898         LASSERT(obd != NULL);
899
900         rc = ldlm_get_ref();
901         if (rc) {
902                 CERROR("%s: ldlm_get_ref failed: rc = %d\n", name, rc);
903                 RETURN(ERR_PTR(rc));
904         }
905
906         if (ns_type >= ARRAY_SIZE(ldlm_ns_hash_defs) ||
907             ldlm_ns_hash_defs[ns_type].nsd_bkt_bits == 0) {
908                 rc = -EINVAL;
909                 CERROR("%s: unknown namespace type %d: rc = %d\n",
910                        name, ns_type, rc);
911                 GOTO(out_ref, rc);
912         }
913
914         OBD_ALLOC_PTR(ns);
915         if (!ns)
916                 GOTO(out_ref, rc = -ENOMEM);
917
918         ns->ns_rs_hash = cfs_hash_create(name,
919                                          ldlm_ns_hash_defs[ns_type].nsd_all_bits,
920                                          ldlm_ns_hash_defs[ns_type].nsd_all_bits,
921                                          ldlm_ns_hash_defs[ns_type].nsd_bkt_bits,
922                                          0,
923                                          CFS_HASH_MIN_THETA,
924                                          CFS_HASH_MAX_THETA,
925                                          &ldlm_ns_hash_ops,
926                                          CFS_HASH_DEPTH |
927                                          CFS_HASH_BIGNAME |
928                                          CFS_HASH_SPIN_BKTLOCK |
929                                          CFS_HASH_NO_ITEMREF);
930         if (!ns->ns_rs_hash)
931                 GOTO(out_ns, rc = -ENOMEM);
932
933         ns->ns_bucket_bits = ldlm_ns_hash_defs[ns_type].nsd_all_bits -
934                              ldlm_ns_hash_defs[ns_type].nsd_bkt_bits;
935
936         OBD_ALLOC_PTR_ARRAY_LARGE(ns->ns_rs_buckets, 1 << ns->ns_bucket_bits);
937         if (!ns->ns_rs_buckets)
938                 GOTO(out_hash, rc = -ENOMEM);
939
940         for (idx = 0; idx < (1 << ns->ns_bucket_bits); idx++) {
941                 struct ldlm_ns_bucket *nsb = &ns->ns_rs_buckets[idx];
942
943                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
944                 nsb->nsb_namespace = ns;
945                 nsb->nsb_reclaim_start = 0;
946                 atomic_set(&nsb->nsb_count, 0);
947         }
948
949         ns->ns_obd = obd;
950         ns->ns_appetite = apt;
951         ns->ns_client = client;
952         ns->ns_name = kstrdup(name, GFP_KERNEL);
953         if (!ns->ns_name)
954                 GOTO(out_hash, rc = -ENOMEM);
955
956         INIT_LIST_HEAD(&ns->ns_list_chain);
957         INIT_LIST_HEAD(&ns->ns_unused_list);
958         spin_lock_init(&ns->ns_lock);
959         atomic_set(&ns->ns_bref, 0);
960         init_waitqueue_head(&ns->ns_waitq);
961
962         ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
963         ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
964         ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
965
966         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
967         ns->ns_nr_unused          = 0;
968         ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
969         ns->ns_cancel_batch       = LDLM_DEFAULT_LRU_SHRINK_BATCH;
970         ns->ns_recalc_pct         = LDLM_DEFAULT_SLV_RECALC_PCT;
971         ns->ns_max_age            = ktime_set(LDLM_DEFAULT_MAX_ALIVE, 0);
972         ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
973         ns->ns_dirty_age_limit    = ktime_set(LDLM_DIRTY_AGE_LIMIT, 0);
974         ns->ns_timeouts           = 0;
975         ns->ns_orig_connect_flags = 0;
976         ns->ns_connect_flags      = 0;
977         ns->ns_stopping           = 0;
978         ns->ns_reclaim_start      = 0;
979         ns->ns_last_pos           = &ns->ns_unused_list;
980         ns->ns_flags              = 0;
981
982         rc = ldlm_namespace_sysfs_register(ns);
983         if (rc) {
984                 CERROR("%s: cannot initialize ns sysfs: rc = %d\n", name, rc);
985                 GOTO(out_hash, rc);
986         }
987
988         rc = ldlm_namespace_debugfs_register(ns);
989         if (rc) {
990                 CERROR("%s: cannot initialize ns debugfs: rc = %d\n", name, rc);
991                 GOTO(out_sysfs, rc);
992         }
993
994         idx = ldlm_namespace_nr_read(client);
995         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
996         if (rc) {
997                 CERROR("%s: cannot initialize lock pool, rc = %d\n", name, rc);
998                 GOTO(out_proc, rc);
999         }
1000
1001         ldlm_namespace_register(ns, client);
1002         RETURN(ns);
1003 out_proc:
1004         ldlm_namespace_debugfs_unregister(ns);
1005 out_sysfs:
1006         ldlm_namespace_sysfs_unregister(ns);
1007         ldlm_namespace_cleanup(ns, 0);
1008 out_hash:
1009         OBD_FREE_PTR_ARRAY_LARGE(ns->ns_rs_buckets, 1 << ns->ns_bucket_bits);
1010         kfree(ns->ns_name);
1011         cfs_hash_putref(ns->ns_rs_hash);
1012 out_ns:
1013         OBD_FREE_PTR(ns);
1014 out_ref:
1015         ldlm_put_ref();
1016         RETURN(ERR_PTR(rc));
1017 }
1018 EXPORT_SYMBOL(ldlm_namespace_new);
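/*
 * Illustrative caller sketch (not part of this file; the exact arguments
 * depend on the obd type and connect path): a client device would set up its
 * namespace roughly as follows:
 *
 *	obd->obd_namespace = ldlm_namespace_new(obd, obd->obd_name,
 *						LDLM_NAMESPACE_CLIENT,
 *						LDLM_NAMESPACE_GREEDY,
 *						LDLM_NS_TYPE_MDC);
 *	if (IS_ERR(obd->obd_namespace))
 *		return PTR_ERR(obd->obd_namespace);
 */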
1019
1020 /**
1021  * Cancel and destroy all locks on a resource.
1022  *
1023  * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
1024  * clean up.  This is currently only used for recovery, and we make
1025  * certain assumptions as a result--notably, that we shouldn't cancel
1026  * locks with refs.
1027  */
1028 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
1029                              u64 flags)
1030 {
1031         int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
1032         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
1033
1034         do {
1035                 struct ldlm_lock *lock = NULL, *tmp;
1036
1037                 /* First, look for a lock that has not been cleaned up yet;
1038                  * all cleaned locks are marked with the CLEANED flag. */
1039                 lock_res(res);
1040                 list_for_each_entry(tmp, q, l_res_link) {
1041                         if (ldlm_is_cleaned(tmp))
1042                                 continue;
1043
1044                         lock = tmp;
1045                         LDLM_LOCK_GET(lock);
1046                         ldlm_set_cleaned(lock);
1047                         break;
1048                 }
1049
1050                 if (lock == NULL) {
1051                         unlock_res(res);
1052                         break;
1053                 }
1054
1055                 /* Set CBPENDING so nothing in the cancellation path
1056                  * can match this lock. */
1057                 ldlm_set_cbpending(lock);
1058                 ldlm_set_failed(lock);
1059                 ldlm_clear_converting(lock);
1060                 lock->l_flags |= flags;
1061
1062                 /* ... without sending a CANCEL message for local_only. */
1063                 if (local_only)
1064                         ldlm_set_local_only(lock);
1065
1066                 if (local_only && (lock->l_readers || lock->l_writers)) {
1067                         /*
1068                          * This is a little bit gross, but much better than the
1069                          * alternative: pretend that we got a blocking AST from
1070                          * the server, so that when the lock is decref'd, it
1071                          * will go away ...
1072                          */
1073                         unlock_res(res);
1074                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
1075                         if (lock->l_flags & LDLM_FL_FAIL_LOC)
1076                                 schedule_timeout_uninterruptible(
1077                                         cfs_time_seconds(4));
1078
1079                         if (lock->l_completion_ast)
1080                                 lock->l_completion_ast(lock,
1081                                                        LDLM_FL_FAILED, NULL);
1082                         LDLM_LOCK_RELEASE(lock);
1083                         continue;
1084                 }
1085
1086                 if (client) {
1087                         struct lustre_handle lockh;
1088
1089                         unlock_res(res);
1090                         ldlm_lock2handle(lock, &lockh);
1091                         rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
1092                         if (rc)
1093                                 CERROR("ldlm_cli_cancel: %d\n", rc);
1094                 } else {
1095                         unlock_res(res);
1096                         LDLM_DEBUG(lock,
1097                                    "Freeing a lock still held by a client node");
1098                         ldlm_lock_cancel(lock);
1099                 }
1100                 LDLM_LOCK_RELEASE(lock);
1101         } while (1);
1102 }
1103
1104 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1105                                struct hlist_node *hnode, void *arg)
1106 {
1107         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1108         __u64 flags = *(__u64 *)arg;
1109
1110         cleanup_resource(res, &res->lr_granted, flags);
1111         cleanup_resource(res, &res->lr_waiting, flags);
1112
1113         return 0;
1114 }
1115
1116 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1117                                   struct hlist_node *hnode, void *arg)
1118 {
1119         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
1120
1121         lock_res(res);
1122         CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
1123                "(%d) after lock cleanup; forcing cleanup.\n",
1124                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
1125                atomic_read(&res->lr_refcount) - 1);
1126
1127         /* Use D_NETERROR since it is in the default mask */
1128         ldlm_resource_dump(D_NETERROR, res);
1129         unlock_res(res);
1130         return 0;
1131 }
1132
1133 /**
1134  * Cancel and destroy all locks in the namespace.
1135  *
1136  * Typically used during eviction, when the server has notified the client
1137  * that it was evicted and all of its state needs to be destroyed.
1138  * Also used during shutdown.
1139  */
1140 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
1141 {
1142         if (ns == NULL) {
1143                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
1144                 return ELDLM_OK;
1145         }
1146
1147         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
1148                                  &flags, 0);
1149         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
1150                                  NULL, 0);
1151         return ELDLM_OK;
1152 }
1153 EXPORT_SYMBOL(ldlm_namespace_cleanup);
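/*
 * Illustrative usage (not part of this file): on eviction a client drops all
 * lock state locally without telling the server, roughly:
 *
 *	ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
 *
 * while an orderly shutdown may pass 0 so cancels are sent normally.
 */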
1154
1155 /**
1156  * Attempts to free namespace.
1157  *
1158  * Only used when namespace goes away, like during an unmount.
1159  */
1160 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
1161 {
1162         ENTRY;
1163
1164         /* At shutdown time, don't call the cancellation callback */
1165         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
1166
1167         if (atomic_read(&ns->ns_bref) > 0) {
1168                 int rc;
1169                 CDEBUG(D_DLMTRACE,
1170                        "dlm namespace %s free waiting on refcount %d\n",
1171                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
1172 force_wait:
1173                 if (force)
1174                         rc = wait_event_idle_timeout(
1175                                 ns->ns_waitq,
1176                                 atomic_read(&ns->ns_bref) == 0,
1177                                 cfs_time_seconds(1) / 4);
1178                 else
1179                         rc = l_wait_event_abortable(
1180                                 ns->ns_waitq, atomic_read(&ns->ns_bref) == 0);
1181
1182                 /* Forced cleanups should be able to reclaim all references,
1183                  * so it's safe to wait forever... we can't leak locks... */
1184                 if (force && rc == 0) {
1185                         rc = -ETIMEDOUT;
1186                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
1187                                        "namespace with %d resources in use, "
1188                                        "(rc=%d)\n", ldlm_ns_name(ns),
1189                                        atomic_read(&ns->ns_bref), rc);
1190                         GOTO(force_wait, rc);
1191                 }
1192
1193                 if (atomic_read(&ns->ns_bref)) {
1194                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
1195                                        "with %d resources in use, (rc=%d)\n",
1196                                        ldlm_ns_name(ns),
1197                                        atomic_read(&ns->ns_bref), rc);
1198                         RETURN(ELDLM_NAMESPACE_EXISTS);
1199                 }
1200                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
1201                        ldlm_ns_name(ns));
1202         }
1203
1204         RETURN(ELDLM_OK);
1205 }
1206
1207 /**
1208  * Perform various cleanups on the passed \a ns so that it drops its
1209  * refcount and becomes ready for freeing. Waits until the refcount is 0.
1210  *
1211  * The following is done:
1212  * (0) Unregister \a ns from its list to make it inaccessible to potential
1213  * users such as the pools thread;
1214  * (1) Clear all locks in \a ns.
1215  */
1216 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
1217                                struct obd_import *imp,
1218                                int force)
1219 {
1220         int rc;
1221
1222         ENTRY;
1223         if (!ns) {
1224                 EXIT;
1225                 return;
1226         }
1227
1228         spin_lock(&ns->ns_lock);
1229         ns->ns_stopping = 1;
1230         spin_unlock(&ns->ns_lock);
1231
1232         /*
1233          * Can fail with -EINTR when force == 0 in which case try harder.
1234          */
1235         rc = __ldlm_namespace_free(ns, force);
1236         if (rc != ELDLM_OK) {
1237                 if (imp) {
1238                         ptlrpc_disconnect_import(imp, 0);
1239                         ptlrpc_invalidate_import(imp);
1240                 }
1241
1242                 /*
1243                  * With all requests dropped and the import inactive
1244                  * we are guaranteed that all references will be dropped.
1245                  */
1246                 rc = __ldlm_namespace_free(ns, 1);
1247                 LASSERT(rc == 0);
1248         }
1249         EXIT;
1250 }
1251 EXPORT_SYMBOL(ldlm_namespace_free_prior);
1252
1253 /**
1254  * Free the memory structures related to \a ns. This is only done once
1255  * ldlm_namespace_free_prior() has successfully removed all resources
1256  * referencing \a ns and its refcount has reached 0.
1257  */
1258 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
1259 {
1260         ENTRY;
1261         if (!ns) {
1262                 EXIT;
1263                 return;
1264         }
1265
1266         /* Make sure that nobody can find this ns in its list. */
1267         ldlm_namespace_unregister(ns, ns->ns_client);
1268         /* Finalize the pool _before_ the parent dir is removed. This is important
1269          * because ldlm_pool_fini() removes its own dir, which is a child of the
1270          * parent; removing it after the parent may cause an oops. */
1271         ldlm_pool_fini(&ns->ns_pool);
1272
1273         ldlm_namespace_debugfs_unregister(ns);
1274         ldlm_namespace_sysfs_unregister(ns);
1275         cfs_hash_putref(ns->ns_rs_hash);
1276         OBD_FREE_PTR_ARRAY_LARGE(ns->ns_rs_buckets, 1 << ns->ns_bucket_bits);
1277         kfree(ns->ns_name);
1278         /* Namespace \a ns should not be on any list at this time; otherwise
1279          * this will cause issues related to using a freed \a ns in the poold
1280          * thread.
1281          */
1282         LASSERT(list_empty(&ns->ns_list_chain));
1283         OBD_FREE_PTR(ns);
1284         ldlm_put_ref();
1285         EXIT;
1286 }
1287 EXPORT_SYMBOL(ldlm_namespace_free_post);
1288
1289 /**
1290  * Clean up the resources and free the namespace.
1291  * bug 12864:
1292  * Deadlock issue:
1293  * proc1: destroy import
1294  *        class_disconnect_export(grab cl_sem) ->
1295  *              -> ldlm_namespace_free ->
1296  *              -> lprocfs_remove(grab _lprocfs_lock).
1297  * proc2: read proc info
1298  *        lprocfs_fops_read(grab _lprocfs_lock) ->
1299  *              -> osc_rd_active, etc(grab cl_sem).
1300  *
1301  * To avoid this, ldlm_namespace_free() is split into two parts: the first
1302  * part, ldlm_namespace_free_prior(), cleans up the resources that are still
1303  * in use; the second part, ldlm_namespace_free_post(), unregisters the
1304  * lprocfs entries and then frees memory. It is called without cli->cl_sem
1305  * held.
1306  */
1307 void ldlm_namespace_free(struct ldlm_namespace *ns,
1308                          struct obd_import *imp,
1309                          int force)
1310 {
1311         ldlm_namespace_free_prior(ns, imp, force);
1312         ldlm_namespace_free_post(ns);
1313 }
1314 EXPORT_SYMBOL(ldlm_namespace_free);
1315
1316 void ldlm_namespace_get(struct ldlm_namespace *ns)
1317 {
1318         atomic_inc(&ns->ns_bref);
1319 }
1320
1321 /* This is only for callers that care about refcount */
1322 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1323 {
1324         return atomic_inc_return(&ns->ns_bref);
1325 }
1326
1327 void ldlm_namespace_put(struct ldlm_namespace *ns)
1328 {
1329         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1330                 wake_up(&ns->ns_waitq);
1331                 spin_unlock(&ns->ns_lock);
1332         }
1333 }
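/*
 * Note on the put path above: atomic_dec_and_lock() avoids taking ns_lock on
 * every put; the lock is taken, and ns_waitq is woken, only when the
 * reference count actually drops to zero.
 */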
1334
1335 /** Register \a ns in the list of namespaces */
1336 void ldlm_namespace_register(struct ldlm_namespace *ns, enum ldlm_side client)
1337 {
1338         mutex_lock(ldlm_namespace_lock(client));
1339         LASSERT(list_empty(&ns->ns_list_chain));
1340         list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
1341         ldlm_namespace_nr_inc(client);
1342         mutex_unlock(ldlm_namespace_lock(client));
1343 }
1344
1345 /** Unregister \a ns from the list of namespaces. */
1346 void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client)
1347 {
1348         mutex_lock(ldlm_namespace_lock(client));
1349         LASSERT(!list_empty(&ns->ns_list_chain));
1350         /* Some asserts and possibly other parts of the code are still
1351          * using list_empty(&ns->ns_list_chain). This is why it is
1352          * important to use list_del_init() here. */
1353         list_del_init(&ns->ns_list_chain);
1354         ldlm_namespace_nr_dec(client);
1355         mutex_unlock(ldlm_namespace_lock(client));
1356 }
1357
1358 /** Should be called with ldlm_namespace_lock(client) taken. */
1359 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1360                                           enum ldlm_side client)
1361 {
1362         LASSERT(!list_empty(&ns->ns_list_chain));
1363         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1364         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1365 }
1366
1367 /** Should be called with ldlm_namespace_lock(client) taken. */
1368 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1369                                             enum ldlm_side client)
1370 {
1371         LASSERT(!list_empty(&ns->ns_list_chain));
1372         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1373         list_move_tail(&ns->ns_list_chain,
1374                        ldlm_namespace_inactive_list(client));
1375 }
1376
1377 /** Should be called with ldlm_namespace_lock(client) taken. */
1378 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
1379 {
1380         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1381         LASSERT(!list_empty(ldlm_namespace_list(client)));
1382         return container_of(ldlm_namespace_list(client)->next,
1383                             struct ldlm_namespace, ns_list_chain);
1384 }
1385
1386 static bool ldlm_resource_extent_new(struct ldlm_resource *res)
1387 {
1388         int idx;
1389
1390         OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
1391                        sizeof(*res->lr_itree) * LCK_MODE_NUM);
1392         if (res->lr_itree == NULL)
1393                 return false;
1394         /* Initialize interval trees for each lock mode. */
1395         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1396                 res->lr_itree[idx].lit_size = 0;
1397                 res->lr_itree[idx].lit_mode = BIT(idx);
1398                 res->lr_itree[idx].lit_root = NULL;
1399         }
1400         return true;
1401 }
1402
1403 static bool ldlm_resource_inodebits_new(struct ldlm_resource *res)
1404 {
1405         int i;
1406
1407         OBD_ALLOC_PTR(res->lr_ibits_queues);
1408         if (res->lr_ibits_queues == NULL)
1409                 return false;
1410         for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
1411                 INIT_LIST_HEAD(&res->lr_ibits_queues->liq_waiting[i]);
1412         return true;
1413 }
1414
1415 static bool ldlm_resource_flock_new(struct ldlm_resource *res)
1416 {
1417         res->lr_flock_node.lfn_needs_reprocess = false;
1418         atomic_set(&res->lr_flock_node.lfn_unlock_pending, 0);
1419
1420         return true;
1421 }
1422
1423 /** Create and initialize new resource. */
1424 static struct ldlm_resource *ldlm_resource_new(enum ldlm_type ldlm_type)
1425 {
1426         struct ldlm_resource *res;
1427         bool rc;
1428
1429         OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1430         if (res == NULL)
1431                 return NULL;
1432
1433         switch (ldlm_type) {
1434         case LDLM_EXTENT:
1435                 rc = ldlm_resource_extent_new(res);
1436                 break;
1437         case LDLM_IBITS:
1438                 rc = ldlm_resource_inodebits_new(res);
1439                 break;
1440         case LDLM_FLOCK:
1441                 rc = ldlm_resource_flock_new(res);
1442                 break;
1443         default:
1444                 rc = true;
1445                 break;
1446         }
1447         if (!rc) {
1448                 OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1449                 return NULL;
1450         }
1451
1452         INIT_LIST_HEAD(&res->lr_granted);
1453         INIT_LIST_HEAD(&res->lr_waiting);
1454
1455         atomic_set(&res->lr_refcount, 1);
1456         spin_lock_init(&res->lr_lock);
1457         lu_ref_init(&res->lr_reference);
1458
1459         /* Since LVB init can now be delayed, there is no longer a need to
1460          * acquire the mutex immediately here. */
1461         mutex_init(&res->lr_lvb_mutex);
1462         res->lr_lvb_initialized = false;
1463
1464         return res;
1465 }
1466
1467 static void __ldlm_resource_free(struct rcu_head *head)
1468 {
1469         struct ldlm_resource *res = container_of(head, struct ldlm_resource,
1470                                                  lr_rcu);
1471
1472         OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1473 }
1474
1475 static void ldlm_resource_free(struct ldlm_resource *res)
1476 {
1477         if (res->lr_type == LDLM_EXTENT) {
1478                 if (res->lr_itree != NULL)
1479                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1480                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1481         } else if (res->lr_type == LDLM_IBITS) {
1482                 if (res->lr_ibits_queues != NULL)
1483                         OBD_FREE_PTR(res->lr_ibits_queues);
1484         }
1485
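        /* Defer the actual slab free by an RCU grace period so that any
         * reader still dereferencing this resource under rcu_read_lock()
         * is not left with a dangling pointer. */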
1486         call_rcu(&res->lr_rcu, __ldlm_resource_free);
1487 }
1488
1489 /**
1490  * Return a reference to the named resource, creating it if necessary.
1491  * Args: namespace with ns_lock unlocked
1492  * Locks: takes and releases the NS hash-lock and res->lr_lock
1493  * Returns: a referenced, unlocked ldlm_resource, or an ERR_PTR on error
1494  */
1495 struct ldlm_resource *
1496 ldlm_resource_get(struct ldlm_namespace *ns, const struct ldlm_res_id *name,
1497                   enum ldlm_type type, int create)
1498 {
1499         struct hlist_node       *hnode;
1500         struct ldlm_resource    *res = NULL;
1501         struct cfs_hash_bd              bd;
1502         __u64                   version;
1503         int                     ns_refcount = 0;
1504         int hash;
1505
1506         LASSERT(ns != NULL);
1507         LASSERT(ns->ns_rs_hash != NULL);
1508         LASSERT(name->name[0] != 0);
1509
1510         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1511         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1512         if (hnode != NULL) {
1513                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1514                 GOTO(found, res);
1515         }
1516
1517         version = cfs_hash_bd_version_get(&bd);
1518         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1519
1520         if (create == 0)
1521                 return ERR_PTR(-ENOENT);
1522
1523         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1524                  "type: %d\n", type);
1525         res = ldlm_resource_new(type);
1526         if (res == NULL)
1527                 return ERR_PTR(-ENOMEM);
1528
1529         hash = ldlm_res_hop_fid_hash(name, ns->ns_bucket_bits);
1530         res->lr_ns_bucket = &ns->ns_rs_buckets[hash];
1531         res->lr_name = *name;
1532         res->lr_type = type;
1533
1534         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1535         hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1536                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1537
1538         if (hnode != NULL) {
1539                 /* Someone won the race and already added the resource. */
1540                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1541                 /* Clean lu_ref for failed resource. */
1542                 lu_ref_fini(&res->lr_reference);
1543                 ldlm_resource_free(res);
1544 found:
1545                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1546                 return res;
1547         }
1548         /* We won! Let's add the resource. */
1549         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1550         if (atomic_inc_return(&res->lr_ns_bucket->nsb_count) == 1)
1551                 ns_refcount = ldlm_namespace_get_return(ns);
1552
1553         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1554
1555         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1556
1557         /* Check whether this happens to be the very first resource in this
1558          * namespace. If so, and this is a client namespace, move the
1559          * namespace onto the active-namespaces list so that it is
1560          * patrolled by ldlm_poold. */
1561         if (ns_is_client(ns) && ns_refcount == 1) {
1562                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1563                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1564                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1565         }
1566
1567         return res;
1568 }
1569 EXPORT_SYMBOL(ldlm_resource_get);
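
/*
 * Hypothetical usage sketch (not part of this file): look up or create a
 * resource and drop the reference when done.  "ns", "res_id" and the lock
 * type are placeholders.
 *
 *      struct ldlm_resource *res;
 *
 *      res = ldlm_resource_get(ns, &res_id, LDLM_IBITS, 1);
 *      if (IS_ERR(res))
 *              return PTR_ERR(res);
 *      ... examine lr_granted/lr_waiting under lock_res(res) ...
 *      ldlm_resource_putref(res);
 */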
1570
1571 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1572 {
1573         LASSERT(res != NULL);
1574         LASSERT(res != LP_POISON);
1575         atomic_inc(&res->lr_refcount);
1576         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1577                atomic_read(&res->lr_refcount));
1578         return res;
1579 }
1580
1581 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1582                                          struct ldlm_resource *res)
1583 {
1584         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1585
1586         if (!list_empty(&res->lr_granted)) {
1587                 ldlm_resource_dump(D_ERROR, res);
1588                 LBUG();
1589         }
1590
1591         if (!list_empty(&res->lr_waiting)) {
1592                 ldlm_resource_dump(D_ERROR, res);
1593                 LBUG();
1594         }
1595
1596         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1597                                bd, &res->lr_hash);
1598         lu_ref_fini(&res->lr_reference);
1599         if (atomic_dec_and_test(&nsb->nsb_count))
1600                 ldlm_namespace_put(nsb->nsb_namespace);
1601 }
1602
1603 /* Returns 1 if the resource was freed, 0 if it remains. */
1604 int ldlm_resource_putref(struct ldlm_resource *res)
1605 {
1606         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1607         struct cfs_hash_bd   bd;
1608
1609         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1610         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1611                res, atomic_read(&res->lr_refcount) - 1);
1612
1613         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1614         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1615                 __ldlm_resource_putref_final(&bd, res);
1616                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1617                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1618                         ns->ns_lvbo->lvbo_free(res);
1619                 ldlm_resource_free(res);
1620                 return 1;
1621         }
1622         return 0;
1623 }
1624 EXPORT_SYMBOL(ldlm_resource_putref);
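
/*
 * Hypothetical sketch: each extra reference taken with
 * ldlm_resource_getref() must be balanced by an ldlm_resource_putref();
 * the final putref removes the resource from the namespace hash and
 * frees it.
 *
 *      struct ldlm_resource *extra = ldlm_resource_getref(res);
 *      ...
 *      if (ldlm_resource_putref(extra))
 *              CDEBUG(D_INFO, "that was the last reference\n");
 */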
1625
1626 static void __ldlm_resource_add_lock(struct ldlm_resource *res,
1627                                      struct list_head *head,
1628                                      struct ldlm_lock *lock,
1629                                      bool tail)
1630 {
1631         check_res_locked(res);
1632
1633         if (ldlm_is_destroyed(lock)) {
1634                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1635                 return;
1636         }
1637
1638         LASSERT(list_empty(&lock->l_res_link));
1639
1640         if (tail)
1641                 list_add_tail(&lock->l_res_link, head);
1642         else
1643                 list_add(&lock->l_res_link, head);
1644
1645         if (res->lr_type == LDLM_IBITS)
1646                 ldlm_inodebits_add_lock(res, head, lock, tail);
1647
1648         ldlm_resource_dump(D_INFO, res);
1649 }
1650
1651 /**
1652  * Add a lock to the specified lock list of a given resource.
1653  */
1654 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1655                             struct ldlm_lock *lock)
1656 {
1657         LDLM_DEBUG(lock, "About to add this lock");
1658
1659         __ldlm_resource_add_lock(res, head, lock, true);
1660 }
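
/*
 * Illustrative sketch (placeholder "res" and "lock"): callers add a lock
 * to one of the resource lists while holding the resource lock.
 *
 *      lock_res(res);
 *      ldlm_resource_add_lock(res, &res->lr_waiting, lock);
 *      unlock_res(res);
 */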
1661
1662 /**
1663  * Insert a lock into a resource's lock list after the specified lock.
1664  */
1665 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1666                                      struct ldlm_lock *new)
1667 {
1668         LASSERT(!list_empty(&original->l_res_link));
1669
1670         LDLM_DEBUG(new, "About to insert this lock after %p: ", original);
1671         __ldlm_resource_add_lock(original->l_resource,
1672                                  &original->l_res_link,
1673                                  new, false);
1674 }
1675
1676 /**
1677  * Insert a lock into a resource's lock list before the specified lock.
1678  *
1679  * IBITS waiting locks must also be inserted into the ibit lists, and only
1680  * the insert-after operation is supported for them, because the bit sets
1681  * of the previous and the new locks must match. Therefore, take the
1682  * previous lock and insert after it.
1683  */
1684 void ldlm_resource_insert_lock_before(struct ldlm_lock *original,
1685                                       struct ldlm_lock *new)
1686 {
1687         LASSERT(!list_empty(&original->l_res_link));
1688
1689         LDLM_DEBUG(new, "About to insert this lock before %p: ", original);
1690         __ldlm_resource_add_lock(original->l_resource,
1691                                  original->l_res_link.prev, new, false);
1692 }
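
/*
 * In list terms, inserting "new" before "original" is an insert after
 * original's predecessor:
 *
 *      prev -> original    becomes    prev -> new -> original
 */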
1693
1694 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1695 {
1696         int type = lock->l_resource->lr_type;
1697
1698         check_res_locked(lock->l_resource);
1699         switch (type) {
1700         case LDLM_PLAIN:
1701                 ldlm_unlink_lock_skiplist(lock);
1702                 break;
1703         case LDLM_EXTENT:
1704                 ldlm_extent_unlink_lock(lock);
1705                 break;
1706         case LDLM_IBITS:
1707                 ldlm_inodebits_unlink_lock(lock);
1708                 break;
1709         }
1710         list_del_init(&lock->l_res_link);
1711 }
1712 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1713
1714 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1715 {
1716         desc->lr_type = res->lr_type;
1717         desc->lr_name = res->lr_name;
1718 }
1719
1720 /**
1721  * Print information about all locks in all namespaces on this node to the
1722  * debug log.
1723  */
1724 void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
1725 {
1726         struct ldlm_namespace *ns;
1727
1728         if (!((libcfs_debug | D_ERROR) & level))
1729                 return;
1730
1731         mutex_lock(ldlm_namespace_lock(client));
1732
1733         list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain)
1734                 ldlm_namespace_dump(level, ns);
1735
1736         mutex_unlock(ldlm_namespace_lock(client));
1737 }
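
/*
 * Hypothetical usage sketch: nothing is printed unless "level" is enabled
 * in libcfs_debug (D_ERROR always passes the check above), so a caller
 * would pass an enabled mask, e.g.:
 *
 *      ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_INFO);
 */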
1738
1739 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1740                               struct hlist_node *hnode, void *arg)
1741 {
1742         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1743         int    level = (int)(unsigned long)arg;
1744
1745         lock_res(res);
1746         ldlm_resource_dump(level, res);
1747         unlock_res(res);
1748
1749         return 0;
1750 }
1751
1752 /**
1753  * Print information about all locks in this namespace on this node to the
1754  * debug log.
1755  */
1756 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1757 {
1758         if (!((libcfs_debug | D_ERROR) & level))
1759                 return;
1760
1761         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1762                ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1763                ns_is_client(ns) ? "client" : "server");
1764
1765         if (ktime_get_seconds() < ns->ns_next_dump)
1766                 return;
1767
1768         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1769                                  ldlm_res_hash_dump,
1770                                  (void *)(unsigned long)level, 0);
1771         spin_lock(&ns->ns_lock);
1772         ns->ns_next_dump = ktime_get_seconds() + 10;
1773         spin_unlock(&ns->ns_lock);
1774 }
1775
1776 /**
1777  * Print information about all locks in this resource to the debug log.
1778  */
1779 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1780 {
1781         struct ldlm_lock *lock;
1782         unsigned int granted = 0;
1783
1784         BUILD_BUG_ON(RES_NAME_SIZE != 4);
1785
1786         if (!((libcfs_debug | D_ERROR) & level))
1787                 return;
1788
1789         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1790                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1791
1792         if (!list_empty(&res->lr_granted)) {
1793                 CDEBUG(level, "Granted locks (in reverse order):\n");
1794                 list_for_each_entry_reverse(lock, &res->lr_granted,
1795                                                 l_res_link) {
1796                         LDLM_DEBUG_LIMIT(level, lock, "###");
1797                         if (!(level & D_CANTMASK) &&
1798                             ++granted > ldlm_dump_granted_max) {
1799                                 CDEBUG(level,
1800                                        "only dumping %d granted locks to avoid DDoS.\n",
1801                                        granted);
1802                                 break;
1803                         }
1804                 }
1805         }
1806
1807         if (!list_empty(&res->lr_waiting)) {
1808                 CDEBUG(level, "Waiting locks:\n");
1809                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1810                         LDLM_DEBUG_LIMIT(level, lock, "###");
1811         }
1812 }
1813 EXPORT_SYMBOL(ldlm_resource_dump);
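
/*
 * Hypothetical usage sketch: dump one resource while holding its lock, as
 * ldlm_res_hash_dump() above does.  The number of granted locks printed is
 * capped by ldlm_dump_granted_max unless the level is in D_CANTMASK.
 *
 *      lock_res(res);
 *      ldlm_resource_dump(D_INFO, res);
 *      unlock_res(res);
 */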