1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_resource.c
33  *
34  * Author: Phil Schwan <phil@clusterfs.com>
35  * Author: Peter Braam <braam@clusterfs.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LDLM
39 #include <lustre_dlm.h>
40 #include <lustre_fid.h>
41 #include <obd_class.h>
42 #include "ldlm_internal.h"
43
44 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
45 struct kmem_cache *ldlm_interval_tree_slab;
46 struct kmem_cache *ldlm_inodebits_slab;
47
48 int ldlm_srv_namespace_nr = 0;
49 int ldlm_cli_namespace_nr = 0;
50
51 DEFINE_MUTEX(ldlm_srv_namespace_lock);
52 LIST_HEAD(ldlm_srv_namespace_list);
53
54 DEFINE_MUTEX(ldlm_cli_namespace_lock);
55 /* Client namespaces that have active resources in them.
56  * Once all resources go away, ldlm_poold moves such namespaces to the
57  * inactive list. */
58 LIST_HEAD(ldlm_cli_active_namespace_list);
59 /* Client namespaces that don't have any locks in them */
60 LIST_HEAD(ldlm_cli_inactive_namespace_list);
61
62 static struct dentry *ldlm_debugfs_dir;
63 static struct dentry *ldlm_ns_debugfs_dir;
64 struct dentry *ldlm_svc_debugfs_dir;
65
66 /* During a debug dump, print at most this many granted locks for a single
67  * resource to avoid flooding the log (DDoS). */
68 static unsigned int ldlm_dump_granted_max = 256;
69
70 static ssize_t ldebugfs_dump_ns_seq_write(struct file *file,
71                                           const char __user *buffer,
72                                           size_t count, loff_t *off)
73 {
74         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
75         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
76         RETURN(count);
77 }
78
79 LDEBUGFS_FOPS_WR_ONLY(ldlm, dump_ns);
80
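/*
 * Generic debugfs show/store handlers for an unsigned int tunable; they are
 * wired up in ldlm_debugfs_list below to expose ldlm_dump_granted_max as
 * "dump_granted_max".
 */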
81 static int ldlm_rw_uint_seq_show(struct seq_file *m, void *v)
82 {
83         seq_printf(m, "%u\n", *(unsigned int *)m->private);
84         return 0;
85 }
86
87 static ssize_t
88 ldlm_rw_uint_seq_write(struct file *file, const char __user *buffer,
89                        size_t count, loff_t *off)
90 {
91         struct seq_file *seq = file->private_data;
92
93         if (!count)
94                 return 0;
95
96         return kstrtouint_from_user(buffer, count, 0,
97                                     (unsigned int *)seq->private);
98 }
99
100 LDEBUGFS_SEQ_FOPS(ldlm_rw_uint);
101
102 #ifdef HAVE_SERVER_SUPPORT
103
104 static int seq_watermark_show(struct seq_file *m, void *data)
105 {
106         seq_printf(m, "%llu\n", *(__u64 *)m->private);
107         return 0;
108 }
109
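/*
 * Store handler for lock_reclaim_threshold_mb and lock_limit_mb.  Worked
 * example (illustrative): writing "1024" to lock_limit_mb is parsed as
 * 1024 MiB, so ldlm_lock_limit_mb becomes 1024 and ldlm_lock_limit becomes
 * (1024 << 20) / sizeof(struct ldlm_lock), i.e. the number of ldlm_lock
 * structures that fit in 1024 MiB.
 */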
110 static ssize_t seq_watermark_write(struct file *file,
111                                    const char __user *buffer, size_t count,
112                                    loff_t *off)
113 {
114         u64 value;
115         __u64 watermark;
116         __u64 *data = ((struct seq_file *)file->private_data)->private;
117         bool wm_low = (data == &ldlm_reclaim_threshold_mb);
118         char kernbuf[22] = "";
119         int rc;
120
121         if (count >= sizeof(kernbuf))
122                 return -EINVAL;
123
124         if (copy_from_user(kernbuf, buffer, count))
125                 return -EFAULT;
126         kernbuf[count] = 0;
127
128         rc = sysfs_memparse(kernbuf, count, &value, "MiB");
129         if (rc < 0) {
130                 CERROR("Failed to set %s, rc = %d.\n",
131                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb",
132                        rc);
133                 return rc;
134         } else if (value != 0 && value < (1 << 20)) {
135                 CERROR("%s should be greater than 1MB.\n",
136                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb");
137                 return -EINVAL;
138         }
139         watermark = value >> 20;
140
141         if (wm_low) {
142                 if (ldlm_lock_limit_mb != 0 && watermark > ldlm_lock_limit_mb) {
143                         CERROR("lock_reclaim_threshold_mb must be smaller than "
144                                "lock_limit_mb.\n");
145                         return -EINVAL;
146                 }
147
148                 *data = watermark;
149                 if (watermark != 0) {
150                         watermark <<= 20;
151                         do_div(watermark, sizeof(struct ldlm_lock));
152                 }
153                 ldlm_reclaim_threshold = watermark;
154         } else {
155                 if (ldlm_reclaim_threshold_mb != 0 &&
156                     watermark < ldlm_reclaim_threshold_mb) {
157                         CERROR("lock_limit_mb must be greater than "
158                                "lock_reclaim_threshold_mb.\n");
159                         return -EINVAL;
160                 }
161
162                 *data = watermark;
163                 if (watermark != 0) {
164                         watermark <<= 20;
165                         do_div(watermark, sizeof(struct ldlm_lock));
166                 }
167                 ldlm_lock_limit = watermark;
168         }
169
170         return count;
171 }
172
173 static int seq_watermark_open(struct inode *inode, struct file *file)
174 {
175         return single_open(file, seq_watermark_show, inode->i_private);
176 }
177
178 static const struct file_operations ldlm_watermark_fops = {
179         .owner          = THIS_MODULE,
180         .open           = seq_watermark_open,
181         .read           = seq_read,
182         .write          = seq_watermark_write,
183         .llseek         = seq_lseek,
184         .release        = lprocfs_single_release,
185 };
186
187 static int seq_granted_show(struct seq_file *m, void *data)
188 {
189         seq_printf(m, "%llu\n", percpu_counter_sum_positive(
190                    (struct percpu_counter *)m->private));
191         return 0;
192 }
193
194 static int seq_granted_open(struct inode *inode, struct file *file)
195 {
196         return single_open(file, seq_granted_show, inode->i_private);
197 }
198
199 static const struct file_operations ldlm_granted_fops = {
200         .owner  = THIS_MODULE,
201         .open   = seq_granted_open,
202         .read   = seq_read,
203         .llseek = seq_lseek,
204         .release = seq_release,
205 };
206
207 #endif /* HAVE_SERVER_SUPPORT */
208
209 static struct lprocfs_vars ldlm_debugfs_list[] = {
210         { .name =       "dump_namespaces",
211           .fops =       &ldlm_dump_ns_fops,
212           .proc_mode =  0222 },
213         { .name =       "dump_granted_max",
214           .fops =       &ldlm_rw_uint_fops,
215           .data =       &ldlm_dump_granted_max },
216 #ifdef HAVE_SERVER_SUPPORT
217         { .name =       "lock_reclaim_threshold_mb",
218           .fops =       &ldlm_watermark_fops,
219           .data =       &ldlm_reclaim_threshold_mb },
220         { .name =       "lock_limit_mb",
221           .fops =       &ldlm_watermark_fops,
222           .data =       &ldlm_lock_limit_mb },
223         { .name =       "lock_granted_count",
224           .fops =       &ldlm_granted_fops,
225           .data =       &ldlm_granted_total },
226 #endif
227         { NULL }
228 };
229
230 int ldlm_debugfs_setup(void)
231 {
232         int rc;
233
234         ENTRY;
235         ldlm_debugfs_dir = ldebugfs_register(OBD_LDLM_DEVICENAME,
236                                              debugfs_lustre_root,
237                                              NULL, NULL);
238         if (IS_ERR_OR_NULL(ldlm_debugfs_dir)) {
239                 CERROR("LDebugFS failed in ldlm-init\n");
240                 rc = ldlm_debugfs_dir ? PTR_ERR(ldlm_debugfs_dir) : -ENOMEM;
241                 ldlm_debugfs_dir = NULL;
242                 GOTO(err, rc);
243         }
244
245         ldlm_ns_debugfs_dir = ldebugfs_register("namespaces",
246                                                 ldlm_debugfs_dir,
247                                                 NULL, NULL);
248         if (IS_ERR_OR_NULL(ldlm_ns_debugfs_dir)) {
249                 CERROR("LProcFS failed in ldlm-init\n");
250                 rc = ldlm_ns_debugfs_dir ? PTR_ERR(ldlm_ns_debugfs_dir)
251                                          : -ENOMEM;
252                 GOTO(err, rc);
253         }
254
255         ldlm_svc_debugfs_dir = ldebugfs_register("services",
256                                                  ldlm_debugfs_dir,
257                                                  NULL, NULL);
258         if (IS_ERR_OR_NULL(ldlm_svc_debugfs_dir)) {
259                 CERROR("LProcFS failed in ldlm-init\n");
260                 rc = ldlm_svc_debugfs_dir ? PTR_ERR(ldlm_svc_debugfs_dir)
261                                           : -ENOMEM;
262                 GOTO(err, rc);
263         }
264
265         rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
266         if (rc != 0) {
267                 CERROR("LProcFS failed in ldlm-init\n");
268                 GOTO(err, rc);
269         }
270
271         RETURN(0);
272
273 err:
274         debugfs_remove_recursive(ldlm_debugfs_dir);
275         ldlm_svc_debugfs_dir = NULL;
276         ldlm_ns_debugfs_dir = NULL;
277         ldlm_debugfs_dir = NULL;
278         RETURN(rc);
279 }
280
281 void ldlm_debugfs_cleanup(void)
282 {
283         debugfs_remove_recursive(ldlm_debugfs_dir);
284
285         ldlm_svc_debugfs_dir = NULL;
286         ldlm_ns_debugfs_dir = NULL;
287         ldlm_debugfs_dir = NULL;
288 }
289
290 static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr,
291                                    char *buf)
292 {
293         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
294                                                  ns_kobj);
295         __u64                   res = 0;
296         int                     i;
297
298         /* The result is not strictly consistent. */
299         for (i = 0; i < (1 << ns->ns_bucket_bits); i++)
300                 res += atomic_read(&ns->ns_rs_buckets[i].nsb_count);
301         return sprintf(buf, "%lld\n", res);
302 }
303 LUSTRE_RO_ATTR(resource_count);
304
305 static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr,
306                                char *buf)
307 {
308         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
309                                                  ns_kobj);
310         __u64                   locks;
311
312         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
313                                         LPROCFS_FIELDS_FLAGS_SUM);
314         return sprintf(buf, "%lld\n", locks);
315 }
316 LUSTRE_RO_ATTR(lock_count);
317
318 static ssize_t lock_unused_count_show(struct kobject *kobj,
319                                       struct attribute *attr,
320                                       char *buf)
321 {
322         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
323                                                  ns_kobj);
324
325         return sprintf(buf, "%d\n", ns->ns_nr_unused);
326 }
327 LUSTRE_RO_ATTR(lock_unused_count);
328
329 static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr,
330                              char *buf)
331 {
332         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
333                                                  ns_kobj);
334         __u32 *nr = &ns->ns_max_unused;
335
336         if (ns_connect_lru_resize(ns))
337                 nr = &ns->ns_nr_unused;
338         return sprintf(buf, "%u\n", *nr);
339 }
340
341 static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
342                               const char *buffer, size_t count)
343 {
344         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
345                                                  ns_kobj);
346         unsigned long tmp;
347         int lru_resize;
348         int err;
349
350         if (strncmp(buffer, "clear", 5) == 0) {
351                 CDEBUG(D_DLMTRACE,
352                        "dropping all unused locks from namespace %s\n",
353                        ldlm_ns_name(ns));
354                 if (ns_connect_lru_resize(ns)) {
355                         /* Try to cancel all @ns_nr_unused locks. */
356                         ldlm_cancel_lru(ns, ns->ns_nr_unused, 0,
357                                         LDLM_LRU_FLAG_PASSED |
358                                         LDLM_LRU_FLAG_CLEANUP);
359                 } else {
360                         tmp = ns->ns_max_unused;
361                         ns->ns_max_unused = 0;
362                         ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED |
363                                         LDLM_LRU_FLAG_CLEANUP);
364                         ns->ns_max_unused = tmp;
365                 }
366                 return count;
367         }
368
369         err = kstrtoul(buffer, 10, &tmp);
370         if (err != 0) {
371                 CERROR("lru_size: invalid value written\n");
372                 return -EINVAL;
373         }
374         lru_resize = (tmp == 0);
375
376         if (ns_connect_lru_resize(ns)) {
377                 if (!lru_resize)
378                         ns->ns_max_unused = (unsigned int)tmp;
379
380                 if (tmp > ns->ns_nr_unused)
381                         tmp = ns->ns_nr_unused;
382                 tmp = ns->ns_nr_unused - tmp;
383
384                 CDEBUG(D_DLMTRACE,
385                        "changing namespace %s unused locks from %u to %u\n",
386                        ldlm_ns_name(ns), ns->ns_nr_unused,
387                        (unsigned int)tmp);
388                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
389
390                 if (!lru_resize) {
391                         CDEBUG(D_DLMTRACE,
392                                "disable lru_resize for namespace %s\n",
393                                ldlm_ns_name(ns));
394                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
395                 }
396         } else {
397                 CDEBUG(D_DLMTRACE,
398                        "changing namespace %s max_unused from %u to %u\n",
399                        ldlm_ns_name(ns), ns->ns_max_unused,
400                        (unsigned int)tmp);
401                 ns->ns_max_unused = (unsigned int)tmp;
402                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
403
404                 /* Make sure that LRU resize was originally supported before
405                  * turning it on here.
406                  */
407                 if (lru_resize &&
408                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
409                         CDEBUG(D_DLMTRACE,
410                                "enable lru_resize for namespace %s\n",
411                                ldlm_ns_name(ns));
412                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
413                 }
414         }
415
416         return count;
417 }
418 LUSTRE_RW_ATTR(lru_size);
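/*
 * Illustrative sysfs usage (namespace directory as noted near ldlm_ns_attrs
 * below, i.e. /sys/fs/lustre/ldlm/namespaces/<namespace>/):
 *
 *   echo clear > .../lru_size     drop all unused locks from the namespace
 *   echo 0     > .../lru_size     re-enable LRU resizing, if the server
 *                                 originally supported it
 *   echo 400   > .../lru_size     cap unused locks at 400 and disable LRU
 *                                 resizing
 */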
419
420 static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
421                                 char *buf)
422 {
423         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
424                                                  ns_kobj);
425
426         return sprintf(buf, "%lld\n", ktime_to_ms(ns->ns_max_age));
427 }
428
429 static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr,
430                                  const char *buffer, size_t count)
431 {
432         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
433                                                  ns_kobj);
434         int scale = NSEC_PER_MSEC;
435         unsigned long long tmp;
436         char *buf;
437
438         /* Did the user ask in seconds or milliseconds? The default is ms. */
439         buf = strstr(buffer, "ms");
440         if (!buf) {
441                 buf = strchr(buffer, 's');
442                 if (buf)
443                         scale = NSEC_PER_SEC;
444         }
445
446         if (buf)
447                 *buf = '\0';
448
449         if (kstrtoull(buffer, 10, &tmp))
450                 return -EINVAL;
451
452         ns->ns_max_age = ktime_set(0, tmp * scale);
453
454         return count;
455 }
456 LUSTRE_RW_ATTR(lru_max_age);
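/*
 * Illustrative usage: the value is taken in milliseconds by default; an "s"
 * or "ms" suffix selects the unit, as parsed above.
 *
 *   echo 3900000 > .../lru_max_age     3900000 ms
 *   echo 3900s   > .../lru_max_age     the same age, given in seconds
 */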
457
458 static ssize_t early_lock_cancel_show(struct kobject *kobj,
459                                       struct attribute *attr,
460                                       char *buf)
461 {
462         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
463                                                  ns_kobj);
464
465         return sprintf(buf, "%d\n", ns_connect_cancelset(ns));
466 }
467
468 static ssize_t early_lock_cancel_store(struct kobject *kobj,
469                                        struct attribute *attr,
470                                        const char *buffer,
471                                        size_t count)
472 {
473         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
474                                                  ns_kobj);
475         unsigned long supp = -1;
476         int rc;
477
478         rc = kstrtoul(buffer, 10, &supp);
479         if (rc < 0)
480                 return rc;
481
482         if (supp == 0)
483                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
484         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
485                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
486         return count;
487 }
488 LUSTRE_RW_ATTR(early_lock_cancel);
489
490 static ssize_t dirty_age_limit_show(struct kobject *kobj,
491                                     struct attribute *attr, char *buf)
492 {
493         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
494                                                  ns_kobj);
495
496         return sprintf(buf, "%llu\n", ns->ns_dirty_age_limit);
497 }
498
499 static ssize_t dirty_age_limit_store(struct kobject *kobj,
500                                      struct attribute *attr,
501                                      const char *buffer, size_t count)
502 {
503         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
504                                                  ns_kobj);
505         unsigned long long tmp;
506
507         if (kstrtoull(buffer, 10, &tmp))
508                 return -EINVAL;
509
510         ns->ns_dirty_age_limit = tmp;
511
512         return count;
513 }
514 LUSTRE_RW_ATTR(dirty_age_limit);
515
516 #ifdef HAVE_SERVER_SUPPORT
517 static ssize_t ctime_age_limit_show(struct kobject *kobj,
518                                     struct attribute *attr, char *buf)
519 {
520         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
521                                                  ns_kobj);
522
523         return sprintf(buf, "%llu\n", ns->ns_ctime_age_limit);
524 }
525
526 static ssize_t ctime_age_limit_store(struct kobject *kobj,
527                                      struct attribute *attr,
528                                      const char *buffer, size_t count)
529 {
530         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
531                                                  ns_kobj);
532         unsigned long long tmp;
533
534         if (kstrtoull(buffer, 10, &tmp))
535                 return -EINVAL;
536
537         ns->ns_ctime_age_limit = tmp;
538
539         return count;
540 }
541 LUSTRE_RW_ATTR(ctime_age_limit);
542
543 static ssize_t lock_timeouts_show(struct kobject *kobj, struct attribute *attr,
544                                   char *buf)
545 {
546         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
547                                                  ns_kobj);
548
549         return sprintf(buf, "%d\n", ns->ns_timeouts);
550 }
551 LUSTRE_RO_ATTR(lock_timeouts);
552
553 static ssize_t max_nolock_bytes_show(struct kobject *kobj,
554                                      struct attribute *attr, char *buf)
555 {
556         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
557                                                  ns_kobj);
558
559         return sprintf(buf, "%u\n", ns->ns_max_nolock_size);
560 }
561
562 static ssize_t max_nolock_bytes_store(struct kobject *kobj,
563                                       struct attribute *attr,
564                                       const char *buffer, size_t count)
565 {
566         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
567                                                  ns_kobj);
568         unsigned long tmp;
569         int err;
570
571         err = kstrtoul(buffer, 10, &tmp);
572         if (err != 0)
573                 return -EINVAL;
574
575         ns->ns_max_nolock_size = tmp;
576
577         return count;
578 }
579 LUSTRE_RW_ATTR(max_nolock_bytes);
580
581 static ssize_t contention_seconds_show(struct kobject *kobj,
582                                        struct attribute *attr, char *buf)
583 {
584         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
585                                                  ns_kobj);
586
587         return sprintf(buf, "%llu\n", ns->ns_contention_time);
588 }
589
590 static ssize_t contention_seconds_store(struct kobject *kobj,
591                                         struct attribute *attr,
592                                         const char *buffer, size_t count)
593 {
594         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
595                                                  ns_kobj);
596         unsigned long long tmp;
597
598         if (kstrtoull(buffer, 10, &tmp))
599                 return -EINVAL;
600
601         ns->ns_contention_time = tmp;
602
603         return count;
604 }
605 LUSTRE_RW_ATTR(contention_seconds);
606
607 static ssize_t contended_locks_show(struct kobject *kobj,
608                                     struct attribute *attr, char *buf)
609 {
610         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
611                                                  ns_kobj);
612
613         return sprintf(buf, "%u\n", ns->ns_contended_locks);
614 }
615
616 static ssize_t contended_locks_store(struct kobject *kobj,
617                                      struct attribute *attr,
618                                      const char *buffer, size_t count)
619 {
620         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
621                                                  ns_kobj);
622         unsigned long tmp;
623         int err;
624
625         err = kstrtoul(buffer, 10, &tmp);
626         if (err != 0)
627                 return -EINVAL;
628
629         ns->ns_contended_locks = tmp;
630
631         return count;
632 }
633 LUSTRE_RW_ATTR(contended_locks);
634
635 static ssize_t max_parallel_ast_show(struct kobject *kobj,
636                                      struct attribute *attr, char *buf)
637 {
638         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
639                                                  ns_kobj);
640
641         return sprintf(buf, "%u\n", ns->ns_max_parallel_ast);
642 }
643
644 static ssize_t max_parallel_ast_store(struct kobject *kobj,
645                                       struct attribute *attr,
646                                       const char *buffer, size_t count)
647 {
648         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
649                                                  ns_kobj);
650         unsigned long tmp;
651         int err;
652
653         err = kstrtoul(buffer, 10, &tmp);
654         if (err != 0)
655                 return -EINVAL;
656
657         ns->ns_max_parallel_ast = tmp;
658
659         return count;
660 }
661 LUSTRE_RW_ATTR(max_parallel_ast);
662
663 #endif /* HAVE_SERVER_SUPPORT */
664
665 /* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */
666 static struct attribute *ldlm_ns_attrs[] = {
667         &lustre_attr_resource_count.attr,
668         &lustre_attr_lock_count.attr,
669         &lustre_attr_lock_unused_count.attr,
670         &lustre_attr_lru_size.attr,
671         &lustre_attr_lru_max_age.attr,
672         &lustre_attr_early_lock_cancel.attr,
673         &lustre_attr_dirty_age_limit.attr,
674 #ifdef HAVE_SERVER_SUPPORT
675         &lustre_attr_ctime_age_limit.attr,
676         &lustre_attr_lock_timeouts.attr,
677         &lustre_attr_max_nolock_bytes.attr,
678         &lustre_attr_contention_seconds.attr,
679         &lustre_attr_contended_locks.attr,
680         &lustre_attr_max_parallel_ast.attr,
681 #endif
682         NULL,
683 };
684
685 static void ldlm_ns_release(struct kobject *kobj)
686 {
687         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
688                                                  ns_kobj);
689         complete(&ns->ns_kobj_unregister);
690 }
691
692 static struct kobj_type ldlm_ns_ktype = {
693         .default_attrs  = ldlm_ns_attrs,
694         .sysfs_ops      = &lustre_sysfs_ops,
695         .release        = ldlm_ns_release,
696 };
697
698 static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns)
699 {
700         if (IS_ERR_OR_NULL(ns->ns_debugfs_entry))
701                 CERROR("dlm namespace %s has no debugfs dir?\n",
702                        ldlm_ns_name(ns));
703         else
704                 debugfs_remove_recursive(ns->ns_debugfs_entry);
705
706         if (ns->ns_stats != NULL)
707                 lprocfs_free_stats(&ns->ns_stats);
708 }
709
710 void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns)
711 {
712         kobject_put(&ns->ns_kobj);
713         wait_for_completion(&ns->ns_kobj_unregister);
714 }
715
716 int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
717 {
718         int err;
719
720         ns->ns_kobj.kset = ldlm_ns_kset;
721         init_completion(&ns->ns_kobj_unregister);
722         err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL,
723                                    "%s", ldlm_ns_name(ns));
724
725         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
726         if (!ns->ns_stats) {
727                 kobject_put(&ns->ns_kobj);
728                 return -ENOMEM;
729         }
730
731         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
732                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
733
734         return err;
735 }
736
737 static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
738 {
739         struct dentry *ns_entry;
740
741         if (!IS_ERR_OR_NULL(ns->ns_debugfs_entry)) {
742                 ns_entry = ns->ns_debugfs_entry;
743         } else {
744                 ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
745                                               ldlm_ns_debugfs_dir);
746                 if (!ns_entry)
747                         return -ENOMEM;
748                 ns->ns_debugfs_entry = ns_entry;
749         }
750
751         return 0;
752 }
753 #undef MAX_STRING_SIZE
754
755 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
756                                   const void *key, unsigned int mask)
757 {
758         const struct ldlm_res_id *id = key;
759         unsigned int val = 0;
760         unsigned int i;
761
762         for (i = 0; i < RES_NAME_SIZE; i++)
763                 val += id->name[i];
764         return val & mask;
765 }
766
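/*
 * ldlm_res_hop_hash above is the simple additive hash used by the cfs_hash
 * table itself; ldlm_res_hop_fid_hash below reconstructs a FID from the
 * resource name and flattens it to select the namespace bucket (see
 * ldlm_resource_get).
 */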
767 static unsigned int ldlm_res_hop_fid_hash(const struct ldlm_res_id *id, unsigned int bits)
768 {
769         struct lu_fid       fid;
770         __u32               hash;
771         __u32               val;
772
773         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
774         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
775         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
776
777         hash = fid_flatten32(&fid);
778         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
779         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
780                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
781         } else {
782                 val = fid_oid(&fid);
783         }
784         hash += (val >> 5) + (val << 11);
785         return cfs_hash_32(hash, bits);
786 }
787
788 static void *ldlm_res_hop_key(struct hlist_node *hnode)
789 {
790         struct ldlm_resource   *res;
791
792         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
793         return &res->lr_name;
794 }
795
796 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
797 {
798         struct ldlm_resource   *res;
799
800         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
801         return ldlm_res_eq((const struct ldlm_res_id *)key,
802                            (const struct ldlm_res_id *)&res->lr_name);
803 }
804
805 static void *ldlm_res_hop_object(struct hlist_node *hnode)
806 {
807         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
808 }
809
810 static void
811 ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
812 {
813         struct ldlm_resource *res;
814
815         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
816         ldlm_resource_getref(res);
817 }
818
819 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
820 {
821         struct ldlm_resource *res;
822
823         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
824         ldlm_resource_putref(res);
825 }
826
827 static struct cfs_hash_ops ldlm_ns_hash_ops = {
828         .hs_hash        = ldlm_res_hop_hash,
829         .hs_key         = ldlm_res_hop_key,
830         .hs_keycmp      = ldlm_res_hop_keycmp,
831         .hs_keycpy      = NULL,
832         .hs_object      = ldlm_res_hop_object,
833         .hs_get         = ldlm_res_hop_get_locked,
834         .hs_put         = ldlm_res_hop_put
835 };
836
837 typedef struct ldlm_ns_hash_def {
838         enum ldlm_ns_type       nsd_type;
839         /** hash bucket bits */
840         unsigned                nsd_bkt_bits;
841         /** hash bits */
842         unsigned                nsd_all_bits;
843         /** hash operations */
844         struct cfs_hash_ops *nsd_hops;
845 } ldlm_ns_hash_def_t;
846
847 static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] =
848 {
849         {
850                 .nsd_type       = LDLM_NS_TYPE_MDC,
851                 .nsd_bkt_bits   = 11,
852                 .nsd_all_bits   = 16,
853                 .nsd_hops       = &ldlm_ns_hash_ops,
854         },
855         {
856                 .nsd_type       = LDLM_NS_TYPE_MDT,
857                 .nsd_bkt_bits   = 14,
858                 .nsd_all_bits   = 21,
859                 .nsd_hops       = &ldlm_ns_hash_ops,
860         },
861         {
862                 .nsd_type       = LDLM_NS_TYPE_OSC,
863                 .nsd_bkt_bits   = 8,
864                 .nsd_all_bits   = 12,
865                 .nsd_hops       = &ldlm_ns_hash_ops,
866         },
867         {
868                 .nsd_type       = LDLM_NS_TYPE_OST,
869                 .nsd_bkt_bits   = 11,
870                 .nsd_all_bits   = 17,
871                 .nsd_hops       = &ldlm_ns_hash_ops,
872         },
873         {
874                 .nsd_type       = LDLM_NS_TYPE_MGC,
875                 .nsd_bkt_bits   = 3,
876                 .nsd_all_bits   = 4,
877                 .nsd_hops       = &ldlm_ns_hash_ops,
878         },
879         {
880                 .nsd_type       = LDLM_NS_TYPE_MGT,
881                 .nsd_bkt_bits   = 3,
882                 .nsd_all_bits   = 4,
883                 .nsd_hops       = &ldlm_ns_hash_ops,
884         },
885         {
886                 .nsd_type       = LDLM_NS_TYPE_UNKNOWN,
887         },
888 };
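/*
 * For example, with the LDLM_NS_TYPE_MDC entry above (nsd_all_bits = 16,
 * nsd_bkt_bits = 11), ldlm_namespace_new() computes
 * ns_bucket_bits = 16 - 11 = 5, i.e. 2^5 = 32 namespace buckets
 * (ns_rs_buckets), while the cfs_hash itself is created with 16 hash bits
 * and 11 bucket bits.
 */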
889
890 /**
891  * Create and initialize new empty namespace.
892  */
893 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
894                                           enum ldlm_side client,
895                                           enum ldlm_appetite apt,
896                                           enum ldlm_ns_type ns_type)
897 {
898         struct ldlm_namespace *ns = NULL;
899         struct ldlm_ns_hash_def *nsd;
900         int idx;
901         int rc;
902
903         ENTRY;
904         LASSERT(obd != NULL);
905
906         rc = ldlm_get_ref();
907         if (rc) {
908                 CERROR("ldlm_get_ref failed: %d\n", rc);
909                 RETURN(NULL);
910         }
911
912         for (idx = 0; ; idx++) {
913                 nsd = &ldlm_ns_hash_defs[idx];
914                 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
915                         CERROR("Unknown type %d for ns %s\n", ns_type, name);
916                         GOTO(out_ref, NULL);
917                 }
918
919                 if (nsd->nsd_type == ns_type)
920                         break;
921         }
922
923         OBD_ALLOC_PTR(ns);
924         if (!ns)
925                 GOTO(out_ref, NULL);
926
927         ns->ns_rs_hash = cfs_hash_create(name,
928                                          nsd->nsd_all_bits, nsd->nsd_all_bits,
929                                          nsd->nsd_bkt_bits, 0,
930                                          CFS_HASH_MIN_THETA,
931                                          CFS_HASH_MAX_THETA,
932                                          nsd->nsd_hops,
933                                          CFS_HASH_DEPTH |
934                                          CFS_HASH_BIGNAME |
935                                          CFS_HASH_SPIN_BKTLOCK |
936                                          CFS_HASH_NO_ITEMREF);
937         if (ns->ns_rs_hash == NULL)
938                 GOTO(out_ns, NULL);
939
940         ns->ns_bucket_bits = nsd->nsd_all_bits - nsd->nsd_bkt_bits;
941         OBD_ALLOC_LARGE(ns->ns_rs_buckets,
942                         BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
943         if (!ns->ns_rs_buckets)
944                 goto out_hash;
945
946         for (idx = 0; idx < (1 << ns->ns_bucket_bits); idx++) {
947                 struct ldlm_ns_bucket *nsb = &ns->ns_rs_buckets[idx];
948
949                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
950                 nsb->nsb_namespace = ns;
951                 nsb->nsb_reclaim_start = 0;
952                 atomic_set(&nsb->nsb_count, 0);
953         }
954
955         ns->ns_obd = obd;
956         ns->ns_appetite = apt;
957         ns->ns_client = client;
958         ns->ns_name = kstrdup(name, GFP_KERNEL);
959         if (!ns->ns_name)
960                 goto out_hash;
961
962         INIT_LIST_HEAD(&ns->ns_list_chain);
963         INIT_LIST_HEAD(&ns->ns_unused_list);
964         spin_lock_init(&ns->ns_lock);
965         atomic_set(&ns->ns_bref, 0);
966         init_waitqueue_head(&ns->ns_waitq);
967
968         ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
969         ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
970         ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
971
972         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
973         ns->ns_nr_unused          = 0;
974         ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
975         ns->ns_max_age            = ktime_set(LDLM_DEFAULT_MAX_ALIVE, 0);
976         ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
977         ns->ns_dirty_age_limit    = LDLM_DIRTY_AGE_LIMIT;
978         ns->ns_timeouts           = 0;
979         ns->ns_orig_connect_flags = 0;
980         ns->ns_connect_flags      = 0;
981         ns->ns_stopping           = 0;
982         ns->ns_reclaim_start      = 0;
983         ns->ns_last_pos           = &ns->ns_unused_list;
984
985         rc = ldlm_namespace_sysfs_register(ns);
986         if (rc) {
987                 CERROR("Can't initialize ns sysfs, rc %d\n", rc);
988                 GOTO(out_hash, rc);
989         }
990
991         rc = ldlm_namespace_debugfs_register(ns);
992         if (rc) {
993                 CERROR("Can't initialize ns proc, rc %d\n", rc);
994                 GOTO(out_sysfs, rc);
995         }
996
997         idx = ldlm_namespace_nr_read(client);
998         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
999         if (rc) {
1000                 CERROR("Can't initialize lock pool, rc %d\n", rc);
1001                 GOTO(out_proc, rc);
1002         }
1003
1004         ldlm_namespace_register(ns, client);
1005         RETURN(ns);
1006 out_proc:
1007         ldlm_namespace_debugfs_unregister(ns);
1008 out_sysfs:
1009         ldlm_namespace_sysfs_unregister(ns);
1010         ldlm_namespace_cleanup(ns, 0);
1011 out_hash:
1012         OBD_FREE_LARGE(ns->ns_rs_buckets,
1013                        BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
1014         kfree(ns->ns_name);
1015         cfs_hash_putref(ns->ns_rs_hash);
1016 out_ns:
1017         OBD_FREE_PTR(ns);
1018 out_ref:
1019         ldlm_put_ref();
1020         RETURN(NULL);
1021 }
1022 EXPORT_SYMBOL(ldlm_namespace_new);
1023
1024 /**
1025  * Cancel and destroy all locks on a resource.
1026  *
1027  * If flags contains LDLM_FL_LOCAL_ONLY, don't try to tell the server; just
1028  * clean up.  This is currently only used for recovery, and we make
1029  * certain assumptions as a result--notably, that we shouldn't cancel
1030  * locks with refs.
1031  */
1032 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
1033                              __u64 flags)
1034 {
1035         struct list_head *tmp;
1036         int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
1037         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
1038
1039         do {
1040                 struct ldlm_lock *lock = NULL;
1041
1042                 /* First, look for a lock that has not been cleaned up yet;
1043                  * all cleaned locks are marked with the CLEANED flag. */
1044                 lock_res(res);
1045                 list_for_each(tmp, q) {
1046                         lock = list_entry(tmp, struct ldlm_lock,
1047                                           l_res_link);
1048                         if (ldlm_is_cleaned(lock)) {
1049                                 lock = NULL;
1050                                 continue;
1051                         }
1052                         LDLM_LOCK_GET(lock);
1053                         ldlm_set_cleaned(lock);
1054                         break;
1055                 }
1056
1057                 if (lock == NULL) {
1058                         unlock_res(res);
1059                         break;
1060                 }
1061
1062                 /* Set CBPENDING so nothing in the cancellation path
1063                  * can match this lock. */
1064                 ldlm_set_cbpending(lock);
1065                 ldlm_set_failed(lock);
1066                 lock->l_flags |= flags;
1067
1068                 /* ... without sending a CANCEL message for local_only. */
1069                 if (local_only)
1070                         ldlm_set_local_only(lock);
1071
1072                 if (local_only && (lock->l_readers || lock->l_writers)) {
1073                         /*
1074                          * This is a little bit gross, but much better than the
1075                          * alternative: pretend that we got a blocking AST from
1076                          * the server, so that when the lock is decref'd, it
1077                          * will go away ...
1078                          */
1079                         unlock_res(res);
1080                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
1081                         if (lock->l_flags & LDLM_FL_FAIL_LOC) {
1082                                 set_current_state(TASK_UNINTERRUPTIBLE);
1083                                 schedule_timeout(cfs_time_seconds(4));
1084                                 set_current_state(TASK_RUNNING);
1085                         }
1086                         if (lock->l_completion_ast)
1087                                 lock->l_completion_ast(lock,
1088                                                        LDLM_FL_FAILED, NULL);
1089                         LDLM_LOCK_RELEASE(lock);
1090                         continue;
1091                 }
1092
1093                 if (client) {
1094                         struct lustre_handle lockh;
1095
1096                         unlock_res(res);
1097                         ldlm_lock2handle(lock, &lockh);
1098                         rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
1099                         if (rc)
1100                                 CERROR("ldlm_cli_cancel: %d\n", rc);
1101                 } else {
1102                         unlock_res(res);
1103                         LDLM_DEBUG(lock,
1104                                    "Freeing a lock still held by a client node");
1105                         ldlm_lock_cancel(lock);
1106                 }
1107                 LDLM_LOCK_RELEASE(lock);
1108         } while (1);
1109 }
1110
1111 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1112                                struct hlist_node *hnode, void *arg)
1113 {
1114         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1115         __u64 flags = *(__u64 *)arg;
1116
1117         cleanup_resource(res, &res->lr_granted, flags);
1118         cleanup_resource(res, &res->lr_waiting, flags);
1119
1120         return 0;
1121 }
1122
1123 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1124                                   struct hlist_node *hnode, void *arg)
1125 {
1126         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
1127
1128         lock_res(res);
1129         CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
1130                "(%d) after lock cleanup; forcing cleanup.\n",
1131                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
1132                atomic_read(&res->lr_refcount) - 1);
1133
1134         /* Use D_NETERROR since it is in the default mask */
1135         ldlm_resource_dump(D_NETERROR, res);
1136         unlock_res(res);
1137         return 0;
1138 }
1139
1140 /**
1141  * Cancel and destroy all locks in the namespace.
1142  *
1143  * Typically used during evictions, when the server has notified the client
1144  * that it was evicted and all of its state needs to be destroyed.
1145  * Also used during shutdown.
1146  */
1147 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
1148 {
1149         if (ns == NULL) {
1150                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
1151                 return ELDLM_OK;
1152         }
1153
1154         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
1155                                  &flags, 0);
1156         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
1157                                  NULL, 0);
1158         return ELDLM_OK;
1159 }
1160 EXPORT_SYMBOL(ldlm_namespace_cleanup);
1161
1162 /**
1163  * Attempts to free namespace.
1164  *
1165  * Only used when namespace goes away, like during an unmount.
1166  */
1167 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
1168 {
1169         ENTRY;
1170
1171         /* At shutdown time, don't call the cancellation callback */
1172         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
1173
1174         if (atomic_read(&ns->ns_bref) > 0) {
1175                 int rc;
1176                 CDEBUG(D_DLMTRACE,
1177                        "dlm namespace %s free waiting on refcount %d\n",
1178                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
1179 force_wait:
1180                 if (force)
1181                         rc = wait_event_idle_timeout(
1182                                 ns->ns_waitq,
1183                                 atomic_read(&ns->ns_bref) == 0,
1184                                 cfs_time_seconds(1) / 4);
1185                 else
1186                         rc = l_wait_event_abortable(
1187                                 ns->ns_waitq, atomic_read(&ns->ns_bref) == 0);
1188
1189                 /* Forced cleanups should be able to reclaim all references,
1190                  * so it's safe to wait forever... we can't leak locks... */
1191                 if (force && rc == 0) {
1192                         rc = -ETIMEDOUT;
1193                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
1194                                        "namespace with %d resources in use, "
1195                                        "(rc=%d)\n", ldlm_ns_name(ns),
1196                                        atomic_read(&ns->ns_bref), rc);
1197                         GOTO(force_wait, rc);
1198                 }
1199
1200                 if (atomic_read(&ns->ns_bref)) {
1201                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
1202                                        "with %d resources in use, (rc=%d)\n",
1203                                        ldlm_ns_name(ns),
1204                                        atomic_read(&ns->ns_bref), rc);
1205                         RETURN(ELDLM_NAMESPACE_EXISTS);
1206                 }
1207                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
1208                        ldlm_ns_name(ns));
1209         }
1210
1211         RETURN(ELDLM_OK);
1212 }
1213
1214 /**
1215  * Performs various cleanups on the passed \a ns so that it drops its refc
1216  * and is ready for freeing. Waits for refc == 0.
1217  *
1218  * The following is done:
1219  * (0) Unregister \a ns from its list to make it inaccessible to potential
1220  * users such as the pools thread;
1221  * (1) Clear all locks in \a ns.
1222  */
1223 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
1224                                struct obd_import *imp,
1225                                int force)
1226 {
1227         int rc;
1228
1229         ENTRY;
1230         if (!ns) {
1231                 EXIT;
1232                 return;
1233         }
1234
1235         spin_lock(&ns->ns_lock);
1236         ns->ns_stopping = 1;
1237         spin_unlock(&ns->ns_lock);
1238
1239         /*
1240          * Can fail with -EINTR when force == 0 in which case try harder.
1241          */
1242         rc = __ldlm_namespace_free(ns, force);
1243         if (rc != ELDLM_OK) {
1244                 if (imp) {
1245                         ptlrpc_disconnect_import(imp, 0);
1246                         ptlrpc_invalidate_import(imp);
1247                 }
1248
1249                 /*
1250                  * With all requests dropped and the import inactive
1251                  * we are guaranteed all references will be dropped.
1252                  */
1253                 rc = __ldlm_namespace_free(ns, 1);
1254                 LASSERT(rc == 0);
1255         }
1256         EXIT;
1257 }
1258 EXPORT_SYMBOL(ldlm_namespace_free_prior);
1259
1260 /**
1261  * Frees the memory structures related to \a ns. This is only done when
1262  * ldlm_namespace_free_prior() has successfully removed all resources
1263  * referencing \a ns and its refc == 0.
1264  */
1265 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
1266 {
1267         ENTRY;
1268         if (!ns) {
1269                 EXIT;
1270                 return;
1271         }
1272
1273         /* Make sure that nobody can find this ns in its list. */
1274         ldlm_namespace_unregister(ns, ns->ns_client);
1275         /* Finalize the pool _before_ the parent proc dir is removed. This is
1276          * important as ldlm_pool_fini() removes its own proc dir, which is a
1277          * child of @dir. Removing it after @dir may cause an oops. */
1278         ldlm_pool_fini(&ns->ns_pool);
1279
1280         ldlm_namespace_debugfs_unregister(ns);
1281         ldlm_namespace_sysfs_unregister(ns);
1282         cfs_hash_putref(ns->ns_rs_hash);
1283         OBD_FREE_LARGE(ns->ns_rs_buckets,
1284                        BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
1285         kfree(ns->ns_name);
1286         /* Namespace \a ns should not be on any list at this time, otherwise
1287          * this will cause issues related to the poold thread using a freed
1288          * \a ns.
1289          */
1290         LASSERT(list_empty(&ns->ns_list_chain));
1291         OBD_FREE_PTR(ns);
1292         ldlm_put_ref();
1293         EXIT;
1294 }
1295 EXPORT_SYMBOL(ldlm_namespace_free_post);
1296
1297 /**
1298  * Cleanup the resource, and free namespace.
1299  * bug 12864:
1300  * Deadlock issue:
1301  * proc1: destroy import
1302  *        class_disconnect_export(grab cl_sem) ->
1303  *              -> ldlm_namespace_free ->
1304  *              -> lprocfs_remove(grab _lprocfs_lock).
1305  * proc2: read proc info
1306  *        lprocfs_fops_read(grab _lprocfs_lock) ->
1307  *              -> osc_rd_active, etc(grab cl_sem).
1308  *
1309  * To avoid this, ldlm_namespace_free is split into two parts: the first
1310  * part, ldlm_namespace_free_prior, cleans up the resources that are still
1311  * being used; the second part, ldlm_namespace_free_post, unregisters the
1312  * lprocfs entries and then frees the memory. It is called without
1313  * cli->cl_sem held.
1314  */
1315 void ldlm_namespace_free(struct ldlm_namespace *ns,
1316                          struct obd_import *imp,
1317                          int force)
1318 {
1319         ldlm_namespace_free_prior(ns, imp, force);
1320         ldlm_namespace_free_post(ns);
1321 }
1322 EXPORT_SYMBOL(ldlm_namespace_free);
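/*
 * Sketch of the split-free pattern described above (illustrative only):
 *
 *      ldlm_namespace_free_prior(ns, imp, force);   caller may hold cl_sem
 *      ... drop cl_sem, let lprocfs readers finish ...
 *      ldlm_namespace_free_post(ns);                called without cl_sem held
 *
 * ldlm_namespace_free() above simply performs both steps back to back.
 */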
1323
1324 void ldlm_namespace_get(struct ldlm_namespace *ns)
1325 {
1326         atomic_inc(&ns->ns_bref);
1327 }
1328
1329 /* This is only for callers that care about refcount */
1330 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1331 {
1332         return atomic_inc_return(&ns->ns_bref);
1333 }
1334
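/*
 * Dropping the last busy reference wakes up __ldlm_namespace_free(), which
 * waits on ns_waitq for ns_bref to reach zero.
 */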
1335 void ldlm_namespace_put(struct ldlm_namespace *ns)
1336 {
1337         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1338                 wake_up(&ns->ns_waitq);
1339                 spin_unlock(&ns->ns_lock);
1340         }
1341 }
1342
1343 /** Register \a ns in the list of namespaces */
1344 void ldlm_namespace_register(struct ldlm_namespace *ns, enum ldlm_side client)
1345 {
1346         mutex_lock(ldlm_namespace_lock(client));
1347         LASSERT(list_empty(&ns->ns_list_chain));
1348         list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
1349         ldlm_namespace_nr_inc(client);
1350         mutex_unlock(ldlm_namespace_lock(client));
1351 }
1352
1353 /** Unregister \a ns from the list of namespaces. */
1354 void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client)
1355 {
1356         mutex_lock(ldlm_namespace_lock(client));
1357         LASSERT(!list_empty(&ns->ns_list_chain));
1358         /* Some asserts and possibly other parts of the code are still
1359          * using list_empty(&ns->ns_list_chain). This is why it is
1360          * important to use list_del_init() here. */
1361         list_del_init(&ns->ns_list_chain);
1362         ldlm_namespace_nr_dec(client);
1363         mutex_unlock(ldlm_namespace_lock(client));
1364 }
1365
1366 /** Should be called with ldlm_namespace_lock(client) taken. */
1367 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1368                                           enum ldlm_side client)
1369 {
1370         LASSERT(!list_empty(&ns->ns_list_chain));
1371         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1372         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1373 }
1374
1375 /** Should be called with ldlm_namespace_lock(client) taken. */
1376 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1377                                             enum ldlm_side client)
1378 {
1379         LASSERT(!list_empty(&ns->ns_list_chain));
1380         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1381         list_move_tail(&ns->ns_list_chain,
1382                        ldlm_namespace_inactive_list(client));
1383 }
1384
1385 /** Should be called with ldlm_namespace_lock(client) taken. */
1386 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
1387 {
1388         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1389         LASSERT(!list_empty(ldlm_namespace_list(client)));
1390         return container_of(ldlm_namespace_list(client)->next,
1391                             struct ldlm_namespace, ns_list_chain);
1392 }
1393
1394 static bool ldlm_resource_extent_new(struct ldlm_resource *res)
1395 {
1396         int idx;
1397
1398         OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
1399                        sizeof(*res->lr_itree) * LCK_MODE_NUM);
1400         if (res->lr_itree == NULL)
1401                 return false;
1402         /* Initialize interval trees for each lock mode. */
1403         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1404                 res->lr_itree[idx].lit_size = 0;
1405                 res->lr_itree[idx].lit_mode = 1 << idx;
1406                 res->lr_itree[idx].lit_root = NULL;
1407         }
1408         return true;
1409 }
1410
1411 static bool ldlm_resource_inodebits_new(struct ldlm_resource *res)
1412 {
1413         int i;
1414
1415         OBD_ALLOC_PTR(res->lr_ibits_queues);
1416         if (res->lr_ibits_queues == NULL)
1417                 return false;
1418         for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
1419                 INIT_LIST_HEAD(&res->lr_ibits_queues->liq_waiting[i]);
1420         return true;
1421 }
1422
1423 /** Create and initialize new resource. */
1424 static struct ldlm_resource *ldlm_resource_new(enum ldlm_type ldlm_type)
1425 {
1426         struct ldlm_resource *res;
1427         bool rc;
1428
1429         OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1430         if (res == NULL)
1431                 return NULL;
1432
1433         switch (ldlm_type) {
1434         case LDLM_EXTENT:
1435                 rc = ldlm_resource_extent_new(res);
1436                 break;
1437         case LDLM_IBITS:
1438                 rc = ldlm_resource_inodebits_new(res);
1439                 break;
1440         default:
1441                 rc = true;
1442                 break;
1443         }
1444         if (!rc) {
1445                 OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1446                 return NULL;
1447         }
1448
1449         INIT_LIST_HEAD(&res->lr_granted);
1450         INIT_LIST_HEAD(&res->lr_waiting);
1451
1452         atomic_set(&res->lr_refcount, 1);
1453         spin_lock_init(&res->lr_lock);
1454         lu_ref_init(&res->lr_reference);
1455
1456         /* Since LVB init can be delayed now, there is no longer a need to
1457          * immediately acquire the mutex here. */
1458         mutex_init(&res->lr_lvb_mutex);
1459         res->lr_lvb_initialized = false;
1460
1461         return res;
1462 }
1463
1464 static void ldlm_resource_free(struct ldlm_resource *res)
1465 {
1466         if (res->lr_type == LDLM_EXTENT) {
1467                 if (res->lr_itree != NULL)
1468                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1469                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1470         } else if (res->lr_type == LDLM_IBITS) {
1471                 if (res->lr_ibits_queues != NULL)
1472                         OBD_FREE_PTR(res->lr_ibits_queues);
1473         }
1474
1475         OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
1476 }
1477
1478 /**
1479  * Return a reference to the resource with the given name, creating it if
1480  * necessary.  Args: namespace with ns_lock unlocked.
1481  * Locks: takes and releases the namespace hash-lock and res->lr_lock
1482  * Returns: a referenced, unlocked ldlm_resource, or an ERR_PTR on failure
1483  */
1484 struct ldlm_resource *
1485 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1486                   const struct ldlm_res_id *name, enum ldlm_type type,
1487                   int create)
1488 {
1489         struct hlist_node       *hnode;
1490         struct ldlm_resource    *res = NULL;
1491         struct cfs_hash_bd              bd;
1492         __u64                   version;
1493         int                     ns_refcount = 0;
1494         int hash;
1495
1496         LASSERT(ns != NULL);
1497         LASSERT(parent == NULL);
1498         LASSERT(ns->ns_rs_hash != NULL);
1499         LASSERT(name->name[0] != 0);
1500
1501         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1502         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1503         if (hnode != NULL) {
1504                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1505                 GOTO(found, res);
1506         }
1507
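        /* Not found.  Remember the bucket version so that a concurrent
         * insertion can be detected before we add the new resource below. */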
1508         version = cfs_hash_bd_version_get(&bd);
1509         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1510
1511         if (create == 0)
1512                 return ERR_PTR(-ENOENT);
1513
1514         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1515                  "type: %d\n", type);
1516         res = ldlm_resource_new(type);
1517         if (res == NULL)
1518                 return ERR_PTR(-ENOMEM);
1519
1520         hash = ldlm_res_hop_fid_hash(name, ns->ns_bucket_bits);
1521         res->lr_ns_bucket = &ns->ns_rs_buckets[hash];
1522         res->lr_name = *name;
1523         res->lr_type = type;
1524
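        /* Re-lock the bucket and re-check: if its version changed, another
         * thread may have inserted the same resource while it was unlocked. */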
1525         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1526         hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1527                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1528
1529         if (hnode != NULL) {
1530                 /* Someone won the race and already added the resource. */
1531                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1532                 /* Clean lu_ref for failed resource. */
1533                 lu_ref_fini(&res->lr_reference);
1534                 ldlm_resource_free(res);
1535 found:
1536                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1537                 return res;
1538         }
1539         /* We won! Let's add the resource. */
1540         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1541         if (atomic_inc_return(&res->lr_ns_bucket->nsb_count) == 1)
1542                 ns_refcount = ldlm_namespace_get_return(ns);
1543
1544         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1545
1546         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1547
1548         /* Let's see if we happened to be the very first resource in this
1549          * namespace. If so, and this is a client namespace, we need to move
1550          * the namespace into the active namespaces list to be patrolled by
1551          * the ldlm_poold. */
1552         if (ns_is_client(ns) && ns_refcount == 1) {
1553                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1554                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1555                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1556         }
1557
1558         return res;
1559 }
1560 EXPORT_SYMBOL(ldlm_resource_get);
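/*
 * Typical caller pattern (an illustrative sketch only; "res_id" and the
 * surrounding error handling are hypothetical, not taken from this file):
 *
 *      struct ldlm_resource *res;
 *
 *      res = ldlm_resource_get(ns, NULL, &res_id, LDLM_IBITS, 1);
 *      if (IS_ERR(res))
 *              return PTR_ERR(res);
 *      ...
 *      ldlm_resource_putref(res);
 */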
1561
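/** Take an additional reference on a resource. */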
1562 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1563 {
1564         LASSERT(res != NULL);
1565         LASSERT(res != LP_POISON);
1566         atomic_inc(&res->lr_refcount);
1567         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1568                atomic_read(&res->lr_refcount));
1569         return res;
1570 }
1571
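/**
 * Drop the last reference on a resource: remove it from the namespace hash
 * and release the namespace reference once its bucket becomes empty.
 * Called with the hash bucket locked; the resource must hold no locks.
 */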
1572 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1573                                          struct ldlm_resource *res)
1574 {
1575         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1576
1577         if (!list_empty(&res->lr_granted)) {
1578                 ldlm_resource_dump(D_ERROR, res);
1579                 LBUG();
1580         }
1581
1582         if (!list_empty(&res->lr_waiting)) {
1583                 ldlm_resource_dump(D_ERROR, res);
1584                 LBUG();
1585         }
1586
1587         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1588                                bd, &res->lr_hash);
1589         lu_ref_fini(&res->lr_reference);
1590         if (atomic_dec_and_test(&nsb->nsb_count))
1591                 ldlm_namespace_put(nsb->nsb_namespace);
1592 }
1593
1594 /** Returns 1 if the resource was freed, 0 if it remains. */
1595 int ldlm_resource_putref(struct ldlm_resource *res)
1596 {
1597         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1598         struct cfs_hash_bd   bd;
1599
1600         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1601         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1602                res, atomic_read(&res->lr_refcount) - 1);
1603
1604         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1605         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1606                 __ldlm_resource_putref_final(&bd, res);
1607                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1608                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1609                         ns->ns_lvbo->lvbo_free(res);
1610                 ldlm_resource_free(res);
1611                 return 1;
1612         }
1613         return 0;
1614 }
1615 EXPORT_SYMBOL(ldlm_resource_putref);
1616
1617 /**
1618  * Add a lock to the specified lock list of a given resource.
1619  */
1620 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1621                             struct ldlm_lock *lock)
1622 {
1623         check_res_locked(res);
1624
1625         LDLM_DEBUG(lock, "About to add this lock");
1626
1627         if (ldlm_is_destroyed(lock)) {
1628                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1629                 return;
1630         }
1631
1632         LASSERT(list_empty(&lock->l_res_link));
1633
1634         list_add_tail(&lock->l_res_link, head);
1635
1636         if (res->lr_type == LDLM_IBITS)
1637                 ldlm_inodebits_add_lock(res, head, lock);
1638 }
1639
1640 /**
1641  * Insert a lock into a resource, after the specified lock.
1642  *
1643  * The resource is taken from the lock we are inserting after.
1644  */
1645 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1646                                      struct ldlm_lock *new)
1647 {
1648         struct ldlm_resource *res = original->l_resource;
1649
1650         check_res_locked(res);
1651
1652         ldlm_resource_dump(D_INFO, res);
1653         LDLM_DEBUG(new, "About to insert this lock after %p: ", original);
1654
1655         if (ldlm_is_destroyed(new)) {
1656                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1657                 goto out;
1658         }
1659
1660         LASSERT(list_empty(&new->l_res_link));
1661
1662         list_add(&new->l_res_link, &original->l_res_link);
1663  out:;
1664 }
1665
1666 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1667 {
1668         int type = lock->l_resource->lr_type;
1669
1670         check_res_locked(lock->l_resource);
1671         switch (type) {
1672         case LDLM_PLAIN:
1673                 ldlm_unlink_lock_skiplist(lock);
1674                 break;
1675         case LDLM_EXTENT:
1676                 ldlm_extent_unlink_lock(lock);
1677                 break;
1678         case LDLM_IBITS:
1679                 ldlm_inodebits_unlink_lock(lock);
1680                 break;
1681         }
1682         list_del_init(&lock->l_res_link);
1683 }
1684 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1685
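/** Copy the type and name of a resource into a resource descriptor. */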
1686 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1687 {
1688         desc->lr_type = res->lr_type;
1689         desc->lr_name = res->lr_name;
1690 }
1691
1692 /**
1693  * Print information about all locks in all namespaces on this node to debug
1694  * log.
1695  */
1696 void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
1697 {
1698         struct list_head *tmp;
1699
1700         if (!((libcfs_debug | D_ERROR) & level))
1701                 return;
1702
1703         mutex_lock(ldlm_namespace_lock(client));
1704
1705         list_for_each(tmp, ldlm_namespace_list(client)) {
1706                 struct ldlm_namespace *ns;
1707
1708                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1709                 ldlm_namespace_dump(level, ns);
1710         }
1711
1712         mutex_unlock(ldlm_namespace_lock(client));
1713 }
1714
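/**
 * Hash-iterator callback: dump a single resource at the given debug level.
 */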
1715 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1716                               struct hlist_node *hnode, void *arg)
1717 {
1718         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1719         int    level = (int)(unsigned long)arg;
1720
1721         lock_res(res);
1722         ldlm_resource_dump(level, res);
1723         unlock_res(res);
1724
1725         return 0;
1726 }
1727
1728 /**
1729  * Print information about all locks in this namespace on this node to debug
1730  * log.
1731  */
1732 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1733 {
1734         if (!((libcfs_debug | D_ERROR) & level))
1735                 return;
1736
1737         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1738                ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1739                ns_is_client(ns) ? "client" : "server");
1740
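        /* Rate-limit: full resource dumps for a namespace happen at most
         * once every 10 seconds (see the ns_next_dump update below). */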
1741         if (ktime_get_seconds() < ns->ns_next_dump)
1742                 return;
1743
1744         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1745                                  ldlm_res_hash_dump,
1746                                  (void *)(unsigned long)level, 0);
1747         spin_lock(&ns->ns_lock);
1748         ns->ns_next_dump = ktime_get_seconds() + 10;
1749         spin_unlock(&ns->ns_lock);
1750 }
1751
1752 /**
1753  * Print information about all locks in this resource to debug log.
1754  */
1755 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1756 {
1757         struct ldlm_lock *lock;
1758         unsigned int granted = 0;
1759
1760         BUILD_BUG_ON(RES_NAME_SIZE != 4);
1761
1762         if (!((libcfs_debug | D_ERROR) & level))
1763                 return;
1764
1765         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1766                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1767
1768         if (!list_empty(&res->lr_granted)) {
1769                 CDEBUG(level, "Granted locks (in reverse order):\n");
1770                 list_for_each_entry_reverse(lock, &res->lr_granted,
1771                                                 l_res_link) {
1772                         LDLM_DEBUG_LIMIT(level, lock, "###");
1773                         if (!(level & D_CANTMASK) &&
1774                             ++granted > ldlm_dump_granted_max) {
1775                                 CDEBUG(level,
1776                                        "only dump %d granted locks to avoid DDOS.\n",
1777                                        granted);
1778                                 break;
1779                         }
1780                 }
1781         }
1782
1783         if (!list_empty(&res->lr_waiting)) {
1784                 CDEBUG(level, "Waiting locks:\n");
1785                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1786                         LDLM_DEBUG_LIMIT(level, lock, "###");
1787         }
1788 }
1789 EXPORT_SYMBOL(ldlm_resource_dump);