LU-9679 lustre: avoid cast of file->private_data
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_resource.c
33  *
34  * Author: Phil Schwan <phil@clusterfs.com>
35  * Author: Peter Braam <braam@clusterfs.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LDLM
39 #include <lustre_dlm.h>
40 #include <lustre_fid.h>
41 #include <obd_class.h>
42 #include "ldlm_internal.h"
43
44 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
45 struct kmem_cache *ldlm_interval_tree_slab;
46 struct kmem_cache *ldlm_inodebits_slab;
47
48 int ldlm_srv_namespace_nr = 0;
49 int ldlm_cli_namespace_nr = 0;
50
51 DEFINE_MUTEX(ldlm_srv_namespace_lock);
52 LIST_HEAD(ldlm_srv_namespace_list);
53
54 DEFINE_MUTEX(ldlm_cli_namespace_lock);
55 /* Client Namespaces that have active resources in them.
56  * Once all resources go away, ldlm_poold moves such namespaces to the
57  * inactive list */
58 LIST_HEAD(ldlm_cli_active_namespace_list);
59 /* Client namespaces that don't have any locks in them */
60 LIST_HEAD(ldlm_cli_inactive_namespace_list);
61
62 static struct dentry *ldlm_debugfs_dir;
63 static struct dentry *ldlm_ns_debugfs_dir;
64 struct dentry *ldlm_svc_debugfs_dir;
65
66 /* During a debug dump, print at most this many granted locks for a single
67  * resource, to avoid flooding the logs (DDOS). */
68 static unsigned int ldlm_dump_granted_max = 256;
69
70 static ssize_t ldebugfs_dump_ns_seq_write(struct file *file,
71                                           const char __user *buffer,
72                                           size_t count, loff_t *off)
73 {
74         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
75         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
76         RETURN(count);
77 }
78
79 LDEBUGFS_FOPS_WR_ONLY(ldlm, dump_ns);
80
81 static int ldlm_rw_uint_seq_show(struct seq_file *m, void *v)
82 {
83         seq_printf(m, "%u\n", *(unsigned int *)m->private);
84         return 0;
85 }
86
87 static ssize_t
88 ldlm_rw_uint_seq_write(struct file *file, const char __user *buffer,
89                        size_t count, loff_t *off)
90 {
91         struct seq_file *seq = file->private_data;
92
93         if (!count)
94                 return 0;
95
96         return kstrtouint_from_user(buffer, count, 0,
97                                     (unsigned int *)seq->private);
98 }
99
100 LDEBUGFS_SEQ_FOPS(ldlm_rw_uint);
101
102 #ifdef HAVE_SERVER_SUPPORT
103
104 static int seq_watermark_show(struct seq_file *m, void *data)
105 {
106         seq_printf(m, "%llu\n", *(__u64 *)m->private);
107         return 0;
108 }
109
110 static ssize_t seq_watermark_write(struct file *file,
111                                    const char __user *buffer, size_t count,
112                                    loff_t *off)
113 {
114         struct seq_file *m = file->private_data;
115         u64 value;
116         __u64 watermark;
117         __u64 *data = m->private;
118         bool wm_low = (data == &ldlm_reclaim_threshold_mb);
119         char kernbuf[22] = "";
120         int rc;
121
122         if (count >= sizeof(kernbuf))
123                 return -EINVAL;
124
125         if (copy_from_user(kernbuf, buffer, count))
126                 return -EFAULT;
127         kernbuf[count] = 0;
128
129         rc = sysfs_memparse(kernbuf, count, &value, "MiB");
130         if (rc < 0) {
131                 CERROR("Failed to set %s, rc = %d.\n",
132                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb",
133                        rc);
134                 return rc;
135         } else if (value != 0 && value < (1 << 20)) {
136                 CERROR("%s should be greater than 1MB.\n",
137                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb");
138                 return -EINVAL;
139         }
140         watermark = value >> 20;
141
142         if (wm_low) {
143                 if (ldlm_lock_limit_mb != 0 && watermark > ldlm_lock_limit_mb) {
144                         CERROR("lock_reclaim_threshold_mb must be smaller than "
145                                "lock_limit_mb.\n");
146                         return -EINVAL;
147                 }
148
149                 *data = watermark;
150                 if (watermark != 0) {
151                         watermark <<= 20;
152                         do_div(watermark, sizeof(struct ldlm_lock));
153                 }
154                 ldlm_reclaim_threshold = watermark;
155         } else {
156                 if (ldlm_reclaim_threshold_mb != 0 &&
157                     watermark < ldlm_reclaim_threshold_mb) {
158                         CERROR("lock_limit_mb must be greater than "
159                                "lock_reclaim_threshold_mb.\n");
160                         return -EINVAL;
161                 }
162
163                 *data = watermark;
164                 if (watermark != 0) {
165                         watermark <<= 20;
166                         do_div(watermark, sizeof(struct ldlm_lock));
167                 }
168                 ldlm_lock_limit = watermark;
169         }
170
171         return count;
172 }
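
/*
 * Worked example for seq_watermark_write() above, as a sketch: the 512-byte
 * lock size below is an assumed figure, the real divisor is
 * sizeof(struct ldlm_lock) on the running build.
 *
 *     echo 200M > lock_limit_mb   (a bare "200" should behave the same,
 *                                  given the "MiB" default unit)
 *
 * parses to value = 200 << 20 bytes, so ldlm_lock_limit_mb becomes 200 and
 *
 *     ldlm_lock_limit = (200ULL << 20) / 512 = 409600 locks.
 *
 * Writing 0 disables the corresponding watermark entirely.
 */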
173
174 static int seq_watermark_open(struct inode *inode, struct file *file)
175 {
176         return single_open(file, seq_watermark_show, inode->i_private);
177 }
178
179 static const struct file_operations ldlm_watermark_fops = {
180         .owner          = THIS_MODULE,
181         .open           = seq_watermark_open,
182         .read           = seq_read,
183         .write          = seq_watermark_write,
184         .llseek         = seq_lseek,
185         .release        = lprocfs_single_release,
186 };
187
188 static int seq_granted_show(struct seq_file *m, void *data)
189 {
190         seq_printf(m, "%llu\n", percpu_counter_sum_positive(
191                    (struct percpu_counter *)m->private));
192         return 0;
193 }
194
195 static int seq_granted_open(struct inode *inode, struct file *file)
196 {
197         return single_open(file, seq_granted_show, inode->i_private);
198 }
199
200 static const struct file_operations ldlm_granted_fops = {
201         .owner  = THIS_MODULE,
202         .open   = seq_granted_open,
203         .read   = seq_read,
204         .llseek = seq_lseek,
205         .release = seq_release,
206 };
207
208 #endif /* HAVE_SERVER_SUPPORT */
209
210 static struct lprocfs_vars ldlm_debugfs_list[] = {
211         { .name =       "dump_namespaces",
212           .fops =       &ldlm_dump_ns_fops,
213           .proc_mode =  0222 },
214         { .name =       "dump_granted_max",
215           .fops =       &ldlm_rw_uint_fops,
216           .data =       &ldlm_dump_granted_max },
217 #ifdef HAVE_SERVER_SUPPORT
218         { .name =       "lock_reclaim_threshold_mb",
219           .fops =       &ldlm_watermark_fops,
220           .data =       &ldlm_reclaim_threshold_mb },
221         { .name =       "lock_limit_mb",
222           .fops =       &ldlm_watermark_fops,
223           .data =       &ldlm_lock_limit_mb },
224         { .name =       "lock_granted_count",
225           .fops =       &ldlm_granted_fops,
226           .data =       &ldlm_granted_total },
227 #endif
228         { NULL }
229 };
230
231 int ldlm_debugfs_setup(void)
232 {
233         int rc;
234
235         ENTRY;
236         ldlm_debugfs_dir = ldebugfs_register(OBD_LDLM_DEVICENAME,
237                                              debugfs_lustre_root,
238                                              NULL, NULL);
239         if (IS_ERR_OR_NULL(ldlm_debugfs_dir)) {
240                 CERROR("LDebugFS failed in ldlm-init\n");
241                 rc = ldlm_debugfs_dir ? PTR_ERR(ldlm_debugfs_dir) : -ENOMEM;
242                 ldlm_debugfs_dir = NULL;
243                 GOTO(err, rc);
244         }
245
246         ldlm_ns_debugfs_dir = ldebugfs_register("namespaces",
247                                                 ldlm_debugfs_dir,
248                                                 NULL, NULL);
249         if (IS_ERR_OR_NULL(ldlm_ns_debugfs_dir)) {
250                 CERROR("LDebugFS failed in ldlm-init\n");
251                 rc = ldlm_ns_debugfs_dir ? PTR_ERR(ldlm_ns_debugfs_dir)
252                                          : -ENOMEM;
253                 GOTO(err, rc);
254         }
255
256         ldlm_svc_debugfs_dir = ldebugfs_register("services",
257                                                  ldlm_debugfs_dir,
258                                                  NULL, NULL);
259         if (IS_ERR_OR_NULL(ldlm_svc_debugfs_dir)) {
260                 CERROR("LDebugFS failed in ldlm-init\n");
261                 rc = ldlm_svc_debugfs_dir ? PTR_ERR(ldlm_svc_debugfs_dir)
262                                           : -ENOMEM;
263                 GOTO(err, rc);
264         }
265
266         rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
267         if (rc != 0) {
268                 CERROR("LDebugFS failed in ldlm-init\n");
269                 GOTO(err, rc);
270         }
271
272         RETURN(0);
273
274 err:
275         debugfs_remove_recursive(ldlm_debugfs_dir);
276         ldlm_svc_debugfs_dir = NULL;
277         ldlm_ns_debugfs_dir = NULL;
278         ldlm_debugfs_dir = NULL;
279         RETURN(rc);
280 }
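
/*
 * Resulting layout sketch for ldlm_debugfs_setup() above, assuming debugfs
 * is mounted at /sys/kernel/debug and debugfs_lustre_root is the usual
 * "lustre" directory:
 *
 *     /sys/kernel/debug/lustre/ldlm/dump_namespaces     (write-only trigger)
 *     /sys/kernel/debug/lustre/ldlm/dump_granted_max
 *     /sys/kernel/debug/lustre/ldlm/namespaces/
 *     /sys/kernel/debug/lustre/ldlm/services/
 *
 * plus lock_reclaim_threshold_mb, lock_limit_mb and lock_granted_count when
 * built with HAVE_SERVER_SUPPORT.
 */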
281
282 void ldlm_debugfs_cleanup(void)
283 {
284         debugfs_remove_recursive(ldlm_debugfs_dir);
285
286         ldlm_svc_debugfs_dir = NULL;
287         ldlm_ns_debugfs_dir = NULL;
288         ldlm_debugfs_dir = NULL;
289 }
290
291 static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr,
292                                    char *buf)
293 {
294         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
295                                                  ns_kobj);
296         __u64                   res = 0;
297         int                     i;
298
299         /* result is not strictly consistent */
300         for (i = 0; i < (1 << ns->ns_bucket_bits); i++)
301                 res += atomic_read(&ns->ns_rs_buckets[i].nsb_count);
302         return sprintf(buf, "%lld\n", res);
303 }
304 LUSTRE_RO_ATTR(resource_count);
305
306 static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr,
307                                char *buf)
308 {
309         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
310                                                  ns_kobj);
311         __u64                   locks;
312
313         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
314                                         LPROCFS_FIELDS_FLAGS_SUM);
315         return sprintf(buf, "%lld\n", locks);
316 }
317 LUSTRE_RO_ATTR(lock_count);
318
319 static ssize_t lock_unused_count_show(struct kobject *kobj,
320                                       struct attribute *attr,
321                                       char *buf)
322 {
323         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
324                                                  ns_kobj);
325
326         return sprintf(buf, "%d\n", ns->ns_nr_unused);
327 }
328 LUSTRE_RO_ATTR(lock_unused_count);
329
330 static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr,
331                              char *buf)
332 {
333         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
334                                                  ns_kobj);
335         __u32 *nr = &ns->ns_max_unused;
336
337         if (ns_connect_lru_resize(ns))
338                 nr = &ns->ns_nr_unused;
339         return sprintf(buf, "%u\n", *nr);
340 }
341
342 static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
343                               const char *buffer, size_t count)
344 {
345         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
346                                                  ns_kobj);
347         unsigned long tmp;
348         int lru_resize;
349         int err;
350
351         if (strncmp(buffer, "clear", 5) == 0) {
352                 CDEBUG(D_DLMTRACE,
353                        "dropping all unused locks from namespace %s\n",
354                        ldlm_ns_name(ns));
355                 if (ns_connect_lru_resize(ns)) {
356                         /* Try to cancel all @ns_nr_unused locks. */
357                         ldlm_cancel_lru(ns, ns->ns_nr_unused, 0,
358                                         LDLM_LRU_FLAG_PASSED |
359                                         LDLM_LRU_FLAG_CLEANUP);
360                 } else {
361                         tmp = ns->ns_max_unused;
362                         ns->ns_max_unused = 0;
363                         ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED |
364                                         LDLM_LRU_FLAG_CLEANUP);
365                         ns->ns_max_unused = tmp;
366                 }
367                 return count;
368         }
369
370         err = kstrtoul(buffer, 10, &tmp);
371         if (err != 0) {
372                 CERROR("lru_size: invalid value written\n");
373                 return -EINVAL;
374         }
375         lru_resize = (tmp == 0);
376
377         if (ns_connect_lru_resize(ns)) {
378                 if (!lru_resize)
379                         ns->ns_max_unused = (unsigned int)tmp;
380
381                 if (tmp > ns->ns_nr_unused)
382                         tmp = ns->ns_nr_unused;
383                 tmp = ns->ns_nr_unused - tmp;
384
385                 CDEBUG(D_DLMTRACE,
386                        "changing namespace %s unused locks from %u to %u\n",
387                        ldlm_ns_name(ns), ns->ns_nr_unused,
388                        (unsigned int)tmp);
389                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
390
391                 if (!lru_resize) {
392                         CDEBUG(D_DLMTRACE,
393                                "disable lru_resize for namespace %s\n",
394                                ldlm_ns_name(ns));
395                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
396                 }
397         } else {
398                 CDEBUG(D_DLMTRACE,
399                        "changing namespace %s max_unused from %u to %u\n",
400                        ldlm_ns_name(ns), ns->ns_max_unused,
401                        (unsigned int)tmp);
402                 ns->ns_max_unused = (unsigned int)tmp;
403                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
404
405                 /* Make sure that LRU resize was originally supported before
406                  * turning it on here.
407                  */
408                 if (lru_resize &&
409                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
410                         CDEBUG(D_DLMTRACE,
411                                "enable lru_resize for namespace %s\n",
412                                ldlm_ns_name(ns));
413                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
414                 }
415         }
416
417         return count;
418 }
419 LUSTRE_RW_ATTR(lru_size);
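
/*
 * Usage sketch for the lru_size attribute above ("<ns>" stands for one
 * namespace directory under /sys/fs/lustre/ldlm/namespaces/):
 *
 *     # drop all unused locks from this namespace's LRU
 *     echo clear > /sys/fs/lustre/ldlm/namespaces/<ns>/lru_size
 *     # keep at most 800 unused locks (disables lru_resize if it was active)
 *     echo 800 > /sys/fs/lustre/ldlm/namespaces/<ns>/lru_size
 *     # 0 re-enables lru_resize when the server originally supported it
 *     echo 0 > /sys/fs/lustre/ldlm/namespaces/<ns>/lru_size
 */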
420
421 static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
422                                 char *buf)
423 {
424         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
425                                                  ns_kobj);
426
427         return sprintf(buf, "%lld\n", ktime_to_ms(ns->ns_max_age));
428 }
429
430 static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr,
431                                  const char *buffer, size_t count)
432 {
433         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
434                                                  ns_kobj);
435         int scale = NSEC_PER_MSEC;
436         unsigned long long tmp;
437         char *buf;
438
439         /* Did the user ask in seconds or milliseconds? Default is in ms. */
440         buf = strstr(buffer, "ms");
441         if (!buf) {
442                 buf = strchr(buffer, 's');
443                 if (buf)
444                         scale = NSEC_PER_SEC;
445         }
446
447         if (buf)
448                 *buf = '\0';
449
450         if (kstrtoull(buffer, 10, &tmp))
451                 return -EINVAL;
452
453         ns->ns_max_age = ktime_set(0, tmp * scale);
454
455         return count;
456 }
457 LUSTRE_RW_ATTR(lru_max_age);
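
/*
 * Input format for lru_max_age above: a bare number or an "ms" suffix is
 * taken as milliseconds, an "s" suffix as seconds, so "3900000", "3900000ms"
 * and "3900s" all set the same 3900 second maximum age (stored as a ktime
 * in ns_max_age).
 */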
458
459 static ssize_t early_lock_cancel_show(struct kobject *kobj,
460                                       struct attribute *attr,
461                                       char *buf)
462 {
463         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
464                                                  ns_kobj);
465
466         return sprintf(buf, "%d\n", ns_connect_cancelset(ns));
467 }
468
469 static ssize_t early_lock_cancel_store(struct kobject *kobj,
470                                        struct attribute *attr,
471                                        const char *buffer,
472                                        size_t count)
473 {
474         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
475                                                  ns_kobj);
476         unsigned long supp = -1;
477         int rc;
478
479         rc = kstrtoul(buffer, 10, &supp);
480         if (rc < 0)
481                 return rc;
482
483         if (supp == 0)
484                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
485         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
486                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
487         return count;
488 }
489 LUSTRE_RW_ATTR(early_lock_cancel);
490
491 static ssize_t dirty_age_limit_show(struct kobject *kobj,
492                                     struct attribute *attr, char *buf)
493 {
494         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
495                                                  ns_kobj);
496
497         return sprintf(buf, "%llu\n", ns->ns_dirty_age_limit);
498 }
499
500 static ssize_t dirty_age_limit_store(struct kobject *kobj,
501                                      struct attribute *attr,
502                                      const char *buffer, size_t count)
503 {
504         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
505                                                  ns_kobj);
506         unsigned long long tmp;
507
508         if (kstrtoull(buffer, 10, &tmp))
509                 return -EINVAL;
510
511         ns->ns_dirty_age_limit = tmp;
512
513         return count;
514 }
515 LUSTRE_RW_ATTR(dirty_age_limit);
516
517 #ifdef HAVE_SERVER_SUPPORT
518 static ssize_t ctime_age_limit_show(struct kobject *kobj,
519                                     struct attribute *attr, char *buf)
520 {
521         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
522                                                  ns_kobj);
523
524         return sprintf(buf, "%llu\n", ns->ns_ctime_age_limit);
525 }
526
527 static ssize_t ctime_age_limit_store(struct kobject *kobj,
528                                      struct attribute *attr,
529                                      const char *buffer, size_t count)
530 {
531         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
532                                                  ns_kobj);
533         unsigned long long tmp;
534
535         if (kstrtoull(buffer, 10, &tmp))
536                 return -EINVAL;
537
538         ns->ns_ctime_age_limit = tmp;
539
540         return count;
541 }
542 LUSTRE_RW_ATTR(ctime_age_limit);
543
544 static ssize_t lock_timeouts_show(struct kobject *kobj, struct attribute *attr,
545                                   char *buf)
546 {
547         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
548                                                  ns_kobj);
549
550         return sprintf(buf, "%d\n", ns->ns_timeouts);
551 }
552 LUSTRE_RO_ATTR(lock_timeouts);
553
554 static ssize_t max_nolock_bytes_show(struct kobject *kobj,
555                                      struct attribute *attr, char *buf)
556 {
557         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
558                                                  ns_kobj);
559
560         return sprintf(buf, "%u\n", ns->ns_max_nolock_size);
561 }
562
563 static ssize_t max_nolock_bytes_store(struct kobject *kobj,
564                                       struct attribute *attr,
565                                       const char *buffer, size_t count)
566 {
567         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
568                                                  ns_kobj);
569         unsigned long tmp;
570         int err;
571
572         err = kstrtoul(buffer, 10, &tmp);
573         if (err != 0)
574                 return -EINVAL;
575
576         ns->ns_max_nolock_size = tmp;
577
578         return count;
579 }
580 LUSTRE_RW_ATTR(max_nolock_bytes);
581
582 static ssize_t contention_seconds_show(struct kobject *kobj,
583                                        struct attribute *attr, char *buf)
584 {
585         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
586                                                  ns_kobj);
587
588         return sprintf(buf, "%llu\n", ns->ns_contention_time);
589 }
590
591 static ssize_t contention_seconds_store(struct kobject *kobj,
592                                         struct attribute *attr,
593                                         const char *buffer, size_t count)
594 {
595         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
596                                                  ns_kobj);
597         unsigned long long tmp;
598
599         if (kstrtoull(buffer, 10, &tmp))
600                 return -EINVAL;
601
602         ns->ns_contention_time = tmp;
603
604         return count;
605 }
606 LUSTRE_RW_ATTR(contention_seconds);
607
608 static ssize_t contended_locks_show(struct kobject *kobj,
609                                     struct attribute *attr, char *buf)
610 {
611         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
612                                                  ns_kobj);
613
614         return sprintf(buf, "%u\n", ns->ns_contended_locks);
615 }
616
617 static ssize_t contended_locks_store(struct kobject *kobj,
618                                      struct attribute *attr,
619                                      const char *buffer, size_t count)
620 {
621         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
622                                                  ns_kobj);
623         unsigned long tmp;
624         int err;
625
626         err = kstrtoul(buffer, 10, &tmp);
627         if (err != 0)
628                 return -EINVAL;
629
630         ns->ns_contended_locks = tmp;
631
632         return count;
633 }
634 LUSTRE_RW_ATTR(contended_locks);
635
636 static ssize_t max_parallel_ast_show(struct kobject *kobj,
637                                      struct attribute *attr, char *buf)
638 {
639         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
640                                                  ns_kobj);
641
642         return sprintf(buf, "%u\n", ns->ns_max_parallel_ast);
643 }
644
645 static ssize_t max_parallel_ast_store(struct kobject *kobj,
646                                       struct attribute *attr,
647                                       const char *buffer, size_t count)
648 {
649         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
650                                                  ns_kobj);
651         unsigned long tmp;
652         int err;
653
654         err = kstrtoul(buffer, 10, &tmp);
655         if (err != 0)
656                 return -EINVAL;
657
658         ns->ns_max_parallel_ast = tmp;
659
660         return count;
661 }
662 LUSTRE_RW_ATTR(max_parallel_ast);
663
664 #endif /* HAVE_SERVER_SUPPORT */
665
666 /* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */
667 static struct attribute *ldlm_ns_attrs[] = {
668         &lustre_attr_resource_count.attr,
669         &lustre_attr_lock_count.attr,
670         &lustre_attr_lock_unused_count.attr,
671         &lustre_attr_lru_size.attr,
672         &lustre_attr_lru_max_age.attr,
673         &lustre_attr_early_lock_cancel.attr,
674         &lustre_attr_dirty_age_limit.attr,
675 #ifdef HAVE_SERVER_SUPPORT
676         &lustre_attr_ctime_age_limit.attr,
677         &lustre_attr_lock_timeouts.attr,
678         &lustre_attr_max_nolock_bytes.attr,
679         &lustre_attr_contention_seconds.attr,
680         &lustre_attr_contended_locks.attr,
681         &lustre_attr_max_parallel_ast.attr,
682 #endif
683         NULL,
684 };
685
686 static void ldlm_ns_release(struct kobject *kobj)
687 {
688         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
689                                                  ns_kobj);
690         complete(&ns->ns_kobj_unregister);
691 }
692
693 static struct kobj_type ldlm_ns_ktype = {
694         .default_attrs  = ldlm_ns_attrs,
695         .sysfs_ops      = &lustre_sysfs_ops,
696         .release        = ldlm_ns_release,
697 };
698
699 static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns)
700 {
701         if (IS_ERR_OR_NULL(ns->ns_debugfs_entry))
702                 CERROR("dlm namespace %s has no debugfs dir?\n",
703                        ldlm_ns_name(ns));
704         else
705                 debugfs_remove_recursive(ns->ns_debugfs_entry);
706
707         if (ns->ns_stats != NULL)
708                 lprocfs_free_stats(&ns->ns_stats);
709 }
710
711 void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns)
712 {
713         kobject_put(&ns->ns_kobj);
714         wait_for_completion(&ns->ns_kobj_unregister);
715 }
716
717 int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
718 {
719         int err;
720
721         ns->ns_kobj.kset = ldlm_ns_kset;
722         init_completion(&ns->ns_kobj_unregister);
723         err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL,
724                                    "%s", ldlm_ns_name(ns));
725
726         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
727         if (!ns->ns_stats) {
728                 kobject_put(&ns->ns_kobj);
729                 return -ENOMEM;
730         }
731
732         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
733                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
734
735         return err;
736 }
737
738 static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
739 {
740         struct dentry *ns_entry;
741
742         if (!IS_ERR_OR_NULL(ns->ns_debugfs_entry)) {
743                 ns_entry = ns->ns_debugfs_entry;
744         } else {
745                 ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
746                                               ldlm_ns_debugfs_dir);
747                 if (!ns_entry)
748                         return -ENOMEM;
749                 ns->ns_debugfs_entry = ns_entry;
750         }
751
752         return 0;
753 }
754 #undef MAX_STRING_SIZE
755
756 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
757                                   const void *key, unsigned int mask)
758 {
759         const struct ldlm_res_id *id = key;
760         unsigned int val = 0;
761         unsigned int i;
762
763         for (i = 0; i < RES_NAME_SIZE; i++)
764                 val += id->name[i];
765         return val & mask;
766 }
767
768 static unsigned int ldlm_res_hop_fid_hash(const struct ldlm_res_id *id, unsigned int bits)
769 {
770         struct lu_fid       fid;
771         __u32               hash;
772         __u32               val;
773
774         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
775         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
776         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
777
778         hash = fid_flatten32(&fid);
779         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
780         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
781                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
782         } else {
783                 val = fid_oid(&fid);
784         }
785         hash += (val >> 5) + (val << 11);
786         return cfs_hash_32(hash, bits);
787 }
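
/*
 * Sketch of the mapping used by ldlm_res_hop_fid_hash() above: the resource
 * name is treated as an encoded FID,
 *
 *     id->name[LUSTRE_RES_ID_SEQ_OFF]      -> fid.f_seq
 *     id->name[LUSTRE_RES_ID_VER_OID_OFF]  -> fid.f_oid (low 32 bits)
 *                                             fid.f_ver (high 32 bits)
 *     id->name[LUSTRE_RES_ID_HSH_OFF]      -> extra mixing value, when set
 *
 * and the mixed 32-bit hash is folded down to @bits bits with cfs_hash_32(),
 * i.e. it selects one of the 2^bits ns_rs_buckets.
 */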
788
789 static void *ldlm_res_hop_key(struct hlist_node *hnode)
790 {
791         struct ldlm_resource   *res;
792
793         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
794         return &res->lr_name;
795 }
796
797 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
798 {
799         struct ldlm_resource   *res;
800
801         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
802         return ldlm_res_eq((const struct ldlm_res_id *)key,
803                            (const struct ldlm_res_id *)&res->lr_name);
804 }
805
806 static void *ldlm_res_hop_object(struct hlist_node *hnode)
807 {
808         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
809 }
810
811 static void
812 ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
813 {
814         struct ldlm_resource *res;
815
816         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
817         ldlm_resource_getref(res);
818 }
819
820 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
821 {
822         struct ldlm_resource *res;
823
824         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
825         ldlm_resource_putref(res);
826 }
827
828 static struct cfs_hash_ops ldlm_ns_hash_ops = {
829         .hs_hash        = ldlm_res_hop_hash,
830         .hs_key         = ldlm_res_hop_key,
831         .hs_keycmp      = ldlm_res_hop_keycmp,
832         .hs_keycpy      = NULL,
833         .hs_object      = ldlm_res_hop_object,
834         .hs_get         = ldlm_res_hop_get_locked,
835         .hs_put         = ldlm_res_hop_put
836 };
837
838 static struct {
839         /** hash bucket bits */
840         unsigned                nsd_bkt_bits;
841         /** hash bits */
842         unsigned                nsd_all_bits;
843 } ldlm_ns_hash_defs[] = {
844         [LDLM_NS_TYPE_MDC] = {
845                 .nsd_bkt_bits   = 11,
846                 .nsd_all_bits   = 16,
847         },
848         [LDLM_NS_TYPE_MDT] = {
849                 .nsd_bkt_bits   = 14,
850                 .nsd_all_bits   = 21,
851         },
852         [LDLM_NS_TYPE_OSC] = {
853                 .nsd_bkt_bits   = 8,
854                 .nsd_all_bits   = 12,
855         },
856         [LDLM_NS_TYPE_OST] = {
857                 .nsd_bkt_bits   = 11,
858                 .nsd_all_bits   = 17,
859         },
860         [LDLM_NS_TYPE_MGC] = {
861                 .nsd_bkt_bits   = 3,
862                 .nsd_all_bits   = 4,
863         },
864         [LDLM_NS_TYPE_MGT] = {
865                 .nsd_bkt_bits   = 3,
866                 .nsd_all_bits   = 4,
867         },
868 };
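
/*
 * Sizing sketch: ns_bucket_bits ends up as nsd_all_bits - nsd_bkt_bits, so
 * with the table above an MDT namespace gets a 2^21-slot resource hash and
 * 2^(21 - 14) = 128 ns_rs_buckets, while an MGC namespace gets a 2^4-slot
 * hash and 2^(4 - 3) = 2 buckets.
 */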
869
870 /**
871  * Create and initialize new empty namespace.
872  */
873 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
874                                           enum ldlm_side client,
875                                           enum ldlm_appetite apt,
876                                           enum ldlm_ns_type ns_type)
877 {
878         struct ldlm_namespace *ns = NULL;
879         int idx;
880         int rc;
881
882         ENTRY;
883         LASSERT(obd != NULL);
884
885         rc = ldlm_get_ref();
886         if (rc) {
887                 CERROR("ldlm_get_ref failed: %d\n", rc);
888                 RETURN(NULL);
889         }
890
891         if (ns_type >= ARRAY_SIZE(ldlm_ns_hash_defs) ||
892             ldlm_ns_hash_defs[ns_type].nsd_bkt_bits == 0) {
893                 CERROR("Unknown type %d for ns %s\n", ns_type, name);
894                 GOTO(out_ref, NULL);
895         }
896
897         OBD_ALLOC_PTR(ns);
898         if (!ns)
899                 GOTO(out_ref, NULL);
900
901         ns->ns_rs_hash = cfs_hash_create(name,
902                                          ldlm_ns_hash_defs[ns_type].nsd_all_bits,
903                                          ldlm_ns_hash_defs[ns_type].nsd_all_bits,
904                                          ldlm_ns_hash_defs[ns_type].nsd_bkt_bits,
905                                          0,
906                                          CFS_HASH_MIN_THETA,
907                                          CFS_HASH_MAX_THETA,
908                                          &ldlm_ns_hash_ops,
909                                          CFS_HASH_DEPTH |
910                                          CFS_HASH_BIGNAME |
911                                          CFS_HASH_SPIN_BKTLOCK |
912                                          CFS_HASH_NO_ITEMREF);
913         if (ns->ns_rs_hash == NULL)
914                 GOTO(out_ns, NULL);
915
916         ns->ns_bucket_bits = ldlm_ns_hash_defs[ns_type].nsd_all_bits -
917                              ldlm_ns_hash_defs[ns_type].nsd_bkt_bits;
918
919         OBD_ALLOC_LARGE(ns->ns_rs_buckets,
920                         BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
921         if (!ns->ns_rs_buckets)
922                 goto out_hash;
923
924         for (idx = 0; idx < (1 << ns->ns_bucket_bits); idx++) {
925                 struct ldlm_ns_bucket *nsb = &ns->ns_rs_buckets[idx];
926
927                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
928                 nsb->nsb_namespace = ns;
929                 nsb->nsb_reclaim_start = 0;
930                 atomic_set(&nsb->nsb_count, 0);
931         }
932
933         ns->ns_obd = obd;
934         ns->ns_appetite = apt;
935         ns->ns_client = client;
936         ns->ns_name = kstrdup(name, GFP_KERNEL);
937         if (!ns->ns_name)
938                 goto out_hash;
939
940         INIT_LIST_HEAD(&ns->ns_list_chain);
941         INIT_LIST_HEAD(&ns->ns_unused_list);
942         spin_lock_init(&ns->ns_lock);
943         atomic_set(&ns->ns_bref, 0);
944         init_waitqueue_head(&ns->ns_waitq);
945
946         ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
947         ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
948         ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
949
950         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
951         ns->ns_nr_unused          = 0;
952         ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
953         ns->ns_max_age            = ktime_set(LDLM_DEFAULT_MAX_ALIVE, 0);
954         ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
955         ns->ns_dirty_age_limit    = LDLM_DIRTY_AGE_LIMIT;
956         ns->ns_timeouts           = 0;
957         ns->ns_orig_connect_flags = 0;
958         ns->ns_connect_flags      = 0;
959         ns->ns_stopping           = 0;
960         ns->ns_reclaim_start      = 0;
961         ns->ns_last_pos           = &ns->ns_unused_list;
962
963         rc = ldlm_namespace_sysfs_register(ns);
964         if (rc) {
965                 CERROR("Can't initialize ns sysfs, rc %d\n", rc);
966                 GOTO(out_hash, rc);
967         }
968
969         rc = ldlm_namespace_debugfs_register(ns);
970         if (rc) {
971                 CERROR("Can't initialize ns proc, rc %d\n", rc);
972                 GOTO(out_sysfs, rc);
973         }
974
975         idx = ldlm_namespace_nr_read(client);
976         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
977         if (rc) {
978                 CERROR("Can't initialize lock pool, rc %d\n", rc);
979                 GOTO(out_proc, rc);
980         }
981
982         ldlm_namespace_register(ns, client);
983         RETURN(ns);
984 out_proc:
985         ldlm_namespace_debugfs_unregister(ns);
986 out_sysfs:
987         ldlm_namespace_sysfs_unregister(ns);
988         ldlm_namespace_cleanup(ns, 0);
989 out_hash:
990         OBD_FREE_LARGE(ns->ns_rs_buckets,
991                        BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
992         kfree(ns->ns_name);
993         cfs_hash_putref(ns->ns_rs_hash);
994 out_ns:
995         OBD_FREE_PTR(ns);
996 out_ref:
997         ldlm_put_ref();
998         RETURN(NULL);
999 }
1000 EXPORT_SYMBOL(ldlm_namespace_new);
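
/*
 * Minimal caller sketch for ldlm_namespace_new() (the obd device, import and
 * namespace name here are purely illustrative):
 *
 *     struct ldlm_namespace *ns;
 *
 *     ns = ldlm_namespace_new(obd, "example-OST0000", LDLM_NAMESPACE_SERVER,
 *                             LDLM_NAMESPACE_GREEDY, LDLM_NS_TYPE_OST);
 *     if (ns == NULL)
 *             return -ENOMEM;
 *     ...
 *     ldlm_namespace_free(ns, imp, 1);
 *
 * Note that failure is reported by returning NULL, not ERR_PTR().
 */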
1001
1002 /**
1003  * Cancel and destroy all locks on a resource.
1004  *
1005  * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
1006  * clean up.  This is currently only used for recovery, and we make
1007  * certain assumptions as a result--notably, that we shouldn't cancel
1008  * locks with refs.
1009  */
1010 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
1011                              __u64 flags)
1012 {
1013         struct list_head *tmp;
1014         int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
1015         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
1016
1017         do {
1018                 struct ldlm_lock *lock = NULL;
1019
1020                 /* First, we look for a lock that has not been cleaned yet;
1021                  * all cleaned locks are marked by the CLEANED flag. */
1022                 lock_res(res);
1023                 list_for_each(tmp, q) {
1024                         lock = list_entry(tmp, struct ldlm_lock,
1025                                           l_res_link);
1026                         if (ldlm_is_cleaned(lock)) {
1027                                 lock = NULL;
1028                                 continue;
1029                         }
1030                         LDLM_LOCK_GET(lock);
1031                         ldlm_set_cleaned(lock);
1032                         break;
1033                 }
1034
1035                 if (lock == NULL) {
1036                         unlock_res(res);
1037                         break;
1038                 }
1039
1040                 /* Set CBPENDING so nothing in the cancellation path
1041                  * can match this lock. */
1042                 ldlm_set_cbpending(lock);
1043                 ldlm_set_failed(lock);
1044                 lock->l_flags |= flags;
1045
1046                 /* ... without sending a CANCEL message for local_only. */
1047                 if (local_only)
1048                         ldlm_set_local_only(lock);
1049
1050                 if (local_only && (lock->l_readers || lock->l_writers)) {
1051                         /*
1052                          * This is a little bit gross, but much better than the
1053                          * alternative: pretend that we got a blocking AST from
1054                          * the server, so that when the lock is decref'd, it
1055                          * will go away ...
1056                          */
1057                         unlock_res(res);
1058                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
1059                         if (lock->l_flags & LDLM_FL_FAIL_LOC) {
1060                                 set_current_state(TASK_UNINTERRUPTIBLE);
1061                                 schedule_timeout(cfs_time_seconds(4));
1062                                 set_current_state(TASK_RUNNING);
1063                         }
1064                         if (lock->l_completion_ast)
1065                                 lock->l_completion_ast(lock,
1066                                                        LDLM_FL_FAILED, NULL);
1067                         LDLM_LOCK_RELEASE(lock);
1068                         continue;
1069                 }
1070
1071                 if (client) {
1072                         struct lustre_handle lockh;
1073
1074                         unlock_res(res);
1075                         ldlm_lock2handle(lock, &lockh);
1076                         rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
1077                         if (rc)
1078                                 CERROR("ldlm_cli_cancel: %d\n", rc);
1079                 } else {
1080                         unlock_res(res);
1081                         LDLM_DEBUG(lock,
1082                                    "Freeing a lock still held by a client node");
1083                         ldlm_lock_cancel(lock);
1084                 }
1085                 LDLM_LOCK_RELEASE(lock);
1086         } while (1);
1087 }
1088
1089 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1090                                struct hlist_node *hnode, void *arg)
1091 {
1092         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1093         __u64 flags = *(__u64 *)arg;
1094
1095         cleanup_resource(res, &res->lr_granted, flags);
1096         cleanup_resource(res, &res->lr_waiting, flags);
1097
1098         return 0;
1099 }
1100
1101 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1102                                   struct hlist_node *hnode, void *arg)
1103 {
1104         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
1105
1106         lock_res(res);
1107         CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
1108                "(%d) after lock cleanup; forcing cleanup.\n",
1109                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
1110                atomic_read(&res->lr_refcount) - 1);
1111
1112         /* Use D_NETERROR since it is in the default mask */
1113         ldlm_resource_dump(D_NETERROR, res);
1114         unlock_res(res);
1115         return 0;
1116 }
1117
1118 /**
1119  * Cancel and destroy all locks in the namespace.
1120  *
1121  * Typically used during eviction, when the server notifies the client that
1122  * it has been evicted and all of its state needs to be destroyed.
1123  * Also used during shutdown.
1124  */
1125 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
1126 {
1127         if (ns == NULL) {
1128                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
1129                 return ELDLM_OK;
1130         }
1131
1132         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
1133                                  &flags, 0);
1134         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
1135                                  NULL, 0);
1136         return ELDLM_OK;
1137 }
1138 EXPORT_SYMBOL(ldlm_namespace_cleanup);
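
/*
 * Usage note: during forced teardown the caller passes LDLM_FL_LOCAL_ONLY so
 * that cleanup_resource() skips the CANCEL RPCs, e.g. as
 * __ldlm_namespace_free() below does with
 *
 *     ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
 */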
1139
1140 /**
1141  * Attempts to free namespace.
1142  *
1143  * Only used when namespace goes away, like during an unmount.
1144  */
1145 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
1146 {
1147         ENTRY;
1148
1149         /* At shutdown time, don't call the cancellation callback */
1150         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
1151
1152         if (atomic_read(&ns->ns_bref) > 0) {
1153                 int rc;
1154                 CDEBUG(D_DLMTRACE,
1155                        "dlm namespace %s free waiting on refcount %d\n",
1156                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
1157 force_wait:
1158                 if (force)
1159                         rc = wait_event_idle_timeout(
1160                                 ns->ns_waitq,
1161                                 atomic_read(&ns->ns_bref) == 0,
1162                                 cfs_time_seconds(1) / 4);
1163                 else
1164                         rc = l_wait_event_abortable(
1165                                 ns->ns_waitq, atomic_read(&ns->ns_bref) == 0);
1166
1167                 /* Forced cleanups should be able to reclaim all references,
1168                  * so it's safe to wait forever... we can't leak locks... */
1169                 if (force && rc == 0) {
1170                         rc = -ETIMEDOUT;
1171                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
1172                                        "namespace with %d resources in use, "
1173                                        "(rc=%d)\n", ldlm_ns_name(ns),
1174                                        atomic_read(&ns->ns_bref), rc);
1175                         GOTO(force_wait, rc);
1176                 }
1177
1178                 if (atomic_read(&ns->ns_bref)) {
1179                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
1180                                        "with %d resources in use, (rc=%d)\n",
1181                                        ldlm_ns_name(ns),
1182                                        atomic_read(&ns->ns_bref), rc);
1183                         RETURN(ELDLM_NAMESPACE_EXISTS);
1184                 }
1185                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
1186                        ldlm_ns_name(ns));
1187         }
1188
1189         RETURN(ELDLM_OK);
1190 }
1191
1192 /**
1193  * Performs various cleanups for passed \a ns to make it drop refc and be
1194  * ready for freeing. Waits for refc == 0.
1195  *
1196  * The following is done:
1197  * (0) Unregister \a ns from its list to make inaccessible for potential
1198  * users like pools thread and others;
1199  * (1) Clear all locks in \a ns.
1200  */
1201 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
1202                                struct obd_import *imp,
1203                                int force)
1204 {
1205         int rc;
1206
1207         ENTRY;
1208         if (!ns) {
1209                 EXIT;
1210                 return;
1211         }
1212
1213         spin_lock(&ns->ns_lock);
1214         ns->ns_stopping = 1;
1215         spin_unlock(&ns->ns_lock);
1216
1217         /*
1218          * Can fail with -EINTR when force == 0 in which case try harder.
1219          */
1220         rc = __ldlm_namespace_free(ns, force);
1221         if (rc != ELDLM_OK) {
1222                 if (imp) {
1223                         ptlrpc_disconnect_import(imp, 0);
1224                         ptlrpc_invalidate_import(imp);
1225                 }
1226
1227                 /*
1228                  * With all requests dropped and the import inactive
1229          * we are guaranteed that all references will be dropped.
1230                  */
1231                 rc = __ldlm_namespace_free(ns, 1);
1232                 LASSERT(rc == 0);
1233         }
1234         EXIT;
1235 }
1236 EXPORT_SYMBOL(ldlm_namespace_free_prior);
1237
1238 /**
1239  * Frees memory structures related to \a ns. This is only done when
1240  * ldlm_namespace_free_prior() successfully removed all resources
1241  * referencing \a ns and its refc == 0.
1242  */
1243 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
1244 {
1245         ENTRY;
1246         if (!ns) {
1247                 EXIT;
1248                 return;
1249         }
1250
1251         /* Make sure that nobody can find this ns in its list. */
1252         ldlm_namespace_unregister(ns, ns->ns_client);
1253         /* Fini pool _before_ parent proc dir is removed. This is important as
1254          * ldlm_pool_fini() removes its own proc dir, which is a child of @dir.
1255          * Removing it after @dir may cause oops. */
1256         ldlm_pool_fini(&ns->ns_pool);
1257
1258         ldlm_namespace_debugfs_unregister(ns);
1259         ldlm_namespace_sysfs_unregister(ns);
1260         cfs_hash_putref(ns->ns_rs_hash);
1261         OBD_FREE_LARGE(ns->ns_rs_buckets,
1262                        BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
1263         kfree(ns->ns_name);
1264         /* Namespace \a ns should not be on the list at this time, otherwise
1265          * this will cause issues related to using a freed \a ns in the poold
1266          * thread.
1267          */
1268         LASSERT(list_empty(&ns->ns_list_chain));
1269         OBD_FREE_PTR(ns);
1270         ldlm_put_ref();
1271         EXIT;
1272 }
1273 EXPORT_SYMBOL(ldlm_namespace_free_post);
1274
1275 /**
1276  * Cleanup the resource, and free namespace.
1277  * bug 12864:
1278  * Deadlock issue:
1279  * proc1: destroy import
1280  *        class_disconnect_export(grab cl_sem) ->
1281  *              -> ldlm_namespace_free ->
1282  *              -> lprocfs_remove(grab _lprocfs_lock).
1283  * proc2: read proc info
1284  *        lprocfs_fops_read(grab _lprocfs_lock) ->
1285  *              -> osc_rd_active, etc(grab cl_sem).
1286  *
1287  * So ldlm_namespace_free() is split into two parts: the first part,
1288  * ldlm_namespace_free_prior(), cleans up the resources which are still
1289  * being used; the second part, ldlm_namespace_free_post(), unregisters the
1290  * lprocfs entries and then frees the memory. It is called without cli->cl_sem
1291  * held.
1292  */
1293 void ldlm_namespace_free(struct ldlm_namespace *ns,
1294                          struct obd_import *imp,
1295                          int force)
1296 {
1297         ldlm_namespace_free_prior(ns, imp, force);
1298         ldlm_namespace_free_post(ns);
1299 }
1300 EXPORT_SYMBOL(ldlm_namespace_free);
1301
1302 void ldlm_namespace_get(struct ldlm_namespace *ns)
1303 {
1304         atomic_inc(&ns->ns_bref);
1305 }
1306
1307 /* This is only for callers that care about refcount */
1308 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1309 {
1310         return atomic_inc_return(&ns->ns_bref);
1311 }
1312
1313 void ldlm_namespace_put(struct ldlm_namespace *ns)
1314 {
1315         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1316                 wake_up(&ns->ns_waitq);
1317                 spin_unlock(&ns->ns_lock);
1318         }
1319 }
1320
1321 /** Register \a ns in the list of namespaces */
1322 void ldlm_namespace_register(struct ldlm_namespace *ns, enum ldlm_side client)
1323 {
1324         mutex_lock(ldlm_namespace_lock(client));
1325         LASSERT(list_empty(&ns->ns_list_chain));
1326         list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
1327         ldlm_namespace_nr_inc(client);
1328         mutex_unlock(ldlm_namespace_lock(client));
1329 }
1330
1331 /** Unregister \a ns from the list of namespaces. */
1332 void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client)
1333 {
1334         mutex_lock(ldlm_namespace_lock(client));
1335         LASSERT(!list_empty(&ns->ns_list_chain));
1336         /* Some asserts and possibly other parts of the code are still
1337          * using list_empty(&ns->ns_list_chain). This is why it is
1338          * important to use list_del_init() here. */
1339         list_del_init(&ns->ns_list_chain);
1340         ldlm_namespace_nr_dec(client);
1341         mutex_unlock(ldlm_namespace_lock(client));
1342 }
1343
1344 /** Should be called with ldlm_namespace_lock(client) taken. */
1345 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1346                                           enum ldlm_side client)
1347 {
1348         LASSERT(!list_empty(&ns->ns_list_chain));
1349         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1350         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1351 }
1352
1353 /** Should be called with ldlm_namespace_lock(client) taken. */
1354 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1355                                             enum ldlm_side client)
1356 {
1357         LASSERT(!list_empty(&ns->ns_list_chain));
1358         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1359         list_move_tail(&ns->ns_list_chain,
1360                        ldlm_namespace_inactive_list(client));
1361 }
1362
1363 /** Should be called with ldlm_namespace_lock(client) taken. */
1364 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
1365 {
1366         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1367         LASSERT(!list_empty(ldlm_namespace_list(client)));
1368         return container_of(ldlm_namespace_list(client)->next,
1369                             struct ldlm_namespace, ns_list_chain);
1370 }
1371
1372 static bool ldlm_resource_extent_new(struct ldlm_resource *res)
1373 {
1374         int idx;
1375
1376         OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
1377                        sizeof(*res->lr_itree) * LCK_MODE_NUM);
1378         if (res->lr_itree == NULL)
1379                 return false;
1380         /* Initialize interval trees for each lock mode. */
1381         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1382                 res->lr_itree[idx].lit_size = 0;
1383                 res->lr_itree[idx].lit_mode = 1 << idx;
1384                 res->lr_itree[idx].lit_root = NULL;
1385         }
1386         return true;
1387 }
1388
1389 static bool ldlm_resource_inodebits_new(struct ldlm_resource *res)
1390 {
1391         int i;
1392
1393         OBD_ALLOC_PTR(res->lr_ibits_queues);
1394         if (res->lr_ibits_queues == NULL)
1395                 return false;
1396         for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
1397                 INIT_LIST_HEAD(&res->lr_ibits_queues->liq_waiting[i]);
1398         return true;
1399 }
1400
1401 /** Create and initialize new resource. */
1402 static struct ldlm_resource *ldlm_resource_new(enum ldlm_type ldlm_type)
1403 {
1404         struct ldlm_resource *res;
1405         bool rc;
1406
1407         OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1408         if (res == NULL)
1409                 return NULL;
1410
1411         switch (ldlm_type) {
1412         case LDLM_EXTENT:
1413                 rc = ldlm_resource_extent_new(res);
1414                 break;
1415         case LDLM_IBITS:
1416                 rc = ldlm_resource_inodebits_new(res);
1417                 break;
1418         default:
1419                 rc = true;
1420                 break;
1421         }
1422         if (!rc) {
1423                 OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1424                 return NULL;
1425         }
1426
1427         INIT_LIST_HEAD(&res->lr_granted);
1428         INIT_LIST_HEAD(&res->lr_waiting);
1429
1430         atomic_set(&res->lr_refcount, 1);
1431         spin_lock_init(&res->lr_lock);
1432         lu_ref_init(&res->lr_reference);
1433
1434         /* Since LVB init can be delayed now, there is no longer a need to
1435          * acquire the mutex here immediately. */
1436         mutex_init(&res->lr_lvb_mutex);
1437         res->lr_lvb_initialized = false;
1438
1439         return res;
1440 }
1441
1442 static void ldlm_resource_free(struct ldlm_resource *res)
1443 {
1444         if (res->lr_type == LDLM_EXTENT) {
1445                 if (res->lr_itree != NULL)
1446                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1447                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1448         } else if (res->lr_type == LDLM_IBITS) {
1449                 if (res->lr_ibits_queues != NULL)
1450                         OBD_FREE_PTR(res->lr_ibits_queues);
1451         }
1452
1453         OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1454 }
1455
1456 /**
1457  * Return a reference to resource with given name, creating it if necessary.
1458  * Args: namespace with ns_lock unlocked
1459  * Locks: takes and releases NS hash-lock and res->lr_lock
1460  * Returns: referenced, unlocked ldlm_resource or ERR_PTR() on failure
1461  */
1462 struct ldlm_resource *
1463 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1464                   const struct ldlm_res_id *name, enum ldlm_type type,
1465                   int create)
1466 {
1467         struct hlist_node       *hnode;
1468         struct ldlm_resource    *res = NULL;
1469         struct cfs_hash_bd      bd;
1470         __u64                   version;
1471         int                     ns_refcount = 0;
1472         int                     hash;
1473
1474         LASSERT(ns != NULL);
1475         LASSERT(parent == NULL);
1476         LASSERT(ns->ns_rs_hash != NULL);
1477         LASSERT(name->name[0] != 0);
1478
1479         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1480         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1481         if (hnode != NULL) {
1482                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1483                 GOTO(found, res);
1484         }
1485
1486         version = cfs_hash_bd_version_get(&bd);
1487         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1488
1489         if (create == 0)
1490                 return ERR_PTR(-ENOENT);
1491
1492         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1493                  "type: %d\n", type);
1494         res = ldlm_resource_new(type);
1495         if (res == NULL)
1496                 return ERR_PTR(-ENOMEM);
1497
1498         hash = ldlm_res_hop_fid_hash(name, ns->ns_bucket_bits);
1499         res->lr_ns_bucket = &ns->ns_rs_buckets[hash];
1500         res->lr_name = *name;
1501         res->lr_type = type;
1502
1503         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1504         hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1505                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1506
1507         if (hnode != NULL) {
1508                 /* Someone won the race and already added the resource. */
1509                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1510                 /* Clean lu_ref for failed resource. */
1511                 lu_ref_fini(&res->lr_reference);
1512                 ldlm_resource_free(res);
1513 found:
1514                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1515                 return res;
1516         }
1517         /* We won! Let's add the resource. */
1518         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1519         if (atomic_inc_return(&res->lr_ns_bucket->nsb_count) == 1)
1520                 ns_refcount = ldlm_namespace_get_return(ns);
1521
1522         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1523
1524         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1525
1526         /* Let's see if we happened to be the very first resource in this
1527          * namespace. If so, and this is a client namespace, we need to move
1528          * the namespace into the active namespaces list to be patrolled by
1529          * the ldlm_poold. */
1530         if (ns_is_client(ns) && ns_refcount == 1) {
1531                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1532                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1533                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1534         }
1535
1536         return res;
1537 }
1538 EXPORT_SYMBOL(ldlm_resource_get);
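/*
 * Usage sketch (illustrative only, not taken from a real call site): look up
 * or create a resource in a namespace and drop the reference when done.
 * "ns" and "res_id" are assumed to exist in the caller; parent must be NULL.
 *
 *	struct ldlm_resource *res;
 *
 *	res = ldlm_resource_get(ns, NULL, &res_id, LDLM_IBITS, 1);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	... inspect or modify res under lock_res(res)/unlock_res(res) ...
 *	ldlm_resource_putref(res);
 */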
1539
1540 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1541 {
1542         LASSERT(res != NULL);
1543         LASSERT(res != LP_POISON);
1544         atomic_inc(&res->lr_refcount);
1545         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1546                atomic_read(&res->lr_refcount));
1547         return res;
1548 }
1549
1550 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1551                                          struct ldlm_resource *res)
1552 {
1553         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1554
1555         if (!list_empty(&res->lr_granted)) {
1556                 ldlm_resource_dump(D_ERROR, res);
1557                 LBUG();
1558         }
1559
1560         if (!list_empty(&res->lr_waiting)) {
1561                 ldlm_resource_dump(D_ERROR, res);
1562                 LBUG();
1563         }
1564
1565         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1566                                bd, &res->lr_hash);
1567         lu_ref_fini(&res->lr_reference);
1568         if (atomic_dec_and_test(&nsb->nsb_count))
1569                 ldlm_namespace_put(nsb->nsb_namespace);
1570 }
1571
1572 /* Returns 1 if the resource was freed, 0 if it remains. */
1573 int ldlm_resource_putref(struct ldlm_resource *res)
1574 {
1575         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1576         struct cfs_hash_bd   bd;
1577
1578         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1579         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1580                res, atomic_read(&res->lr_refcount) - 1);
1581
1582         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1583         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1584                 __ldlm_resource_putref_final(&bd, res);
1585                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1586                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1587                         ns->ns_lvbo->lvbo_free(res);
1588                 ldlm_resource_free(res);
1589                 return 1;
1590         }
1591         return 0;
1592 }
1593 EXPORT_SYMBOL(ldlm_resource_putref);
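/*
 * Illustrative note: the return value tells the caller whether this was the
 * last reference.  A hypothetical caller that must not touch the resource
 * after the final put could use it as follows:
 *
 *	if (ldlm_resource_putref(res))
 *		res = NULL;	resource was freed above
 */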
1594
1595 /**
1596  * Add a lock to the specified lock list of a given resource.
1597  */
1598 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1599                             struct ldlm_lock *lock)
1600 {
1601         check_res_locked(res);
1602
1603         LDLM_DEBUG(lock, "About to add this lock");
1604
1605         if (ldlm_is_destroyed(lock)) {
1606                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1607                 return;
1608         }
1609
1610         LASSERT(list_empty(&lock->l_res_link));
1611
1612         list_add_tail(&lock->l_res_link, head);
1613
1614         if (res->lr_type == LDLM_IBITS)
1615                 ldlm_inodebits_add_lock(res, head, lock);
1616 }
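/*
 * Usage sketch (illustrative): the resource lock must be held around the
 * list manipulation, e.g. when putting a lock on the granted list:
 *
 *	lock_res(res);
 *	ldlm_resource_add_lock(res, &res->lr_granted, lock);
 *	unlock_res(res);
 */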
1617
1618 /**
1619  * Insert a lock into a resource, immediately after the specified lock.
1620  *
1621  * The resource is obtained from the lock we are inserting after.
1622  */
1623 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1624                                      struct ldlm_lock *new)
1625 {
1626         struct ldlm_resource *res = original->l_resource;
1627
1628         check_res_locked(res);
1629
1630         ldlm_resource_dump(D_INFO, res);
1631         LDLM_DEBUG(new, "About to insert this lock after %p: ", original);
1632
1633         if (ldlm_is_destroyed(new)) {
1634                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1635                 goto out;
1636         }
1637
1638         LASSERT(list_empty(&new->l_res_link));
1639
1640         list_add(&new->l_res_link, &original->l_res_link);
1641  out:;
1642 }
1643
1644 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1645 {
1646         int type = lock->l_resource->lr_type;
1647
1648         check_res_locked(lock->l_resource);
1649         switch (type) {
1650         case LDLM_PLAIN:
1651                 ldlm_unlink_lock_skiplist(lock);
1652                 break;
1653         case LDLM_EXTENT:
1654                 ldlm_extent_unlink_lock(lock);
1655                 break;
1656         case LDLM_IBITS:
1657                 ldlm_inodebits_unlink_lock(lock);
1658                 break;
1659         }
1660         list_del_init(&lock->l_res_link);
1661 }
1662 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1663
1664 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1665 {
1666         desc->lr_type = res->lr_type;
1667         desc->lr_name = res->lr_name;
1668 }
1669
1670 /**
1671  * Print information about all locks in all namespaces on this node to the
1672  * debug log.
1673  */
1674 void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
1675 {
1676         struct list_head *tmp;
1677
1678         if (!((libcfs_debug | D_ERROR) & level))
1679                 return;
1680
1681         mutex_lock(ldlm_namespace_lock(client));
1682
1683         list_for_each(tmp, ldlm_namespace_list(client)) {
1684                 struct ldlm_namespace *ns;
1685
1686                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1687                 ldlm_namespace_dump(level, ns);
1688         }
1689
1690         mutex_unlock(ldlm_namespace_lock(client));
1691 }
1692
1693 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1694                               struct hlist_node *hnode, void *arg)
1695 {
1696         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1697         int    level = (int)(unsigned long)arg;
1698
1699         lock_res(res);
1700         ldlm_resource_dump(level, res);
1701         unlock_res(res);
1702
1703         return 0;
1704 }
1705
1706 /**
1707  * Print information about all locks in this namespace on this node to the
1708  * debug log.
1709  */
1710 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1711 {
1712         if (!((libcfs_debug | D_ERROR) & level))
1713                 return;
1714
1715         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1716                ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1717                ns_is_client(ns) ? "client" : "server");
1718
1719         if (ktime_get_seconds() < ns->ns_next_dump)
1720                 return;
1721
1722         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1723                                  ldlm_res_hash_dump,
1724                                  (void *)(unsigned long)level, 0);
1725         spin_lock(&ns->ns_lock);
1726         ns->ns_next_dump = ktime_get_seconds() + 10;
1727         spin_unlock(&ns->ns_lock);
1728 }
1729
1730 /**
1731  * Print information about all locks in this resource to the debug log.
1732  */
1733 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1734 {
1735         struct ldlm_lock *lock;
1736         unsigned int granted = 0;
1737
1738         BUILD_BUG_ON(RES_NAME_SIZE != 4);
1739
1740         if (!((libcfs_debug | D_ERROR) & level))
1741                 return;
1742
1743         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1744                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1745
1746         if (!list_empty(&res->lr_granted)) {
1747                 CDEBUG(level, "Granted locks (in reverse order):\n");
1748                 list_for_each_entry_reverse(lock, &res->lr_granted,
1749                                                 l_res_link) {
1750                         LDLM_DEBUG_LIMIT(level, lock, "###");
1751                         if (!(level & D_CANTMASK) &&
1752                             ++granted > ldlm_dump_granted_max) {
1753                                 CDEBUG(level,
1754                                        "only dumping %d granted locks to avoid DDoS.\n",
1755                                        granted);
1756                                 break;
1757                         }
1758                 }
1759         }
1760
1761         if (!list_empty(&res->lr_waiting)) {
1762                 CDEBUG(level, "Waiting locks:\n");
1763                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1764                         LDLM_DEBUG_LIMIT(level, lock, "###");
1765         }
1766 }
1767 EXPORT_SYMBOL(ldlm_resource_dump);