1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_resource.c
33  *
34  * Author: Phil Schwan <phil@clusterfs.com>
35  * Author: Peter Braam <braam@clusterfs.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LDLM
39 #include <lustre_dlm.h>
40 #include <lustre_fid.h>
41 #include <obd_class.h>
42 #include "ldlm_internal.h"
43
44 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
45 struct kmem_cache *ldlm_interval_tree_slab;
46 struct kmem_cache *ldlm_inodebits_slab;
47
48 int ldlm_srv_namespace_nr = 0;
49 int ldlm_cli_namespace_nr = 0;
50
51 DEFINE_MUTEX(ldlm_srv_namespace_lock);
52 LIST_HEAD(ldlm_srv_namespace_list);
53
54 DEFINE_MUTEX(ldlm_cli_namespace_lock);
55 /* Client namespaces that have active resources in them.
56  * Once all resources go away, ldlm_poold moves such namespaces to the
57  * inactive list. */
58 LIST_HEAD(ldlm_cli_active_namespace_list);
59 /* Client namespaces that don't have any locks in them */
60 LIST_HEAD(ldlm_cli_inactive_namespace_list);
61
62 static struct dentry *ldlm_debugfs_dir;
63 static struct dentry *ldlm_ns_debugfs_dir;
64 struct dentry *ldlm_svc_debugfs_dir;
65
66 /* During a debug dump, print at most this many granted locks for any one
67  * resource, to avoid a DDoS-like flood of debug output. */
68 static unsigned int ldlm_dump_granted_max = 256;
69
70 static ssize_t ldebugfs_dump_ns_seq_write(struct file *file,
71                                           const char __user *buffer,
72                                           size_t count, loff_t *off)
73 {
74         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
75         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
76         RETURN(count);
77 }
78
79 LDEBUGFS_FOPS_WR_ONLY(ldlm, dump_ns);
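/*
 * Note: any write to the "dump_namespaces" debugfs file (registered via
 * ldlm_debugfs_list below) invokes the handler above, which dumps every
 * server-side and client-side namespace to the debug log at the
 * D_DLMTRACE level; the written bytes themselves are ignored.
 */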
80
81 static int ldlm_rw_uint_seq_show(struct seq_file *m, void *v)
82 {
83         seq_printf(m, "%u\n", *(unsigned int *)m->private);
84         return 0;
85 }
86
87 static ssize_t
88 ldlm_rw_uint_seq_write(struct file *file, const char __user *buffer,
89                        size_t count, loff_t *off)
90 {
91         struct seq_file *seq = file->private_data;
92
93         if (!count)
94                 return 0;
95
96         return kstrtouint_from_user(buffer, count, 0,
97                                     (unsigned int *)seq->private);
98 }
99
100 LDEBUGFS_SEQ_FOPS(ldlm_rw_uint);
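/*
 * ldlm_rw_uint_fops is a generic show/store pair for a single unsigned
 * int addressed through the .data field of an lprocfs_vars entry; in
 * this file it backs the "dump_granted_max" tunable below.
 */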
101
102 #ifdef HAVE_SERVER_SUPPORT
103
104 static int seq_watermark_show(struct seq_file *m, void *data)
105 {
106         seq_printf(m, "%llu\n", *(__u64 *)m->private);
107         return 0;
108 }
109
110 static ssize_t seq_watermark_write(struct file *file,
111                                    const char __user *buffer, size_t count,
112                                    loff_t *off)
113 {
114         __s64 value;
115         __u64 watermark;
116         __u64 *data = ((struct seq_file *)file->private_data)->private;
117         bool wm_low = (data == &ldlm_reclaim_threshold_mb) ? true : false;
118         int rc;
119
120         rc = lprocfs_str_with_units_to_s64(buffer, count, &value, 'M');
121         if (rc) {
122                 CERROR("Failed to set %s, rc = %d.\n",
123                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb",
124                        rc);
125                 return rc;
126         } else if (value != 0 && value < (1 << 20)) {
127                 CERROR("%s should be greater than 1MB.\n",
128                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb");
129                 return -EINVAL;
130         }
131         watermark = value >> 20;
132
133         if (wm_low) {
134                 if (ldlm_lock_limit_mb != 0 && watermark > ldlm_lock_limit_mb) {
135                         CERROR("lock_reclaim_threshold_mb must be smaller than "
136                                "lock_limit_mb.\n");
137                         return -EINVAL;
138                 }
139
140                 *data = watermark;
141                 if (watermark != 0) {
142                         watermark <<= 20;
143                         do_div(watermark, sizeof(struct ldlm_lock));
144                 }
145                 ldlm_reclaim_threshold = watermark;
146         } else {
147                 if (ldlm_reclaim_threshold_mb != 0 &&
148                     watermark < ldlm_reclaim_threshold_mb) {
149                         CERROR("lock_limit_mb must be greater than "
150                                "lock_reclaim_threshold_mb.\n");
151                         return -EINVAL;
152                 }
153
154                 *data = watermark;
155                 if (watermark != 0) {
156                         watermark <<= 20;
157                         do_div(watermark, sizeof(struct ldlm_lock));
158                 }
159                 ldlm_lock_limit = watermark;
160         }
161
162         return count;
163 }
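/*
 * Worked example (illustrative; no particular lock size is assumed): the
 * value written is parsed with 'M' as the default unit, so a bare "1024"
 * means 1024 MB.  The MB count (value >> 20) is stored in
 * ldlm_reclaim_threshold_mb or ldlm_lock_limit_mb, while the byte count
 * divided by sizeof(struct ldlm_lock) becomes the corresponding lock
 * count limit.  If a lock were around 512 bytes, 1024 MB would map to
 * roughly two million locks.
 */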
164
165 static int seq_watermark_open(struct inode *inode, struct file *file)
166 {
167         return single_open(file, seq_watermark_show, inode->i_private);
168 }
169
170 static const struct file_operations ldlm_watermark_fops = {
171         .owner          = THIS_MODULE,
172         .open           = seq_watermark_open,
173         .read           = seq_read,
174         .write          = seq_watermark_write,
175         .llseek         = seq_lseek,
176         .release        = lprocfs_single_release,
177 };
178
179 static int seq_granted_show(struct seq_file *m, void *data)
180 {
181         seq_printf(m, "%llu\n", percpu_counter_sum_positive(
182                    (struct percpu_counter *)m->private));
183         return 0;
184 }
185
186 static int seq_granted_open(struct inode *inode, struct file *file)
187 {
188         return single_open(file, seq_granted_show, inode->i_private);
189 }
190
191 static const struct file_operations ldlm_granted_fops = {
192         .owner  = THIS_MODULE,
193         .open   = seq_granted_open,
194         .read   = seq_read,
195         .llseek = seq_lseek,
196         .release = seq_release,
197 };
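/*
 * "lock_granted_count" is backed by the ldlm_granted_total per-CPU
 * counter; the show handler sums it with percpu_counter_sum_positive(),
 * so the value read is a snapshot that may lag concurrent grant and
 * cancel activity slightly.
 */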
198
199 #endif /* HAVE_SERVER_SUPPORT */
200
201 static struct lprocfs_vars ldlm_debugfs_list[] = {
202         { .name =       "dump_namespaces",
203           .fops =       &ldlm_dump_ns_fops,
204           .proc_mode =  0222 },
205         { .name =       "dump_granted_max",
206           .fops =       &ldlm_rw_uint_fops,
207           .data =       &ldlm_dump_granted_max },
208 #ifdef HAVE_SERVER_SUPPORT
209         { .name =       "lock_reclaim_threshold_mb",
210           .fops =       &ldlm_watermark_fops,
211           .data =       &ldlm_reclaim_threshold_mb },
212         { .name =       "lock_limit_mb",
213           .fops =       &ldlm_watermark_fops,
214           .data =       &ldlm_lock_limit_mb },
215         { .name =       "lock_granted_count",
216           .fops =       &ldlm_granted_fops,
217           .data =       &ldlm_granted_total },
218 #endif
219         { NULL }
220 };
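/*
 * These entries end up under the top-level LDLM debugfs directory
 * (typically <debugfs mount>/lustre/ldlm/, depending on where debugfs is
 * mounted): "dump_namespaces" is write-only (0222), "dump_granted_max"
 * is read/write, and the watermark and granted-count files exist only
 * when built with server support.
 */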
221
222 int ldlm_debugfs_setup(void)
223 {
224         int rc;
225
226         ENTRY;
227         ldlm_debugfs_dir = ldebugfs_register(OBD_LDLM_DEVICENAME,
228                                              debugfs_lustre_root,
229                                              NULL, NULL);
230         if (IS_ERR_OR_NULL(ldlm_debugfs_dir)) {
231                 CERROR("LDebugFS failed in ldlm-init\n");
232                 rc = ldlm_debugfs_dir ? PTR_ERR(ldlm_debugfs_dir) : -ENOMEM;
233                 ldlm_debugfs_dir = NULL;
234                 GOTO(err, rc);
235         }
236
237         ldlm_ns_debugfs_dir = ldebugfs_register("namespaces",
238                                                 ldlm_debugfs_dir,
239                                                 NULL, NULL);
240         if (IS_ERR_OR_NULL(ldlm_ns_debugfs_dir)) {
241                 CERROR("LProcFS failed in ldlm-init\n");
242                 rc = ldlm_ns_debugfs_dir ? PTR_ERR(ldlm_ns_debugfs_dir)
243                                          : -ENOMEM;
244                 GOTO(err, rc);
245         }
246
247         ldlm_svc_debugfs_dir = ldebugfs_register("services",
248                                                  ldlm_debugfs_dir,
249                                                  NULL, NULL);
250         if (IS_ERR_OR_NULL(ldlm_svc_debugfs_dir)) {
251                 CERROR("LProcFS failed in ldlm-init\n");
252                 rc = ldlm_svc_debugfs_dir ? PTR_ERR(ldlm_svc_debugfs_dir)
253                                           : -ENOMEM;
254                 GOTO(err, rc);
255         }
256
257         rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
258         if (rc != 0) {
259                 CERROR("LProcFS failed in ldlm-init\n");
260                 GOTO(err, rc);
261         }
262
263         RETURN(0);
264
265 err:
266         debugfs_remove_recursive(ldlm_debugfs_dir);
267         ldlm_svc_debugfs_dir = NULL;
268         ldlm_ns_debugfs_dir = NULL;
269         ldlm_debugfs_dir = NULL;
270         RETURN(rc);
271 }
272
273 void ldlm_debugfs_cleanup(void)
274 {
275         debugfs_remove_recursive(ldlm_debugfs_dir);
276
277         ldlm_svc_debugfs_dir = NULL;
278         ldlm_ns_debugfs_dir = NULL;
279         ldlm_debugfs_dir = NULL;
280 }
281
282 static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr,
283                                    char *buf)
284 {
285         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
286                                                  ns_kobj);
287         __u64                   res = 0;
288         int                     i;
289
290         /* result is not strictly consistent */
291         for (i = 0; i < (1 << ns->ns_bucket_bits); i++)
292                 res += atomic_read(&ns->ns_rs_buckets[i].nsb_count);
293         return sprintf(buf, "%lld\n", res);
294 }
295 LUSTRE_RO_ATTR(resource_count);
296
297 static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr,
298                                char *buf)
299 {
300         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
301                                                  ns_kobj);
302         __u64                   locks;
303
304         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
305                                         LPROCFS_FIELDS_FLAGS_SUM);
306         return sprintf(buf, "%lld\n", locks);
307 }
308 LUSTRE_RO_ATTR(lock_count);
309
310 static ssize_t lock_unused_count_show(struct kobject *kobj,
311                                       struct attribute *attr,
312                                       char *buf)
313 {
314         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
315                                                  ns_kobj);
316
317         return sprintf(buf, "%d\n", ns->ns_nr_unused);
318 }
319 LUSTRE_RO_ATTR(lock_unused_count);
320
321 static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr,
322                              char *buf)
323 {
324         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
325                                                  ns_kobj);
326         __u32 *nr = &ns->ns_max_unused;
327
328         if (ns_connect_lru_resize(ns))
329                 nr = &ns->ns_nr_unused;
330         return sprintf(buf, "%u\n", *nr);
331 }
332
333 static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
334                               const char *buffer, size_t count)
335 {
336         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
337                                                  ns_kobj);
338         unsigned long tmp;
339         int lru_resize;
340         int err;
341
342         if (strncmp(buffer, "clear", 5) == 0) {
343                 CDEBUG(D_DLMTRACE,
344                        "dropping all unused locks from namespace %s\n",
345                        ldlm_ns_name(ns));
346                 if (ns_connect_lru_resize(ns)) {
347                         /* Try to cancel all @ns_nr_unused locks. */
348                         ldlm_cancel_lru(ns, ns->ns_nr_unused, 0,
349                                         LDLM_LRU_FLAG_PASSED |
350                                         LDLM_LRU_FLAG_CLEANUP);
351                 } else {
352                         tmp = ns->ns_max_unused;
353                         ns->ns_max_unused = 0;
354                         ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED |
355                                         LDLM_LRU_FLAG_CLEANUP);
356                         ns->ns_max_unused = tmp;
357                 }
358                 return count;
359         }
360
361         err = kstrtoul(buffer, 10, &tmp);
362         if (err != 0) {
363                 CERROR("lru_size: invalid value written\n");
364                 return -EINVAL;
365         }
366         lru_resize = (tmp == 0);
367
368         if (ns_connect_lru_resize(ns)) {
369                 if (!lru_resize)
370                         ns->ns_max_unused = (unsigned int)tmp;
371
372                 if (tmp > ns->ns_nr_unused)
373                         tmp = ns->ns_nr_unused;
374                 tmp = ns->ns_nr_unused - tmp;
375
376                 CDEBUG(D_DLMTRACE,
377                        "changing namespace %s unused locks from %u to %u\n",
378                        ldlm_ns_name(ns), ns->ns_nr_unused,
379                        (unsigned int)tmp);
380                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
381
382                 if (!lru_resize) {
383                         CDEBUG(D_DLMTRACE,
384                                "disable lru_resize for namespace %s\n",
385                                ldlm_ns_name(ns));
386                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
387                 }
388         } else {
389                 CDEBUG(D_DLMTRACE,
390                        "changing namespace %s max_unused from %u to %u\n",
391                        ldlm_ns_name(ns), ns->ns_max_unused,
392                        (unsigned int)tmp);
393                 ns->ns_max_unused = (unsigned int)tmp;
394                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
395
396                 /* Make sure that LRU resize was originally supported before
397                  * turning it on here.
398                  */
399                 if (lru_resize &&
400                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
401                         CDEBUG(D_DLMTRACE,
402                                "enable lru_resize for namespace %s\n",
403                                ldlm_ns_name(ns));
404                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
405                 }
406         }
407
408         return count;
409 }
410 LUSTRE_RW_ATTR(lru_size);
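/*
 * lru_size write semantics, as implemented above:
 *   - "clear" cancels all currently unused locks in the namespace;
 *   - a value N > 0 pins ns_max_unused to N and, if LRU resize was in
 *     effect, disables it;
 *   - a value of 0 re-enables LRU resize, provided the server originally
 *     advertised OBD_CONNECT_LRU_RESIZE.
 */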
411
412 static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
413                                 char *buf)
414 {
415         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
416                                                  ns_kobj);
417
418         return sprintf(buf, "%lld\n", ktime_to_ms(ns->ns_max_age));
419 }
420
421 static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr,
422                                  const char *buffer, size_t count)
423 {
424         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
425                                                  ns_kobj);
426         int scale = NSEC_PER_MSEC;
427         unsigned long long tmp;
428         char *buf;
429
430         /* Did the user ask in seconds or milliseconds? Default is in ms. */
431         buf = strstr(buffer, "ms");
432         if (!buf) {
433                 buf = strchr(buffer, 's');
434                 if (buf)
435                         scale = NSEC_PER_SEC;
436         }
437
438         if (buf)
439                 *buf = '\0';
440
441         if (kstrtoull(buffer, 10, &tmp))
442                 return -EINVAL;
443
444         ns->ns_max_age = ktime_set(0, tmp * scale);
445
446         return count;
447 }
448 LUSTRE_RW_ATTR(lru_max_age);
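/*
 * lru_max_age accepts a bare number of milliseconds or a value with an
 * "ms" or "s" suffix; for example, writing "10s" is equivalent to
 * writing "10000".  The value is stored internally as a ktime and
 * reported back in milliseconds by the show handler.
 */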
449
450 static ssize_t early_lock_cancel_show(struct kobject *kobj,
451                                       struct attribute *attr,
452                                       char *buf)
453 {
454         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
455                                                  ns_kobj);
456
457         return sprintf(buf, "%d\n", ns_connect_cancelset(ns));
458 }
459
460 static ssize_t early_lock_cancel_store(struct kobject *kobj,
461                                        struct attribute *attr,
462                                        const char *buffer,
463                                        size_t count)
464 {
465         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
466                                                  ns_kobj);
467         unsigned long supp = -1;
468         int rc;
469
470         rc = kstrtoul(buffer, 10, &supp);
471         if (rc < 0)
472                 return rc;
473
474         if (supp == 0)
475                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
476         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
477                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
478         return count;
479 }
480 LUSTRE_RW_ATTR(early_lock_cancel);
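/*
 * early_lock_cancel can only be turned back on if the server originally
 * advertised OBD_CONNECT_CANCELSET: writing 0 always clears the flag,
 * while writing a non-zero value is a no-op when the original connect
 * flags lack that bit.
 */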
481
482 static ssize_t dirty_age_limit_show(struct kobject *kobj,
483                                     struct attribute *attr, char *buf)
484 {
485         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
486                                                  ns_kobj);
487
488         return sprintf(buf, "%llu\n", ns->ns_dirty_age_limit);
489 }
490
491 static ssize_t dirty_age_limit_store(struct kobject *kobj,
492                                      struct attribute *attr,
493                                      const char *buffer, size_t count)
494 {
495         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
496                                                  ns_kobj);
497         unsigned long long tmp;
498
499         if (kstrtoull(buffer, 10, &tmp))
500                 return -EINVAL;
501
502         ns->ns_dirty_age_limit = tmp;
503
504         return count;
505 }
506 LUSTRE_RW_ATTR(dirty_age_limit);
507
508 #ifdef HAVE_SERVER_SUPPORT
509 static ssize_t ctime_age_limit_show(struct kobject *kobj,
510                                     struct attribute *attr, char *buf)
511 {
512         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
513                                                  ns_kobj);
514
515         return sprintf(buf, "%llu\n", ns->ns_ctime_age_limit);
516 }
517
518 static ssize_t ctime_age_limit_store(struct kobject *kobj,
519                                      struct attribute *attr,
520                                      const char *buffer, size_t count)
521 {
522         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
523                                                  ns_kobj);
524         unsigned long long tmp;
525
526         if (kstrtoull(buffer, 10, &tmp))
527                 return -EINVAL;
528
529         ns->ns_ctime_age_limit = tmp;
530
531         return count;
532 }
533 LUSTRE_RW_ATTR(ctime_age_limit);
534
535 static ssize_t lock_timeouts_show(struct kobject *kobj, struct attribute *attr,
536                                   char *buf)
537 {
538         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
539                                                  ns_kobj);
540
541         return sprintf(buf, "%d\n", ns->ns_timeouts);
542 }
543 LUSTRE_RO_ATTR(lock_timeouts);
544
545 static ssize_t max_nolock_bytes_show(struct kobject *kobj,
546                                      struct attribute *attr, char *buf)
547 {
548         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
549                                                  ns_kobj);
550
551         return sprintf(buf, "%u\n", ns->ns_max_nolock_size);
552 }
553
554 static ssize_t max_nolock_bytes_store(struct kobject *kobj,
555                                       struct attribute *attr,
556                                       const char *buffer, size_t count)
557 {
558         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
559                                                  ns_kobj);
560         unsigned long tmp;
561         int err;
562
563         err = kstrtoul(buffer, 10, &tmp);
564         if (err != 0)
565                 return -EINVAL;
566
567         ns->ns_max_nolock_size = tmp;
568
569         return count;
570 }
571 LUSTRE_RW_ATTR(max_nolock_bytes);
572
573 static ssize_t contention_seconds_show(struct kobject *kobj,
574                                        struct attribute *attr, char *buf)
575 {
576         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
577                                                  ns_kobj);
578
579         return sprintf(buf, "%llu\n", ns->ns_contention_time);
580 }
581
582 static ssize_t contention_seconds_store(struct kobject *kobj,
583                                         struct attribute *attr,
584                                         const char *buffer, size_t count)
585 {
586         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
587                                                  ns_kobj);
588         unsigned long long tmp;
589
590         if (kstrtoull(buffer, 10, &tmp))
591                 return -EINVAL;
592
593         ns->ns_contention_time = tmp;
594
595         return count;
596 }
597 LUSTRE_RW_ATTR(contention_seconds);
598
599 static ssize_t contended_locks_show(struct kobject *kobj,
600                                     struct attribute *attr, char *buf)
601 {
602         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
603                                                  ns_kobj);
604
605         return sprintf(buf, "%u\n", ns->ns_contended_locks);
606 }
607
608 static ssize_t contended_locks_store(struct kobject *kobj,
609                                      struct attribute *attr,
610                                      const char *buffer, size_t count)
611 {
612         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
613                                                  ns_kobj);
614         unsigned long tmp;
615         int err;
616
617         err = kstrtoul(buffer, 10, &tmp);
618         if (err != 0)
619                 return -EINVAL;
620
621         ns->ns_contended_locks = tmp;
622
623         return count;
624 }
625 LUSTRE_RW_ATTR(contended_locks);
626
627 static ssize_t max_parallel_ast_show(struct kobject *kobj,
628                                      struct attribute *attr, char *buf)
629 {
630         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
631                                                  ns_kobj);
632
633         return sprintf(buf, "%u\n", ns->ns_max_parallel_ast);
634 }
635
636 static ssize_t max_parallel_ast_store(struct kobject *kobj,
637                                       struct attribute *attr,
638                                       const char *buffer, size_t count)
639 {
640         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
641                                                  ns_kobj);
642         unsigned long tmp;
643         int err;
644
645         err = kstrtoul(buffer, 10, &tmp);
646         if (err != 0)
647                 return -EINVAL;
648
649         ns->ns_max_parallel_ast = tmp;
650
651         return count;
652 }
653 LUSTRE_RW_ATTR(max_parallel_ast);
654
655 #endif /* HAVE_SERVER_SUPPORT */
656
657 /* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */
658 static struct attribute *ldlm_ns_attrs[] = {
659         &lustre_attr_resource_count.attr,
660         &lustre_attr_lock_count.attr,
661         &lustre_attr_lock_unused_count.attr,
662         &lustre_attr_lru_size.attr,
663         &lustre_attr_lru_max_age.attr,
664         &lustre_attr_early_lock_cancel.attr,
665         &lustre_attr_dirty_age_limit.attr,
666 #ifdef HAVE_SERVER_SUPPORT
667         &lustre_attr_ctime_age_limit.attr,
668         &lustre_attr_lock_timeouts.attr,
669         &lustre_attr_max_nolock_bytes.attr,
670         &lustre_attr_contention_seconds.attr,
671         &lustre_attr_contended_locks.attr,
672         &lustre_attr_max_parallel_ast.attr,
673 #endif
674         NULL,
675 };
676
677 static void ldlm_ns_release(struct kobject *kobj)
678 {
679         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
680                                                  ns_kobj);
681         complete(&ns->ns_kobj_unregister);
682 }
683
684 static struct kobj_type ldlm_ns_ktype = {
685         .default_attrs  = ldlm_ns_attrs,
686         .sysfs_ops      = &lustre_sysfs_ops,
687         .release        = ldlm_ns_release,
688 };
689
690 static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns)
691 {
692         if (IS_ERR_OR_NULL(ns->ns_debugfs_entry))
693                 CERROR("dlm namespace %s has no procfs dir?\n",
694                        ldlm_ns_name(ns));
695         else
696                 debugfs_remove_recursive(ns->ns_debugfs_entry);
697
698         if (ns->ns_stats != NULL)
699                 lprocfs_free_stats(&ns->ns_stats);
700 }
701
702 void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns)
703 {
704         kobject_put(&ns->ns_kobj);
705         wait_for_completion(&ns->ns_kobj_unregister);
706 }
707
708 int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
709 {
710         int err;
711
712         ns->ns_kobj.kset = ldlm_ns_kset;
713         init_completion(&ns->ns_kobj_unregister);
714         err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL,
715                                    "%s", ldlm_ns_name(ns));
716
717         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
718         if (!ns->ns_stats) {
719                 kobject_put(&ns->ns_kobj);
720                 return -ENOMEM;
721         }
722
723         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
724                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
725
726         return err;
727 }
728
729 static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
730 {
731         struct dentry *ns_entry;
732
733         if (!IS_ERR_OR_NULL(ns->ns_debugfs_entry)) {
734                 ns_entry = ns->ns_debugfs_entry;
735         } else {
736                 ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
737                                               ldlm_ns_debugfs_dir);
738                 if (!ns_entry)
739                         return -ENOMEM;
740                 ns->ns_debugfs_entry = ns_entry;
741         }
742
743         return 0;
744 }
745 #undef MAX_STRING_SIZE
746
747 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
748                                   const void *key, unsigned int mask)
749 {
750         const struct ldlm_res_id *id = key;
751         unsigned int val = 0;
752         unsigned int i;
753
754         for (i = 0; i < RES_NAME_SIZE; i++)
755                 val += id->name[i];
756         return val & mask;
757 }
758
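/*
 * FID-based hash of a resource name: the res_id words at the SEQ and
 * VER/OID offsets are reassembled into a struct lu_fid and flattened to
 * 32 bits; if a separate hash value is present at LUSTRE_RES_ID_HSH_OFF
 * it is mixed in instead of the OID, and the result is folded down to
 * the requested number of bits with cfs_hash_32().
 */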
759 static unsigned int ldlm_res_hop_fid_hash(const struct ldlm_res_id *id, unsigned int bits)
760 {
761         struct lu_fid       fid;
762         __u32               hash;
763         __u32               val;
764
765         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
766         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
767         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
768
769         hash = fid_flatten32(&fid);
770         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
771         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
772                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
773         } else {
774                 val = fid_oid(&fid);
775         }
776         hash += (val >> 5) + (val << 11);
777         return cfs_hash_32(hash, bits);
778 }
779
780 static void *ldlm_res_hop_key(struct hlist_node *hnode)
781 {
782         struct ldlm_resource   *res;
783
784         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
785         return &res->lr_name;
786 }
787
788 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
789 {
790         struct ldlm_resource   *res;
791
792         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
793         return ldlm_res_eq((const struct ldlm_res_id *)key,
794                            (const struct ldlm_res_id *)&res->lr_name);
795 }
796
797 static void *ldlm_res_hop_object(struct hlist_node *hnode)
798 {
799         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
800 }
801
802 static void
803 ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
804 {
805         struct ldlm_resource *res;
806
807         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
808         ldlm_resource_getref(res);
809 }
810
811 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
812 {
813         struct ldlm_resource *res;
814
815         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
816         ldlm_resource_putref(res);
817 }
818
819 static struct cfs_hash_ops ldlm_ns_hash_ops = {
820         .hs_hash        = ldlm_res_hop_hash,
821         .hs_key         = ldlm_res_hop_key,
822         .hs_keycmp      = ldlm_res_hop_keycmp,
823         .hs_keycpy      = NULL,
824         .hs_object      = ldlm_res_hop_object,
825         .hs_get         = ldlm_res_hop_get_locked,
826         .hs_put         = ldlm_res_hop_put
827 };
828
829 typedef struct ldlm_ns_hash_def {
830         enum ldlm_ns_type       nsd_type;
831         /** hash bucket bits */
832         unsigned                nsd_bkt_bits;
833         /** hash bits */
834         unsigned                nsd_all_bits;
835         /** hash operations */
836         struct cfs_hash_ops *nsd_hops;
837 } ldlm_ns_hash_def_t;
838
839 static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] =
840 {
841         {
842                 .nsd_type       = LDLM_NS_TYPE_MDC,
843                 .nsd_bkt_bits   = 11,
844                 .nsd_all_bits   = 16,
845                 .nsd_hops       = &ldlm_ns_hash_ops,
846         },
847         {
848                 .nsd_type       = LDLM_NS_TYPE_MDT,
849                 .nsd_bkt_bits   = 14,
850                 .nsd_all_bits   = 21,
851                 .nsd_hops       = &ldlm_ns_hash_ops,
852         },
853         {
854                 .nsd_type       = LDLM_NS_TYPE_OSC,
855                 .nsd_bkt_bits   = 8,
856                 .nsd_all_bits   = 12,
857                 .nsd_hops       = &ldlm_ns_hash_ops,
858         },
859         {
860                 .nsd_type       = LDLM_NS_TYPE_OST,
861                 .nsd_bkt_bits   = 11,
862                 .nsd_all_bits   = 17,
863                 .nsd_hops       = &ldlm_ns_hash_ops,
864         },
865         {
866                 .nsd_type       = LDLM_NS_TYPE_MGC,
867                 .nsd_bkt_bits   = 3,
868                 .nsd_all_bits   = 4,
869                 .nsd_hops       = &ldlm_ns_hash_ops,
870         },
871         {
872                 .nsd_type       = LDLM_NS_TYPE_MGT,
873                 .nsd_bkt_bits   = 3,
874                 .nsd_all_bits   = 4,
875                 .nsd_hops       = &ldlm_ns_hash_ops,
876         },
877         {
878                 .nsd_type       = LDLM_NS_TYPE_UNKNOWN,
879         },
880 };
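/*
 * For each namespace type, nsd_all_bits sizes the resource hash and
 * nsd_bkt_bits sizes its cfs_hash buckets; the difference becomes
 * ns_bucket_bits in ldlm_namespace_new() below, so e.g. LDLM_NS_TYPE_MDT
 * gets 2^(21 - 14) = 128 ldlm_ns_bucket entries, each with its own AT
 * estimate and resource counter.
 */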
881
882 /**
883  * Create and initialize new empty namespace.
884  */
885 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
886                                           enum ldlm_side client,
887                                           enum ldlm_appetite apt,
888                                           enum ldlm_ns_type ns_type)
889 {
890         struct ldlm_namespace *ns = NULL;
891         struct ldlm_ns_hash_def *nsd;
892         int idx;
893         int rc;
894
895         ENTRY;
896         LASSERT(obd != NULL);
897
898         rc = ldlm_get_ref();
899         if (rc) {
900                 CERROR("ldlm_get_ref failed: %d\n", rc);
901                 RETURN(NULL);
902         }
903
904         for (idx = 0; ; idx++) {
905                 nsd = &ldlm_ns_hash_defs[idx];
906                 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
907                         CERROR("Unknown type %d for ns %s\n", ns_type, name);
908                         GOTO(out_ref, NULL);
909                 }
910
911                 if (nsd->nsd_type == ns_type)
912                         break;
913         }
914
915         OBD_ALLOC_PTR(ns);
916         if (!ns)
917                 GOTO(out_ref, NULL);
918
919         ns->ns_rs_hash = cfs_hash_create(name,
920                                          nsd->nsd_all_bits, nsd->nsd_all_bits,
921                                          nsd->nsd_bkt_bits, 0,
922                                          CFS_HASH_MIN_THETA,
923                                          CFS_HASH_MAX_THETA,
924                                          nsd->nsd_hops,
925                                          CFS_HASH_DEPTH |
926                                          CFS_HASH_BIGNAME |
927                                          CFS_HASH_SPIN_BKTLOCK |
928                                          CFS_HASH_NO_ITEMREF);
929         if (ns->ns_rs_hash == NULL)
930                 GOTO(out_ns, NULL);
931
932         ns->ns_bucket_bits = nsd->nsd_all_bits - nsd->nsd_bkt_bits;
933         OBD_ALLOC_LARGE(ns->ns_rs_buckets,
934                         BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
935         if (!ns->ns_rs_buckets)
936                 goto out_hash;
937
938         for (idx = 0; idx < (1 << ns->ns_bucket_bits); idx++) {
939                 struct ldlm_ns_bucket *nsb = &ns->ns_rs_buckets[idx];
940
941                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
942                 nsb->nsb_namespace = ns;
943                 nsb->nsb_reclaim_start = 0;
944                 atomic_set(&nsb->nsb_count, 0);
945         }
946
947         ns->ns_obd = obd;
948         ns->ns_appetite = apt;
949         ns->ns_client = client;
950         ns->ns_name = kstrdup(name, GFP_KERNEL);
951         if (!ns->ns_name)
952                 goto out_hash;
953
954         INIT_LIST_HEAD(&ns->ns_list_chain);
955         INIT_LIST_HEAD(&ns->ns_unused_list);
956         spin_lock_init(&ns->ns_lock);
957         atomic_set(&ns->ns_bref, 0);
958         init_waitqueue_head(&ns->ns_waitq);
959
960         ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
961         ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
962         ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
963
964         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
965         ns->ns_nr_unused          = 0;
966         ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
967         ns->ns_max_age            = ktime_set(LDLM_DEFAULT_MAX_ALIVE, 0);
968         ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
969         ns->ns_dirty_age_limit    = LDLM_DIRTY_AGE_LIMIT;
970         ns->ns_timeouts           = 0;
971         ns->ns_orig_connect_flags = 0;
972         ns->ns_connect_flags      = 0;
973         ns->ns_stopping           = 0;
974         ns->ns_reclaim_start      = 0;
975         ns->ns_last_pos           = &ns->ns_unused_list;
976
977         rc = ldlm_namespace_sysfs_register(ns);
978         if (rc) {
979                 CERROR("Can't initialize ns sysfs, rc %d\n", rc);
980                 GOTO(out_hash, rc);
981         }
982
983         rc = ldlm_namespace_debugfs_register(ns);
984         if (rc) {
985                 CERROR("Can't initialize ns proc, rc %d\n", rc);
986                 GOTO(out_sysfs, rc);
987         }
988
989         idx = ldlm_namespace_nr_read(client);
990         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
991         if (rc) {
992                 CERROR("Can't initialize lock pool, rc %d\n", rc);
993                 GOTO(out_proc, rc);
994         }
995
996         ldlm_namespace_register(ns, client);
997         RETURN(ns);
998 out_proc:
999         ldlm_namespace_debugfs_unregister(ns);
1000 out_sysfs:
1001         ldlm_namespace_sysfs_unregister(ns);
1002         ldlm_namespace_cleanup(ns, 0);
1003 out_hash:
1004         OBD_FREE_LARGE(ns->ns_rs_buckets,
1005                        BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
1006         kfree(ns->ns_name);
1007         cfs_hash_putref(ns->ns_rs_hash);
1008 out_ns:
1009         OBD_FREE_PTR(ns);
1010 out_ref:
1011         ldlm_put_ref();
1012         RETURN(NULL);
1013 }
1014 EXPORT_SYMBOL(ldlm_namespace_new);
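/*
 * Illustrative usage only (the obd device and the names below are made
 * up, not taken from real call sites):
 *
 *	struct ldlm_namespace *ns;
 *
 *	ns = ldlm_namespace_new(obd, "example-OST0000",
 *				LDLM_NAMESPACE_SERVER,
 *				LDLM_NAMESPACE_GREEDY,
 *				LDLM_NS_TYPE_OST);
 *	if (ns == NULL)
 *		return -ENOMEM;
 *	...
 *	ldlm_namespace_free(ns, NULL, 1);
 *
 * Note that ldlm_namespace_new() returns NULL on failure rather than an
 * ERR_PTR() value.
 */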
1015
1016 /**
1017  * Cancel and destroy all locks on a resource.
1018  *
1019  * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
1020  * clean up.  This is currently only used for recovery, and we make
1021  * certain assumptions as a result--notably, that we shouldn't cancel
1022  * locks with refs.
1023  */
1024 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
1025                              __u64 flags)
1026 {
1027         struct list_head *tmp;
1028         int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
1029         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
1030
1031         do {
1032                 struct ldlm_lock *lock = NULL;
1033
1034                 /* First, look for a lock that has not been cleaned up yet;
1035                  * all cleaned locks are marked by the CLEANED flag. */
1036                 lock_res(res);
1037                 list_for_each(tmp, q) {
1038                         lock = list_entry(tmp, struct ldlm_lock,
1039                                           l_res_link);
1040                         if (ldlm_is_cleaned(lock)) {
1041                                 lock = NULL;
1042                                 continue;
1043                         }
1044                         LDLM_LOCK_GET(lock);
1045                         ldlm_set_cleaned(lock);
1046                         break;
1047                 }
1048
1049                 if (lock == NULL) {
1050                         unlock_res(res);
1051                         break;
1052                 }
1053
1054                 /* Set CBPENDING so nothing in the cancellation path
1055                  * can match this lock. */
1056                 ldlm_set_cbpending(lock);
1057                 ldlm_set_failed(lock);
1058                 lock->l_flags |= flags;
1059
1060                 /* ... without sending a CANCEL message for local_only. */
1061                 if (local_only)
1062                         ldlm_set_local_only(lock);
1063
1064                 if (local_only && (lock->l_readers || lock->l_writers)) {
1065                         /*
1066                          * This is a little bit gross, but much better than the
1067                          * alternative: pretend that we got a blocking AST from
1068                          * the server, so that when the lock is decref'd, it
1069                          * will go away ...
1070                          */
1071                         unlock_res(res);
1072                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
1073                         if (lock->l_flags & LDLM_FL_FAIL_LOC) {
1074                                 set_current_state(TASK_UNINTERRUPTIBLE);
1075                                 schedule_timeout(cfs_time_seconds(4));
1076                                 set_current_state(TASK_RUNNING);
1077                         }
1078                         if (lock->l_completion_ast)
1079                                 lock->l_completion_ast(lock,
1080                                                        LDLM_FL_FAILED, NULL);
1081                         LDLM_LOCK_RELEASE(lock);
1082                         continue;
1083                 }
1084
1085                 if (client) {
1086                         struct lustre_handle lockh;
1087
1088                         unlock_res(res);
1089                         ldlm_lock2handle(lock, &lockh);
1090                         rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
1091                         if (rc)
1092                                 CERROR("ldlm_cli_cancel: %d\n", rc);
1093                 } else {
1094                         unlock_res(res);
1095                         LDLM_DEBUG(lock,
1096                                    "Freeing a lock still held by a client node");
1097                         ldlm_lock_cancel(lock);
1098                 }
1099                 LDLM_LOCK_RELEASE(lock);
1100         } while (1);
1101 }
1102
1103 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1104                                struct hlist_node *hnode, void *arg)
1105 {
1106         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1107         __u64 flags = *(__u64 *)arg;
1108
1109         cleanup_resource(res, &res->lr_granted, flags);
1110         cleanup_resource(res, &res->lr_waiting, flags);
1111
1112         return 0;
1113 }
1114
1115 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1116                                   struct hlist_node *hnode, void *arg)
1117 {
1118         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
1119
1120         lock_res(res);
1121         CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
1122                "(%d) after lock cleanup; forcing cleanup.\n",
1123                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
1124                atomic_read(&res->lr_refcount) - 1);
1125
1126         /* Use D_NETERROR since it is in the default mask */
1127         ldlm_resource_dump(D_NETERROR, res);
1128         unlock_res(res);
1129         return 0;
1130 }
1131
1132 /**
1133  * Cancel and destroy all locks in the namespace.
1134  *
1135  * Typically used during evictions, when the server has notified the client
1136  * that it was evicted and all of its state needs to be destroyed.
1137  * Also used during shutdown.
1138  */
1139 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
1140 {
1141         if (ns == NULL) {
1142                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
1143                 return ELDLM_OK;
1144         }
1145
1146         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
1147                                  &flags, 0);
1148         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
1149                                  NULL, 0);
1150         return ELDLM_OK;
1151 }
1152 EXPORT_SYMBOL(ldlm_namespace_cleanup);
1153
1154 /**
1155  * Attempts to free namespace.
1156  *
1157  * Only used when namespace goes away, like during an unmount.
1158  */
1159 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
1160 {
1161         ENTRY;
1162
1163         /* At shutdown time, don't call the cancellation callback */
1164         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
1165
1166         if (atomic_read(&ns->ns_bref) > 0) {
1167                 int rc;
1168                 CDEBUG(D_DLMTRACE,
1169                        "dlm namespace %s free waiting on refcount %d\n",
1170                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
1171 force_wait:
1172                 if (force)
1173                         rc = wait_event_idle_timeout(
1174                                 ns->ns_waitq,
1175                                 atomic_read(&ns->ns_bref) == 0,
1176                                 cfs_time_seconds(1) / 4);
1177                 else
1178                         rc = l_wait_event_abortable(
1179                                 ns->ns_waitq, atomic_read(&ns->ns_bref) == 0);
1180
1181                 /* Forced cleanups should be able to reclaim all references,
1182                  * so it's safe to wait forever... we can't leak locks... */
1183                 if (force && rc == 0) {
1184                         rc = -ETIMEDOUT;
1185                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
1186                                        "namespace with %d resources in use, "
1187                                        "(rc=%d)\n", ldlm_ns_name(ns),
1188                                        atomic_read(&ns->ns_bref), rc);
1189                         GOTO(force_wait, rc);
1190                 }
1191
1192                 if (atomic_read(&ns->ns_bref)) {
1193                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
1194                                        "with %d resources in use, (rc=%d)\n",
1195                                        ldlm_ns_name(ns),
1196                                        atomic_read(&ns->ns_bref), rc);
1197                         RETURN(ELDLM_NAMESPACE_EXISTS);
1198                 }
1199                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
1200                        ldlm_ns_name(ns));
1201         }
1202
1203         RETURN(ELDLM_OK);
1204 }
1205
1206 /**
1207  * Performs various cleanups on the passed \a ns to make it drop its refcount
1208  * and be ready for freeing. Waits for the refcount to reach 0.
1209  *
1210  * The following is done:
1211  * (0) Unregister \a ns from its list to make it inaccessible to potential
1212  * users such as the pools thread;
1213  * (1) Clear all locks in \a ns.
1214  */
1215 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
1216                                struct obd_import *imp,
1217                                int force)
1218 {
1219         int rc;
1220
1221         ENTRY;
1222         if (!ns) {
1223                 EXIT;
1224                 return;
1225         }
1226
1227         spin_lock(&ns->ns_lock);
1228         ns->ns_stopping = 1;
1229         spin_unlock(&ns->ns_lock);
1230
1231         /*
1232          * Can fail with -EINTR when force == 0 in which case try harder.
1233          */
1234         rc = __ldlm_namespace_free(ns, force);
1235         if (rc != ELDLM_OK) {
1236                 if (imp) {
1237                         ptlrpc_disconnect_import(imp, 0);
1238                         ptlrpc_invalidate_import(imp);
1239                 }
1240
1241                 /*
1242                  * With all requests dropped and the import inactive
1243                  * we are guaranteed all references will be dropped.
1244                  */
1245                 rc = __ldlm_namespace_free(ns, 1);
1246                 LASSERT(rc == 0);
1247         }
1248         EXIT;
1249 }
1250 EXPORT_SYMBOL(ldlm_namespace_free_prior);
1251
1252 /**
1253  * Frees the memory structures related to \a ns. This is only done
1254  * when ldlm_namespace_free_prior() has successfully removed all resources
1255  * referencing \a ns and its refcount is 0.
1256  */
1257 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
1258 {
1259         ENTRY;
1260         if (!ns) {
1261                 EXIT;
1262                 return;
1263         }
1264
1265         /* Make sure that nobody can find this ns in its list. */
1266         ldlm_namespace_unregister(ns, ns->ns_client);
1267         /* Finish the pool _before_ the parent proc dir is removed. This is
1268          * important as ldlm_pool_fini() removes its own proc dir, which is a
1269          * child of @dir. Removing it after @dir may cause an oops. */
1270         ldlm_pool_fini(&ns->ns_pool);
1271
1272         ldlm_namespace_debugfs_unregister(ns);
1273         ldlm_namespace_sysfs_unregister(ns);
1274         cfs_hash_putref(ns->ns_rs_hash);
1275         OBD_FREE_LARGE(ns->ns_rs_buckets,
1276                        BIT(ns->ns_bucket_bits) * sizeof(ns->ns_rs_buckets[0]));
1277         kfree(ns->ns_name);
1278         /* Namespace \a ns should not be on any list at this time; otherwise
1279          * the poold thread could end up using the freed \a ns and cause
1280          * issues.
1281          */
1282         LASSERT(list_empty(&ns->ns_list_chain));
1283         OBD_FREE_PTR(ns);
1284         ldlm_put_ref();
1285         EXIT;
1286 }
1287 EXPORT_SYMBOL(ldlm_namespace_free_post);
1288
1289 /**
1290  * Cleanup the resource, and free namespace.
1291  * bug 12864:
1292  * Deadlock issue:
1293  * proc1: destroy import
1294  *        class_disconnect_export(grab cl_sem) ->
1295  *              -> ldlm_namespace_free ->
1296  *              -> lprocfs_remove(grab _lprocfs_lock).
1297  * proc2: read proc info
1298  *        lprocfs_fops_read(grab _lprocfs_lock) ->
1299  *              -> osc_rd_active, etc(grab cl_sem).
1300  *
1301  * To avoid this, ldlm_namespace_free is split into two parts: the first
1302  * part, ldlm_namespace_free_prior, cleans up the resources that are still
1303  * being used; the second part, ldlm_namespace_free_post, unregisters the
1304  * lprocfs entries and then frees the memory. It is called without
1305  * cli->cl_sem held.
1306  */
1307 void ldlm_namespace_free(struct ldlm_namespace *ns,
1308                          struct obd_import *imp,
1309                          int force)
1310 {
1311         ldlm_namespace_free_prior(ns, imp, force);
1312         ldlm_namespace_free_post(ns);
1313 }
1314 EXPORT_SYMBOL(ldlm_namespace_free);
1315
1316 void ldlm_namespace_get(struct ldlm_namespace *ns)
1317 {
1318         atomic_inc(&ns->ns_bref);
1319 }
1320
1321 /* This is only for callers that care about refcount */
1322 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1323 {
1324         return atomic_inc_return(&ns->ns_bref);
1325 }
1326
1327 void ldlm_namespace_put(struct ldlm_namespace *ns)
1328 {
1329         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1330                 wake_up(&ns->ns_waitq);
1331                 spin_unlock(&ns->ns_lock);
1332         }
1333 }
1334
1335 /** Register \a ns in the list of namespaces */
1336 void ldlm_namespace_register(struct ldlm_namespace *ns, enum ldlm_side client)
1337 {
1338         mutex_lock(ldlm_namespace_lock(client));
1339         LASSERT(list_empty(&ns->ns_list_chain));
1340         list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
1341         ldlm_namespace_nr_inc(client);
1342         mutex_unlock(ldlm_namespace_lock(client));
1343 }
1344
1345 /** Unregister \a ns from the list of namespaces. */
1346 void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client)
1347 {
1348         mutex_lock(ldlm_namespace_lock(client));
1349         LASSERT(!list_empty(&ns->ns_list_chain));
1350         /* Some asserts and possibly other parts of the code are still
1351          * using list_empty(&ns->ns_list_chain). This is why it is
1352          * important to use list_del_init() here. */
1353         list_del_init(&ns->ns_list_chain);
1354         ldlm_namespace_nr_dec(client);
1355         mutex_unlock(ldlm_namespace_lock(client));
1356 }
1357
1358 /** Should be called with ldlm_namespace_lock(client) taken. */
1359 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1360                                           enum ldlm_side client)
1361 {
1362         LASSERT(!list_empty(&ns->ns_list_chain));
1363         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1364         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1365 }
1366
1367 /** Should be called with ldlm_namespace_lock(client) taken. */
1368 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1369                                             enum ldlm_side client)
1370 {
1371         LASSERT(!list_empty(&ns->ns_list_chain));
1372         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1373         list_move_tail(&ns->ns_list_chain,
1374                        ldlm_namespace_inactive_list(client));
1375 }
1376
1377 /** Should be called with ldlm_namespace_lock(client) taken. */
1378 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
1379 {
1380         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1381         LASSERT(!list_empty(ldlm_namespace_list(client)));
1382         return container_of(ldlm_namespace_list(client)->next,
1383                             struct ldlm_namespace, ns_list_chain);
1384 }
1385
1386 static bool ldlm_resource_extent_new(struct ldlm_resource *res)
1387 {
1388         int idx;
1389
1390         OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
1391                        sizeof(*res->lr_itree) * LCK_MODE_NUM);
1392         if (res->lr_itree == NULL)
1393                 return false;
1394         /* Initialize interval trees for each lock mode. */
1395         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1396                 res->lr_itree[idx].lit_size = 0;
1397                 res->lr_itree[idx].lit_mode = 1 << idx;
1398                 res->lr_itree[idx].lit_root = NULL;
1399         }
1400         return true;
1401 }
1402
1403 static bool ldlm_resource_inodebits_new(struct ldlm_resource *res)
1404 {
1405         int i;
1406
1407         OBD_ALLOC_PTR(res->lr_ibits_queues);
1408         if (res->lr_ibits_queues == NULL)
1409                 return false;
1410         for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
1411                 INIT_LIST_HEAD(&res->lr_ibits_queues->liq_waiting[i]);
1412         return true;
1413 }
1414
1415 /** Create and initialize new resource. */
1416 static struct ldlm_resource *ldlm_resource_new(enum ldlm_type ldlm_type)
1417 {
1418         struct ldlm_resource *res;
1419         bool rc;
1420
1421         OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1422         if (res == NULL)
1423                 return NULL;
1424
1425         switch (ldlm_type) {
1426         case LDLM_EXTENT:
1427                 rc = ldlm_resource_extent_new(res);
1428                 break;
1429         case LDLM_IBITS:
1430                 rc = ldlm_resource_inodebits_new(res);
1431                 break;
1432         default:
1433                 rc = true;
1434                 break;
1435         }
1436         if (!rc) {
1437                 OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1438                 return NULL;
1439         }
1440
1441         INIT_LIST_HEAD(&res->lr_granted);
1442         INIT_LIST_HEAD(&res->lr_waiting);
1443
1444         atomic_set(&res->lr_refcount, 1);
1445         spin_lock_init(&res->lr_lock);
1446         lu_ref_init(&res->lr_reference);
1447
1448         /* Since LVB init can be delayed now, there is no longer a need to
1449          * immediately acquire the mutex here. */
1450         mutex_init(&res->lr_lvb_mutex);
1451         res->lr_lvb_initialized = false;
1452
1453         return res;
1454 }
1455
1456 static void ldlm_resource_free(struct ldlm_resource *res)
1457 {
1458         if (res->lr_type == LDLM_EXTENT) {
1459                 if (res->lr_itree != NULL)
1460                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1461                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1462         } else if (res->lr_type == LDLM_IBITS) {
1463                 if (res->lr_ibits_queues != NULL)
1464                         OBD_FREE_PTR(res->lr_ibits_queues);
1465         }
1466
1467         OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1468 }
1469
1470 /**
1471  * Return a reference to resource with given name, creating it if necessary.
1472  * Args: namespace with ns_lock unlocked
1473  * Locks: takes and releases NS hash-lock and res->lr_lock
1474  * Returns: referenced, unlocked ldlm_resource or ERR_PTR on failure
1475  */
1476 struct ldlm_resource *
1477 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1478                   const struct ldlm_res_id *name, enum ldlm_type type,
1479                   int create)
1480 {
1481         struct hlist_node       *hnode;
1482         struct ldlm_resource    *res = NULL;
1483         struct cfs_hash_bd      bd;
1484         __u64                   version;
1485         int                     ns_refcount = 0;
1486         int                     hash;
1487
1488         LASSERT(ns != NULL);
1489         LASSERT(parent == NULL);
1490         LASSERT(ns->ns_rs_hash != NULL);
1491         LASSERT(name->name[0] != 0);
1492
1493         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1494         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1495         if (hnode != NULL) {
1496                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1497                 GOTO(found, res);
1498         }
1499
1500         version = cfs_hash_bd_version_get(&bd);
1501         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1502
1503         if (create == 0)
1504                 return ERR_PTR(-ENOENT);
1505
1506         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1507                  "type: %d\n", type);
1508         res = ldlm_resource_new(type);
1509         if (res == NULL)
1510                 return ERR_PTR(-ENOMEM);
1511
1512         hash = ldlm_res_hop_fid_hash(name, ns->ns_bucket_bits);
1513         res->lr_ns_bucket = &ns->ns_rs_buckets[hash];
1514         res->lr_name = *name;
1515         res->lr_type = type;
1516
1517         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1518         hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1519                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1520
1521         if (hnode != NULL) {
1522                 /* Someone won the race and already added the resource. */
1523                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1524                 /* Clean lu_ref for failed resource. */
1525                 lu_ref_fini(&res->lr_reference);
1526                 ldlm_resource_free(res);
1527 found:
1528                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1529                 return res;
1530         }
1531         /* We won! Let's add the resource. */
1532         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1533         if (atomic_inc_return(&res->lr_ns_bucket->nsb_count) == 1)
1534                 ns_refcount = ldlm_namespace_get_return(ns);
1535
1536         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1537
1538         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1539
1540         /* Let's see if we happened to be the very first resource in this
1541          * namespace. If so, and this is a client namespace, we need to move
1542          * the namespace into the active namespaces list to be patrolled by
1543          * the ldlm_poold. */
1544         if (ns_is_client(ns) && ns_refcount == 1) {
1545                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1546                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1547                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1548         }
1549
1550         return res;
1551 }
1552 EXPORT_SYMBOL(ldlm_resource_get);
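
/*
 * Minimal usage sketch for ldlm_resource_get()/ldlm_resource_putref()
 * (hypothetical example function, illustrative only): look up or create a
 * resource for a caller-supplied namespace and resource id, treat failures
 * as ERR_PTR() values, and drop the reference when done.
 */
static int __maybe_unused
ldlm_resource_get_example(struct ldlm_namespace *ns,
                          const struct ldlm_res_id *res_id)
{
        struct ldlm_resource *res;

        res = ldlm_resource_get(ns, NULL, res_id, LDLM_IBITS, 1);
        if (IS_ERR(res))
                return PTR_ERR(res);

        lock_res(res);
        /* ... inspect or modify the granted/waiting lists here ... */
        unlock_res(res);

        ldlm_resource_putref(res);
        return 0;
}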
1553
1554 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1555 {
1556         LASSERT(res != NULL);
1557         LASSERT(res != LP_POISON);
1558         atomic_inc(&res->lr_refcount);
1559         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1560                atomic_read(&res->lr_refcount));
1561         return res;
1562 }
1563
1564 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1565                                          struct ldlm_resource *res)
1566 {
1567         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1568
1569         if (!list_empty(&res->lr_granted)) {
1570                 ldlm_resource_dump(D_ERROR, res);
1571                 LBUG();
1572         }
1573
1574         if (!list_empty(&res->lr_waiting)) {
1575                 ldlm_resource_dump(D_ERROR, res);
1576                 LBUG();
1577         }
1578
1579         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1580                                bd, &res->lr_hash);
1581         lu_ref_fini(&res->lr_reference);
1582         if (atomic_dec_and_test(&nsb->nsb_count))
1583                 ldlm_namespace_put(nsb->nsb_namespace);
1584 }
1585
1586 /* Returns 1 if the resource was freed, 0 if it remains. */
1587 int ldlm_resource_putref(struct ldlm_resource *res)
1588 {
1589         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1590         struct cfs_hash_bd   bd;
1591
1592         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1593         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1594                res, atomic_read(&res->lr_refcount) - 1);
1595
1596         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1597         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1598                 __ldlm_resource_putref_final(&bd, res);
1599                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1600                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1601                         ns->ns_lvbo->lvbo_free(res);
1602                 ldlm_resource_free(res);
1603                 return 1;
1604         }
1605         return 0;
1606 }
1607 EXPORT_SYMBOL(ldlm_resource_putref);
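
/*
 * Reference-pairing sketch (hypothetical example function, illustrative
 * only): every ldlm_resource_getref() must be balanced by an
 * ldlm_resource_putref(); the final putref removes the resource from the
 * namespace hash and frees it.
 */
static void __maybe_unused ldlm_resource_ref_example(struct ldlm_resource *res)
{
        struct ldlm_resource *tmp = ldlm_resource_getref(res);

        /* ... hand @tmp to another context and use it, then ... */
        ldlm_resource_putref(tmp);
}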
1608
1609 /**
1610  * Add a lock to the specified lock list of a given resource.
1611  */
1612 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1613                             struct ldlm_lock *lock)
1614 {
1615         check_res_locked(res);
1616
1617         LDLM_DEBUG(lock, "About to add this lock");
1618
1619         if (ldlm_is_destroyed(lock)) {
1620                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1621                 return;
1622         }
1623
1624         LASSERT(list_empty(&lock->l_res_link));
1625
1626         list_add_tail(&lock->l_res_link, head);
1627
1628         if (res->lr_type == LDLM_IBITS)
1629                 ldlm_inodebits_add_lock(res, head, lock);
1630 }
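
/*
 * Usage sketch (hypothetical example function, illustrative only): the
 * resource lock must be held while queueing, e.g. appending @lock to the
 * waiting list of @res.
 */
static void __maybe_unused
ldlm_resource_add_lock_example(struct ldlm_resource *res,
                               struct ldlm_lock *lock)
{
        lock_res(res);
        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
        unlock_res(res);
}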
1631
1632 /**
1633  * Insert a lock into resource after specified lock.
1634  *
1635  * Obtain resource description from the lock we are inserting after.
1636  */
1637 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1638                                      struct ldlm_lock *new)
1639 {
1640         struct ldlm_resource *res = original->l_resource;
1641
1642         check_res_locked(res);
1643
1644         ldlm_resource_dump(D_INFO, res);
1645         LDLM_DEBUG(new, "About to insert this lock after %p: ", original);
1646
1647         if (ldlm_is_destroyed(new)) {
1648                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1649                 goto out;
1650         }
1651
1652         LASSERT(list_empty(&new->l_res_link));
1653
1654         list_add(&new->l_res_link, &original->l_res_link);
1655  out:;
1656 }
1657
1658 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1659 {
1660         int type = lock->l_resource->lr_type;
1661
1662         check_res_locked(lock->l_resource);
1663         switch (type) {
1664         case LDLM_PLAIN:
1665                 ldlm_unlink_lock_skiplist(lock);
1666                 break;
1667         case LDLM_EXTENT:
1668                 ldlm_extent_unlink_lock(lock);
1669                 break;
1670         case LDLM_IBITS:
1671                 ldlm_inodebits_unlink_lock(lock);
1672                 break;
1673         }
1674         list_del_init(&lock->l_res_link);
1675 }
1676 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
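
/*
 * Sketch of moving a lock between resource lists (hypothetical example
 * function, illustrative only): with the resource lock held, unlink the
 * lock from whichever list it is on and queue it on the granted list.
 * The real grant path does considerably more than this.
 */
static void __maybe_unused ldlm_resource_move_lock_example(struct ldlm_lock *lock)
{
        struct ldlm_resource *res = lock->l_resource;

        check_res_locked(res);
        ldlm_resource_unlink_lock(lock);
        ldlm_resource_add_lock(res, &res->lr_granted, lock);
}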
1677
1678 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1679 {
1680         desc->lr_type = res->lr_type;
1681         desc->lr_name = res->lr_name;
1682 }
1683
1684 /**
1685  * Print information about all locks in all namespaces on this node to debug
1686  * log.
1687  */
1688 void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
1689 {
1690         struct list_head *tmp;
1691
1692         if (!((libcfs_debug | D_ERROR) & level))
1693                 return;
1694
1695         mutex_lock(ldlm_namespace_lock(client));
1696
1697         list_for_each(tmp, ldlm_namespace_list(client)) {
1698                 struct ldlm_namespace *ns;
1699
1700                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1701                 ldlm_namespace_dump(level, ns);
1702         }
1703
1704         mutex_unlock(ldlm_namespace_lock(client));
1705 }
1706
1707 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1708                               struct hlist_node *hnode, void *arg)
1709 {
1710         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1711         int    level = (int)(unsigned long)arg;
1712
1713         lock_res(res);
1714         ldlm_resource_dump(level, res);
1715         unlock_res(res);
1716
1717         return 0;
1718 }
1719
1720 /**
1721  * Print information about all locks in this namespace on this node to debug
1722  * log.
1723  */
1724 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1725 {
1726         if (!((libcfs_debug | D_ERROR) & level))
1727                 return;
1728
1729         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1730                ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1731                ns_is_client(ns) ? "client" : "server");
1732
1733         if (ktime_get_seconds() < ns->ns_next_dump)
1734                 return;
1735
1736         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1737                                  ldlm_res_hash_dump,
1738                                  (void *)(unsigned long)level, 0);
1739         spin_lock(&ns->ns_lock);
1740         ns->ns_next_dump = ktime_get_seconds() + 10;
1741         spin_unlock(&ns->ns_lock);
1742 }
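
/*
 * Usage sketch (hypothetical example function, illustrative only): D_ERROR
 * dumps always pass the debug-mask check above, other levels only do so if
 * enabled in libcfs_debug, and the full per-resource scan is rate-limited
 * to once per 10 seconds via ns_next_dump.
 */
static void __maybe_unused ldlm_namespace_dump_example(struct ldlm_namespace *ns)
{
        ldlm_namespace_dump(D_DLMTRACE, ns);    /* only if D_DLMTRACE enabled */
        ldlm_namespace_dump(D_ERROR, ns);       /* always passes mask check */
}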
1743
1744 /**
1745  * Print information about all locks in this resource to debug log.
1746  */
1747 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1748 {
1749         struct ldlm_lock *lock;
1750         unsigned int granted = 0;
1751
1752         BUILD_BUG_ON(RES_NAME_SIZE != 4);
1753
1754         if (!((libcfs_debug | D_ERROR) & level))
1755                 return;
1756
1757         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1758                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1759
1760         if (!list_empty(&res->lr_granted)) {
1761                 CDEBUG(level, "Granted locks (in reverse order):\n");
1762                 list_for_each_entry_reverse(lock, &res->lr_granted,
1763                                                 l_res_link) {
1764                         LDLM_DEBUG_LIMIT(level, lock, "###");
1765                         if (!(level & D_CANTMASK) &&
1766                             ++granted > ldlm_dump_granted_max) {
1767                                 CDEBUG(level,
1768                                        "only dump %d granted locks to avoid DDOS.\n",
1769                                        granted);
1770                                 break;
1771                         }
1772                 }
1773         }
1774
1775         if (!list_empty(&res->lr_waiting)) {
1776                 CDEBUG(level, "Waiting locks:\n");
1777                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1778                         LDLM_DEBUG_LIMIT(level, lock, "###");
1779         }
1780 }
1781 EXPORT_SYMBOL(ldlm_resource_dump);
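
/*
 * Usage sketch (hypothetical example function, illustrative only): dump one
 * resource at D_ERROR, which is never masked; levels outside D_CANTMASK
 * additionally cap the number of granted locks printed at
 * ldlm_dump_granted_max.
 */
static void __maybe_unused ldlm_resource_dump_example(struct ldlm_resource *res)
{
        lock_res(res);
        ldlm_resource_dump(D_ERROR, res);
        unlock_res(res);
}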