Whamcloud - gitweb
1ec08e7d6a308f3043ed2c18b30b1c290aefcb7e
[fs/lustre-release.git] / lustre / ldlm / ldlm_resource.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_resource.c
37  *
38  * Author: Phil Schwan <phil@clusterfs.com>
39  * Author: Peter Braam <braam@clusterfs.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43 #include <lustre_dlm.h>
44 #include <lustre_fid.h>
45 #include <obd_class.h>
46 #include "ldlm_internal.h"
47
48 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
49 struct kmem_cache *ldlm_interval_tree_slab;
50
51 int ldlm_srv_namespace_nr = 0;
52 int ldlm_cli_namespace_nr = 0;
53
54 struct mutex ldlm_srv_namespace_lock;
55 struct list_head ldlm_srv_namespace_list;
56
57 struct mutex ldlm_cli_namespace_lock;
58 /* Client Namespaces that have active resources in them.
59  * Once all resources go away, ldlm_poold moves such namespaces to the
60  * inactive list */
61 struct list_head ldlm_cli_active_namespace_list;
62 /* Client namespaces that don't have any locks in them */
63 struct list_head ldlm_cli_inactive_namespace_list;
64
65 static struct proc_dir_entry *ldlm_type_proc_dir;
66 static struct proc_dir_entry *ldlm_ns_proc_dir;
67 struct proc_dir_entry *ldlm_svc_proc_dir;
68
69 /* during debug dump certain amount of granted locks for one resource to avoid
70  * DDOS. */
71 static unsigned int ldlm_dump_granted_max = 256;
72
73 #ifdef CONFIG_PROC_FS
74 static ssize_t
75 lprocfs_dump_ns_seq_write(struct file *file, const char __user *buffer,
76                           size_t count, loff_t *off)
77 {
78         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
79         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
80         RETURN(count);
81 }
82 LPROC_SEQ_FOPS_WO_TYPE(ldlm, dump_ns);
83
84 LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
85 LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint);
86
87 #ifdef HAVE_SERVER_SUPPORT
88
89 static int seq_watermark_show(struct seq_file *m, void *data)
90 {
91         return seq_printf(m, LPU64"\n", *(__u64 *)m->private);
92 }
93
94 static ssize_t seq_watermark_write(struct file *file,
95                                    const char __user *buffer, size_t count,
96                                    loff_t *off)
97 {
98         __u64 watermark;
99         __u64 *data = ((struct seq_file *)file->private_data)->private;
100         bool wm_low = (data == &ldlm_reclaim_threshold_mb) ? true : false;
101         int rc;
102
103         rc = lprocfs_write_frac_u64_helper(buffer, count, &watermark, 1 << 20);
104         if (rc) {
105                 CERROR("Failed to set %s, rc = %d.\n",
106                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb",
107                        rc);
108                 return rc;
109         } else if (watermark != 0 && watermark < (1 << 20)) {
110                 CERROR("%s should be greater than 1MB.\n",
111                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb");
112                 return -EINVAL;
113         }
114         watermark >>= 20;
115
116         if (wm_low) {
117                 if (ldlm_lock_limit_mb != 0 && watermark > ldlm_lock_limit_mb) {
118                         CERROR("lock_reclaim_threshold_mb must be smaller than "
119                                "lock_limit_mb.\n");
120                         return -EINVAL;
121                 }
122
123                 *data = watermark;
124                 if (watermark != 0) {
125                         watermark <<= 20;
126                         do_div(watermark, sizeof(struct ldlm_lock));
127                 }
128                 ldlm_reclaim_threshold = watermark;
129         } else {
130                 if (ldlm_reclaim_threshold_mb != 0 &&
131                     watermark < ldlm_reclaim_threshold_mb) {
132                         CERROR("lock_limit_mb must be greater than "
133                                "lock_reclaim_threshold_mb.\n");
134                         return -EINVAL;
135                 }
136
137                 *data = watermark;
138                 if (watermark != 0) {
139                         watermark <<= 20;
140                         do_div(watermark, sizeof(struct ldlm_lock));
141                 }
142                 ldlm_lock_limit = watermark;
143         }
144
145         return count;
146 }
147
148 static int seq_watermark_open(struct inode *inode, struct file *file)
149 {
150         return single_open(file, seq_watermark_show, PDE_DATA(inode));
151 }
152
153 static const struct file_operations ldlm_watermark_fops = {
154         .owner          = THIS_MODULE,
155         .open           = seq_watermark_open,
156         .read           = seq_read,
157         .write          = seq_watermark_write,
158         .llseek         = seq_lseek,
159         .release        = lprocfs_single_release,
160 };
161
162 static int seq_granted_show(struct seq_file *m, void *data)
163 {
164         return seq_printf(m, LPU64"\n", percpu_counter_sum_positive(
165                                 (struct percpu_counter *)m->private));
166 }
167
168 static int seq_granted_open(struct inode *inode, struct file *file)
169 {
170         return single_open(file, seq_granted_show, PDE_DATA(inode));
171 }
172
173 static const struct file_operations ldlm_granted_fops = {
174         .owner  = THIS_MODULE,
175         .open   = seq_granted_open,
176         .read   = seq_read,
177         .llseek = seq_lseek,
178         .release = seq_release,
179 };
180
181 #endif /* HAVE_SERVER_SUPPORT */
182
183 int ldlm_proc_setup(void)
184 {
185         int rc;
186         struct lprocfs_vars list[] = {
187                 { .name =       "dump_namespaces",
188                   .fops =       &ldlm_dump_ns_fops,
189                   .proc_mode =  0222 },
190                 { .name =       "dump_granted_max",
191                   .fops =       &ldlm_rw_uint_fops,
192                   .data =       &ldlm_dump_granted_max },
193                 { .name =       "cancel_unused_locks_before_replay",
194                   .fops =       &ldlm_rw_uint_fops,
195                   .data =       &ldlm_cancel_unused_locks_before_replay },
196 #ifdef HAVE_SERVER_SUPPORT
197                 { .name =       "lock_reclaim_threshold_mb",
198                   .fops =       &ldlm_watermark_fops,
199                   .data =       &ldlm_reclaim_threshold_mb },
200                 { .name =       "lock_limit_mb",
201                   .fops =       &ldlm_watermark_fops,
202                   .data =       &ldlm_lock_limit_mb },
203                 { .name =       "lock_granted_count",
204                   .fops =       &ldlm_granted_fops,
205                   .data =       &ldlm_granted_total },
206 #endif
207                 { NULL }};
208         ENTRY;
209         LASSERT(ldlm_ns_proc_dir == NULL);
210
211         ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
212                                               proc_lustre_root,
213                                               NULL, NULL);
214         if (IS_ERR(ldlm_type_proc_dir)) {
215                 CERROR("LProcFS failed in ldlm-init\n");
216                 rc = PTR_ERR(ldlm_type_proc_dir);
217                 GOTO(err, rc);
218         }
219
220         ldlm_ns_proc_dir = lprocfs_register("namespaces",
221                                             ldlm_type_proc_dir,
222                                             NULL, NULL);
223         if (IS_ERR(ldlm_ns_proc_dir)) {
224                 CERROR("LProcFS failed in ldlm-init\n");
225                 rc = PTR_ERR(ldlm_ns_proc_dir);
226                 GOTO(err_type, rc);
227         }
228
229         ldlm_svc_proc_dir = lprocfs_register("services",
230                                              ldlm_type_proc_dir,
231                                              NULL, NULL);
232         if (IS_ERR(ldlm_svc_proc_dir)) {
233                 CERROR("LProcFS failed in ldlm-init\n");
234                 rc = PTR_ERR(ldlm_svc_proc_dir);
235                 GOTO(err_ns, rc);
236         }
237
238         rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
239         if (rc != 0) {
240                 CERROR("LProcFS failed in ldlm-init\n");
241                 GOTO(err_svc, rc);
242         }
243
244         RETURN(0);
245
246 err_svc:
247         lprocfs_remove(&ldlm_svc_proc_dir);
248 err_ns:
249         lprocfs_remove(&ldlm_ns_proc_dir);
250 err_type:
251         lprocfs_remove(&ldlm_type_proc_dir);
252 err:
253         ldlm_svc_proc_dir = NULL;
254         RETURN(rc);
255 }
256
257 void ldlm_proc_cleanup(void)
258 {
259         if (ldlm_svc_proc_dir)
260                 lprocfs_remove(&ldlm_svc_proc_dir);
261
262         if (ldlm_ns_proc_dir)
263                 lprocfs_remove(&ldlm_ns_proc_dir);
264
265         if (ldlm_type_proc_dir)
266                 lprocfs_remove(&ldlm_type_proc_dir);
267 }
268
269 static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
270 {
271         struct ldlm_namespace   *ns  = m->private;
272         __u64                   res = 0;
273         struct cfs_hash_bd              bd;
274         int                     i;
275
276         /* result is not strictly consistant */
277         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
278                 res += cfs_hash_bd_count_get(&bd);
279         return lprocfs_u64_seq_show(m, &res);
280 }
281 LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);
282
283 static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
284 {
285         struct ldlm_namespace   *ns = m->private;
286         __u64                   locks;
287
288         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
289                                         LPROCFS_FIELDS_FLAGS_SUM);
290         return lprocfs_u64_seq_show(m, &locks);
291 }
292 LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);
293
294 static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
295 {
296         struct ldlm_namespace *ns = m->private;
297         __u32 *nr = &ns->ns_max_unused;
298
299         if (ns_connect_lru_resize(ns))
300                 nr = &ns->ns_nr_unused;
301         return lprocfs_uint_seq_show(m, nr);
302 }
303
304 static ssize_t lprocfs_lru_size_seq_write(struct file *file,
305                                           const char __user *buffer,
306                                           size_t count, loff_t *off)
307 {
308         struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
309         char dummy[MAX_STRING_SIZE + 1], *end;
310         unsigned long tmp;
311         int lru_resize;
312
313         dummy[MAX_STRING_SIZE] = '\0';
314         if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
315                 return -EFAULT;
316
317         if (strncmp(dummy, "clear", 5) == 0) {
318                 CDEBUG(D_DLMTRACE,
319                        "dropping all unused locks from namespace %s\n",
320                        ldlm_ns_name(ns));
321                 if (ns_connect_lru_resize(ns)) {
322                         int canceled, unused  = ns->ns_nr_unused;
323
324                         /* Try to cancel all @ns_nr_unused locks. */
325                         canceled = ldlm_cancel_lru(ns, unused, 0,
326                                                    LDLM_LRU_FLAG_PASSED);
327                         if (canceled < unused) {
328                                 CDEBUG(D_DLMTRACE,
329                                        "not all requested locks are canceled, "
330                                        "requested: %d, canceled: %d\n", unused,
331                                        canceled);
332                                 return -EINVAL;
333                         }
334                 } else {
335                         tmp = ns->ns_max_unused;
336                         ns->ns_max_unused = 0;
337                         ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
338                         ns->ns_max_unused = tmp;
339                 }
340                 return count;
341         }
342
343         tmp = simple_strtoul(dummy, &end, 0);
344         if (dummy == end) {
345                 CERROR("invalid value written\n");
346                 return -EINVAL;
347         }
348         lru_resize = (tmp == 0);
349
350         if (ns_connect_lru_resize(ns)) {
351                 if (!lru_resize)
352                         ns->ns_max_unused = tmp;
353
354                 if (tmp > ns->ns_nr_unused)
355                         tmp = ns->ns_nr_unused;
356                 tmp = ns->ns_nr_unused - tmp;
357
358                 CDEBUG(D_DLMTRACE,
359                        "changing namespace %s unused locks from %u to %u\n",
360                        ldlm_ns_name(ns), ns->ns_nr_unused,
361                        (unsigned int)tmp);
362                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
363
364                 if (!lru_resize) {
365                         CDEBUG(D_DLMTRACE,
366                                "disable lru_resize for namespace %s\n",
367                                ldlm_ns_name(ns));
368                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
369                 }
370         } else {
371                 CDEBUG(D_DLMTRACE,
372                        "changing namespace %s max_unused from %u to %u\n",
373                        ldlm_ns_name(ns), ns->ns_max_unused,
374                        (unsigned int)tmp);
375                 ns->ns_max_unused = (unsigned int)tmp;
376                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
377
378                 /* Make sure that LRU resize was originally supported before
379                  * turning it on here. */
380                 if (lru_resize &&
381                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
382                         CDEBUG(D_DLMTRACE,
383                                "enable lru_resize for namespace %s\n",
384                                ldlm_ns_name(ns));
385                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
386                 }
387         }
388
389         return count;
390 }
391 LPROC_SEQ_FOPS(lprocfs_lru_size);
392
393 static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
394 {
395         struct ldlm_namespace *ns = m->private;
396         unsigned int supp = ns_connect_cancelset(ns);
397
398         return lprocfs_uint_seq_show(m, &supp);
399 }
400
401 static ssize_t lprocfs_elc_seq_write(struct file *file,
402                                      const char __user *buffer,
403                                      size_t count, loff_t *off)
404 {
405         struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
406         unsigned int supp = -1;
407         int rc;
408
409         rc = lprocfs_wr_uint(file, buffer, count, &supp);
410         if (rc < 0)
411                 return rc;
412
413         if (supp == 0)
414                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
415         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
416                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
417         return count;
418 }
419 LPROC_SEQ_FOPS(lprocfs_elc);
420
421 static void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
422 {
423         if (ns->ns_proc_dir_entry == NULL)
424                 CERROR("dlm namespace %s has no procfs dir?\n",
425                        ldlm_ns_name(ns));
426         else
427                 lprocfs_remove(&ns->ns_proc_dir_entry);
428
429         if (ns->ns_stats != NULL)
430                 lprocfs_free_stats(&ns->ns_stats);
431 }
432
433 static int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
434 {
435         struct lprocfs_vars lock_vars[2];
436         char lock_name[MAX_STRING_SIZE + 1];
437         struct proc_dir_entry *ns_pde;
438
439         LASSERT(ns != NULL);
440         LASSERT(ns->ns_rs_hash != NULL);
441
442         if (ns->ns_proc_dir_entry != NULL) {
443                 ns_pde = ns->ns_proc_dir_entry;
444         } else {
445                 ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir);
446                 if (ns_pde == NULL)
447                         return -ENOMEM;
448                 ns->ns_proc_dir_entry = ns_pde;
449         }
450
451         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
452         if (ns->ns_stats == NULL)
453                 return -ENOMEM;
454
455         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
456                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
457
458         lock_name[MAX_STRING_SIZE] = '\0';
459
460         memset(lock_vars, 0, sizeof(lock_vars));
461         lock_vars[0].name = lock_name;
462
463         ldlm_add_var(&lock_vars[0], ns_pde, "resource_count", ns,
464                      &lprocfs_ns_resources_fops);
465         ldlm_add_var(&lock_vars[0], ns_pde, "lock_count", ns,
466                      &lprocfs_ns_locks_fops);
467
468         if (ns_is_client(ns)) {
469                 ldlm_add_var(&lock_vars[0], ns_pde, "lock_unused_count",
470                              &ns->ns_nr_unused, &ldlm_uint_fops);
471                 ldlm_add_var(&lock_vars[0], ns_pde, "lru_size", ns,
472                              &lprocfs_lru_size_fops);
473                 ldlm_add_var(&lock_vars[0], ns_pde, "lru_max_age",
474                              &ns->ns_max_age, &ldlm_rw_uint_fops);
475                 ldlm_add_var(&lock_vars[0], ns_pde, "early_lock_cancel",
476                              ns, &lprocfs_elc_fops);
477         } else {
478                 ldlm_add_var(&lock_vars[0], ns_pde, "ctime_age_limit",
479                              &ns->ns_ctime_age_limit, &ldlm_rw_uint_fops);
480                 ldlm_add_var(&lock_vars[0], ns_pde, "lock_timeouts",
481                              &ns->ns_timeouts, &ldlm_uint_fops);
482                 ldlm_add_var(&lock_vars[0], ns_pde, "max_nolock_bytes",
483                              &ns->ns_max_nolock_size, &ldlm_rw_uint_fops);
484                 ldlm_add_var(&lock_vars[0], ns_pde, "contention_seconds",
485                              &ns->ns_contention_time, &ldlm_rw_uint_fops);
486                 ldlm_add_var(&lock_vars[0], ns_pde, "contended_locks",
487                              &ns->ns_contended_locks, &ldlm_rw_uint_fops);
488                 ldlm_add_var(&lock_vars[0], ns_pde, "max_parallel_ast",
489                              &ns->ns_max_parallel_ast, &ldlm_rw_uint_fops);
490         }
491         return 0;
492 }
493 #undef MAX_STRING_SIZE
494 #else /* CONFIG_PROC_FS */
495
496 #define ldlm_namespace_proc_unregister(ns)      ({;})
497 #define ldlm_namespace_proc_register(ns)        ({0;})
498
499 #endif /* CONFIG_PROC_FS */
500
501 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
502                                   const void *key, unsigned mask)
503 {
504         const struct ldlm_res_id     *id  = key;
505         unsigned                val = 0;
506         unsigned                i;
507
508         for (i = 0; i < RES_NAME_SIZE; i++)
509                 val += id->name[i];
510         return val & mask;
511 }
512
513 static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
514                                       const void *key, unsigned mask)
515 {
516         const struct ldlm_res_id *id = key;
517         struct lu_fid       fid;
518         __u32               hash;
519         __u32               val;
520
521         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
522         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
523         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
524
525         hash = fid_flatten32(&fid);
526         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
527         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
528                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
529                 hash += (val >> 5) + (val << 11);
530         } else {
531                 val = fid_oid(&fid);
532         }
533         hash = hash_long(hash, hs->hs_bkt_bits);
534         /* give me another random factor */
535         hash -= hash_long((unsigned long)hs, val % 11 + 3);
536
537         hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
538         hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
539
540         return hash & mask;
541 }
542
543 static void *ldlm_res_hop_key(struct hlist_node *hnode)
544 {
545         struct ldlm_resource   *res;
546
547         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
548         return &res->lr_name;
549 }
550
551 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
552 {
553         struct ldlm_resource   *res;
554
555         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
556         return ldlm_res_eq((const struct ldlm_res_id *)key,
557                            (const struct ldlm_res_id *)&res->lr_name);
558 }
559
560 static void *ldlm_res_hop_object(struct hlist_node *hnode)
561 {
562         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
563 }
564
565 static void
566 ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
567 {
568         struct ldlm_resource *res;
569
570         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
571         ldlm_resource_getref(res);
572 }
573
574 static void
575 ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
576 {
577         struct ldlm_resource *res;
578
579         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
580         /* cfs_hash_for_each_nolock is the only chance we call it */
581         ldlm_resource_putref_locked(res);
582 }
583
584 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
585 {
586         struct ldlm_resource *res;
587
588         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
589         ldlm_resource_putref(res);
590 }
591
592 static struct cfs_hash_ops ldlm_ns_hash_ops = {
593         .hs_hash        = ldlm_res_hop_hash,
594         .hs_key         = ldlm_res_hop_key,
595         .hs_keycmp      = ldlm_res_hop_keycmp,
596         .hs_keycpy      = NULL,
597         .hs_object      = ldlm_res_hop_object,
598         .hs_get         = ldlm_res_hop_get_locked,
599         .hs_put_locked  = ldlm_res_hop_put_locked,
600         .hs_put         = ldlm_res_hop_put
601 };
602
603 static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
604         .hs_hash        = ldlm_res_hop_fid_hash,
605         .hs_key         = ldlm_res_hop_key,
606         .hs_keycmp      = ldlm_res_hop_keycmp,
607         .hs_keycpy      = NULL,
608         .hs_object      = ldlm_res_hop_object,
609         .hs_get         = ldlm_res_hop_get_locked,
610         .hs_put_locked  = ldlm_res_hop_put_locked,
611         .hs_put         = ldlm_res_hop_put
612 };
613
614 typedef struct ldlm_ns_hash_def {
615         enum ldlm_ns_type       nsd_type;
616         /** hash bucket bits */
617         unsigned                nsd_bkt_bits;
618         /** hash bits */
619         unsigned                nsd_all_bits;
620         /** hash operations */
621         struct cfs_hash_ops *nsd_hops;
622 } ldlm_ns_hash_def_t;
623
624 static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] =
625 {
626         {
627                 .nsd_type       = LDLM_NS_TYPE_MDC,
628                 .nsd_bkt_bits   = 11,
629                 .nsd_all_bits   = 16,
630                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
631         },
632         {
633                 .nsd_type       = LDLM_NS_TYPE_MDT,
634                 .nsd_bkt_bits   = 14,
635                 .nsd_all_bits   = 21,
636                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
637         },
638         {
639                 .nsd_type       = LDLM_NS_TYPE_OSC,
640                 .nsd_bkt_bits   = 8,
641                 .nsd_all_bits   = 12,
642                 .nsd_hops       = &ldlm_ns_hash_ops,
643         },
644         {
645                 .nsd_type       = LDLM_NS_TYPE_OST,
646                 .nsd_bkt_bits   = 11,
647                 .nsd_all_bits   = 17,
648                 .nsd_hops       = &ldlm_ns_hash_ops,
649         },
650         {
651                 .nsd_type       = LDLM_NS_TYPE_MGC,
652                 .nsd_bkt_bits   = 4,
653                 .nsd_all_bits   = 4,
654                 .nsd_hops       = &ldlm_ns_hash_ops,
655         },
656         {
657                 .nsd_type       = LDLM_NS_TYPE_MGT,
658                 .nsd_bkt_bits   = 4,
659                 .nsd_all_bits   = 4,
660                 .nsd_hops       = &ldlm_ns_hash_ops,
661         },
662         {
663                 .nsd_type       = LDLM_NS_TYPE_UNKNOWN,
664         },
665 };
666
667 /**
668  * Create and initialize new empty namespace.
669  */
670 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
671                                           enum ldlm_side client,
672                                           enum ldlm_appetite apt,
673                                           enum ldlm_ns_type ns_type)
674 {
675         struct ldlm_namespace *ns = NULL;
676         struct ldlm_ns_bucket *nsb;
677         struct ldlm_ns_hash_def *nsd;
678         struct cfs_hash_bd bd;
679         int idx;
680         int rc;
681         ENTRY;
682
683         LASSERT(obd != NULL);
684
685         rc = ldlm_get_ref();
686         if (rc) {
687                 CERROR("ldlm_get_ref failed: %d\n", rc);
688                 RETURN(NULL);
689         }
690
691         for (idx = 0;;idx++) {
692                 nsd = &ldlm_ns_hash_defs[idx];
693                 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
694                         CERROR("Unknown type %d for ns %s\n", ns_type, name);
695                         GOTO(out_ref, NULL);
696                 }
697
698                 if (nsd->nsd_type == ns_type)
699                         break;
700         }
701
702         OBD_ALLOC_PTR(ns);
703         if (!ns)
704                 GOTO(out_ref, NULL);
705
706         ns->ns_rs_hash = cfs_hash_create(name,
707                                          nsd->nsd_all_bits, nsd->nsd_all_bits,
708                                          nsd->nsd_bkt_bits, sizeof(*nsb),
709                                          CFS_HASH_MIN_THETA,
710                                          CFS_HASH_MAX_THETA,
711                                          nsd->nsd_hops,
712                                          CFS_HASH_DEPTH |
713                                          CFS_HASH_BIGNAME |
714                                          CFS_HASH_SPIN_BKTLOCK |
715                                          CFS_HASH_NO_ITEMREF);
716         if (ns->ns_rs_hash == NULL)
717                 GOTO(out_ns, NULL);
718
719         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
720                 nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
721                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
722                 nsb->nsb_namespace = ns;
723                 nsb->nsb_reclaim_start = 0;
724         }
725
726         ns->ns_obd      = obd;
727         ns->ns_appetite = apt;
728         ns->ns_client   = client;
729
730         INIT_LIST_HEAD(&ns->ns_list_chain);
731         INIT_LIST_HEAD(&ns->ns_unused_list);
732         spin_lock_init(&ns->ns_lock);
733         atomic_set(&ns->ns_bref, 0);
734         init_waitqueue_head(&ns->ns_waitq);
735
736         ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
737         ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
738         ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
739
740         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
741         ns->ns_nr_unused          = 0;
742         ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
743         ns->ns_max_age            = LDLM_DEFAULT_MAX_ALIVE;
744         ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
745         ns->ns_timeouts           = 0;
746         ns->ns_orig_connect_flags = 0;
747         ns->ns_connect_flags      = 0;
748         ns->ns_stopping           = 0;
749         ns->ns_reclaim_start      = 0;
750         rc = ldlm_namespace_proc_register(ns);
751         if (rc != 0) {
752                 CERROR("Can't initialize ns proc, rc %d\n", rc);
753                 GOTO(out_hash, rc);
754         }
755
756         idx = ldlm_namespace_nr_read(client);
757         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
758         if (rc) {
759                 CERROR("Can't initialize lock pool, rc %d\n", rc);
760                 GOTO(out_proc, rc);
761         }
762
763         ldlm_namespace_register(ns, client);
764         RETURN(ns);
765 out_proc:
766         ldlm_namespace_proc_unregister(ns);
767         ldlm_namespace_cleanup(ns, 0);
768 out_hash:
769         cfs_hash_putref(ns->ns_rs_hash);
770 out_ns:
771         OBD_FREE_PTR(ns);
772 out_ref:
773         ldlm_put_ref();
774         RETURN(NULL);
775 }
776 EXPORT_SYMBOL(ldlm_namespace_new);
777
778 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
779
780 /**
781  * Cancel and destroy all locks on a resource.
782  *
783  * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
784  * clean up.  This is currently only used for recovery, and we make
785  * certain assumptions as a result--notably, that we shouldn't cancel
786  * locks with refs.
787  */
788 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
789                              __u64 flags)
790 {
791         struct list_head *tmp;
792         int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
793         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
794
795         do {
796                 struct ldlm_lock *lock = NULL;
797
798                 /* First, we look for non-cleaned-yet lock
799                  * all cleaned locks are marked by CLEANED flag. */
800                 lock_res(res);
801                 list_for_each(tmp, q) {
802                         lock = list_entry(tmp, struct ldlm_lock,
803                                           l_res_link);
804                         if (ldlm_is_cleaned(lock)) {
805                                 lock = NULL;
806                                 continue;
807                         }
808                         LDLM_LOCK_GET(lock);
809                         ldlm_set_cleaned(lock);
810                         break;
811                 }
812
813                 if (lock == NULL) {
814                         unlock_res(res);
815                         break;
816                 }
817
818                 /* Set CBPENDING so nothing in the cancellation path
819                  * can match this lock. */
820                 ldlm_set_cbpending(lock);
821                 ldlm_set_failed(lock);
822                 lock->l_flags |= flags;
823
824                 /* ... without sending a CANCEL message for local_only. */
825                 if (local_only)
826                         ldlm_set_local_only(lock);
827
828                 if (local_only && (lock->l_readers || lock->l_writers)) {
829                         /* This is a little bit gross, but much better than the
830                          * alternative: pretend that we got a blocking AST from
831                          * the server, so that when the lock is decref'd, it
832                          * will go away ... */
833                         unlock_res(res);
834                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
835                         if (lock->l_flags & LDLM_FL_FAIL_LOC) {
836                                 set_current_state(TASK_UNINTERRUPTIBLE);
837                                 schedule_timeout(cfs_time_seconds(4));
838                                 set_current_state(TASK_RUNNING);
839                         }
840                         if (lock->l_completion_ast)
841                                 lock->l_completion_ast(lock,
842                                                        LDLM_FL_FAILED, NULL);
843                         LDLM_LOCK_RELEASE(lock);
844                         continue;
845                 }
846
847                 if (client) {
848                         struct lustre_handle lockh;
849
850                         unlock_res(res);
851                         ldlm_lock2handle(lock, &lockh);
852                         rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
853                         if (rc)
854                                 CERROR("ldlm_cli_cancel: %d\n", rc);
855                 } else {
856                         unlock_res(res);
857                         LDLM_DEBUG(lock, "Freeing a lock still held by a "
858                                    "client node");
859                         ldlm_lock_cancel(lock);
860                 }
861                 LDLM_LOCK_RELEASE(lock);
862         } while (1);
863 }
864
865 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
866                                struct hlist_node *hnode, void *arg)
867 {
868         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
869         __u64 flags = *(__u64 *)arg;
870
871         cleanup_resource(res, &res->lr_granted, flags);
872         cleanup_resource(res, &res->lr_converting, flags);
873         cleanup_resource(res, &res->lr_waiting, flags);
874
875         return 0;
876 }
877
878 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
879                                   struct hlist_node *hnode, void *arg)
880 {
881         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
882
883         lock_res(res);
884         CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
885                "(%d) after lock cleanup; forcing cleanup.\n",
886                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
887                atomic_read(&res->lr_refcount) - 1);
888
889         ldlm_resource_dump(D_ERROR, res);
890         unlock_res(res);
891         return 0;
892 }
893
894 /**
895  * Cancel and destroy all locks in the namespace.
896  *
897  * Typically used during evictions when server notified client that it was
898  * evicted and all of its state needs to be destroyed.
899  * Also used during shutdown.
900  */
901 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
902 {
903         if (ns == NULL) {
904                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
905                 return ELDLM_OK;
906         }
907
908         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
909                                  &flags, 0);
910         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
911                                  NULL, 0);
912         return ELDLM_OK;
913 }
914 EXPORT_SYMBOL(ldlm_namespace_cleanup);
915
916 /**
917  * Attempts to free namespace.
918  *
919  * Only used when namespace goes away, like during an unmount.
920  */
921 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
922 {
923         ENTRY;
924
925         /* At shutdown time, don't call the cancellation callback */
926         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
927
928         if (atomic_read(&ns->ns_bref) > 0) {
929                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
930                 int rc;
931                 CDEBUG(D_DLMTRACE,
932                        "dlm namespace %s free waiting on refcount %d\n",
933                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
934 force_wait:
935                 if (force)
936                         lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
937                                           MSEC_PER_SEC) / 4, NULL, NULL);
938
939                 rc = l_wait_event(ns->ns_waitq,
940                                   atomic_read(&ns->ns_bref) == 0, &lwi);
941
942                 /* Forced cleanups should be able to reclaim all references,
943                  * so it's safe to wait forever... we can't leak locks... */
944                 if (force && rc == -ETIMEDOUT) {
945                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
946                                        "namespace with %d resources in use, "
947                                        "(rc=%d)\n", ldlm_ns_name(ns),
948                                        atomic_read(&ns->ns_bref), rc);
949                         GOTO(force_wait, rc);
950                 }
951
952                 if (atomic_read(&ns->ns_bref)) {
953                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
954                                        "with %d resources in use, (rc=%d)\n",
955                                        ldlm_ns_name(ns),
956                                        atomic_read(&ns->ns_bref), rc);
957                         RETURN(ELDLM_NAMESPACE_EXISTS);
958                 }
959                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
960                        ldlm_ns_name(ns));
961         }
962
963         RETURN(ELDLM_OK);
964 }
965
966 /**
967  * Performs various cleanups for passed \a ns to make it drop refc and be
968  * ready for freeing. Waits for refc == 0.
969  *
970  * The following is done:
971  * (0) Unregister \a ns from its list to make inaccessible for potential
972  * users like pools thread and others;
973  * (1) Clear all locks in \a ns.
974  */
975 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
976                                struct obd_import *imp,
977                                int force)
978 {
979         int rc;
980         ENTRY;
981         if (!ns) {
982                 EXIT;
983                 return;
984         }
985
986         spin_lock(&ns->ns_lock);
987         ns->ns_stopping = 1;
988         spin_unlock(&ns->ns_lock);
989
990         /*
991          * Can fail with -EINTR when force == 0 in which case try harder.
992          */
993         rc = __ldlm_namespace_free(ns, force);
994         if (rc != ELDLM_OK) {
995                 if (imp) {
996                         ptlrpc_disconnect_import(imp, 0);
997                         ptlrpc_invalidate_import(imp);
998                 }
999
1000                 /*
1001                  * With all requests dropped and the import inactive
1002                  * we are gaurenteed all reference will be dropped.
1003                  */
1004                 rc = __ldlm_namespace_free(ns, 1);
1005                 LASSERT(rc == 0);
1006         }
1007         EXIT;
1008 }
1009 EXPORT_SYMBOL(ldlm_namespace_free_prior);
1010
1011 /**
1012  * Performs freeing memory structures related to \a ns. This is only done
1013  * when ldlm_namespce_free_prior() successfully removed all resources
1014  * referencing \a ns and its refc == 0.
1015  */
1016 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
1017 {
1018         ENTRY;
1019         if (!ns) {
1020                 EXIT;
1021                 return;
1022         }
1023
1024         /* Make sure that nobody can find this ns in its list. */
1025         ldlm_namespace_unregister(ns, ns->ns_client);
1026         /* Fini pool _before_ parent proc dir is removed. This is important as
1027          * ldlm_pool_fini() removes own proc dir which is child to @dir.
1028          * Removing it after @dir may cause oops. */
1029         ldlm_pool_fini(&ns->ns_pool);
1030
1031         ldlm_namespace_proc_unregister(ns);
1032         cfs_hash_putref(ns->ns_rs_hash);
1033         /* Namespace \a ns should be not on list at this time, otherwise
1034          * this will cause issues related to using freed \a ns in poold
1035          * thread. */
1036         LASSERT(list_empty(&ns->ns_list_chain));
1037         OBD_FREE_PTR(ns);
1038         ldlm_put_ref();
1039         EXIT;
1040 }
1041 EXPORT_SYMBOL(ldlm_namespace_free_post);
1042
1043 /**
1044  * Cleanup the resource, and free namespace.
1045  * bug 12864:
1046  * Deadlock issue:
1047  * proc1: destroy import
1048  *        class_disconnect_export(grab cl_sem) ->
1049  *              -> ldlm_namespace_free ->
1050  *              -> lprocfs_remove(grab _lprocfs_lock).
1051  * proc2: read proc info
1052  *        lprocfs_fops_read(grab _lprocfs_lock) ->
1053  *              -> osc_rd_active, etc(grab cl_sem).
1054  *
1055  * So that I have to split the ldlm_namespace_free into two parts - the first
1056  * part ldlm_namespace_free_prior is used to cleanup the resource which is
1057  * being used; the 2nd part ldlm_namespace_free_post is used to unregister the
1058  * lprocfs entries, and then free memory. It will be called w/o cli->cl_sem
1059  * held.
1060  */
1061 void ldlm_namespace_free(struct ldlm_namespace *ns,
1062                          struct obd_import *imp,
1063                          int force)
1064 {
1065         ldlm_namespace_free_prior(ns, imp, force);
1066         ldlm_namespace_free_post(ns);
1067 }
1068 EXPORT_SYMBOL(ldlm_namespace_free);
1069
1070 void ldlm_namespace_get(struct ldlm_namespace *ns)
1071 {
1072         atomic_inc(&ns->ns_bref);
1073 }
1074
1075 /* This is only for callers that care about refcount */
1076 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1077 {
1078         return atomic_inc_return(&ns->ns_bref);
1079 }
1080
1081 void ldlm_namespace_put(struct ldlm_namespace *ns)
1082 {
1083         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1084                 wake_up(&ns->ns_waitq);
1085                 spin_unlock(&ns->ns_lock);
1086         }
1087 }
1088
1089 /** Register \a ns in the list of namespaces */
1090 void ldlm_namespace_register(struct ldlm_namespace *ns, enum ldlm_side client)
1091 {
1092         mutex_lock(ldlm_namespace_lock(client));
1093         LASSERT(list_empty(&ns->ns_list_chain));
1094         list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
1095         ldlm_namespace_nr_inc(client);
1096         mutex_unlock(ldlm_namespace_lock(client));
1097 }
1098
1099 /** Unregister \a ns from the list of namespaces. */
1100 void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client)
1101 {
1102         mutex_lock(ldlm_namespace_lock(client));
1103         LASSERT(!list_empty(&ns->ns_list_chain));
1104         /* Some asserts and possibly other parts of the code are still
1105          * using list_empty(&ns->ns_list_chain). This is why it is
1106          * important to use list_del_init() here. */
1107         list_del_init(&ns->ns_list_chain);
1108         ldlm_namespace_nr_dec(client);
1109         mutex_unlock(ldlm_namespace_lock(client));
1110 }
1111
1112 /** Should be called with ldlm_namespace_lock(client) taken. */
1113 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1114                                           enum ldlm_side client)
1115 {
1116         LASSERT(!list_empty(&ns->ns_list_chain));
1117         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1118         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1119 }
1120
1121 /** Should be called with ldlm_namespace_lock(client) taken. */
1122 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1123                                             enum ldlm_side client)
1124 {
1125         LASSERT(!list_empty(&ns->ns_list_chain));
1126         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1127         list_move_tail(&ns->ns_list_chain,
1128                        ldlm_namespace_inactive_list(client));
1129 }
1130
1131 /** Should be called with ldlm_namespace_lock(client) taken. */
1132 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
1133 {
1134         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1135         LASSERT(!list_empty(ldlm_namespace_list(client)));
1136         return container_of(ldlm_namespace_list(client)->next,
1137                             struct ldlm_namespace, ns_list_chain);
1138 }
1139
1140 /** Create and initialize new resource. */
1141 static struct ldlm_resource *ldlm_resource_new(enum ldlm_type ldlm_type)
1142 {
1143         struct ldlm_resource *res;
1144         int idx;
1145
1146         OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1147         if (res == NULL)
1148                 return NULL;
1149
1150         if (ldlm_type == LDLM_EXTENT) {
1151                 OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
1152                                sizeof(*res->lr_itree) * LCK_MODE_NUM);
1153                 if (res->lr_itree == NULL) {
1154                         OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1155                         return NULL;
1156                 }
1157                 /* Initialize interval trees for each lock mode. */
1158                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1159                         res->lr_itree[idx].lit_size = 0;
1160                         res->lr_itree[idx].lit_mode = 1 << idx;
1161                         res->lr_itree[idx].lit_root = NULL;
1162                 }
1163         }
1164
1165         INIT_LIST_HEAD(&res->lr_granted);
1166         INIT_LIST_HEAD(&res->lr_converting);
1167         INIT_LIST_HEAD(&res->lr_waiting);
1168
1169         atomic_set(&res->lr_refcount, 1);
1170         spin_lock_init(&res->lr_lock);
1171         lu_ref_init(&res->lr_reference);
1172
1173         /* Since LVB init can be delayed now, there is no longer need to
1174          * immediatelly acquire mutex here. */
1175         mutex_init(&res->lr_lvb_mutex);
1176         res->lr_lvb_initialized = false;
1177
1178         return res;
1179 }
1180
1181 /**
1182  * Return a reference to resource with given name, creating it if necessary.
1183  * Args: namespace with ns_lock unlocked
1184  * Locks: takes and releases NS hash-lock and res->lr_lock
1185  * Returns: referenced, unlocked ldlm_resource or NULL
1186  */
1187 struct ldlm_resource *
1188 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1189                   const struct ldlm_res_id *name, enum ldlm_type type,
1190                   int create)
1191 {
1192         struct hlist_node       *hnode;
1193         struct ldlm_resource    *res = NULL;
1194         struct cfs_hash_bd              bd;
1195         __u64                   version;
1196         int                     ns_refcount = 0;
1197
1198         LASSERT(ns != NULL);
1199         LASSERT(parent == NULL);
1200         LASSERT(ns->ns_rs_hash != NULL);
1201         LASSERT(name->name[0] != 0);
1202
1203         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1204         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1205         if (hnode != NULL) {
1206                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1207                 GOTO(found, res);
1208         }
1209
1210         version = cfs_hash_bd_version_get(&bd);
1211         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1212
1213         if (create == 0)
1214                 return ERR_PTR(-ENOENT);
1215
1216         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1217                  "type: %d\n", type);
1218         res = ldlm_resource_new(type);
1219         if (res == NULL)
1220                 return ERR_PTR(-ENOMEM);
1221
1222         res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
1223         res->lr_name = *name;
1224         res->lr_type = type;
1225
1226         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1227         hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1228                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1229
1230         if (hnode != NULL) {
1231                 /* Someone won the race and already added the resource. */
1232                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1233                 /* Clean lu_ref for failed resource. */
1234                 lu_ref_fini(&res->lr_reference);
1235                 if (res->lr_itree != NULL)
1236                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1237                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1238                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1239 found:
1240                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1241                 return res;
1242         }
1243         /* We won! Let's add the resource. */
1244         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1245         if (cfs_hash_bd_count_get(&bd) == 1)
1246                 ns_refcount = ldlm_namespace_get_return(ns);
1247
1248         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1249
1250         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1251
1252         /* Let's see if we happened to be the very first resource in this
1253          * namespace. If so, and this is a client namespace, we need to move
1254          * the namespace into the active namespaces list to be patrolled by
1255          * the ldlm_poold. */
1256         if (ns_is_client(ns) && ns_refcount == 1) {
1257                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1258                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1259                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1260         }
1261
1262         return res;
1263 }
1264 EXPORT_SYMBOL(ldlm_resource_get);
1265
1266 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1267 {
1268         LASSERT(res != NULL);
1269         LASSERT(res != LP_POISON);
1270         atomic_inc(&res->lr_refcount);
1271         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1272                atomic_read(&res->lr_refcount));
1273         return res;
1274 }
1275
1276 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1277                                          struct ldlm_resource *res)
1278 {
1279         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1280
1281         if (!list_empty(&res->lr_granted)) {
1282                 ldlm_resource_dump(D_ERROR, res);
1283                 LBUG();
1284         }
1285
1286         if (!list_empty(&res->lr_converting)) {
1287                 ldlm_resource_dump(D_ERROR, res);
1288                 LBUG();
1289         }
1290
1291         if (!list_empty(&res->lr_waiting)) {
1292                 ldlm_resource_dump(D_ERROR, res);
1293                 LBUG();
1294         }
1295
1296         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1297                                bd, &res->lr_hash);
1298         lu_ref_fini(&res->lr_reference);
1299         if (cfs_hash_bd_count_get(bd) == 0)
1300                 ldlm_namespace_put(nsb->nsb_namespace);
1301 }
1302
1303 /* Returns 1 if the resource was freed, 0 if it remains. */
1304 int ldlm_resource_putref(struct ldlm_resource *res)
1305 {
1306         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1307         struct cfs_hash_bd   bd;
1308
1309         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1310         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1311                res, atomic_read(&res->lr_refcount) - 1);
1312
1313         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1314         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1315                 __ldlm_resource_putref_final(&bd, res);
1316                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1317                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1318                         ns->ns_lvbo->lvbo_free(res);
1319                 if (res->lr_itree != NULL)
1320                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1321                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1322                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1323                 return 1;
1324         }
1325         return 0;
1326 }
1327 EXPORT_SYMBOL(ldlm_resource_putref);
1328
1329 /* Returns 1 if the resource was freed, 0 if it remains. */
1330 int ldlm_resource_putref_locked(struct ldlm_resource *res)
1331 {
1332         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1333
1334         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1335         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1336                res, atomic_read(&res->lr_refcount) - 1);
1337
1338         if (atomic_dec_and_test(&res->lr_refcount)) {
1339                 struct cfs_hash_bd bd;
1340
1341                 cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
1342                                 &res->lr_name, &bd);
1343                 __ldlm_resource_putref_final(&bd, res);
1344                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1345                 /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
1346                  * so we should never be here while calling cfs_hash_del,
1347                  * cfs_hash_for_each_nolock is the only case we can get
1348                  * here, which is safe to release cfs_hash_bd_lock.
1349                  */
1350                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1351                         ns->ns_lvbo->lvbo_free(res);
1352                 if (res->lr_itree != NULL)
1353                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1354                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1355                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1356
1357                 cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1358                 return 1;
1359         }
1360         return 0;
1361 }
1362
1363 /**
1364  * Add a lock into a given resource into specified lock list.
1365  */
1366 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1367                             struct ldlm_lock *lock)
1368 {
1369         check_res_locked(res);
1370
1371         LDLM_DEBUG(lock, "About to add this lock:\n");
1372
1373         if (ldlm_is_destroyed(lock)) {
1374                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1375                 return;
1376         }
1377
1378         LASSERT(list_empty(&lock->l_res_link));
1379
1380         list_add_tail(&lock->l_res_link, head);
1381 }
1382
1383 /**
1384  * Insert a lock into resource after specified lock.
1385  *
1386  * Obtain resource description from the lock we are inserting after.
1387  */
1388 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1389                                      struct ldlm_lock *new)
1390 {
1391         struct ldlm_resource *res = original->l_resource;
1392
1393         check_res_locked(res);
1394
1395         ldlm_resource_dump(D_INFO, res);
1396         LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
1397
1398         if (ldlm_is_destroyed(new)) {
1399                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1400                 goto out;
1401         }
1402
1403         LASSERT(list_empty(&new->l_res_link));
1404
1405         list_add(&new->l_res_link, &original->l_res_link);
1406  out:;
1407 }
1408
1409 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1410 {
1411         int type = lock->l_resource->lr_type;
1412
1413         check_res_locked(lock->l_resource);
1414         if (type == LDLM_IBITS || type == LDLM_PLAIN)
1415                 ldlm_unlink_lock_skiplist(lock);
1416         else if (type == LDLM_EXTENT)
1417                 ldlm_extent_unlink_lock(lock);
1418         list_del_init(&lock->l_res_link);
1419 }
1420 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1421
1422 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1423 {
1424         desc->lr_type = res->lr_type;
1425         desc->lr_name = res->lr_name;
1426 }
1427
1428 /**
1429  * Print information about all locks in all namespaces on this node to debug
1430  * log.
1431  */
1432 void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
1433 {
1434         struct list_head *tmp;
1435
1436         if (!((libcfs_debug | D_ERROR) & level))
1437                 return;
1438
1439         mutex_lock(ldlm_namespace_lock(client));
1440
1441         list_for_each(tmp, ldlm_namespace_list(client)) {
1442                 struct ldlm_namespace *ns;
1443
1444                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1445                 ldlm_namespace_dump(level, ns);
1446         }
1447
1448         mutex_unlock(ldlm_namespace_lock(client));
1449 }
1450
1451 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1452                               struct hlist_node *hnode, void *arg)
1453 {
1454         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1455         int    level = (int)(unsigned long)arg;
1456
1457         lock_res(res);
1458         ldlm_resource_dump(level, res);
1459         unlock_res(res);
1460
1461         return 0;
1462 }
1463
1464 /**
1465  * Print information about all locks in this namespace on this node to debug
1466  * log.
1467  */
1468 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1469 {
1470         if (!((libcfs_debug | D_ERROR) & level))
1471                 return;
1472
1473         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1474                ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1475                ns_is_client(ns) ? "client" : "server");
1476
1477         if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
1478                 return;
1479
1480         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1481                                  ldlm_res_hash_dump,
1482                                  (void *)(unsigned long)level, 0);
1483         spin_lock(&ns->ns_lock);
1484         ns->ns_next_dump = cfs_time_shift(10);
1485         spin_unlock(&ns->ns_lock);
1486 }
1487
1488 /**
1489  * Print information about all locks in this resource to debug log.
1490  */
1491 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1492 {
1493         struct ldlm_lock *lock;
1494         unsigned int granted = 0;
1495
1496         CLASSERT(RES_NAME_SIZE == 4);
1497
1498         if (!((libcfs_debug | D_ERROR) & level))
1499                 return;
1500
1501         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1502                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1503
1504         if (!list_empty(&res->lr_granted)) {
1505                 CDEBUG(level, "Granted locks (in reverse order):\n");
1506                 list_for_each_entry_reverse(lock, &res->lr_granted,
1507                                                 l_res_link) {
1508                         LDLM_DEBUG_LIMIT(level, lock, "###");
1509                         if (!(level & D_CANTMASK) &&
1510                             ++granted > ldlm_dump_granted_max) {
1511                                 CDEBUG(level, "only dump %d granted locks to "
1512                                        "avoid DDOS.\n", granted);
1513                                 break;
1514                         }
1515                 }
1516         }
1517         if (!list_empty(&res->lr_converting)) {
1518                 CDEBUG(level, "Converting locks:\n");
1519                 list_for_each_entry(lock, &res->lr_converting, l_res_link)
1520                         LDLM_DEBUG_LIMIT(level, lock, "###");
1521         }
1522         if (!list_empty(&res->lr_waiting)) {
1523                 CDEBUG(level, "Waiting locks:\n");
1524                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1525                         LDLM_DEBUG_LIMIT(level, lock, "###");
1526         }
1527 }
1528 EXPORT_SYMBOL(ldlm_resource_dump);