1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2015, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_resource.c
37  *
38  * Author: Phil Schwan <phil@clusterfs.com>
39  * Author: Peter Braam <braam@clusterfs.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43 #include <lustre_dlm.h>
44 #include <lustre_fid.h>
45 #include <obd_class.h>
46 #include "ldlm_internal.h"
47
48 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
49 struct kmem_cache *ldlm_interval_tree_slab;
50
51 int ldlm_srv_namespace_nr = 0;
52 int ldlm_cli_namespace_nr = 0;
53
54 struct mutex ldlm_srv_namespace_lock;
55 struct list_head ldlm_srv_namespace_list;
56
57 struct mutex ldlm_cli_namespace_lock;
58 /* Client Namespaces that have active resources in them.
59  * Once all resources go away, ldlm_poold moves such namespaces to the
60  * inactive list */
61 struct list_head ldlm_cli_active_namespace_list;
62 /* Client namespaces that don't have any locks in them */
63 struct list_head ldlm_cli_inactive_namespace_list;
64
65 static struct proc_dir_entry *ldlm_type_proc_dir;
66 static struct proc_dir_entry *ldlm_ns_proc_dir;
67 struct proc_dir_entry *ldlm_svc_proc_dir;
68
69 /* During a debug dump, print at most this many granted locks for one resource
70  * to avoid flooding the log (DDoS). */
71 static unsigned int ldlm_dump_granted_max = 256;
72
73 #ifdef CONFIG_PROC_FS
74 static ssize_t
75 lprocfs_dump_ns_seq_write(struct file *file, const char __user *buffer,
76                           size_t count, loff_t *off)
77 {
78         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
79         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
80         RETURN(count);
81 }
82 LPROC_SEQ_FOPS_WO_TYPE(ldlm, dump_ns);
83
84 LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
85 LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint);
86
87 #ifdef HAVE_SERVER_SUPPORT
88
89 static int seq_watermark_show(struct seq_file *m, void *data)
90 {
91         seq_printf(m, LPU64"\n", *(__u64 *)m->private);
92         return 0;
93 }
94
95 static ssize_t seq_watermark_write(struct file *file,
96                                    const char __user *buffer, size_t count,
97                                    loff_t *off)
98 {
99         __u64 watermark;
100         __u64 *data = ((struct seq_file *)file->private_data)->private;
101         bool wm_low = (data == &ldlm_reclaim_threshold_mb);
102         int rc;
103
104         rc = lprocfs_write_frac_u64_helper(buffer, count, &watermark, 1 << 20);
105         if (rc) {
106                 CERROR("Failed to set %s, rc = %d.\n",
107                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb",
108                        rc);
109                 return rc;
110         } else if (watermark != 0 && watermark < (1 << 20)) {
111                 CERROR("%s should be greater than 1MB.\n",
112                        wm_low ? "lock_reclaim_threshold_mb" : "lock_limit_mb");
113                 return -EINVAL;
114         }
115         watermark >>= 20;
116
117         if (wm_low) {
118                 if (ldlm_lock_limit_mb != 0 && watermark > ldlm_lock_limit_mb) {
119                         CERROR("lock_reclaim_threshold_mb must be smaller than "
120                                "lock_limit_mb.\n");
121                         return -EINVAL;
122                 }
123
124                 *data = watermark;
125                 if (watermark != 0) {
126                         watermark <<= 20;
127                         do_div(watermark, sizeof(struct ldlm_lock));
128                 }
129                 ldlm_reclaim_threshold = watermark;
130         } else {
131                 if (ldlm_reclaim_threshold_mb != 0 &&
132                     watermark < ldlm_reclaim_threshold_mb) {
133                         CERROR("lock_limit_mb must be greater than "
134                                "lock_reclaim_threshold_mb.\n");
135                         return -EINVAL;
136                 }
137
138                 *data = watermark;
139                 if (watermark != 0) {
140                         watermark <<= 20;
141                         do_div(watermark, sizeof(struct ldlm_lock));
142                 }
143                 ldlm_lock_limit = watermark;
144         }
145
146         return count;
147 }
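
/*
 * Worked example (illustrative only): writing "1024" to lock_limit_mb is
 * parsed by lprocfs_write_frac_u64_helper() as 1024 << 20 bytes; the value
 * stored back in ldlm_lock_limit_mb is 1024 (MB), while ldlm_lock_limit is
 * set to (1024 << 20) / sizeof(struct ldlm_lock), i.e. the number of
 * ldlm_lock structures that fit into 1024 MB.
 */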
148
149 static int seq_watermark_open(struct inode *inode, struct file *file)
150 {
151         return single_open(file, seq_watermark_show, PDE_DATA(inode));
152 }
153
154 static const struct file_operations ldlm_watermark_fops = {
155         .owner          = THIS_MODULE,
156         .open           = seq_watermark_open,
157         .read           = seq_read,
158         .write          = seq_watermark_write,
159         .llseek         = seq_lseek,
160         .release        = lprocfs_single_release,
161 };
162
163 static int seq_granted_show(struct seq_file *m, void *data)
164 {
165         seq_printf(m, LPU64"\n", percpu_counter_sum_positive(
166                    (struct percpu_counter *)m->private));
167         return 0;
168 }
169
170 static int seq_granted_open(struct inode *inode, struct file *file)
171 {
172         return single_open(file, seq_granted_show, PDE_DATA(inode));
173 }
174
175 static const struct file_operations ldlm_granted_fops = {
176         .owner  = THIS_MODULE,
177         .open   = seq_granted_open,
178         .read   = seq_read,
179         .llseek = seq_lseek,
180         .release = seq_release,
181 };
182
183 #endif /* HAVE_SERVER_SUPPORT */
184
185 int ldlm_proc_setup(void)
186 {
187         int rc;
188         struct lprocfs_vars list[] = {
189                 { .name =       "dump_namespaces",
190                   .fops =       &ldlm_dump_ns_fops,
191                   .proc_mode =  0222 },
192                 { .name =       "dump_granted_max",
193                   .fops =       &ldlm_rw_uint_fops,
194                   .data =       &ldlm_dump_granted_max },
195                 { .name =       "cancel_unused_locks_before_replay",
196                   .fops =       &ldlm_rw_uint_fops,
197                   .data =       &ldlm_cancel_unused_locks_before_replay },
198 #ifdef HAVE_SERVER_SUPPORT
199                 { .name =       "lock_reclaim_threshold_mb",
200                   .fops =       &ldlm_watermark_fops,
201                   .data =       &ldlm_reclaim_threshold_mb },
202                 { .name =       "lock_limit_mb",
203                   .fops =       &ldlm_watermark_fops,
204                   .data =       &ldlm_lock_limit_mb },
205                 { .name =       "lock_granted_count",
206                   .fops =       &ldlm_granted_fops,
207                   .data =       &ldlm_granted_total },
208 #endif
209                 { NULL }};
210         ENTRY;
211         LASSERT(ldlm_ns_proc_dir == NULL);
212
213         ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
214                                               proc_lustre_root,
215                                               NULL, NULL);
216         if (IS_ERR(ldlm_type_proc_dir)) {
217                 CERROR("LProcFS failed in ldlm-init\n");
218                 rc = PTR_ERR(ldlm_type_proc_dir);
219                 GOTO(err, rc);
220         }
221
222         ldlm_ns_proc_dir = lprocfs_register("namespaces",
223                                             ldlm_type_proc_dir,
224                                             NULL, NULL);
225         if (IS_ERR(ldlm_ns_proc_dir)) {
226                 CERROR("LProcFS failed in ldlm-init\n");
227                 rc = PTR_ERR(ldlm_ns_proc_dir);
228                 GOTO(err_type, rc);
229         }
230
231         ldlm_svc_proc_dir = lprocfs_register("services",
232                                              ldlm_type_proc_dir,
233                                              NULL, NULL);
234         if (IS_ERR(ldlm_svc_proc_dir)) {
235                 CERROR("LProcFS failed in ldlm-init\n");
236                 rc = PTR_ERR(ldlm_svc_proc_dir);
237                 GOTO(err_ns, rc);
238         }
239
240         rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
241         if (rc != 0) {
242                 CERROR("LProcFS failed in ldlm-init\n");
243                 GOTO(err_svc, rc);
244         }
245
246         RETURN(0);
247
248 err_svc:
249         lprocfs_remove(&ldlm_svc_proc_dir);
250 err_ns:
251         lprocfs_remove(&ldlm_ns_proc_dir);
252 err_type:
253         lprocfs_remove(&ldlm_type_proc_dir);
254 err:
255         ldlm_svc_proc_dir = NULL;
256         RETURN(rc);
257 }
258
259 void ldlm_proc_cleanup(void)
260 {
261         if (ldlm_svc_proc_dir)
262                 lprocfs_remove(&ldlm_svc_proc_dir);
263
264         if (ldlm_ns_proc_dir)
265                 lprocfs_remove(&ldlm_ns_proc_dir);
266
267         if (ldlm_type_proc_dir)
268                 lprocfs_remove(&ldlm_type_proc_dir);
269 }
270
271 static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
272 {
273         struct ldlm_namespace   *ns  = m->private;
274         __u64                   res = 0;
275         struct cfs_hash_bd              bd;
276         int                     i;
277
278         /* result is not strictly consistent */
279         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
280                 res += cfs_hash_bd_count_get(&bd);
281         return lprocfs_u64_seq_show(m, &res);
282 }
283 LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);
284
285 static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
286 {
287         struct ldlm_namespace   *ns = m->private;
288         __u64                   locks;
289
290         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
291                                         LPROCFS_FIELDS_FLAGS_SUM);
292         return lprocfs_u64_seq_show(m, &locks);
293 }
294 LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);
295
296 static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
297 {
298         struct ldlm_namespace *ns = m->private;
299         __u32 *nr = &ns->ns_max_unused;
300
301         if (ns_connect_lru_resize(ns))
302                 nr = &ns->ns_nr_unused;
303         return lprocfs_uint_seq_show(m, nr);
304 }
305
306 static ssize_t lprocfs_lru_size_seq_write(struct file *file,
307                                           const char __user *buffer,
308                                           size_t count, loff_t *off)
309 {
310         struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
311         char dummy[MAX_STRING_SIZE + 1], *end;
312         unsigned long tmp;
313         int lru_resize;
314
315         dummy[MAX_STRING_SIZE] = '\0';
316         if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
317                 return -EFAULT;
318
319         if (strncmp(dummy, "clear", 5) == 0) {
320                 CDEBUG(D_DLMTRACE,
321                        "dropping all unused locks from namespace %s\n",
322                        ldlm_ns_name(ns));
323                 if (ns_connect_lru_resize(ns)) {
324                         int canceled, unused  = ns->ns_nr_unused;
325
326                         /* Try to cancel all @ns_nr_unused locks. */
327                         canceled = ldlm_cancel_lru(ns, unused, 0,
328                                                    LDLM_LRU_FLAG_PASSED);
329                         if (canceled < unused) {
330                                 CDEBUG(D_DLMTRACE,
331                                        "not all requested locks are canceled, "
332                                        "requested: %d, canceled: %d\n", unused,
333                                        canceled);
334                                 return -EINVAL;
335                         }
336                 } else {
337                         tmp = ns->ns_max_unused;
338                         ns->ns_max_unused = 0;
339                         ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
340                         ns->ns_max_unused = tmp;
341                 }
342                 return count;
343         }
344
345         tmp = simple_strtoul(dummy, &end, 0);
346         if (dummy == end) {
347                 CERROR("invalid value written\n");
348                 return -EINVAL;
349         }
350         lru_resize = (tmp == 0);
351
352         if (ns_connect_lru_resize(ns)) {
353                 if (!lru_resize)
354                         ns->ns_max_unused = tmp;
355
356                 if (tmp > ns->ns_nr_unused)
357                         tmp = ns->ns_nr_unused;
358                 tmp = ns->ns_nr_unused - tmp;
359
360                 CDEBUG(D_DLMTRACE,
361                        "changing namespace %s unused locks from %u to %u\n",
362                        ldlm_ns_name(ns), ns->ns_nr_unused,
363                        (unsigned int)tmp);
364                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
365
366                 if (!lru_resize) {
367                         CDEBUG(D_DLMTRACE,
368                                "disable lru_resize for namespace %s\n",
369                                ldlm_ns_name(ns));
370                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
371                 }
372         } else {
373                 CDEBUG(D_DLMTRACE,
374                        "changing namespace %s max_unused from %u to %u\n",
375                        ldlm_ns_name(ns), ns->ns_max_unused,
376                        (unsigned int)tmp);
377                 ns->ns_max_unused = (unsigned int)tmp;
378                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
379
380                 /* Make sure that LRU resize was originally supported before
381                  * turning it on here. */
382                 if (lru_resize &&
383                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
384                         CDEBUG(D_DLMTRACE,
385                                "enable lru_resize for namespace %s\n",
386                                ldlm_ns_name(ns));
387                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
388                 }
389         }
390
391         return count;
392 }
393 LPROC_SEQ_FOPS(lprocfs_lru_size);
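
/*
 * Usage sketch (illustrative only; the namespace names vary per target and
 * the lctl syntax shown here is the usual administrative front end to this
 * proc file, not something defined in this file):
 *
 *      # drop all unused locks from every namespace
 *      lctl set_param ldlm.namespaces.*.lru_size=clear
 *      # cap the LRU of one namespace at 400 unused locks
 *      lctl set_param ldlm.namespaces.<fsname>-OST0000-osc-*.lru_size=400
 *
 * Writing a non-zero value disables LRU resizing for that namespace; writing
 * 0 re-enables it if the server originally advertised OBD_CONNECT_LRU_RESIZE.
 */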
394
395 static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
396 {
397         struct ldlm_namespace *ns = m->private;
398         unsigned int supp = ns_connect_cancelset(ns);
399
400         return lprocfs_uint_seq_show(m, &supp);
401 }
402
403 static ssize_t lprocfs_elc_seq_write(struct file *file,
404                                      const char __user *buffer,
405                                      size_t count, loff_t *off)
406 {
407         struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
408         unsigned int supp = -1;
409         int rc;
410
411         rc = lprocfs_wr_uint(file, buffer, count, &supp);
412         if (rc < 0)
413                 return rc;
414
415         if (supp == 0)
416                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
417         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
418                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
419         return count;
420 }
421 LPROC_SEQ_FOPS(lprocfs_elc);
422
423 static void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
424 {
425         if (ns->ns_proc_dir_entry == NULL)
426                 CERROR("dlm namespace %s has no procfs dir?\n",
427                        ldlm_ns_name(ns));
428         else
429                 lprocfs_remove(&ns->ns_proc_dir_entry);
430
431         if (ns->ns_stats != NULL)
432                 lprocfs_free_stats(&ns->ns_stats);
433 }
434
435 static int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
436 {
437         struct lprocfs_vars lock_vars[2];
438         char lock_name[MAX_STRING_SIZE + 1];
439         struct proc_dir_entry *ns_pde;
440
441         LASSERT(ns != NULL);
442         LASSERT(ns->ns_rs_hash != NULL);
443
444         if (ns->ns_proc_dir_entry != NULL) {
445                 ns_pde = ns->ns_proc_dir_entry;
446         } else {
447                 ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir);
448                 if (ns_pde == NULL)
449                         return -ENOMEM;
450                 ns->ns_proc_dir_entry = ns_pde;
451         }
452
453         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
454         if (ns->ns_stats == NULL)
455                 return -ENOMEM;
456
457         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
458                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
459
460         lock_name[MAX_STRING_SIZE] = '\0';
461
462         memset(lock_vars, 0, sizeof(lock_vars));
463         lock_vars[0].name = lock_name;
464
465         ldlm_add_var(&lock_vars[0], ns_pde, "resource_count", ns,
466                      &lprocfs_ns_resources_fops);
467         ldlm_add_var(&lock_vars[0], ns_pde, "lock_count", ns,
468                      &lprocfs_ns_locks_fops);
469
470         if (ns_is_client(ns)) {
471                 ldlm_add_var(&lock_vars[0], ns_pde, "lock_unused_count",
472                              &ns->ns_nr_unused, &ldlm_uint_fops);
473                 ldlm_add_var(&lock_vars[0], ns_pde, "lru_size", ns,
474                              &lprocfs_lru_size_fops);
475                 ldlm_add_var(&lock_vars[0], ns_pde, "lru_max_age",
476                              &ns->ns_max_age, &ldlm_rw_uint_fops);
477                 ldlm_add_var(&lock_vars[0], ns_pde, "early_lock_cancel",
478                              ns, &lprocfs_elc_fops);
479         } else {
480                 ldlm_add_var(&lock_vars[0], ns_pde, "ctime_age_limit",
481                              &ns->ns_ctime_age_limit, &ldlm_rw_uint_fops);
482                 ldlm_add_var(&lock_vars[0], ns_pde, "lock_timeouts",
483                              &ns->ns_timeouts, &ldlm_uint_fops);
484                 ldlm_add_var(&lock_vars[0], ns_pde, "max_nolock_bytes",
485                              &ns->ns_max_nolock_size, &ldlm_rw_uint_fops);
486                 ldlm_add_var(&lock_vars[0], ns_pde, "contention_seconds",
487                              &ns->ns_contention_time, &ldlm_rw_uint_fops);
488                 ldlm_add_var(&lock_vars[0], ns_pde, "contended_locks",
489                              &ns->ns_contended_locks, &ldlm_rw_uint_fops);
490                 ldlm_add_var(&lock_vars[0], ns_pde, "max_parallel_ast",
491                              &ns->ns_max_parallel_ast, &ldlm_rw_uint_fops);
492         }
493         return 0;
494 }
495 #undef MAX_STRING_SIZE
496 #else /* CONFIG_PROC_FS */
497
498 #define ldlm_namespace_proc_unregister(ns)      ({;})
499 #define ldlm_namespace_proc_register(ns)        ({0;})
500
501 #endif /* CONFIG_PROC_FS */
502
503 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
504                                   const void *key, unsigned mask)
505 {
506         const struct ldlm_res_id     *id  = key;
507         unsigned                val = 0;
508         unsigned                i;
509
510         for (i = 0; i < RES_NAME_SIZE; i++)
511                 val += id->name[i];
512         return val & mask;
513 }
514
515 static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
516                                       const void *key, unsigned mask)
517 {
518         const struct ldlm_res_id *id = key;
519         struct lu_fid       fid;
520         __u32               hash;
521         __u32               val;
522
523         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
524         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
525         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
526
527         hash = fid_flatten32(&fid);
528         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
529         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
530                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
531                 hash += (val >> 5) + (val << 11);
532         } else {
533                 val = fid_oid(&fid);
534         }
535         hash = hash_long(hash, hs->hs_bkt_bits);
536         /* give me another random factor */
537         hash -= hash_long((unsigned long)hs, val % 11 + 3);
538
539         hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
540         hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
541
542         return hash & mask;
543 }
544
545 static void *ldlm_res_hop_key(struct hlist_node *hnode)
546 {
547         struct ldlm_resource   *res;
548
549         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
550         return &res->lr_name;
551 }
552
553 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
554 {
555         struct ldlm_resource   *res;
556
557         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
558         return ldlm_res_eq((const struct ldlm_res_id *)key,
559                            (const struct ldlm_res_id *)&res->lr_name);
560 }
561
562 static void *ldlm_res_hop_object(struct hlist_node *hnode)
563 {
564         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
565 }
566
567 static void
568 ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
569 {
570         struct ldlm_resource *res;
571
572         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
573         ldlm_resource_getref(res);
574 }
575
576 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
577 {
578         struct ldlm_resource *res;
579
580         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
581         ldlm_resource_putref(res);
582 }
583
584 static struct cfs_hash_ops ldlm_ns_hash_ops = {
585         .hs_hash        = ldlm_res_hop_hash,
586         .hs_key         = ldlm_res_hop_key,
587         .hs_keycmp      = ldlm_res_hop_keycmp,
588         .hs_keycpy      = NULL,
589         .hs_object      = ldlm_res_hop_object,
590         .hs_get         = ldlm_res_hop_get_locked,
591         .hs_put         = ldlm_res_hop_put
592 };
593
594 static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
595         .hs_hash        = ldlm_res_hop_fid_hash,
596         .hs_key         = ldlm_res_hop_key,
597         .hs_keycmp      = ldlm_res_hop_keycmp,
598         .hs_keycpy      = NULL,
599         .hs_object      = ldlm_res_hop_object,
600         .hs_get         = ldlm_res_hop_get_locked,
601         .hs_put         = ldlm_res_hop_put
602 };
603
604 typedef struct ldlm_ns_hash_def {
605         enum ldlm_ns_type       nsd_type;
606         /** hash bucket bits */
607         unsigned                nsd_bkt_bits;
608         /** hash bits */
609         unsigned                nsd_all_bits;
610         /** hash operations */
611         struct cfs_hash_ops *nsd_hops;
612 } ldlm_ns_hash_def_t;
613
614 static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] =
615 {
616         {
617                 .nsd_type       = LDLM_NS_TYPE_MDC,
618                 .nsd_bkt_bits   = 11,
619                 .nsd_all_bits   = 16,
620                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
621         },
622         {
623                 .nsd_type       = LDLM_NS_TYPE_MDT,
624                 .nsd_bkt_bits   = 14,
625                 .nsd_all_bits   = 21,
626                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
627         },
628         {
629                 .nsd_type       = LDLM_NS_TYPE_OSC,
630                 .nsd_bkt_bits   = 8,
631                 .nsd_all_bits   = 12,
632                 .nsd_hops       = &ldlm_ns_hash_ops,
633         },
634         {
635                 .nsd_type       = LDLM_NS_TYPE_OST,
636                 .nsd_bkt_bits   = 11,
637                 .nsd_all_bits   = 17,
638                 .nsd_hops       = &ldlm_ns_hash_ops,
639         },
640         {
641                 .nsd_type       = LDLM_NS_TYPE_MGC,
642                 .nsd_bkt_bits   = 4,
643                 .nsd_all_bits   = 4,
644                 .nsd_hops       = &ldlm_ns_hash_ops,
645         },
646         {
647                 .nsd_type       = LDLM_NS_TYPE_MGT,
648                 .nsd_bkt_bits   = 4,
649                 .nsd_all_bits   = 4,
650                 .nsd_hops       = &ldlm_ns_hash_ops,
651         },
652         {
653                 .nsd_type       = LDLM_NS_TYPE_UNKNOWN,
654         },
655 };
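
/*
 * Sizing sketch (illustrative; it assumes the usual libcfs convention that a
 * cfs_hash with nsd_all_bits total bits is split into
 * 2^(nsd_all_bits - nsd_bkt_bits) buckets): an MDC namespace gets a
 * 2^16-entry resource hash spread over 2^(16 - 11) = 32 buckets, while an
 * MGC or MGT namespace uses a single 2^4-entry bucket.
 */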
656
657 /**
658  * Create and initialize a new empty namespace.
659  */
660 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
661                                           enum ldlm_side client,
662                                           enum ldlm_appetite apt,
663                                           enum ldlm_ns_type ns_type)
664 {
665         struct ldlm_namespace *ns = NULL;
666         struct ldlm_ns_bucket *nsb;
667         struct ldlm_ns_hash_def *nsd;
668         struct cfs_hash_bd bd;
669         int idx;
670         int rc;
671         ENTRY;
672
673         LASSERT(obd != NULL);
674
675         rc = ldlm_get_ref();
676         if (rc) {
677                 CERROR("ldlm_get_ref failed: %d\n", rc);
678                 RETURN(NULL);
679         }
680
681         for (idx = 0;;idx++) {
682                 nsd = &ldlm_ns_hash_defs[idx];
683                 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
684                         CERROR("Unknown type %d for ns %s\n", ns_type, name);
685                         GOTO(out_ref, NULL);
686                 }
687
688                 if (nsd->nsd_type == ns_type)
689                         break;
690         }
691
692         OBD_ALLOC_PTR(ns);
693         if (!ns)
694                 GOTO(out_ref, NULL);
695
696         ns->ns_rs_hash = cfs_hash_create(name,
697                                          nsd->nsd_all_bits, nsd->nsd_all_bits,
698                                          nsd->nsd_bkt_bits, sizeof(*nsb),
699                                          CFS_HASH_MIN_THETA,
700                                          CFS_HASH_MAX_THETA,
701                                          nsd->nsd_hops,
702                                          CFS_HASH_DEPTH |
703                                          CFS_HASH_BIGNAME |
704                                          CFS_HASH_SPIN_BKTLOCK |
705                                          CFS_HASH_NO_ITEMREF);
706         if (ns->ns_rs_hash == NULL)
707                 GOTO(out_ns, NULL);
708
709         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
710                 nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
711                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
712                 nsb->nsb_namespace = ns;
713                 nsb->nsb_reclaim_start = 0;
714         }
715
716         ns->ns_obd      = obd;
717         ns->ns_appetite = apt;
718         ns->ns_client   = client;
719
720         INIT_LIST_HEAD(&ns->ns_list_chain);
721         INIT_LIST_HEAD(&ns->ns_unused_list);
722         spin_lock_init(&ns->ns_lock);
723         atomic_set(&ns->ns_bref, 0);
724         init_waitqueue_head(&ns->ns_waitq);
725
726         ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
727         ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
728         ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
729
730         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
731         ns->ns_nr_unused          = 0;
732         ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
733         ns->ns_max_age            = LDLM_DEFAULT_MAX_ALIVE;
734         ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
735         ns->ns_timeouts           = 0;
736         ns->ns_orig_connect_flags = 0;
737         ns->ns_connect_flags      = 0;
738         ns->ns_stopping           = 0;
739         ns->ns_reclaim_start      = 0;
740         rc = ldlm_namespace_proc_register(ns);
741         if (rc != 0) {
742                 CERROR("Can't initialize ns proc, rc %d\n", rc);
743                 GOTO(out_hash, rc);
744         }
745
746         idx = ldlm_namespace_nr_read(client);
747         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
748         if (rc) {
749                 CERROR("Can't initialize lock pool, rc %d\n", rc);
750                 GOTO(out_proc, rc);
751         }
752
753         ldlm_namespace_register(ns, client);
754         RETURN(ns);
755 out_proc:
756         ldlm_namespace_proc_unregister(ns);
757         ldlm_namespace_cleanup(ns, 0);
758 out_hash:
759         cfs_hash_putref(ns->ns_rs_hash);
760 out_ns:
761         OBD_FREE_PTR(ns);
762 out_ref:
763         ldlm_put_ref();
764         RETURN(NULL);
765 }
766 EXPORT_SYMBOL(ldlm_namespace_new);
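
/*
 * Usage sketch (illustrative only; "obd", "imp" and the "example-osc" name
 * are placeholders supplied by the caller, and LDLM_NAMESPACE_GREEDY is just
 * one of the possible appetite values):
 *
 *      struct ldlm_namespace *ns;
 *
 *      ns = ldlm_namespace_new(obd, "example-osc", LDLM_NAMESPACE_CLIENT,
 *                              LDLM_NAMESPACE_GREEDY, LDLM_NS_TYPE_OSC);
 *      if (ns == NULL)
 *              return -ENOMEM;
 *      ...
 *      ldlm_namespace_free(ns, imp, 0);
 */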
767
768 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
769
770 /**
771  * Cancel and destroy all locks on a resource.
772  *
773  * If flags contains LDLM_FL_LOCAL_ONLY, don't try to tell the server, just
774  * clean up.  This is currently only used for recovery, and we make
775  * certain assumptions as a result--notably, that we shouldn't cancel
776  * locks with refs.
777  */
778 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
779                              __u64 flags)
780 {
781         struct list_head *tmp;
782         int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
783         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
784
785         do {
786                 struct ldlm_lock *lock = NULL;
787
788                 /* First, look for a lock that has not been cleaned yet;
789                  * all cleaned locks are marked by the CLEANED flag. */
790                 lock_res(res);
791                 list_for_each(tmp, q) {
792                         lock = list_entry(tmp, struct ldlm_lock,
793                                           l_res_link);
794                         if (ldlm_is_cleaned(lock)) {
795                                 lock = NULL;
796                                 continue;
797                         }
798                         LDLM_LOCK_GET(lock);
799                         ldlm_set_cleaned(lock);
800                         break;
801                 }
802
803                 if (lock == NULL) {
804                         unlock_res(res);
805                         break;
806                 }
807
808                 /* Set CBPENDING so nothing in the cancellation path
809                  * can match this lock. */
810                 ldlm_set_cbpending(lock);
811                 ldlm_set_failed(lock);
812                 lock->l_flags |= flags;
813
814                 /* ... without sending a CANCEL message for local_only. */
815                 if (local_only)
816                         ldlm_set_local_only(lock);
817
818                 if (local_only && (lock->l_readers || lock->l_writers)) {
819                         /* This is a little bit gross, but much better than the
820                          * alternative: pretend that we got a blocking AST from
821                          * the server, so that when the lock is decref'd, it
822                          * will go away ... */
823                         unlock_res(res);
824                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
825                         if (lock->l_flags & LDLM_FL_FAIL_LOC) {
826                                 set_current_state(TASK_UNINTERRUPTIBLE);
827                                 schedule_timeout(cfs_time_seconds(4));
828                                 set_current_state(TASK_RUNNING);
829                         }
830                         if (lock->l_completion_ast)
831                                 lock->l_completion_ast(lock,
832                                                        LDLM_FL_FAILED, NULL);
833                         LDLM_LOCK_RELEASE(lock);
834                         continue;
835                 }
836
837                 if (client) {
838                         struct lustre_handle lockh;
839
840                         unlock_res(res);
841                         ldlm_lock2handle(lock, &lockh);
842                         rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
843                         if (rc)
844                                 CERROR("ldlm_cli_cancel: %d\n", rc);
845                 } else {
846                         unlock_res(res);
847                         LDLM_DEBUG(lock, "Freeing a lock still held by a "
848                                    "client node");
849                         ldlm_lock_cancel(lock);
850                 }
851                 LDLM_LOCK_RELEASE(lock);
852         } while (1);
853 }
854
855 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
856                                struct hlist_node *hnode, void *arg)
857 {
858         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
859         __u64 flags = *(__u64 *)arg;
860
861         cleanup_resource(res, &res->lr_granted, flags);
862         cleanup_resource(res, &res->lr_converting, flags);
863         cleanup_resource(res, &res->lr_waiting, flags);
864
865         return 0;
866 }
867
868 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
869                                   struct hlist_node *hnode, void *arg)
870 {
871         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
872
873         lock_res(res);
874         CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
875                "(%d) after lock cleanup; forcing cleanup.\n",
876                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
877                atomic_read(&res->lr_refcount) - 1);
878
879         ldlm_resource_dump(D_ERROR, res);
880         unlock_res(res);
881         return 0;
882 }
883
884 /**
885  * Cancel and destroy all locks in the namespace.
886  *
887  * Typically used during evictions, when the server notifies the client that
888  * it was evicted and all of its state needs to be destroyed.
889  * Also used during shutdown.
890  */
891 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
892 {
893         if (ns == NULL) {
894                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
895                 return ELDLM_OK;
896         }
897
898         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
899                                  &flags, 0);
900         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
901                                  NULL, 0);
902         return ELDLM_OK;
903 }
904 EXPORT_SYMBOL(ldlm_namespace_cleanup);
905
906 /**
907  * Attempts to free the namespace.
908  *
909  * Only used when the namespace goes away, e.g. during an unmount.
910  */
911 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
912 {
913         ENTRY;
914
915         /* At shutdown time, don't call the cancellation callback */
916         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
917
918         if (atomic_read(&ns->ns_bref) > 0) {
919                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
920                 int rc;
921                 CDEBUG(D_DLMTRACE,
922                        "dlm namespace %s free waiting on refcount %d\n",
923                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
924 force_wait:
925                 if (force)
926                         lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
927                                           MSEC_PER_SEC) / 4, NULL, NULL);
928
929                 rc = l_wait_event(ns->ns_waitq,
930                                   atomic_read(&ns->ns_bref) == 0, &lwi);
931
932                 /* Forced cleanups should be able to reclaim all references,
933                  * so it's safe to wait forever... we can't leak locks... */
934                 if (force && rc == -ETIMEDOUT) {
935                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
936                                        "namespace with %d resources in use, "
937                                        "(rc=%d)\n", ldlm_ns_name(ns),
938                                        atomic_read(&ns->ns_bref), rc);
939                         GOTO(force_wait, rc);
940                 }
941
942                 if (atomic_read(&ns->ns_bref)) {
943                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
944                                        "with %d resources in use, (rc=%d)\n",
945                                        ldlm_ns_name(ns),
946                                        atomic_read(&ns->ns_bref), rc);
947                         RETURN(ELDLM_NAMESPACE_EXISTS);
948                 }
949                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
950                        ldlm_ns_name(ns));
951         }
952
953         RETURN(ELDLM_OK);
954 }
955
956 /**
957  * Performs various cleanups for the passed \a ns to make it drop its refcount
958  * and be ready for freeing. Waits for the refcount to reach 0.
959  *
960  * The following is done:
961  * (0) Unregister \a ns from its list to make inaccessible for potential
962  * users like pools thread and others;
963  * (1) Clear all locks in \a ns.
964  */
965 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
966                                struct obd_import *imp,
967                                int force)
968 {
969         int rc;
970         ENTRY;
971         if (!ns) {
972                 EXIT;
973                 return;
974         }
975
976         spin_lock(&ns->ns_lock);
977         ns->ns_stopping = 1;
978         spin_unlock(&ns->ns_lock);
979
980         /*
981          * Can fail with -EINTR when force == 0 in which case try harder.
982          */
983         rc = __ldlm_namespace_free(ns, force);
984         if (rc != ELDLM_OK) {
985                 if (imp) {
986                         ptlrpc_disconnect_import(imp, 0);
987                         ptlrpc_invalidate_import(imp);
988                 }
989
990                 /*
991                  * With all requests dropped and the import inactive
992                  * we are guaranteed all references will be dropped.
993                  */
994                 rc = __ldlm_namespace_free(ns, 1);
995                 LASSERT(rc == 0);
996         }
997         EXIT;
998 }
999 EXPORT_SYMBOL(ldlm_namespace_free_prior);
1000
1001 /**
1002  * Frees memory structures related to \a ns. This is only done when
1003  * ldlm_namespace_free_prior() has successfully removed all resources
1004  * referencing \a ns and its refcount has reached 0.
1005  */
1006 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
1007 {
1008         ENTRY;
1009         if (!ns) {
1010                 EXIT;
1011                 return;
1012         }
1013
1014         /* Make sure that nobody can find this ns in its list. */
1015         ldlm_namespace_unregister(ns, ns->ns_client);
1016         /* Fini the pool _before_ the parent proc dir is removed. This is
1017          * important, as ldlm_pool_fini() removes its own proc dir, which is a
1018          * child of @dir. Removing it after @dir may cause an oops. */
1019         ldlm_pool_fini(&ns->ns_pool);
1020
1021         ldlm_namespace_proc_unregister(ns);
1022         cfs_hash_putref(ns->ns_rs_hash);
1023         /* Namespace \a ns should not be on the list at this time; otherwise
1024          * this would cause issues related to using a freed \a ns in the poold
1025          * thread. */
1026         LASSERT(list_empty(&ns->ns_list_chain));
1027         OBD_FREE_PTR(ns);
1028         ldlm_put_ref();
1029         EXIT;
1030 }
1031 EXPORT_SYMBOL(ldlm_namespace_free_post);
1032
1033 /**
1034  * Clean up the resources and free the namespace.
1035  * bug 12864:
1036  * Deadlock issue:
1037  * proc1: destroy import
1038  *        class_disconnect_export(grab cl_sem) ->
1039  *              -> ldlm_namespace_free ->
1040  *              -> lprocfs_remove(grab _lprocfs_lock).
1041  * proc2: read proc info
1042  *        lprocfs_fops_read(grab _lprocfs_lock) ->
1043  *              -> osc_rd_active, etc(grab cl_sem).
1044  *
1045  * Therefore ldlm_namespace_free is split into two parts: the first part,
1046  * ldlm_namespace_free_prior, is used to clean up the resources which are
1047  * still being used; the second part, ldlm_namespace_free_post, is used to
1048  * unregister the lprocfs entries and then free memory. It will be called
1049  * without cli->cl_sem held.
1050  */
1051 void ldlm_namespace_free(struct ldlm_namespace *ns,
1052                          struct obd_import *imp,
1053                          int force)
1054 {
1055         ldlm_namespace_free_prior(ns, imp, force);
1056         ldlm_namespace_free_post(ns);
1057 }
1058 EXPORT_SYMBOL(ldlm_namespace_free);
1059
1060 void ldlm_namespace_get(struct ldlm_namespace *ns)
1061 {
1062         atomic_inc(&ns->ns_bref);
1063 }
1064
1065 /* This is only for callers that care about refcount */
1066 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1067 {
1068         return atomic_inc_return(&ns->ns_bref);
1069 }
1070
1071 void ldlm_namespace_put(struct ldlm_namespace *ns)
1072 {
1073         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1074                 wake_up(&ns->ns_waitq);
1075                 spin_unlock(&ns->ns_lock);
1076         }
1077 }
1078
1079 /** Register \a ns in the list of namespaces */
1080 void ldlm_namespace_register(struct ldlm_namespace *ns, enum ldlm_side client)
1081 {
1082         mutex_lock(ldlm_namespace_lock(client));
1083         LASSERT(list_empty(&ns->ns_list_chain));
1084         list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
1085         ldlm_namespace_nr_inc(client);
1086         mutex_unlock(ldlm_namespace_lock(client));
1087 }
1088
1089 /** Unregister \a ns from the list of namespaces. */
1090 void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client)
1091 {
1092         mutex_lock(ldlm_namespace_lock(client));
1093         LASSERT(!list_empty(&ns->ns_list_chain));
1094         /* Some asserts and possibly other parts of the code are still
1095          * using list_empty(&ns->ns_list_chain). This is why it is
1096          * important to use list_del_init() here. */
1097         list_del_init(&ns->ns_list_chain);
1098         ldlm_namespace_nr_dec(client);
1099         mutex_unlock(ldlm_namespace_lock(client));
1100 }
1101
1102 /** Should be called with ldlm_namespace_lock(client) taken. */
1103 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1104                                           enum ldlm_side client)
1105 {
1106         LASSERT(!list_empty(&ns->ns_list_chain));
1107         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1108         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1109 }
1110
1111 /** Should be called with ldlm_namespace_lock(client) taken. */
1112 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1113                                             enum ldlm_side client)
1114 {
1115         LASSERT(!list_empty(&ns->ns_list_chain));
1116         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1117         list_move_tail(&ns->ns_list_chain,
1118                        ldlm_namespace_inactive_list(client));
1119 }
1120
1121 /** Should be called with ldlm_namespace_lock(client) taken. */
1122 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
1123 {
1124         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1125         LASSERT(!list_empty(ldlm_namespace_list(client)));
1126         return container_of(ldlm_namespace_list(client)->next,
1127                             struct ldlm_namespace, ns_list_chain);
1128 }
1129
1130 /** Create and initialize new resource. */
1131 static struct ldlm_resource *ldlm_resource_new(enum ldlm_type ldlm_type)
1132 {
1133         struct ldlm_resource *res;
1134         int idx;
1135
1136         OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1137         if (res == NULL)
1138                 return NULL;
1139
1140         if (ldlm_type == LDLM_EXTENT) {
1141                 OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
1142                                sizeof(*res->lr_itree) * LCK_MODE_NUM);
1143                 if (res->lr_itree == NULL) {
1144                         OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1145                         return NULL;
1146                 }
1147                 /* Initialize interval trees for each lock mode. */
1148                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1149                         res->lr_itree[idx].lit_size = 0;
1150                         res->lr_itree[idx].lit_mode = 1 << idx;
1151                         res->lr_itree[idx].lit_root = NULL;
1152                 }
1153         }
1154
1155         INIT_LIST_HEAD(&res->lr_granted);
1156         INIT_LIST_HEAD(&res->lr_converting);
1157         INIT_LIST_HEAD(&res->lr_waiting);
1158
1159         atomic_set(&res->lr_refcount, 1);
1160         spin_lock_init(&res->lr_lock);
1161         lu_ref_init(&res->lr_reference);
1162
1163         /* Since LVB init can be delayed now, there is no longer a need to
1164          * immediately acquire the mutex here. */
1165         mutex_init(&res->lr_lvb_mutex);
1166         res->lr_lvb_initialized = false;
1167
1168         return res;
1169 }
1170
1171 /**
1172  * Return a reference to resource with given name, creating it if necessary.
1173  * Args: namespace with ns_lock unlocked
1174  * Locks: takes and releases NS hash-lock and res->lr_lock
1175  * Returns: referenced, unlocked ldlm_resource, or an ERR_PTR() value on failure
1176  */
1177 struct ldlm_resource *
1178 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1179                   const struct ldlm_res_id *name, enum ldlm_type type,
1180                   int create)
1181 {
1182         struct hlist_node       *hnode;
1183         struct ldlm_resource    *res = NULL;
1184         struct cfs_hash_bd              bd;
1185         __u64                   version;
1186         int                     ns_refcount = 0;
1187
1188         LASSERT(ns != NULL);
1189         LASSERT(parent == NULL);
1190         LASSERT(ns->ns_rs_hash != NULL);
1191         LASSERT(name->name[0] != 0);
1192
1193         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1194         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1195         if (hnode != NULL) {
1196                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1197                 GOTO(found, res);
1198         }
1199
1200         version = cfs_hash_bd_version_get(&bd);
1201         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1202
1203         if (create == 0)
1204                 return ERR_PTR(-ENOENT);
1205
1206         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1207                  "type: %d\n", type);
1208         res = ldlm_resource_new(type);
1209         if (res == NULL)
1210                 return ERR_PTR(-ENOMEM);
1211
1212         res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
1213         res->lr_name = *name;
1214         res->lr_type = type;
1215
1216         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1217         hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1218                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1219
1220         if (hnode != NULL) {
1221                 /* Someone won the race and already added the resource. */
1222                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1223                 /* Clean lu_ref for failed resource. */
1224                 lu_ref_fini(&res->lr_reference);
1225                 if (res->lr_itree != NULL)
1226                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1227                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1228                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1229 found:
1230                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1231                 return res;
1232         }
1233         /* We won! Let's add the resource. */
1234         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1235         if (cfs_hash_bd_count_get(&bd) == 1)
1236                 ns_refcount = ldlm_namespace_get_return(ns);
1237
1238         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1239
1240         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1241
1242         /* Let's see if we happened to be the very first resource in this
1243          * namespace. If so, and this is a client namespace, we need to move
1244          * the namespace into the active namespaces list to be patrolled by
1245          * the ldlm_poold. */
1246         if (ns_is_client(ns) && ns_refcount == 1) {
1247                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1248                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1249                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1250         }
1251
1252         return res;
1253 }
1254 EXPORT_SYMBOL(ldlm_resource_get);
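
/*
 * Usage sketch (illustrative only; "ns" and "res_id" are assumed to come
 * from the caller, and LDLM_PLAIN is just one of the possible lock types):
 *
 *      struct ldlm_resource *res;
 *
 *      res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
 *      if (IS_ERR(res))
 *              return PTR_ERR(res);
 *      lock_res(res);
 *      ... inspect or modify the lr_granted / lr_waiting lists ...
 *      unlock_res(res);
 *      ldlm_resource_putref(res);
 */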
1255
1256 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1257 {
1258         LASSERT(res != NULL);
1259         LASSERT(res != LP_POISON);
1260         atomic_inc(&res->lr_refcount);
1261         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1262                atomic_read(&res->lr_refcount));
1263         return res;
1264 }
1265
1266 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1267                                          struct ldlm_resource *res)
1268 {
1269         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1270
1271         if (!list_empty(&res->lr_granted)) {
1272                 ldlm_resource_dump(D_ERROR, res);
1273                 LBUG();
1274         }
1275
1276         if (!list_empty(&res->lr_converting)) {
1277                 ldlm_resource_dump(D_ERROR, res);
1278                 LBUG();
1279         }
1280
1281         if (!list_empty(&res->lr_waiting)) {
1282                 ldlm_resource_dump(D_ERROR, res);
1283                 LBUG();
1284         }
1285
1286         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1287                                bd, &res->lr_hash);
1288         lu_ref_fini(&res->lr_reference);
1289         if (cfs_hash_bd_count_get(bd) == 0)
1290                 ldlm_namespace_put(nsb->nsb_namespace);
1291 }
1292
1293 /* Returns 1 if the resource was freed, 0 if it remains. */
1294 int ldlm_resource_putref(struct ldlm_resource *res)
1295 {
1296         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1297         struct cfs_hash_bd   bd;
1298
1299         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1300         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1301                res, atomic_read(&res->lr_refcount) - 1);
1302
1303         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1304         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1305                 __ldlm_resource_putref_final(&bd, res);
1306                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1307                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1308                         ns->ns_lvbo->lvbo_free(res);
1309                 if (res->lr_itree != NULL)
1310                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1311                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1312                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1313                 return 1;
1314         }
1315         return 0;
1316 }
1317 EXPORT_SYMBOL(ldlm_resource_putref);
1318
1319 /**
1320  * Add a lock to the specified lock list of a given resource.
1321  */
1322 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1323                             struct ldlm_lock *lock)
1324 {
1325         check_res_locked(res);
1326
1327         LDLM_DEBUG(lock, "About to add this lock");
1328
1329         if (ldlm_is_destroyed(lock)) {
1330                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1331                 return;
1332         }
1333
1334         LASSERT(list_empty(&lock->l_res_link));
1335
1336         list_add_tail(&lock->l_res_link, head);
1337 }
1338
1339 /**
1340  * Insert a lock into resource after specified lock.
1341  * Insert a lock into a resource after the specified lock.
1342  * Obtain resource description from the lock we are inserting after.
1343  */
1344 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1345                                      struct ldlm_lock *new)
1346 {
1347         struct ldlm_resource *res = original->l_resource;
1348
1349         check_res_locked(res);
1350
1351         ldlm_resource_dump(D_INFO, res);
1352         LDLM_DEBUG(new, "About to insert this lock after %p: ", original);
1353
1354         if (ldlm_is_destroyed(new)) {
1355                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1356                 goto out;
1357         }
1358
1359         LASSERT(list_empty(&new->l_res_link));
1360
1361         list_add(&new->l_res_link, &original->l_res_link);
1362  out:;
1363 }
1364
1365 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1366 {
1367         int type = lock->l_resource->lr_type;
1368
1369         check_res_locked(lock->l_resource);
1370         if (type == LDLM_IBITS || type == LDLM_PLAIN)
1371                 ldlm_unlink_lock_skiplist(lock);
1372         else if (type == LDLM_EXTENT)
1373                 ldlm_extent_unlink_lock(lock);
1374         list_del_init(&lock->l_res_link);
1375 }
1376 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1377
1378 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1379 {
1380         desc->lr_type = res->lr_type;
1381         desc->lr_name = res->lr_name;
1382 }
1383
1384 /**
1385  * Print information about all locks in all namespaces on this node to debug
1386  * log.
1387  */
1388 void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
1389 {
1390         struct list_head *tmp;
1391
1392         if (!((libcfs_debug | D_ERROR) & level))
1393                 return;
1394
1395         mutex_lock(ldlm_namespace_lock(client));
1396
1397         list_for_each(tmp, ldlm_namespace_list(client)) {
1398                 struct ldlm_namespace *ns;
1399
1400                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1401                 ldlm_namespace_dump(level, ns);
1402         }
1403
1404         mutex_unlock(ldlm_namespace_lock(client));
1405 }
1406
1407 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1408                               struct hlist_node *hnode, void *arg)
1409 {
1410         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1411         int    level = (int)(unsigned long)arg;
1412
1413         lock_res(res);
1414         ldlm_resource_dump(level, res);
1415         unlock_res(res);
1416
1417         return 0;
1418 }
1419
1420 /**
1421  * Print information about all locks in this namespace on this node to debug
1422  * log.
1423  */
1424 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1425 {
1426         if (!((libcfs_debug | D_ERROR) & level))
1427                 return;
1428
1429         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1430                ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1431                ns_is_client(ns) ? "client" : "server");
1432
1433         if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
1434                 return;
1435
1436         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1437                                  ldlm_res_hash_dump,
1438                                  (void *)(unsigned long)level, 0);
1439         spin_lock(&ns->ns_lock);
1440         ns->ns_next_dump = cfs_time_shift(10);
1441         spin_unlock(&ns->ns_lock);
1442 }
1443
1444 /**
1445  * Print information about all locks in this resource to debug log.
1446  */
1447 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1448 {
1449         struct ldlm_lock *lock;
1450         unsigned int granted = 0;
1451
1452         CLASSERT(RES_NAME_SIZE == 4);
1453
1454         if (!((libcfs_debug | D_ERROR) & level))
1455                 return;
1456
1457         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1458                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1459
1460         if (!list_empty(&res->lr_granted)) {
1461                 CDEBUG(level, "Granted locks (in reverse order):\n");
1462                 list_for_each_entry_reverse(lock, &res->lr_granted,
1463                                                 l_res_link) {
1464                         LDLM_DEBUG_LIMIT(level, lock, "###");
1465                         if (!(level & D_CANTMASK) &&
1466                             ++granted > ldlm_dump_granted_max) {
1467                                 CDEBUG(level, "only dump %d granted locks to "
1468                                        "avoid DDOS.\n", granted);
1469                                 break;
1470                         }
1471                 }
1472         }
1473         if (!list_empty(&res->lr_converting)) {
1474                 CDEBUG(level, "Converting locks:\n");
1475                 list_for_each_entry(lock, &res->lr_converting, l_res_link)
1476                         LDLM_DEBUG_LIMIT(level, lock, "###");
1477         }
1478         if (!list_empty(&res->lr_waiting)) {
1479                 CDEBUG(level, "Waiting locks:\n");
1480                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1481                         LDLM_DEBUG_LIMIT(level, lock, "###");
1482         }
1483 }
1484 EXPORT_SYMBOL(ldlm_resource_dump);