LU-6529 ldlm: reclaim granted locks defensively
[fs/lustre-release.git] / lustre / ldlm / ldlm_resource.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_resource.c
37  *
38  * Author: Phil Schwan <phil@clusterfs.com>
39  * Author: Peter Braam <braam@clusterfs.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43 #include <lustre_dlm.h>
44 #include <lustre_fid.h>
45 #include <obd_class.h>
46 #include "ldlm_internal.h"
47
48 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
49 struct kmem_cache *ldlm_interval_tree_slab;
50
51 int ldlm_srv_namespace_nr = 0;
52 int ldlm_cli_namespace_nr = 0;
53
54 struct mutex ldlm_srv_namespace_lock;
55 struct list_head ldlm_srv_namespace_list;
56
57 struct mutex ldlm_cli_namespace_lock;
58 /* Client Namespaces that have active resources in them.
59  * Once all resources go away, ldlm_poold moves such namespaces to the
60  * inactive list */
61 struct list_head ldlm_cli_active_namespace_list;
62 /* Client namespaces that don't have any locks in them */
63 struct list_head ldlm_cli_inactive_namespace_list;
64
65 static struct proc_dir_entry *ldlm_type_proc_dir;
66 static struct proc_dir_entry *ldlm_ns_proc_dir;
67 struct proc_dir_entry *ldlm_svc_proc_dir;
68
69 /* During a debug dump, print only a limited number of granted locks per
70  * resource to avoid flooding the log (DDoS protection). */
71 static unsigned int ldlm_dump_granted_max = 256;
72
73 #ifdef CONFIG_PROC_FS
74 static ssize_t
75 lprocfs_dump_ns_seq_write(struct file *file, const char __user *buffer,
76                           size_t count, loff_t *off)
77 {
78         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
79         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
80         RETURN(count);
81 }
82 LPROC_SEQ_FOPS_WO_TYPE(ldlm, dump_ns);
83
84 LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
85 LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint);
86
87 /* The watermark is stored as a lock count, but it is displayed as the number
88  * of MB of memory consumed by those locks. */
89 static int seq_watermark_show(struct seq_file *m, void *data)
90 {
91         __u64 locknr = *(__u64 *)m->private;
92         return seq_printf(m, LPU64"\n",
93                           (locknr * sizeof(struct ldlm_lock)) >> 20);
94 }
95
96 static ssize_t seq_watermark_write(struct file *file,
97                                    const char __user *buffer, size_t count,
98                                    loff_t *off)
99 {
100         __u64 watermark;
101         __u64 *data = ((struct seq_file *)file->private_data)->private;
102         int rc;
103
104         rc = lprocfs_write_frac_u64_helper(buffer, count, &watermark, 1 << 20);
105         if (rc) {
106                 CERROR("Failed to set LDLM watermark, rc = %d.\n", rc);
107                 return rc;
108         } else if (watermark != 0 && watermark < (1 << 20)) {
109                 CERROR("Watermark should be greater than 1MB.\n");
110                 return -EINVAL;
111         }
112
113         do_div(watermark, sizeof(struct ldlm_lock));
114         *data = watermark;
115
116         if (ldlm_watermark_low != 0 && ldlm_watermark_high != 0 &&
117             ldlm_watermark_low > ldlm_watermark_high)
118                 ldlm_watermark_low = ldlm_watermark_high;
119         return count;
120 }
121
122 static int seq_watermark_open(struct inode *inode, struct file *file)
123 {
124         return single_open(file, seq_watermark_show, PDE_DATA(inode));
125 }
126
127 static const struct file_operations ldlm_watermark_fops = {
128         .owner          = THIS_MODULE,
129         .open           = seq_watermark_open,
130         .read           = seq_read,
131         .write          = seq_watermark_write,
132         .llseek         = seq_lseek,
133         .release        = lprocfs_single_release,
134 };
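
/*
 * Worked example for the watermark files above: the value is written in MB
 * and stored internally as a lock count, roughly
 * (value_mb << 20) / sizeof(struct ldlm_lock); reading the file converts the
 * count back to MB.  Assuming the entries end up under /proc/fs/lustre/ldlm/
 * as set up in ldlm_proc_setup() below, a 1 GB high watermark would be set
 * with something like:
 *
 *     echo 1024 > /proc/fs/lustre/ldlm/watermark_mb_high
 *     cat /proc/fs/lustre/ldlm/watermark_mb_high
 */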
135
136 int ldlm_proc_setup(void)
137 {
138         int rc;
139         struct lprocfs_vars list[] = {
140                 { .name =       "dump_namespaces",
141                   .fops =       &ldlm_dump_ns_fops,
142                   .proc_mode =  0222 },
143                 { .name =       "dump_granted_max",
144                   .fops =       &ldlm_rw_uint_fops,
145                   .data =       &ldlm_dump_granted_max },
146                 { .name =       "cancel_unused_locks_before_replay",
147                   .fops =       &ldlm_rw_uint_fops,
148                   .data =       &ldlm_cancel_unused_locks_before_replay },
149                 { .name =       "watermark_mb_low",
150                   .fops =       &ldlm_watermark_fops,
151                   .data =       &ldlm_watermark_low },
152                 { .name =       "watermark_mb_high",
153                   .fops =       &ldlm_watermark_fops,
154                   .data =       &ldlm_watermark_high },
155                 { NULL }};
156         ENTRY;
157         LASSERT(ldlm_ns_proc_dir == NULL);
158
159         ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
160                                               proc_lustre_root,
161                                               NULL, NULL);
162         if (IS_ERR(ldlm_type_proc_dir)) {
163                 CERROR("LProcFS failed in ldlm-init\n");
164                 rc = PTR_ERR(ldlm_type_proc_dir);
165                 GOTO(err, rc);
166         }
167
168         ldlm_ns_proc_dir = lprocfs_register("namespaces",
169                                             ldlm_type_proc_dir,
170                                             NULL, NULL);
171         if (IS_ERR(ldlm_ns_proc_dir)) {
172                 CERROR("LProcFS failed in ldlm-init\n");
173                 rc = PTR_ERR(ldlm_ns_proc_dir);
174                 GOTO(err_type, rc);
175         }
176
177         ldlm_svc_proc_dir = lprocfs_register("services",
178                                              ldlm_type_proc_dir,
179                                              NULL, NULL);
180         if (IS_ERR(ldlm_svc_proc_dir)) {
181                 CERROR("LProcFS failed in ldlm-init\n");
182                 rc = PTR_ERR(ldlm_svc_proc_dir);
183                 GOTO(err_ns, rc);
184         }
185
186         rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
187         if (rc != 0) {
188                 CERROR("LProcFS failed in ldlm-init\n");
189                 GOTO(err_svc, rc);
190         }
191
192         RETURN(0);
193
194 err_svc:
195         lprocfs_remove(&ldlm_svc_proc_dir);
196 err_ns:
197         lprocfs_remove(&ldlm_ns_proc_dir);
198 err_type:
199         lprocfs_remove(&ldlm_type_proc_dir);
200 err:
201         ldlm_svc_proc_dir = NULL;
202         RETURN(rc);
203 }
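
/*
 * Rough sketch of the proc tree created above, assuming OBD_LDLM_DEVICENAME
 * resolves to "ldlm" and proc_lustre_root to /proc/fs/lustre:
 *
 *     /proc/fs/lustre/ldlm/
 *         dump_namespaces
 *         dump_granted_max
 *         cancel_unused_locks_before_replay
 *         watermark_mb_low
 *         watermark_mb_high
 *         namespaces/    <- per-namespace entries, see
 *                           ldlm_namespace_proc_register()
 *         services/
 */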
204
205 void ldlm_proc_cleanup(void)
206 {
207         if (ldlm_svc_proc_dir)
208                 lprocfs_remove(&ldlm_svc_proc_dir);
209
210         if (ldlm_ns_proc_dir)
211                 lprocfs_remove(&ldlm_ns_proc_dir);
212
213         if (ldlm_type_proc_dir)
214                 lprocfs_remove(&ldlm_type_proc_dir);
215 }
216
217 static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
218 {
219         struct ldlm_namespace   *ns  = m->private;
220         __u64                   res = 0;
221         struct cfs_hash_bd              bd;
222         int                     i;
223
224         /* The result is not strictly consistent. */
225         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
226                 res += cfs_hash_bd_count_get(&bd);
227         return lprocfs_u64_seq_show(m, &res);
228 }
229 LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);
230
231 static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
232 {
233         struct ldlm_namespace   *ns = m->private;
234         __u64                   locks;
235
236         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
237                                         LPROCFS_FIELDS_FLAGS_SUM);
238         return lprocfs_u64_seq_show(m, &locks);
239 }
240 LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);
241
242 static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
243 {
244         struct ldlm_namespace *ns = m->private;
245         __u32 *nr = &ns->ns_max_unused;
246
247         if (ns_connect_lru_resize(ns))
248                 nr = &ns->ns_nr_unused;
249         return lprocfs_uint_seq_show(m, nr);
250 }
251
252 static ssize_t lprocfs_lru_size_seq_write(struct file *file,
253                                           const char __user *buffer,
254                                           size_t count, loff_t *off)
255 {
256         struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
257         char dummy[MAX_STRING_SIZE + 1], *end;
258         unsigned long tmp;
259         int lru_resize;
260
261         dummy[MAX_STRING_SIZE] = '\0';
262         if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
263                 return -EFAULT;
264
265         if (strncmp(dummy, "clear", 5) == 0) {
266                 CDEBUG(D_DLMTRACE,
267                        "dropping all unused locks from namespace %s\n",
268                        ldlm_ns_name(ns));
269                 if (ns_connect_lru_resize(ns)) {
270                         int canceled, unused  = ns->ns_nr_unused;
271
272                         /* Try to cancel all @ns_nr_unused locks. */
273                         canceled = ldlm_cancel_lru(ns, unused, 0,
274                                                    LDLM_CANCEL_PASSED);
275                         if (canceled < unused) {
276                                 CDEBUG(D_DLMTRACE,
277                                        "not all requested locks are canceled, "
278                                        "requested: %d, canceled: %d\n", unused,
279                                        canceled);
280                                 return -EINVAL;
281                         }
282                 } else {
283                         tmp = ns->ns_max_unused;
284                         ns->ns_max_unused = 0;
285                         ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
286                         ns->ns_max_unused = tmp;
287                 }
288                 return count;
289         }
290
291         tmp = simple_strtoul(dummy, &end, 0);
292         if (dummy == end) {
293                 CERROR("invalid value written\n");
294                 return -EINVAL;
295         }
296         lru_resize = (tmp == 0);
297
298         if (ns_connect_lru_resize(ns)) {
299                 if (!lru_resize)
300                         ns->ns_max_unused = (unsigned int)tmp;
301
302                 if (tmp > ns->ns_nr_unused)
303                         tmp = ns->ns_nr_unused;
304                 tmp = ns->ns_nr_unused - tmp;
305
306                 CDEBUG(D_DLMTRACE,
307                        "changing namespace %s unused locks from %u to %u\n",
308                        ldlm_ns_name(ns), ns->ns_nr_unused,
309                        (unsigned int)tmp);
310                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
311
312                 if (!lru_resize) {
313                         CDEBUG(D_DLMTRACE,
314                                "disable lru_resize for namespace %s\n",
315                                ldlm_ns_name(ns));
316                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
317                 }
318         } else {
319                 CDEBUG(D_DLMTRACE,
320                        "changing namespace %s max_unused from %u to %u\n",
321                        ldlm_ns_name(ns), ns->ns_max_unused,
322                        (unsigned int)tmp);
323                 ns->ns_max_unused = (unsigned int)tmp;
324                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
325
326                 /* Make sure that LRU resize was originally supported before
327                  * turning it on here. */
328                 if (lru_resize &&
329                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
330                         CDEBUG(D_DLMTRACE,
331                                "enable lru_resize for namespace %s\n",
332                                ldlm_ns_name(ns));
333                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
334                 }
335         }
336
337         return count;
338 }
339 LPROC_SEQ_FOPS(lprocfs_lru_size);
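
/*
 * Usage sketch for the lru_size file above; the path is an assumption based
 * on ldlm_namespace_proc_register() below, with <ns> being the namespace
 * name:
 *
 *     # drop all unused locks from one namespace
 *     echo clear > /proc/fs/lustre/ldlm/namespaces/<ns>/lru_size
 *
 *     # pin the LRU to 400 locks; this also disables lru_resize
 *     echo 400 > /proc/fs/lustre/ldlm/namespaces/<ns>/lru_size
 *
 *     # writing 0 re-enables lru_resize if the server supports it
 *     echo 0 > /proc/fs/lustre/ldlm/namespaces/<ns>/lru_size
 */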
340
341 static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
342 {
343         struct ldlm_namespace *ns = m->private;
344         unsigned int supp = ns_connect_cancelset(ns);
345
346         return lprocfs_uint_seq_show(m, &supp);
347 }
348
349 static ssize_t lprocfs_elc_seq_write(struct file *file,
350                                      const char __user *buffer,
351                                      size_t count, loff_t *off)
352 {
353         struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
354         unsigned int supp = -1;
355         int rc;
356
357         rc = lprocfs_wr_uint(file, buffer, count, &supp);
358         if (rc < 0)
359                 return rc;
360
361         if (supp == 0)
362                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
363         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
364                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
365         return count;
366 }
367 LPROC_SEQ_FOPS(lprocfs_elc);
368
369 static void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
370 {
371         if (ns->ns_proc_dir_entry == NULL)
372                 CERROR("dlm namespace %s has no procfs dir?\n",
373                        ldlm_ns_name(ns));
374         else
375                 lprocfs_remove(&ns->ns_proc_dir_entry);
376
377         if (ns->ns_stats != NULL)
378                 lprocfs_free_stats(&ns->ns_stats);
379 }
380
381 static int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
382 {
383         struct lprocfs_vars lock_vars[2];
384         char lock_name[MAX_STRING_SIZE + 1];
385         struct proc_dir_entry *ns_pde;
386
387         LASSERT(ns != NULL);
388         LASSERT(ns->ns_rs_hash != NULL);
389
390         if (ns->ns_proc_dir_entry != NULL) {
391                 ns_pde = ns->ns_proc_dir_entry;
392         } else {
393                 ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir);
394                 if (ns_pde == NULL)
395                         return -ENOMEM;
396                 ns->ns_proc_dir_entry = ns_pde;
397         }
398
399         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
400         if (ns->ns_stats == NULL)
401                 return -ENOMEM;
402
403         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
404                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
405
406         lock_name[MAX_STRING_SIZE] = '\0';
407
408         memset(lock_vars, 0, sizeof(lock_vars));
409         lock_vars[0].name = lock_name;
410
411         ldlm_add_var(&lock_vars[0], ns_pde, "resource_count", ns,
412                      &lprocfs_ns_resources_fops);
413         ldlm_add_var(&lock_vars[0], ns_pde, "lock_count", ns,
414                      &lprocfs_ns_locks_fops);
415
416         if (ns_is_client(ns)) {
417                 ldlm_add_var(&lock_vars[0], ns_pde, "lock_unused_count",
418                              &ns->ns_nr_unused, &ldlm_uint_fops);
419                 ldlm_add_var(&lock_vars[0], ns_pde, "lru_size", ns,
420                              &lprocfs_lru_size_fops);
421                 ldlm_add_var(&lock_vars[0], ns_pde, "lru_max_age",
422                              &ns->ns_max_age, &ldlm_rw_uint_fops);
423                 ldlm_add_var(&lock_vars[0], ns_pde, "early_lock_cancel",
424                              ns, &lprocfs_elc_fops);
425         } else {
426                 ldlm_add_var(&lock_vars[0], ns_pde, "ctime_age_limit",
427                              &ns->ns_ctime_age_limit, &ldlm_rw_uint_fops);
428                 ldlm_add_var(&lock_vars[0], ns_pde, "lock_timeouts",
429                              &ns->ns_timeouts, &ldlm_uint_fops);
430                 ldlm_add_var(&lock_vars[0], ns_pde, "max_nolock_bytes",
431                              &ns->ns_max_nolock_size, &ldlm_rw_uint_fops);
432                 ldlm_add_var(&lock_vars[0], ns_pde, "contention_seconds",
433                              &ns->ns_contention_time, &ldlm_rw_uint_fops);
434                 ldlm_add_var(&lock_vars[0], ns_pde, "contended_locks",
435                              &ns->ns_contended_locks, &ldlm_rw_uint_fops);
436                 ldlm_add_var(&lock_vars[0], ns_pde, "max_parallel_ast",
437                              &ns->ns_max_parallel_ast, &ldlm_rw_uint_fops);
438         }
439         return 0;
440 }
441 #undef MAX_STRING_SIZE
442 #else /* CONFIG_PROC_FS */
443
444 #define ldlm_namespace_proc_unregister(ns)      ({;})
445 #define ldlm_namespace_proc_register(ns)        ({0;})
446
447 #endif /* CONFIG_PROC_FS */
448
449 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
450                                   const void *key, unsigned mask)
451 {
452         const struct ldlm_res_id     *id  = key;
453         unsigned                val = 0;
454         unsigned                i;
455
456         for (i = 0; i < RES_NAME_SIZE; i++)
457                 val += id->name[i];
458         return val & mask;
459 }
460
461 static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
462                                       const void *key, unsigned mask)
463 {
464         const struct ldlm_res_id *id = key;
465         struct lu_fid       fid;
466         __u32               hash;
467         __u32               val;
468
469         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
470         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
471         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
472
473         hash = fid_flatten32(&fid);
474         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
475         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
476                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
477                 hash += (val >> 5) + (val << 11);
478         } else {
479                 val = fid_oid(&fid);
480         }
481         hash = hash_long(hash, hs->hs_bkt_bits);
482         /* give me another random factor */
483         hash -= hash_long((unsigned long)hs, val % 11 + 3);
484
485         hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
486         hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
487
488         return hash & mask;
489 }
490
491 static void *ldlm_res_hop_key(struct hlist_node *hnode)
492 {
493         struct ldlm_resource   *res;
494
495         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
496         return &res->lr_name;
497 }
498
499 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
500 {
501         struct ldlm_resource   *res;
502
503         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
504         return ldlm_res_eq((const struct ldlm_res_id *)key,
505                            (const struct ldlm_res_id *)&res->lr_name);
506 }
507
508 static void *ldlm_res_hop_object(struct hlist_node *hnode)
509 {
510         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
511 }
512
513 static void
514 ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
515 {
516         struct ldlm_resource *res;
517
518         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
519         ldlm_resource_getref(res);
520 }
521
522 static void
523 ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
524 {
525         struct ldlm_resource *res;
526
527         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
528         /* cfs_hash_for_each_nolock() is the only path that calls this. */
529         ldlm_resource_putref_locked(res);
530 }
531
532 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
533 {
534         struct ldlm_resource *res;
535
536         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
537         ldlm_resource_putref(res);
538 }
539
540 static struct cfs_hash_ops ldlm_ns_hash_ops = {
541         .hs_hash        = ldlm_res_hop_hash,
542         .hs_key         = ldlm_res_hop_key,
543         .hs_keycmp      = ldlm_res_hop_keycmp,
544         .hs_keycpy      = NULL,
545         .hs_object      = ldlm_res_hop_object,
546         .hs_get         = ldlm_res_hop_get_locked,
547         .hs_put_locked  = ldlm_res_hop_put_locked,
548         .hs_put         = ldlm_res_hop_put
549 };
550
551 static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
552         .hs_hash        = ldlm_res_hop_fid_hash,
553         .hs_key         = ldlm_res_hop_key,
554         .hs_keycmp      = ldlm_res_hop_keycmp,
555         .hs_keycpy      = NULL,
556         .hs_object      = ldlm_res_hop_object,
557         .hs_get         = ldlm_res_hop_get_locked,
558         .hs_put_locked  = ldlm_res_hop_put_locked,
559         .hs_put         = ldlm_res_hop_put
560 };
561
562 typedef struct {
563         ldlm_ns_type_t  nsd_type;
564         /** hash bucket bits */
565         unsigned        nsd_bkt_bits;
566         /** hash bits */
567         unsigned        nsd_all_bits;
568         /** hash operations */
569         struct cfs_hash_ops *nsd_hops;
570 } ldlm_ns_hash_def_t;
571
572 static ldlm_ns_hash_def_t ldlm_ns_hash_defs[] =
573 {
574         {
575                 .nsd_type       = LDLM_NS_TYPE_MDC,
576                 .nsd_bkt_bits   = 11,
577                 .nsd_all_bits   = 16,
578                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
579         },
580         {
581                 .nsd_type       = LDLM_NS_TYPE_MDT,
582                 .nsd_bkt_bits   = 14,
583                 .nsd_all_bits   = 21,
584                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
585         },
586         {
587                 .nsd_type       = LDLM_NS_TYPE_OSC,
588                 .nsd_bkt_bits   = 8,
589                 .nsd_all_bits   = 12,
590                 .nsd_hops       = &ldlm_ns_hash_ops,
591         },
592         {
593                 .nsd_type       = LDLM_NS_TYPE_OST,
594                 .nsd_bkt_bits   = 11,
595                 .nsd_all_bits   = 17,
596                 .nsd_hops       = &ldlm_ns_hash_ops,
597         },
598         {
599                 .nsd_type       = LDLM_NS_TYPE_MGC,
600                 .nsd_bkt_bits   = 4,
601                 .nsd_all_bits   = 4,
602                 .nsd_hops       = &ldlm_ns_hash_ops,
603         },
604         {
605                 .nsd_type       = LDLM_NS_TYPE_MGT,
606                 .nsd_bkt_bits   = 4,
607                 .nsd_all_bits   = 4,
608                 .nsd_hops       = &ldlm_ns_hash_ops,
609         },
610         {
611                 .nsd_type       = LDLM_NS_TYPE_UNKNOWN,
612         },
613 };
614
615 /**
616  * Create and initialize new empty namespace.
617  */
618 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
619                                           ldlm_side_t client,
620                                           ldlm_appetite_t apt,
621                                           ldlm_ns_type_t ns_type)
622 {
623         struct ldlm_namespace *ns = NULL;
624         struct ldlm_ns_bucket *nsb;
625         ldlm_ns_hash_def_t    *nsd;
626         struct cfs_hash_bd          bd;
627         int                    idx;
628         int                    rc;
629         ENTRY;
630
631         LASSERT(obd != NULL);
632
633         rc = ldlm_get_ref();
634         if (rc) {
635                 CERROR("ldlm_get_ref failed: %d\n", rc);
636                 RETURN(NULL);
637         }
638
639         for (idx = 0;;idx++) {
640                 nsd = &ldlm_ns_hash_defs[idx];
641                 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
642                         CERROR("Unknown type %d for ns %s\n", ns_type, name);
643                         GOTO(out_ref, NULL);
644                 }
645
646                 if (nsd->nsd_type == ns_type)
647                         break;
648         }
649
650         OBD_ALLOC_PTR(ns);
651         if (!ns)
652                 GOTO(out_ref, NULL);
653
654         ns->ns_rs_hash = cfs_hash_create(name,
655                                          nsd->nsd_all_bits, nsd->nsd_all_bits,
656                                          nsd->nsd_bkt_bits, sizeof(*nsb),
657                                          CFS_HASH_MIN_THETA,
658                                          CFS_HASH_MAX_THETA,
659                                          nsd->nsd_hops,
660                                          CFS_HASH_DEPTH |
661                                          CFS_HASH_BIGNAME |
662                                          CFS_HASH_SPIN_BKTLOCK |
663                                          CFS_HASH_NO_ITEMREF);
664         if (ns->ns_rs_hash == NULL)
665                 GOTO(out_ns, NULL);
666
667         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
668                 nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
669                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
670                 nsb->nsb_namespace = ns;
671                 nsb->nsb_reclaim_start = 0;
672         }
673
674         ns->ns_obd      = obd;
675         ns->ns_appetite = apt;
676         ns->ns_client   = client;
677
678         INIT_LIST_HEAD(&ns->ns_list_chain);
679         INIT_LIST_HEAD(&ns->ns_unused_list);
680         spin_lock_init(&ns->ns_lock);
681         atomic_set(&ns->ns_bref, 0);
682         init_waitqueue_head(&ns->ns_waitq);
683
684         ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
685         ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
686         ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
687
688         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
689         ns->ns_nr_unused          = 0;
690         ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
691         ns->ns_max_age            = LDLM_DEFAULT_MAX_ALIVE;
692         ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
693         ns->ns_timeouts           = 0;
694         ns->ns_orig_connect_flags = 0;
695         ns->ns_connect_flags      = 0;
696         ns->ns_stopping           = 0;
697         ns->ns_reclaim_start      = 0;
698         rc = ldlm_namespace_proc_register(ns);
699         if (rc != 0) {
700                 CERROR("Can't initialize ns proc, rc %d\n", rc);
701                 GOTO(out_hash, rc);
702         }
703
704         idx = ldlm_namespace_nr_read(client);
705         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
706         if (rc) {
707                 CERROR("Can't initialize lock pool, rc %d\n", rc);
708                 GOTO(out_proc, rc);
709         }
710
711         ldlm_namespace_register(ns, client);
712         RETURN(ns);
713 out_proc:
714         ldlm_namespace_proc_unregister(ns);
715         ldlm_namespace_cleanup(ns, 0);
716 out_hash:
717         cfs_hash_putref(ns->ns_rs_hash);
718 out_ns:
719         OBD_FREE_PTR(ns);
720 out_ref:
721         ldlm_put_ref();
722         RETURN(NULL);
723 }
724 EXPORT_SYMBOL(ldlm_namespace_new);
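
/*
 * Minimal usage sketch for ldlm_namespace_new(); the obd pointer, the
 * namespace name and the error handling are placeholders:
 *
 *     struct ldlm_namespace *ns;
 *
 *     ns = ldlm_namespace_new(obd, "example-OSC-namespace",
 *                             LDLM_NAMESPACE_CLIENT, LDLM_NAMESPACE_GREEDY,
 *                             LDLM_NS_TYPE_OSC);
 *     if (ns == NULL)
 *             return -ENOMEM;
 *
 * Note that on failure NULL is returned rather than an ERR_PTR, and the
 * namespace is later released with ldlm_namespace_free().
 */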
725
726 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
727
728 /**
729  * Cancel and destroy all locks on a resource.
730  *
731  * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
732  * clean up.  This is currently only used for recovery, and we make
733  * certain assumptions as a result--notably, that we shouldn't cancel
734  * locks with refs.
735  */
736 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
737                              __u64 flags)
738 {
739         struct list_head *tmp;
740         int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
741         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
742
743         do {
744                 struct ldlm_lock *lock = NULL;
745
746                 /* First, look for a lock that has not been cleaned yet;
747                  * all cleaned locks are marked with the CLEANED flag. */
748                 lock_res(res);
749                 list_for_each(tmp, q) {
750                         lock = list_entry(tmp, struct ldlm_lock,
751                                           l_res_link);
752                         if (ldlm_is_cleaned(lock)) {
753                                 lock = NULL;
754                                 continue;
755                         }
756                         LDLM_LOCK_GET(lock);
757                         ldlm_set_cleaned(lock);
758                         break;
759                 }
760
761                 if (lock == NULL) {
762                         unlock_res(res);
763                         break;
764                 }
765
766                 /* Set CBPENDING so nothing in the cancellation path
767                  * can match this lock. */
768                 ldlm_set_cbpending(lock);
769                 ldlm_set_failed(lock);
770                 lock->l_flags |= flags;
771
772                 /* ... without sending a CANCEL message for local_only. */
773                 if (local_only)
774                         ldlm_set_local_only(lock);
775
776                 if (local_only && (lock->l_readers || lock->l_writers)) {
777                         /* This is a little bit gross, but much better than the
778                          * alternative: pretend that we got a blocking AST from
779                          * the server, so that when the lock is decref'd, it
780                          * will go away ... */
781                         unlock_res(res);
782                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
783                         if (lock->l_flags & LDLM_FL_FAIL_LOC) {
784                                 set_current_state(TASK_UNINTERRUPTIBLE);
785                                 schedule_timeout(cfs_time_seconds(4));
786                                 set_current_state(TASK_RUNNING);
787                         }
788                         if (lock->l_completion_ast)
789                                 lock->l_completion_ast(lock,
790                                                        LDLM_FL_FAILED, NULL);
791                         LDLM_LOCK_RELEASE(lock);
792                         continue;
793                 }
794
795                 if (client) {
796                         struct lustre_handle lockh;
797
798                         unlock_res(res);
799                         ldlm_lock2handle(lock, &lockh);
800                         rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
801                         if (rc)
802                                 CERROR("ldlm_cli_cancel: %d\n", rc);
803                 } else {
804                         ldlm_resource_unlink_lock(lock);
805                         unlock_res(res);
806                         LDLM_DEBUG(lock, "Freeing a lock still held by a "
807                                    "client node");
808                         ldlm_lock_destroy(lock);
809                 }
810                 LDLM_LOCK_RELEASE(lock);
811         } while (1);
812 }
813
814 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
815                                struct hlist_node *hnode, void *arg)
816 {
817         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
818         __u64 flags = *(__u64 *)arg;
819
820         cleanup_resource(res, &res->lr_granted, flags);
821         cleanup_resource(res, &res->lr_converting, flags);
822         cleanup_resource(res, &res->lr_waiting, flags);
823
824         return 0;
825 }
826
827 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
828                                   struct hlist_node *hnode, void *arg)
829 {
830         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
831
832         lock_res(res);
833         CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
834                "(%d) after lock cleanup; forcing cleanup.\n",
835                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
836                atomic_read(&res->lr_refcount) - 1);
837
838         ldlm_resource_dump(D_ERROR, res);
839         unlock_res(res);
840         return 0;
841 }
842
843 /**
844  * Cancel and destroy all locks in the namespace.
845  *
846  * Typically used during evictions when server notified client that it was
847  * evicted and all of its state needs to be destroyed.
848  * Also used during shutdown.
849  */
850 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
851 {
852         if (ns == NULL) {
853                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
854                 return ELDLM_OK;
855         }
856
857         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
858                                  &flags, 0);
859         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
860                                  NULL, 0);
861         return ELDLM_OK;
862 }
863 EXPORT_SYMBOL(ldlm_namespace_cleanup);
864
865 /**
866  * Attempts to free namespace.
867  *
868  * Only used when namespace goes away, like during an unmount.
869  */
870 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
871 {
872         ENTRY;
873
874         /* At shutdown time, don't call the cancellation callback */
875         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
876
877         if (atomic_read(&ns->ns_bref) > 0) {
878                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
879                 int rc;
880                 CDEBUG(D_DLMTRACE,
881                        "dlm namespace %s free waiting on refcount %d\n",
882                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
883 force_wait:
884                 if (force)
885                         lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
886                                           MSEC_PER_SEC) / 4, NULL, NULL);
887
888                 rc = l_wait_event(ns->ns_waitq,
889                                   atomic_read(&ns->ns_bref) == 0, &lwi);
890
891                 /* Forced cleanups should be able to reclaim all references,
892                  * so it's safe to wait forever... we can't leak locks... */
893                 if (force && rc == -ETIMEDOUT) {
894                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
895                                        "namespace with %d resources in use, "
896                                        "(rc=%d)\n", ldlm_ns_name(ns),
897                                        atomic_read(&ns->ns_bref), rc);
898                         GOTO(force_wait, rc);
899                 }
900
901                 if (atomic_read(&ns->ns_bref)) {
902                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
903                                        "with %d resources in use, (rc=%d)\n",
904                                        ldlm_ns_name(ns),
905                                        atomic_read(&ns->ns_bref), rc);
906                         RETURN(ELDLM_NAMESPACE_EXISTS);
907                 }
908                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
909                        ldlm_ns_name(ns));
910         }
911
912         RETURN(ELDLM_OK);
913 }
914
915 /**
916  * Performs various cleanups for passed \a ns to make it drop refc and be
917  * ready for freeing. Waits for refc == 0.
918  *
919  * The following is done:
920  * (0) Unregister \a ns from its list to make inaccessible for potential
921  * users like pools thread and others;
922  * (1) Clear all locks in \a ns.
923  */
924 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
925                                struct obd_import *imp,
926                                int force)
927 {
928         int rc;
929         ENTRY;
930         if (!ns) {
931                 EXIT;
932                 return;
933         }
934
935         spin_lock(&ns->ns_lock);
936         ns->ns_stopping = 1;
937         spin_unlock(&ns->ns_lock);
938
939         /*
940          * Can fail with -EINTR when force == 0 in which case try harder.
941          */
942         rc = __ldlm_namespace_free(ns, force);
943         if (rc != ELDLM_OK) {
944                 if (imp) {
945                         ptlrpc_disconnect_import(imp, 0);
946                         ptlrpc_invalidate_import(imp);
947                 }
948
949                 /*
950                  * With all requests dropped and the import inactive
951                  * we are guaranteed all references will be dropped.
952                  */
953                 rc = __ldlm_namespace_free(ns, 1);
954                 LASSERT(rc == 0);
955         }
956         EXIT;
957 }
958
959 /**
960  * Frees memory structures related to \a ns. This is only done when
961  * ldlm_namespace_free_prior() has successfully removed all resources
962  * referencing \a ns and its refc == 0.
963  */
964 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
965 {
966         ENTRY;
967         if (!ns) {
968                 EXIT;
969                 return;
970         }
971
972         /* Make sure that nobody can find this ns in its list. */
973         ldlm_namespace_unregister(ns, ns->ns_client);
974         /* Fini pool _before_ parent proc dir is removed. This is important as
975          * ldlm_pool_fini() removes its own proc dir, which is a child of @dir.
976          * Removing it after @dir may cause an oops. */
977         ldlm_pool_fini(&ns->ns_pool);
978
979         ldlm_namespace_proc_unregister(ns);
980         cfs_hash_putref(ns->ns_rs_hash);
981         /* Namespace \a ns should not be on the list at this time; otherwise
982          * the poold thread could end up using the freed \a ns, which would
983          * cause problems. */
984         LASSERT(list_empty(&ns->ns_list_chain));
985         OBD_FREE_PTR(ns);
986         ldlm_put_ref();
987         EXIT;
988 }
989
990 /**
991  * Clean up the resources and free the namespace.
992  * bug 12864:
993  * Deadlock issue:
994  * proc1: destroy import
995  *        class_disconnect_export(grab cl_sem) ->
996  *              -> ldlm_namespace_free ->
997  *              -> lprocfs_remove(grab _lprocfs_lock).
998  * proc2: read proc info
999  *        lprocfs_fops_read(grab _lprocfs_lock) ->
1000  *              -> osc_rd_active, etc(grab cl_sem).
1001  *
1002  * To avoid this, ldlm_namespace_free is split into two parts: the first,
1003  * ldlm_namespace_free_prior, cleans up the resources that are still in
1004  * use; the second, ldlm_namespace_free_post, unregisters the lprocfs
1005  * entries and then frees the memory. The latter is called without
1006  * cli->cl_sem held.
1007  */
1008 void ldlm_namespace_free(struct ldlm_namespace *ns,
1009                          struct obd_import *imp,
1010                          int force)
1011 {
1012         ldlm_namespace_free_prior(ns, imp, force);
1013         ldlm_namespace_free_post(ns);
1014 }
1015 EXPORT_SYMBOL(ldlm_namespace_free);
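
/*
 * Sketch of the two-phase teardown described above, for a caller that must
 * not hold cli->cl_sem across the lprocfs removal (imp and force are
 * placeholders):
 *
 *     ldlm_namespace_free_prior(ns, imp, force);
 *     ... drop cli->cl_sem, finish disconnecting ...
 *     ldlm_namespace_free_post(ns);
 *
 * ldlm_namespace_free() itself simply runs the two phases back to back.
 */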
1016
1017 void ldlm_namespace_get(struct ldlm_namespace *ns)
1018 {
1019         atomic_inc(&ns->ns_bref);
1020 }
1021
1022 /* This is only for callers that care about refcount */
1023 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1024 {
1025         return atomic_inc_return(&ns->ns_bref);
1026 }
1027
1028 void ldlm_namespace_put(struct ldlm_namespace *ns)
1029 {
1030         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1031                 wake_up(&ns->ns_waitq);
1032                 spin_unlock(&ns->ns_lock);
1033         }
1034 }
1035
1036 /** Register \a ns in the list of namespaces */
1037 void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
1038 {
1039         mutex_lock(ldlm_namespace_lock(client));
1040         LASSERT(list_empty(&ns->ns_list_chain));
1041         list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
1042         ldlm_namespace_nr_inc(client);
1043         mutex_unlock(ldlm_namespace_lock(client));
1044 }
1045
1046 /** Unregister \a ns from the list of namespaces. */
1047 void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
1048 {
1049         mutex_lock(ldlm_namespace_lock(client));
1050         LASSERT(!list_empty(&ns->ns_list_chain));
1051         /* Some asserts and possibly other parts of the code are still
1052          * using list_empty(&ns->ns_list_chain). This is why it is
1053          * important to use list_del_init() here. */
1054         list_del_init(&ns->ns_list_chain);
1055         ldlm_namespace_nr_dec(client);
1056         mutex_unlock(ldlm_namespace_lock(client));
1057 }
1058
1059 /** Should be called with ldlm_namespace_lock(client) taken. */
1060 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1061                                        ldlm_side_t client)
1062 {
1063         LASSERT(!list_empty(&ns->ns_list_chain));
1064         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1065         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1066 }
1067
1068 /** Should be called with ldlm_namespace_lock(client) taken. */
1069 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1070                                          ldlm_side_t client)
1071 {
1072         LASSERT(!list_empty(&ns->ns_list_chain));
1073         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1074         list_move_tail(&ns->ns_list_chain,
1075                        ldlm_namespace_inactive_list(client));
1076 }
1077
1078 /** Should be called with ldlm_namespace_lock(client) taken. */
1079 struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
1080 {
1081         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1082         LASSERT(!list_empty(ldlm_namespace_list(client)));
1083         return container_of(ldlm_namespace_list(client)->next,
1084                             struct ldlm_namespace, ns_list_chain);
1085 }
1086
1087 /** Create and initialize new resource. */
1088 static struct ldlm_resource *ldlm_resource_new(ldlm_type_t type)
1089 {
1090         struct ldlm_resource *res;
1091         int idx;
1092
1093         OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1094         if (res == NULL)
1095                 return NULL;
1096
1097         if (type == LDLM_EXTENT) {
1098                 OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
1099                                sizeof(*res->lr_itree) * LCK_MODE_NUM);
1100                 if (res->lr_itree == NULL) {
1101                         OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1102                         return NULL;
1103                 }
1104                 /* Initialize interval trees for each lock mode. */
1105                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1106                         res->lr_itree[idx].lit_size = 0;
1107                         res->lr_itree[idx].lit_mode = 1 << idx;
1108                         res->lr_itree[idx].lit_root = NULL;
1109                 }
1110         }
1111
1112         INIT_LIST_HEAD(&res->lr_granted);
1113         INIT_LIST_HEAD(&res->lr_converting);
1114         INIT_LIST_HEAD(&res->lr_waiting);
1115
1116         atomic_set(&res->lr_refcount, 1);
1117         spin_lock_init(&res->lr_lock);
1118         lu_ref_init(&res->lr_reference);
1119
1120         /* Since LVB init can now be delayed, there is no longer a need to
1121          * acquire the mutex here immediately. */
1122         mutex_init(&res->lr_lvb_mutex);
1123         res->lr_lvb_initialized = false;
1124
1125         return res;
1126 }
1127
1128 /**
1129  * Return a reference to resource with given name, creating it if necessary.
1130  * Args: namespace with ns_lock unlocked
1131  * Locks: takes and releases NS hash-lock and res->lr_lock
1132  * Returns: referenced, unlocked ldlm_resource or ERR_PTR() on failure
1133  */
1134 struct ldlm_resource *
1135 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1136                   const struct ldlm_res_id *name, ldlm_type_t type, int create)
1137 {
1138         struct hlist_node       *hnode;
1139         struct ldlm_resource    *res = NULL;
1140         struct cfs_hash_bd              bd;
1141         __u64                   version;
1142         int                     ns_refcount = 0;
1143
1144         LASSERT(ns != NULL);
1145         LASSERT(parent == NULL);
1146         LASSERT(ns->ns_rs_hash != NULL);
1147         LASSERT(name->name[0] != 0);
1148
1149         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1150         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1151         if (hnode != NULL) {
1152                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1153                 GOTO(found, res);
1154         }
1155
1156         version = cfs_hash_bd_version_get(&bd);
1157         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1158
1159         if (create == 0)
1160                 return ERR_PTR(-ENOENT);
1161
1162         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1163                  "type: %d\n", type);
1164         res = ldlm_resource_new(type);
1165         if (res == NULL)
1166                 return ERR_PTR(-ENOMEM);
1167
1168         res->lr_ns_bucket  = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
1169         res->lr_name       = *name;
1170         res->lr_type       = type;
1171
1172         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1173         hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1174                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1175
1176         if (hnode != NULL) {
1177                 /* Someone won the race and already added the resource. */
1178                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1179                 /* Clean lu_ref for failed resource. */
1180                 lu_ref_fini(&res->lr_reference);
1181                 if (res->lr_itree != NULL)
1182                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1183                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1184                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1185 found:
1186                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1187                 return res;
1188         }
1189         /* We won! Let's add the resource. */
1190         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1191         if (cfs_hash_bd_count_get(&bd) == 1)
1192                 ns_refcount = ldlm_namespace_get_return(ns);
1193
1194         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1195
1196         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1197
1198         /* Let's see if we happened to be the very first resource in this
1199          * namespace. If so, and this is a client namespace, we need to move
1200          * the namespace into the active namespaces list to be patrolled by
1201          * the ldlm_poold. */
1202         if (ns_is_client(ns) && ns_refcount == 1) {
1203                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1204                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1205                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1206         }
1207
1208         return res;
1209 }
1210 EXPORT_SYMBOL(ldlm_resource_get);
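
/*
 * Usage sketch for ldlm_resource_get(); the resource name is a made-up
 * example and parent must currently be NULL:
 *
 *     struct ldlm_res_id res_id = { .name = { 0x2def01ab, 0, 0, 0 } };
 *     struct ldlm_resource *res;
 *
 *     res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
 *     if (IS_ERR(res))
 *             return PTR_ERR(res);
 *     ...
 *     ldlm_resource_putref(res);
 *
 * With create == 0 a missing resource yields ERR_PTR(-ENOENT) instead of
 * being allocated.
 */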
1211
1212 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1213 {
1214         LASSERT(res != NULL);
1215         LASSERT(res != LP_POISON);
1216         atomic_inc(&res->lr_refcount);
1217         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1218                atomic_read(&res->lr_refcount));
1219         return res;
1220 }
1221
1222 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1223                                          struct ldlm_resource *res)
1224 {
1225         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1226
1227         if (!list_empty(&res->lr_granted)) {
1228                 ldlm_resource_dump(D_ERROR, res);
1229                 LBUG();
1230         }
1231
1232         if (!list_empty(&res->lr_converting)) {
1233                 ldlm_resource_dump(D_ERROR, res);
1234                 LBUG();
1235         }
1236
1237         if (!list_empty(&res->lr_waiting)) {
1238                 ldlm_resource_dump(D_ERROR, res);
1239                 LBUG();
1240         }
1241
1242         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1243                                bd, &res->lr_hash);
1244         lu_ref_fini(&res->lr_reference);
1245         if (cfs_hash_bd_count_get(bd) == 0)
1246                 ldlm_namespace_put(nsb->nsb_namespace);
1247 }
1248
1249 /* Returns 1 if the resource was freed, 0 if it remains. */
1250 int ldlm_resource_putref(struct ldlm_resource *res)
1251 {
1252         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1253         struct cfs_hash_bd   bd;
1254
1255         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1256         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1257                res, atomic_read(&res->lr_refcount) - 1);
1258
1259         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1260         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1261                 __ldlm_resource_putref_final(&bd, res);
1262                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1263                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1264                         ns->ns_lvbo->lvbo_free(res);
1265                 if (res->lr_itree != NULL)
1266                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1267                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1268                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1269                 return 1;
1270         }
1271         return 0;
1272 }
1273 EXPORT_SYMBOL(ldlm_resource_putref);
1274
1275 /* Returns 1 if the resource was freed, 0 if it remains. */
1276 int ldlm_resource_putref_locked(struct ldlm_resource *res)
1277 {
1278         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1279
1280         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1281         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1282                res, atomic_read(&res->lr_refcount) - 1);
1283
1284         if (atomic_dec_and_test(&res->lr_refcount)) {
1285                 struct cfs_hash_bd bd;
1286
1287                 cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
1288                                 &res->lr_name, &bd);
1289                 __ldlm_resource_putref_final(&bd, res);
1290                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1291                 /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
1292                  * so we can never get here via cfs_hash_del();
1293                  * cfs_hash_for_each_nolock() is the only path that reaches
1294                  * this point, in which case releasing cfs_hash_bd_lock is safe.
1295                  */
1296                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1297                         ns->ns_lvbo->lvbo_free(res);
1298                 if (res->lr_itree != NULL)
1299                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1300                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1301                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1302
1303                 cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1304                 return 1;
1305         }
1306         return 0;
1307 }
1308
1309 /**
1310  * Add a lock into a given resource into specified lock list.
1311  */
1312 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1313                             struct ldlm_lock *lock)
1314 {
1315         check_res_locked(res);
1316
1317         LDLM_DEBUG(lock, "About to add this lock:\n");
1318
1319         if (ldlm_is_destroyed(lock)) {
1320                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1321                 return;
1322         }
1323
1324         LASSERT(list_empty(&lock->l_res_link));
1325
1326         list_add_tail(&lock->l_res_link, head);
1327 }
1328
1329 /**
1330  * Insert a lock into resource after specified lock.
1331  *
1332  * Obtain resource description from the lock we are inserting after.
1333  */
1334 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1335                                      struct ldlm_lock *new)
1336 {
1337         struct ldlm_resource *res = original->l_resource;
1338
1339         check_res_locked(res);
1340
1341         ldlm_resource_dump(D_INFO, res);
1342         LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
1343
1344         if (ldlm_is_destroyed(new)) {
1345                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1346                 goto out;
1347         }
1348
1349         LASSERT(list_empty(&new->l_res_link));
1350
1351         list_add(&new->l_res_link, &original->l_res_link);
1352  out:;
1353 }
1354
1355 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1356 {
1357         int type = lock->l_resource->lr_type;
1358
1359         check_res_locked(lock->l_resource);
1360         if (type == LDLM_IBITS || type == LDLM_PLAIN)
1361                 ldlm_unlink_lock_skiplist(lock);
1362         else if (type == LDLM_EXTENT)
1363                 ldlm_extent_unlink_lock(lock);
1364         list_del_init(&lock->l_res_link);
1365 }
1366 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1367
1368 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1369 {
1370         desc->lr_type = res->lr_type;
1371         desc->lr_name = res->lr_name;
1372 }
1373
1374 /**
1375  * Print information about all locks in all namespaces on this node to debug
1376  * log.
1377  */
1378 void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
1379 {
1380         struct list_head *tmp;
1381
1382         if (!((libcfs_debug | D_ERROR) & level))
1383                 return;
1384
1385         mutex_lock(ldlm_namespace_lock(client));
1386
1387         list_for_each(tmp, ldlm_namespace_list(client)) {
1388                 struct ldlm_namespace *ns;
1389                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1390                 ldlm_namespace_dump(level, ns);
1391         }
1392
1393         mutex_unlock(ldlm_namespace_lock(client));
1394 }
1395
1396 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1397                               struct hlist_node *hnode, void *arg)
1398 {
1399         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1400         int    level = (int)(unsigned long)arg;
1401
1402         lock_res(res);
1403         ldlm_resource_dump(level, res);
1404         unlock_res(res);
1405
1406         return 0;
1407 }
1408
1409 /**
1410  * Print information about all locks in this namespace on this node to debug
1411  * log.
1412  */
1413 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1414 {
1415         if (!((libcfs_debug | D_ERROR) & level))
1416                 return;
1417
1418         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1419                ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1420                ns_is_client(ns) ? "client" : "server");
1421
1422         if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
1423                 return;
1424
1425         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1426                                  ldlm_res_hash_dump,
1427                                  (void *)(unsigned long)level, 0);
1428         spin_lock(&ns->ns_lock);
1429         ns->ns_next_dump = cfs_time_shift(10);
1430         spin_unlock(&ns->ns_lock);
1431 }
1432
1433 /**
1434  * Print information about all locks in this resource to debug log.
1435  */
1436 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1437 {
1438         struct ldlm_lock *lock;
1439         unsigned int granted = 0;
1440
1441         CLASSERT(RES_NAME_SIZE == 4);
1442
1443         if (!((libcfs_debug | D_ERROR) & level))
1444                 return;
1445
1446         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1447                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1448
1449         if (!list_empty(&res->lr_granted)) {
1450                 CDEBUG(level, "Granted locks (in reverse order):\n");
1451                 list_for_each_entry_reverse(lock, &res->lr_granted,
1452                                                 l_res_link) {
1453                         LDLM_DEBUG_LIMIT(level, lock, "###");
1454                         if (!(level & D_CANTMASK) &&
1455                             ++granted > ldlm_dump_granted_max) {
1456                                 CDEBUG(level, "only dump %d granted locks to "
1457                                        "avoid DDOS.\n", granted);
1458                                 break;
1459                         }
1460                 }
1461         }
1462         if (!list_empty(&res->lr_converting)) {
1463                 CDEBUG(level, "Converting locks:\n");
1464                 list_for_each_entry(lock, &res->lr_converting, l_res_link)
1465                         LDLM_DEBUG_LIMIT(level, lock, "###");
1466         }
1467         if (!list_empty(&res->lr_waiting)) {
1468                 CDEBUG(level, "Waiting locks:\n");
1469                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1470                         LDLM_DEBUG_LIMIT(level, lock, "###");
1471         }
1472 }
1473 EXPORT_SYMBOL(ldlm_resource_dump);