1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_resource.c
37  *
38  * Author: Phil Schwan <phil@clusterfs.com>
39  * Author: Peter Braam <braam@clusterfs.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43 #include <lustre_dlm.h>
44 #include <lustre_fid.h>
45 #include <obd_class.h>
46 #include "ldlm_internal.h"
47
48 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
49 struct kmem_cache *ldlm_interval_tree_slab;
50
51 int ldlm_srv_namespace_nr = 0;
52 int ldlm_cli_namespace_nr = 0;
53
54 struct mutex ldlm_srv_namespace_lock;
55 struct list_head ldlm_srv_namespace_list;
56
57 struct mutex ldlm_cli_namespace_lock;
58 /* Client Namespaces that have active resources in them.
59  * Once all resources go away, ldlm_poold moves such namespaces to the
60  * inactive list */
61 struct list_head ldlm_cli_active_namespace_list;
62 /* Client namespaces that don't have any locks in them */
63 struct list_head ldlm_cli_inactive_namespace_list;
64
65 static struct proc_dir_entry *ldlm_type_proc_dir;
66 static struct proc_dir_entry *ldlm_ns_proc_dir;
67 struct proc_dir_entry *ldlm_svc_proc_dir;
68
69 /* During a debug dump, only print a limited number of granted locks per
70  * resource, to avoid a self-inflicted DoS. */
71 static unsigned int ldlm_dump_granted_max = 256;
72
73 #ifdef CONFIG_PROC_FS
74 static ssize_t
75 lprocfs_dump_ns_seq_write(struct file *file, const char __user *buffer,
76                           size_t count, loff_t *off)
77 {
78         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
79         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
80         RETURN(count);
81 }
82 LPROC_SEQ_FOPS_WO_TYPE(ldlm, dump_ns);
83
84 LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
85 LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint);
86
87 /* The watermark is stored internally as a lock count, and it is displayed
88  * as the number of MB of memory consumed by those locks. */
89 static int seq_watermark_show(struct seq_file *m, void *data)
90 {
91         __u64 locknr = *(__u64 *)m->private;
92         return seq_printf(m, LPU64"\n",
93                           (locknr * sizeof(struct ldlm_lock)) >> 20);
94 }
95
96 static ssize_t seq_watermark_write(struct file *file,
97                                    const char __user *buffer, size_t count,
98                                    loff_t *off)
99 {
100         __u64 watermark;
101         __u64 *data = ((struct seq_file *)file->private_data)->private;
102         int rc;
103
104         rc = lprocfs_write_frac_u64_helper(buffer, count, &watermark, 1 << 20);
105         if (rc) {
106                 CERROR("Failed to set LDLM watermark, rc = %d.\n", rc);
107                 return rc;
108         } else if (watermark != 0 && watermark < (1 << 20)) {
109                 CERROR("Watermark should be at least 1MB.\n");
110                 return -EINVAL;
111         }
112
113         do_div(watermark, sizeof(struct ldlm_lock));
114         *data = watermark;
115
116         if (ldlm_watermark_low != 0 && ldlm_watermark_high != 0 &&
117             ldlm_watermark_low > ldlm_watermark_high)
118                 ldlm_watermark_low = ldlm_watermark_high;
119         return count;
120 }
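/*
 * Illustrative round trip for the conversion above (a sketch; the exact
 * sizeof(struct ldlm_lock) depends on the kernel and build options):
 *
 *   echo 100 > watermark_mb_high
 *     - lprocfs_write_frac_u64_helper() scales the input by 1 << 20,
 *       giving 100 MB expressed in bytes;
 *     - do_div(watermark, sizeof(struct ldlm_lock)) converts that byte
 *       budget into a lock count, which is what gets stored.
 *
 *   cat watermark_mb_high
 *     - seq_watermark_show() prints
 *       (lock_count * sizeof(struct ldlm_lock)) >> 20, i.e. roughly the
 *       100 MB that was written, modulo rounding in the two conversions.
 */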
121
122 static int seq_watermark_open(struct inode *inode, struct file *file)
123 {
124         return single_open(file, seq_watermark_show, PDE_DATA(inode));
125 }
126
127 static const struct file_operations ldlm_watermark_fops = {
128         .owner          = THIS_MODULE,
129         .open           = seq_watermark_open,
130         .read           = seq_read,
131         .write          = seq_watermark_write,
132         .llseek         = seq_lseek,
133         .release        = lprocfs_single_release,
134 };
135
136 int ldlm_proc_setup(void)
137 {
138         int rc;
139         struct lprocfs_vars list[] = {
140                 { .name =       "dump_namespaces",
141                   .fops =       &ldlm_dump_ns_fops,
142                   .proc_mode =  0222 },
143                 { .name =       "dump_granted_max",
144                   .fops =       &ldlm_rw_uint_fops,
145                   .data =       &ldlm_dump_granted_max },
146                 { .name =       "cancel_unused_locks_before_replay",
147                   .fops =       &ldlm_rw_uint_fops,
148                   .data =       &ldlm_cancel_unused_locks_before_replay },
149                 { .name =       "watermark_mb_low",
150                   .fops =       &ldlm_watermark_fops,
151                   .data =       &ldlm_watermark_low },
152                 { .name =       "watermark_mb_high",
153                   .fops =       &ldlm_watermark_fops,
154                   .data =       &ldlm_watermark_high },
155                 { NULL }};
156         ENTRY;
157         LASSERT(ldlm_ns_proc_dir == NULL);
158
159         ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
160                                               proc_lustre_root,
161                                               NULL, NULL);
162         if (IS_ERR(ldlm_type_proc_dir)) {
163                 CERROR("LProcFS failed in ldlm-init\n");
164                 rc = PTR_ERR(ldlm_type_proc_dir);
165                 GOTO(err, rc);
166         }
167
168         ldlm_ns_proc_dir = lprocfs_register("namespaces",
169                                             ldlm_type_proc_dir,
170                                             NULL, NULL);
171         if (IS_ERR(ldlm_ns_proc_dir)) {
172                 CERROR("LProcFS failed in ldlm-init\n");
173                 rc = PTR_ERR(ldlm_ns_proc_dir);
174                 GOTO(err_type, rc);
175         }
176
177         ldlm_svc_proc_dir = lprocfs_register("services",
178                                              ldlm_type_proc_dir,
179                                              NULL, NULL);
180         if (IS_ERR(ldlm_svc_proc_dir)) {
181                 CERROR("LProcFS failed in ldlm-init\n");
182                 rc = PTR_ERR(ldlm_svc_proc_dir);
183                 GOTO(err_ns, rc);
184         }
185
186         rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
187         if (rc != 0) {
188                 CERROR("LProcFS failed in ldlm-init\n");
189                 GOTO(err_svc, rc);
190         }
191
192         RETURN(0);
193
194 err_svc:
195         lprocfs_remove(&ldlm_svc_proc_dir);
196 err_ns:
197         lprocfs_remove(&ldlm_ns_proc_dir);
198 err_type:
199         lprocfs_remove(&ldlm_type_proc_dir);
200 err:
201         ldlm_svc_proc_dir = NULL;
202         RETURN(rc);
203 }
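/*
 * With proc_lustre_root at its usual location, the registrations above
 * produce roughly the following tree (paths are illustrative, not
 * guaranteed):
 *
 *   /proc/fs/lustre/ldlm/dump_namespaces
 *   /proc/fs/lustre/ldlm/dump_granted_max
 *   /proc/fs/lustre/ldlm/cancel_unused_locks_before_replay
 *   /proc/fs/lustre/ldlm/watermark_mb_{low,high}
 *   /proc/fs/lustre/ldlm/namespaces/   (per-namespace dirs, registered below)
 *   /proc/fs/lustre/ldlm/services/     (populated by the LDLM services)
 */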
204
205 void ldlm_proc_cleanup(void)
206 {
207         if (ldlm_svc_proc_dir)
208                 lprocfs_remove(&ldlm_svc_proc_dir);
209
210         if (ldlm_ns_proc_dir)
211                 lprocfs_remove(&ldlm_ns_proc_dir);
212
213         if (ldlm_type_proc_dir)
214                 lprocfs_remove(&ldlm_type_proc_dir);
215 }
216
217 static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
218 {
219         struct ldlm_namespace   *ns  = m->private;
220         __u64                   res = 0;
221         struct cfs_hash_bd              bd;
222         int                     i;
223
224         /* result is not strictly consistent */
225         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
226                 res += cfs_hash_bd_count_get(&bd);
227         return lprocfs_u64_seq_show(m, &res);
228 }
229 LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);
230
231 static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
232 {
233         struct ldlm_namespace   *ns = m->private;
234         __u64                   locks;
235
236         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
237                                         LPROCFS_FIELDS_FLAGS_SUM);
238         return lprocfs_u64_seq_show(m, &locks);
239 }
240 LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);
241
242 static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
243 {
244         struct ldlm_namespace *ns = m->private;
245         __u32 *nr = &ns->ns_max_unused;
246
247         if (ns_connect_lru_resize(ns))
248                 nr = &ns->ns_nr_unused;
249         return lprocfs_uint_seq_show(m, nr);
250 }
251
252 static ssize_t lprocfs_lru_size_seq_write(struct file *file,
253                                           const char __user *buffer,
254                                           size_t count, loff_t *off)
255 {
256         struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
257         char dummy[MAX_STRING_SIZE + 1], *end;
258         unsigned long tmp;
259         int lru_resize;
260
261         dummy[MAX_STRING_SIZE] = '\0';
262         if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
263                 return -EFAULT;
264
265         if (strncmp(dummy, "clear", 5) == 0) {
266                 CDEBUG(D_DLMTRACE,
267                        "dropping all unused locks from namespace %s\n",
268                        ldlm_ns_name(ns));
269                 if (ns_connect_lru_resize(ns)) {
270                         int canceled, unused  = ns->ns_nr_unused;
271
272                         /* Try to cancel all @ns_nr_unused locks. */
273                         canceled = ldlm_cancel_lru(ns, unused, 0,
274                                                    LDLM_CANCEL_PASSED);
275                         if (canceled < unused) {
276                                 CDEBUG(D_DLMTRACE,
277                                        "not all requested locks are canceled, "
278                                        "requested: %d, canceled: %d\n", unused,
279                                        canceled);
280                                 return -EINVAL;
281                         }
282                 } else {
283                         tmp = ns->ns_max_unused;
284                         ns->ns_max_unused = 0;
285                         ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
286                         ns->ns_max_unused = tmp;
287                 }
288                 return count;
289         }
290
291         tmp = simple_strtoul(dummy, &end, 0);
292         if (dummy == end) {
293                 CERROR("invalid value written\n");
294                 return -EINVAL;
295         }
296         lru_resize = (tmp == 0);
297
298         if (ns_connect_lru_resize(ns)) {
299                 if (!lru_resize)
300                         ns->ns_max_unused = (unsigned int)tmp;
301
302                 if (tmp > ns->ns_nr_unused)
303                         tmp = ns->ns_nr_unused;
304                 tmp = ns->ns_nr_unused - tmp;
305
306                 CDEBUG(D_DLMTRACE,
307                        "changing namespace %s unused locks from %u to %u\n",
308                        ldlm_ns_name(ns), ns->ns_nr_unused,
309                        (unsigned int)tmp);
310                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
311
312                 if (!lru_resize) {
313                         CDEBUG(D_DLMTRACE,
314                                "disable lru_resize for namespace %s\n",
315                                ldlm_ns_name(ns));
316                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
317                 }
318         } else {
319                 CDEBUG(D_DLMTRACE,
320                        "changing namespace %s max_unused from %u to %u\n",
321                        ldlm_ns_name(ns), ns->ns_max_unused,
322                        (unsigned int)tmp);
323                 ns->ns_max_unused = (unsigned int)tmp;
324                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
325
326                 /* Make sure that LRU resize was originally supported before
327                  * turning it on here. */
328                 if (lru_resize &&
329                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
330                         CDEBUG(D_DLMTRACE,
331                                "enable lru_resize for namespace %s\n",
332                                ldlm_ns_name(ns));
333                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
334                 }
335         }
336
337         return count;
338 }
339 LPROC_SEQ_FOPS(lprocfs_lru_size);
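/*
 * Usage sketch for the per-namespace lru_size file handled above
 * (paths are illustrative); behaviour is roughly:
 *
 *   echo clear > .../namespaces/<ns>/lru_size
 *       - cancel all unused locks in the namespace;
 *   echo 0 > .../namespaces/<ns>/lru_size
 *       - re-enable LRU resize, provided the server originally
 *         advertised OBD_CONNECT_LRU_RESIZE;
 *   echo N > .../namespaces/<ns>/lru_size   (N > 0)
 *       - set ns_max_unused to N, disabling LRU resize if it was enabled.
 */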
340
341 static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
342 {
343         struct ldlm_namespace *ns = m->private;
344         unsigned int supp = ns_connect_cancelset(ns);
345
346         return lprocfs_uint_seq_show(m, &supp);
347 }
348
349 static ssize_t lprocfs_elc_seq_write(struct file *file,
350                                      const char __user *buffer,
351                                      size_t count, loff_t *off)
352 {
353         struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
354         unsigned int supp = -1;
355         int rc;
356
357         rc = lprocfs_wr_uint(file, buffer, count, &supp);
358         if (rc < 0)
359                 return rc;
360
361         if (supp == 0)
362                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
363         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
364                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
365         return count;
366 }
367 LPROC_SEQ_FOPS(lprocfs_elc);
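/*
 * The early_lock_cancel file above is a simple toggle: writing 0 clears
 * OBD_CONNECT_CANCELSET for the namespace, and writing a non-zero value
 * re-enables it, but only if the original connect flags showed that the
 * server supports cancel-on-enqueue.
 */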
368
369 static void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
370 {
371         if (ns->ns_proc_dir_entry == NULL)
372                 CERROR("dlm namespace %s has no procfs dir?\n",
373                        ldlm_ns_name(ns));
374         else
375                 lprocfs_remove(&ns->ns_proc_dir_entry);
376
377         if (ns->ns_stats != NULL)
378                 lprocfs_free_stats(&ns->ns_stats);
379 }
380
381 static int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
382 {
383         struct lprocfs_vars lock_vars[2];
384         char lock_name[MAX_STRING_SIZE + 1];
385         struct proc_dir_entry *ns_pde;
386
387         LASSERT(ns != NULL);
388         LASSERT(ns->ns_rs_hash != NULL);
389
390         if (ns->ns_proc_dir_entry != NULL) {
391                 ns_pde = ns->ns_proc_dir_entry;
392         } else {
393                 ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir);
394                 if (ns_pde == NULL)
395                         return -ENOMEM;
396                 ns->ns_proc_dir_entry = ns_pde;
397         }
398
399         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
400         if (ns->ns_stats == NULL)
401                 return -ENOMEM;
402
403         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
404                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
405
406         lock_name[MAX_STRING_SIZE] = '\0';
407
408         memset(lock_vars, 0, sizeof(lock_vars));
409         lock_vars[0].name = lock_name;
410
411         ldlm_add_var(&lock_vars[0], ns_pde, "resource_count", ns,
412                      &lprocfs_ns_resources_fops);
413         ldlm_add_var(&lock_vars[0], ns_pde, "lock_count", ns,
414                      &lprocfs_ns_locks_fops);
415
416         if (ns_is_client(ns)) {
417                 ldlm_add_var(&lock_vars[0], ns_pde, "lock_unused_count",
418                              &ns->ns_nr_unused, &ldlm_uint_fops);
419                 ldlm_add_var(&lock_vars[0], ns_pde, "lru_size", ns,
420                              &lprocfs_lru_size_fops);
421                 ldlm_add_var(&lock_vars[0], ns_pde, "lru_max_age",
422                              &ns->ns_max_age, &ldlm_rw_uint_fops);
423                 ldlm_add_var(&lock_vars[0], ns_pde, "early_lock_cancel",
424                              ns, &lprocfs_elc_fops);
425         } else {
426                 ldlm_add_var(&lock_vars[0], ns_pde, "ctime_age_limit",
427                              &ns->ns_ctime_age_limit, &ldlm_rw_uint_fops);
428                 ldlm_add_var(&lock_vars[0], ns_pde, "lock_timeouts",
429                              &ns->ns_timeouts, &ldlm_uint_fops);
430                 ldlm_add_var(&lock_vars[0], ns_pde, "max_nolock_bytes",
431                              &ns->ns_max_nolock_size, &ldlm_rw_uint_fops);
432                 ldlm_add_var(&lock_vars[0], ns_pde, "contention_seconds",
433                              &ns->ns_contention_time, &ldlm_rw_uint_fops);
434                 ldlm_add_var(&lock_vars[0], ns_pde, "contended_locks",
435                              &ns->ns_contended_locks, &ldlm_rw_uint_fops);
436                 ldlm_add_var(&lock_vars[0], ns_pde, "max_parallel_ast",
437                              &ns->ns_max_parallel_ast, &ldlm_rw_uint_fops);
438         }
439         return 0;
440 }
441 #undef MAX_STRING_SIZE
442 #else /* CONFIG_PROC_FS */
443
444 #define ldlm_namespace_proc_unregister(ns)      ({;})
445 #define ldlm_namespace_proc_register(ns)        ({0;})
446
447 #endif /* CONFIG_PROC_FS */
448
449 static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
450                                   const void *key, unsigned mask)
451 {
452         const struct ldlm_res_id     *id  = key;
453         unsigned                val = 0;
454         unsigned                i;
455
456         for (i = 0; i < RES_NAME_SIZE; i++)
457                 val += id->name[i];
458         return val & mask;
459 }
460
461 static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
462                                       const void *key, unsigned mask)
463 {
464         const struct ldlm_res_id *id = key;
465         struct lu_fid       fid;
466         __u32               hash;
467         __u32               val;
468
469         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
470         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
471         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
472
473         hash = fid_flatten32(&fid);
474         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
475         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
476                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
477                 hash += (val >> 5) + (val << 11);
478         } else {
479                 val = fid_oid(&fid);
480         }
481         hash = hash_long(hash, hs->hs_bkt_bits);
482         /* give me another random factor */
483         hash -= hash_long((unsigned long)hs, val % 11 + 3);
484
485         hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
486         hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
487
488         return hash & mask;
489 }
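/*
 * Resource-name layout assumed by ldlm_res_hop_fid_hash() above: for
 * metadata namespaces the resource name packs a FID as
 *
 *   name[LUSTRE_RES_ID_SEQ_OFF]                   -> f_seq
 *   name[LUSTRE_RES_ID_VER_OID_OFF] & 0xffffffff  -> f_oid
 *   name[LUSTRE_RES_ID_VER_OID_OFF] >> 32         -> f_ver
 *
 * with an optional extra hash value in name[LUSTRE_RES_ID_HSH_OFF].
 */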
490
491 static void *ldlm_res_hop_key(struct hlist_node *hnode)
492 {
493         struct ldlm_resource   *res;
494
495         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
496         return &res->lr_name;
497 }
498
499 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
500 {
501         struct ldlm_resource   *res;
502
503         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
504         return ldlm_res_eq((const struct ldlm_res_id *)key,
505                            (const struct ldlm_res_id *)&res->lr_name);
506 }
507
508 static void *ldlm_res_hop_object(struct hlist_node *hnode)
509 {
510         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
511 }
512
513 static void
514 ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
515 {
516         struct ldlm_resource *res;
517
518         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
519         ldlm_resource_getref(res);
520 }
521
522 static void
523 ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
524 {
525         struct ldlm_resource *res;
526
527         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
528         /* cfs_hash_for_each_nolock is the only path that can call this */
529         ldlm_resource_putref_locked(res);
530 }
531
532 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
533 {
534         struct ldlm_resource *res;
535
536         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
537         ldlm_resource_putref(res);
538 }
539
540 static struct cfs_hash_ops ldlm_ns_hash_ops = {
541         .hs_hash        = ldlm_res_hop_hash,
542         .hs_key         = ldlm_res_hop_key,
543         .hs_keycmp      = ldlm_res_hop_keycmp,
544         .hs_keycpy      = NULL,
545         .hs_object      = ldlm_res_hop_object,
546         .hs_get         = ldlm_res_hop_get_locked,
547         .hs_put_locked  = ldlm_res_hop_put_locked,
548         .hs_put         = ldlm_res_hop_put
549 };
550
551 static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
552         .hs_hash        = ldlm_res_hop_fid_hash,
553         .hs_key         = ldlm_res_hop_key,
554         .hs_keycmp      = ldlm_res_hop_keycmp,
555         .hs_keycpy      = NULL,
556         .hs_object      = ldlm_res_hop_object,
557         .hs_get         = ldlm_res_hop_get_locked,
558         .hs_put_locked  = ldlm_res_hop_put_locked,
559         .hs_put         = ldlm_res_hop_put
560 };
561
562 typedef struct {
563         ldlm_ns_type_t  nsd_type;
564         /** hash bucket bits */
565         unsigned        nsd_bkt_bits;
566         /** hash bits */
567         unsigned        nsd_all_bits;
568         /** hash operations */
569         struct cfs_hash_ops *nsd_hops;
570 } ldlm_ns_hash_def_t;
571
572 static ldlm_ns_hash_def_t ldlm_ns_hash_defs[] =
573 {
574         {
575                 .nsd_type       = LDLM_NS_TYPE_MDC,
576                 .nsd_bkt_bits   = 11,
577                 .nsd_all_bits   = 16,
578                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
579         },
580         {
581                 .nsd_type       = LDLM_NS_TYPE_MDT,
582                 .nsd_bkt_bits   = 14,
583                 .nsd_all_bits   = 21,
584                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
585         },
586         {
587                 .nsd_type       = LDLM_NS_TYPE_OSC,
588                 .nsd_bkt_bits   = 8,
589                 .nsd_all_bits   = 12,
590                 .nsd_hops       = &ldlm_ns_hash_ops,
591         },
592         {
593                 .nsd_type       = LDLM_NS_TYPE_OST,
594                 .nsd_bkt_bits   = 11,
595                 .nsd_all_bits   = 17,
596                 .nsd_hops       = &ldlm_ns_hash_ops,
597         },
598         {
599                 .nsd_type       = LDLM_NS_TYPE_MGC,
600                 .nsd_bkt_bits   = 4,
601                 .nsd_all_bits   = 4,
602                 .nsd_hops       = &ldlm_ns_hash_ops,
603         },
604         {
605                 .nsd_type       = LDLM_NS_TYPE_MGT,
606                 .nsd_bkt_bits   = 4,
607                 .nsd_all_bits   = 4,
608                 .nsd_hops       = &ldlm_ns_hash_ops,
609         },
610         {
611                 .nsd_type       = LDLM_NS_TYPE_UNKNOWN,
612         },
613 };
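/*
 * ldlm_namespace_new() below walks this table until it finds the entry
 * matching the requested ns_type (the LDLM_NS_TYPE_UNKNOWN sentinel ends
 * the scan with an error).  The nsd_all_bits/nsd_bkt_bits pair sizes the
 * resource hash passed to cfs_hash_create(); metadata namespaces (MDC/MDT)
 * use the FID-aware hash ops, while the data (OSC/OST) and MGS (MGC/MGT)
 * namespaces use the plain byte-sum hash.
 */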
614
615 /**
616  * Create and initialize a new, empty namespace.
617  */
618 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
619                                           ldlm_side_t client,
620                                           ldlm_appetite_t apt,
621                                           ldlm_ns_type_t ns_type)
622 {
623         struct ldlm_namespace *ns = NULL;
624         struct ldlm_ns_bucket *nsb;
625         ldlm_ns_hash_def_t    *nsd;
626         struct cfs_hash_bd          bd;
627         int                    idx;
628         int                    rc;
629         ENTRY;
630
631         LASSERT(obd != NULL);
632
633         rc = ldlm_get_ref();
634         if (rc) {
635                 CERROR("ldlm_get_ref failed: %d\n", rc);
636                 RETURN(NULL);
637         }
638
639         for (idx = 0;;idx++) {
640                 nsd = &ldlm_ns_hash_defs[idx];
641                 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
642                         CERROR("Unknown type %d for ns %s\n", ns_type, name);
643                         GOTO(out_ref, NULL);
644                 }
645
646                 if (nsd->nsd_type == ns_type)
647                         break;
648         }
649
650         OBD_ALLOC_PTR(ns);
651         if (!ns)
652                 GOTO(out_ref, NULL);
653
654         ns->ns_rs_hash = cfs_hash_create(name,
655                                          nsd->nsd_all_bits, nsd->nsd_all_bits,
656                                          nsd->nsd_bkt_bits, sizeof(*nsb),
657                                          CFS_HASH_MIN_THETA,
658                                          CFS_HASH_MAX_THETA,
659                                          nsd->nsd_hops,
660                                          CFS_HASH_DEPTH |
661                                          CFS_HASH_BIGNAME |
662                                          CFS_HASH_SPIN_BKTLOCK |
663                                          CFS_HASH_NO_ITEMREF);
664         if (ns->ns_rs_hash == NULL)
665                 GOTO(out_ns, NULL);
666
667         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
668                 nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
669                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
670                 nsb->nsb_namespace = ns;
671                 nsb->nsb_reclaim_start = 0;
672         }
673
674         ns->ns_obd      = obd;
675         ns->ns_appetite = apt;
676         ns->ns_client   = client;
677
678         INIT_LIST_HEAD(&ns->ns_list_chain);
679         INIT_LIST_HEAD(&ns->ns_unused_list);
680         spin_lock_init(&ns->ns_lock);
681         atomic_set(&ns->ns_bref, 0);
682         init_waitqueue_head(&ns->ns_waitq);
683
684         ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
685         ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
686         ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
687
688         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
689         ns->ns_nr_unused          = 0;
690         ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
691         ns->ns_max_age            = LDLM_DEFAULT_MAX_ALIVE;
692         ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
693         ns->ns_timeouts           = 0;
694         ns->ns_orig_connect_flags = 0;
695         ns->ns_connect_flags      = 0;
696         ns->ns_stopping           = 0;
697         ns->ns_reclaim_start      = 0;
698         rc = ldlm_namespace_proc_register(ns);
699         if (rc != 0) {
700                 CERROR("Can't initialize ns proc, rc %d\n", rc);
701                 GOTO(out_hash, rc);
702         }
703
704         idx = ldlm_namespace_nr_read(client);
705         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
706         if (rc) {
707                 CERROR("Can't initialize lock pool, rc %d\n", rc);
708                 GOTO(out_proc, rc);
709         }
710
711         ldlm_namespace_register(ns, client);
712         RETURN(ns);
713 out_proc:
714         ldlm_namespace_proc_unregister(ns);
715         ldlm_namespace_cleanup(ns, 0);
716 out_hash:
717         cfs_hash_putref(ns->ns_rs_hash);
718 out_ns:
719         OBD_FREE_PTR(ns);
720 out_ref:
721         ldlm_put_ref();
722         RETURN(NULL);
723 }
724 EXPORT_SYMBOL(ldlm_namespace_new);
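/*
 * Hypothetical caller sketch (argument values are illustrative only; real
 * callers live in the individual OBD drivers):
 *
 *      struct ldlm_namespace *ns;
 *
 *      ns = ldlm_namespace_new(obd, "example-mdc-namespace",
 *                              LDLM_NAMESPACE_CLIENT,
 *                              LDLM_NAMESPACE_GREEDY,
 *                              LDLM_NS_TYPE_MDC);
 *      if (ns == NULL)
 *              return -ENOMEM;
 *      ...
 *      ldlm_namespace_free(ns, imp, 0);
 */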
725
726 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
727
728 /**
729  * Cancel and destroy all locks on a resource.
730  *
731  * If flags contains LDLM_FL_LOCAL_ONLY, don't try to tell the server, just
732  * clean up.  This is currently only used for recovery, and we make
733  * certain assumptions as a result--notably, that we shouldn't cancel
734  * locks with refs.
735  */
736 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
737                              __u64 flags)
738 {
739         struct list_head *tmp;
740         int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
741         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
742
743         do {
744                 struct ldlm_lock *lock = NULL;
745
746                 /* First, look for a lock that has not been cleaned yet;
747                  * all cleaned locks are marked with the CLEANED flag. */
748                 lock_res(res);
749                 list_for_each(tmp, q) {
750                         lock = list_entry(tmp, struct ldlm_lock,
751                                           l_res_link);
752                         if (ldlm_is_cleaned(lock)) {
753                                 lock = NULL;
754                                 continue;
755                         }
756                         LDLM_LOCK_GET(lock);
757                         ldlm_set_cleaned(lock);
758                         break;
759                 }
760
761                 if (lock == NULL) {
762                         unlock_res(res);
763                         break;
764                 }
765
766                 /* Set CBPENDING so nothing in the cancellation path
767                  * can match this lock. */
768                 ldlm_set_cbpending(lock);
769                 ldlm_set_failed(lock);
770                 lock->l_flags |= flags;
771
772                 /* ... without sending a CANCEL message for local_only. */
773                 if (local_only)
774                         ldlm_set_local_only(lock);
775
776                 if (local_only && (lock->l_readers || lock->l_writers)) {
777                         /* This is a little bit gross, but much better than the
778                          * alternative: pretend that we got a blocking AST from
779                          * the server, so that when the lock is decref'd, it
780                          * will go away ... */
781                         unlock_res(res);
782                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
783                         if (lock->l_flags & LDLM_FL_FAIL_LOC) {
784                                 set_current_state(TASK_UNINTERRUPTIBLE);
785                                 schedule_timeout(cfs_time_seconds(4));
786                                 set_current_state(TASK_RUNNING);
787                         }
788                         if (lock->l_completion_ast)
789                                 lock->l_completion_ast(lock,
790                                                        LDLM_FL_FAILED, NULL);
791                         LDLM_LOCK_RELEASE(lock);
792                         continue;
793                 }
794
795                 if (client) {
796                         struct lustre_handle lockh;
797
798                         unlock_res(res);
799                         ldlm_lock2handle(lock, &lockh);
800                         rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
801                         if (rc)
802                                 CERROR("ldlm_cli_cancel: %d\n", rc);
803                 } else {
804                         unlock_res(res);
805                         LDLM_DEBUG(lock, "Freeing a lock still held by a "
806                                    "client node");
807                         ldlm_lock_cancel(lock);
808                 }
809                 LDLM_LOCK_RELEASE(lock);
810         } while (1);
811 }
812
813 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
814                                struct hlist_node *hnode, void *arg)
815 {
816         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
817         __u64 flags = *(__u64 *)arg;
818
819         cleanup_resource(res, &res->lr_granted, flags);
820         cleanup_resource(res, &res->lr_converting, flags);
821         cleanup_resource(res, &res->lr_waiting, flags);
822
823         return 0;
824 }
825
826 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
827                                   struct hlist_node *hnode, void *arg)
828 {
829         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
830
831         lock_res(res);
832         CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
833                "(%d) after lock cleanup; forcing cleanup.\n",
834                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
835                atomic_read(&res->lr_refcount) - 1);
836
837         ldlm_resource_dump(D_ERROR, res);
838         unlock_res(res);
839         return 0;
840 }
841
842 /**
843  * Cancel and destroy all locks in the namespace.
844  *
845  * Typically used during an eviction, when the server has notified the client
846  * that it was evicted and all of its state needs to be destroyed.
847  * Also used during shutdown.
848  */
849 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
850 {
851         if (ns == NULL) {
852                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
853                 return ELDLM_OK;
854         }
855
856         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
857                                  &flags, 0);
858         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
859                                  NULL, 0);
860         return ELDLM_OK;
861 }
862 EXPORT_SYMBOL(ldlm_namespace_cleanup);
863
864 /**
865  * Attempts to free namespace.
866  *
867  * Only used when namespace goes away, like during an unmount.
868  */
869 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
870 {
871         ENTRY;
872
873         /* At shutdown time, don't call the cancellation callback */
874         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
875
876         if (atomic_read(&ns->ns_bref) > 0) {
877                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
878                 int rc;
879                 CDEBUG(D_DLMTRACE,
880                        "dlm namespace %s free waiting on refcount %d\n",
881                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
882 force_wait:
883                 if (force)
884                         lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
885                                           MSEC_PER_SEC) / 4, NULL, NULL);
886
887                 rc = l_wait_event(ns->ns_waitq,
888                                   atomic_read(&ns->ns_bref) == 0, &lwi);
889
890                 /* Forced cleanups should be able to reclaim all references,
891                  * so it's safe to wait forever... we can't leak locks... */
892                 if (force && rc == -ETIMEDOUT) {
893                         LCONSOLE_ERROR("Forced cleanup waiting for %s "
894                                        "namespace with %d resources in use, "
895                                        "(rc=%d)\n", ldlm_ns_name(ns),
896                                        atomic_read(&ns->ns_bref), rc);
897                         GOTO(force_wait, rc);
898                 }
899
900                 if (atomic_read(&ns->ns_bref)) {
901                         LCONSOLE_ERROR("Cleanup waiting for %s namespace "
902                                        "with %d resources in use, (rc=%d)\n",
903                                        ldlm_ns_name(ns),
904                                        atomic_read(&ns->ns_bref), rc);
905                         RETURN(ELDLM_NAMESPACE_EXISTS);
906                 }
907                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
908                        ldlm_ns_name(ns));
909         }
910
911         RETURN(ELDLM_OK);
912 }
913
914 /**
915  * Performs various cleanups on the passed \a ns to make it drop its refc
916  * and be ready for freeing. Waits for refc == 0.
917  *
918  * The following is done:
919  * (0) Unregister \a ns from its list to make it inaccessible to potential
920  * users such as the pools thread;
921  * (1) Clear all locks in \a ns.
922  */
923 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
924                                struct obd_import *imp,
925                                int force)
926 {
927         int rc;
928         ENTRY;
929         if (!ns) {
930                 EXIT;
931                 return;
932         }
933
934         spin_lock(&ns->ns_lock);
935         ns->ns_stopping = 1;
936         spin_unlock(&ns->ns_lock);
937
938         /*
939          * Can fail with -EINTR when force == 0 in which case try harder.
940          */
941         rc = __ldlm_namespace_free(ns, force);
942         if (rc != ELDLM_OK) {
943                 if (imp) {
944                         ptlrpc_disconnect_import(imp, 0);
945                         ptlrpc_invalidate_import(imp);
946                 }
947
948                 /*
949                  * With all requests dropped and the import inactive
950                  * we are guaranteed all references will be dropped.
951                  */
952                 rc = __ldlm_namespace_free(ns, 1);
953                 LASSERT(rc == 0);
954         }
955         EXIT;
956 }
957 EXPORT_SYMBOL(ldlm_namespace_free_prior);
958
959 /**
960  * Frees memory structures related to \a ns. This is only done when
961  * ldlm_namespace_free_prior() has successfully removed all resources
962  * referencing \a ns and its refc == 0.
963  */
964 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
965 {
966         ENTRY;
967         if (!ns) {
968                 EXIT;
969                 return;
970         }
971
972         /* Make sure that nobody can find this ns in its list. */
973         ldlm_namespace_unregister(ns, ns->ns_client);
974         /* Fini the pool _before_ the parent proc dir is removed. This is
975          * important because ldlm_pool_fini() removes its own proc dir, which
976          * is a child of @dir. Removing it after @dir may cause an oops. */
977         ldlm_pool_fini(&ns->ns_pool);
978
979         ldlm_namespace_proc_unregister(ns);
980         cfs_hash_putref(ns->ns_rs_hash);
981         /* Namespace \a ns should not be on any list at this time;
982          * otherwise the poold thread could end up dereferencing a freed
983          * \a ns. */
984         LASSERT(list_empty(&ns->ns_list_chain));
985         OBD_FREE_PTR(ns);
986         ldlm_put_ref();
987         EXIT;
988 }
989 EXPORT_SYMBOL(ldlm_namespace_free_post);
990
991 /**
992  * Clean up the resources, and free the namespace.
993  * bug 12864:
994  * Deadlock issue:
995  * proc1: destroy import
996  *        class_disconnect_export(grab cl_sem) ->
997  *              -> ldlm_namespace_free ->
998  *              -> lprocfs_remove(grab _lprocfs_lock).
999  * proc2: read proc info
1000  *        lprocfs_fops_read(grab _lprocfs_lock) ->
1001  *              -> osc_rd_active, etc(grab cl_sem).
1002  *
1003  * To avoid this deadlock, ldlm_namespace_free is split into two parts: the
1004  * first part, ldlm_namespace_free_prior, cleans up the resources that are
1005  * still in use; the second part, ldlm_namespace_free_post, unregisters the
1006  * lprocfs entries and then frees memory. The second part is called without
1007  * cli->cl_sem held.
1008  */
1009 void ldlm_namespace_free(struct ldlm_namespace *ns,
1010                          struct obd_import *imp,
1011                          int force)
1012 {
1013         ldlm_namespace_free_prior(ns, imp, force);
1014         ldlm_namespace_free_post(ns);
1015 }
1016 EXPORT_SYMBOL(ldlm_namespace_free);
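/*
 * Sketch of the two-step form implied by the deadlock note above: a caller
 * that holds cli->cl_sem is expected to run the prior part while the import
 * is being torn down, drop the semaphore, and only then run the post part,
 * so that lprocfs_remove() is never called under cl_sem:
 *
 *      ldlm_namespace_free_prior(ns, imp, force);
 *      ...release cli->cl_sem...
 *      ldlm_namespace_free_post(ns);
 */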
1017
1018 void ldlm_namespace_get(struct ldlm_namespace *ns)
1019 {
1020         atomic_inc(&ns->ns_bref);
1021 }
1022
1023 /* This is only for callers that care about refcount */
1024 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1025 {
1026         return atomic_inc_return(&ns->ns_bref);
1027 }
1028
1029 void ldlm_namespace_put(struct ldlm_namespace *ns)
1030 {
1031         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1032                 wake_up(&ns->ns_waitq);
1033                 spin_unlock(&ns->ns_lock);
1034         }
1035 }
1036
1037 /** Register \a ns in the list of namespaces */
1038 void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
1039 {
1040         mutex_lock(ldlm_namespace_lock(client));
1041         LASSERT(list_empty(&ns->ns_list_chain));
1042         list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
1043         ldlm_namespace_nr_inc(client);
1044         mutex_unlock(ldlm_namespace_lock(client));
1045 }
1046
1047 /** Unregister \a ns from the list of namespaces. */
1048 void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
1049 {
1050         mutex_lock(ldlm_namespace_lock(client));
1051         LASSERT(!list_empty(&ns->ns_list_chain));
1052         /* Some asserts and possibly other parts of the code are still
1053          * using list_empty(&ns->ns_list_chain). This is why it is
1054          * important to use list_del_init() here. */
1055         list_del_init(&ns->ns_list_chain);
1056         ldlm_namespace_nr_dec(client);
1057         mutex_unlock(ldlm_namespace_lock(client));
1058 }
1059
1060 /** Should be called with ldlm_namespace_lock(client) taken. */
1061 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1062                                        ldlm_side_t client)
1063 {
1064         LASSERT(!list_empty(&ns->ns_list_chain));
1065         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1066         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1067 }
1068
1069 /** Should be called with ldlm_namespace_lock(client) taken. */
1070 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1071                                          ldlm_side_t client)
1072 {
1073         LASSERT(!list_empty(&ns->ns_list_chain));
1074         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1075         list_move_tail(&ns->ns_list_chain,
1076                        ldlm_namespace_inactive_list(client));
1077 }
1078
1079 /** Should be called with ldlm_namespace_lock(client) taken. */
1080 struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
1081 {
1082         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1083         LASSERT(!list_empty(ldlm_namespace_list(client)));
1084         return container_of(ldlm_namespace_list(client)->next,
1085                             struct ldlm_namespace, ns_list_chain);
1086 }
1087
1088 /** Create and initialize new resource. */
1089 static struct ldlm_resource *ldlm_resource_new(ldlm_type_t type)
1090 {
1091         struct ldlm_resource *res;
1092         int idx;
1093
1094         OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
1095         if (res == NULL)
1096                 return NULL;
1097
1098         if (type == LDLM_EXTENT) {
1099                 OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
1100                                sizeof(*res->lr_itree) * LCK_MODE_NUM);
1101                 if (res->lr_itree == NULL) {
1102                         OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
1103                         return NULL;
1104                 }
1105                 /* Initialize interval trees for each lock mode. */
1106                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1107                         res->lr_itree[idx].lit_size = 0;
1108                         res->lr_itree[idx].lit_mode = 1 << idx;
1109                         res->lr_itree[idx].lit_root = NULL;
1110                 }
1111         }
1112
1113         INIT_LIST_HEAD(&res->lr_granted);
1114         INIT_LIST_HEAD(&res->lr_converting);
1115         INIT_LIST_HEAD(&res->lr_waiting);
1116
1117         atomic_set(&res->lr_refcount, 1);
1118         spin_lock_init(&res->lr_lock);
1119         lu_ref_init(&res->lr_reference);
1120
1121         /* Since LVB init can be delayed now, there is no longer a need to
1122          * acquire the mutex immediately here. */
1123         mutex_init(&res->lr_lvb_mutex);
1124         res->lr_lvb_initialized = false;
1125
1126         return res;
1127 }
1128
1129 /**
1130  * Return a reference to resource with given name, creating it if necessary.
1131  * Args: namespace with ns_lock unlocked
1132  * Locks: takes and releases NS hash-lock and res->lr_lock
1133  * Returns: referenced, unlocked ldlm_resource, or ERR_PTR() on failure
1134  */
1135 struct ldlm_resource *
1136 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1137                   const struct ldlm_res_id *name, ldlm_type_t type, int create)
1138 {
1139         struct hlist_node       *hnode;
1140         struct ldlm_resource    *res = NULL;
1141         struct cfs_hash_bd              bd;
1142         __u64                   version;
1143         int                     ns_refcount = 0;
1144
1145         LASSERT(ns != NULL);
1146         LASSERT(parent == NULL);
1147         LASSERT(ns->ns_rs_hash != NULL);
1148         LASSERT(name->name[0] != 0);
1149
1150         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1151         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1152         if (hnode != NULL) {
1153                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1154                 GOTO(found, res);
1155         }
1156
1157         version = cfs_hash_bd_version_get(&bd);
1158         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1159
1160         if (create == 0)
1161                 return ERR_PTR(-ENOENT);
1162
1163         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1164                  "type: %d\n", type);
1165         res = ldlm_resource_new(type);
1166         if (res == NULL)
1167                 return ERR_PTR(-ENOMEM);
1168
1169         res->lr_ns_bucket  = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
1170         res->lr_name       = *name;
1171         res->lr_type       = type;
1172
1173         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1174         hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
1175                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1176
1177         if (hnode != NULL) {
1178                 /* Someone won the race and already added the resource. */
1179                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1180                 /* Clean lu_ref for failed resource. */
1181                 lu_ref_fini(&res->lr_reference);
1182                 if (res->lr_itree != NULL)
1183                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1184                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1185                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1186 found:
1187                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1188                 return res;
1189         }
1190         /* We won! Let's add the resource. */
1191         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1192         if (cfs_hash_bd_count_get(&bd) == 1)
1193                 ns_refcount = ldlm_namespace_get_return(ns);
1194
1195         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1196
1197         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1198
1199         /* Let's see if we happened to be the very first resource in this
1200          * namespace. If so, and this is a client namespace, we need to move
1201          * the namespace into the active namespaces list to be patrolled by
1202          * the ldlm_poold. */
1203         if (ns_is_client(ns) && ns_refcount == 1) {
1204                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1205                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1206                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1207         }
1208
1209         return res;
1210 }
1211 EXPORT_SYMBOL(ldlm_resource_get);
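/*
 * Lookup-only usage sketch (illustrative): with create == 0 the function
 * never allocates, so a missing resource comes back as ERR_PTR(-ENOENT):
 *
 *      res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 0);
 *      if (IS_ERR(res))
 *              return PTR_ERR(res);
 *      ...
 *      ldlm_resource_putref(res);
 */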
1212
1213 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
1214 {
1215         LASSERT(res != NULL);
1216         LASSERT(res != LP_POISON);
1217         atomic_inc(&res->lr_refcount);
1218         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
1219                atomic_read(&res->lr_refcount));
1220         return res;
1221 }
1222
1223 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1224                                          struct ldlm_resource *res)
1225 {
1226         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1227
1228         if (!list_empty(&res->lr_granted)) {
1229                 ldlm_resource_dump(D_ERROR, res);
1230                 LBUG();
1231         }
1232
1233         if (!list_empty(&res->lr_converting)) {
1234                 ldlm_resource_dump(D_ERROR, res);
1235                 LBUG();
1236         }
1237
1238         if (!list_empty(&res->lr_waiting)) {
1239                 ldlm_resource_dump(D_ERROR, res);
1240                 LBUG();
1241         }
1242
1243         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1244                                bd, &res->lr_hash);
1245         lu_ref_fini(&res->lr_reference);
1246         if (cfs_hash_bd_count_get(bd) == 0)
1247                 ldlm_namespace_put(nsb->nsb_namespace);
1248 }
1249
1250 /* Returns 1 if the resource was freed, 0 if it remains. */
1251 int ldlm_resource_putref(struct ldlm_resource *res)
1252 {
1253         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1254         struct cfs_hash_bd   bd;
1255
1256         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1257         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1258                res, atomic_read(&res->lr_refcount) - 1);
1259
1260         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1261         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1262                 __ldlm_resource_putref_final(&bd, res);
1263                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1264                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1265                         ns->ns_lvbo->lvbo_free(res);
1266                 if (res->lr_itree != NULL)
1267                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1268                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1269                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1270                 return 1;
1271         }
1272         return 0;
1273 }
1274 EXPORT_SYMBOL(ldlm_resource_putref);
1275
1276 /* Returns 1 if the resource was freed, 0 if it remains. */
1277 int ldlm_resource_putref_locked(struct ldlm_resource *res)
1278 {
1279         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1280
1281         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1282         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1283                res, atomic_read(&res->lr_refcount) - 1);
1284
1285         if (atomic_dec_and_test(&res->lr_refcount)) {
1286                 struct cfs_hash_bd bd;
1287
1288                 cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
1289                                 &res->lr_name, &bd);
1290                 __ldlm_resource_putref_final(&bd, res);
1291                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1292                 /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
1293                  * so we can never get here via cfs_hash_del();
1294                  * cfs_hash_for_each_nolock() is the only path that reaches
1295                  * this point, and then it is safe to release cfs_hash_bd_lock.
1296                  */
1297                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1298                         ns->ns_lvbo->lvbo_free(res);
1299                 if (res->lr_itree != NULL)
1300                         OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
1301                                       sizeof(*res->lr_itree) * LCK_MODE_NUM);
1302                 OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
1303
1304                 cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1305                 return 1;
1306         }
1307         return 0;
1308 }
1309
1310 /**
1311  * Add a lock into a given resource into specified lock list.
1312  */
1313 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1314                             struct ldlm_lock *lock)
1315 {
1316         check_res_locked(res);
1317
1318         LDLM_DEBUG(lock, "About to add this lock:\n");
1319
1320         if (ldlm_is_destroyed(lock)) {
1321                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1322                 return;
1323         }
1324
1325         LASSERT(list_empty(&lock->l_res_link));
1326
1327         list_add_tail(&lock->l_res_link, head);
1328 }
1329
1330 /**
1331  * Insert a lock into resource after specified lock.
1332  *
1333  * Obtain resource description from the lock we are inserting after.
1334  */
1335 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
1336                                      struct ldlm_lock *new)
1337 {
1338         struct ldlm_resource *res = original->l_resource;
1339
1340         check_res_locked(res);
1341
1342         ldlm_resource_dump(D_INFO, res);
1343         LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
1344
1345         if (ldlm_is_destroyed(new)) {
1346                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1347                 goto out;
1348         }
1349
1350         LASSERT(list_empty(&new->l_res_link));
1351
1352         list_add(&new->l_res_link, &original->l_res_link);
1353  out:;
1354 }
1355
1356 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1357 {
1358         int type = lock->l_resource->lr_type;
1359
1360         check_res_locked(lock->l_resource);
1361         if (type == LDLM_IBITS || type == LDLM_PLAIN)
1362                 ldlm_unlink_lock_skiplist(lock);
1363         else if (type == LDLM_EXTENT)
1364                 ldlm_extent_unlink_lock(lock);
1365         list_del_init(&lock->l_res_link);
1366 }
1367 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1368
1369 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1370 {
1371         desc->lr_type = res->lr_type;
1372         desc->lr_name = res->lr_name;
1373 }
1374
1375 /**
1376  * Print information about all locks in all namespaces on this node to debug
1377  * log.
1378  */
1379 void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
1380 {
1381         struct list_head *tmp;
1382
1383         if (!((libcfs_debug | D_ERROR) & level))
1384                 return;
1385
1386         mutex_lock(ldlm_namespace_lock(client));
1387
1388         list_for_each(tmp, ldlm_namespace_list(client)) {
1389                 struct ldlm_namespace *ns;
1390                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1391                 ldlm_namespace_dump(level, ns);
1392         }
1393
1394         mutex_unlock(ldlm_namespace_lock(client));
1395 }
1396
1397 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1398                               struct hlist_node *hnode, void *arg)
1399 {
1400         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1401         int    level = (int)(unsigned long)arg;
1402
1403         lock_res(res);
1404         ldlm_resource_dump(level, res);
1405         unlock_res(res);
1406
1407         return 0;
1408 }
1409
1410 /**
1411  * Print information about all locks in this namespace on this node to debug
1412  * log.
1413  */
1414 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1415 {
1416         if (!((libcfs_debug | D_ERROR) & level))
1417                 return;
1418
1419         CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
1420                ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
1421                ns_is_client(ns) ? "client" : "server");
1422
1423         if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
1424                 return;
1425
1426         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1427                                  ldlm_res_hash_dump,
1428                                  (void *)(unsigned long)level, 0);
1429         spin_lock(&ns->ns_lock);
1430         ns->ns_next_dump = cfs_time_shift(10);
1431         spin_unlock(&ns->ns_lock);
1432 }
1433
1434 /**
1435  * Print information about all locks in this resource to debug log.
1436  */
1437 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1438 {
1439         struct ldlm_lock *lock;
1440         unsigned int granted = 0;
1441
1442         CLASSERT(RES_NAME_SIZE == 4);
1443
1444         if (!((libcfs_debug | D_ERROR) & level))
1445                 return;
1446
1447         CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
1448                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1449
1450         if (!list_empty(&res->lr_granted)) {
1451                 CDEBUG(level, "Granted locks (in reverse order):\n");
1452                 list_for_each_entry_reverse(lock, &res->lr_granted,
1453                                                 l_res_link) {
1454                         LDLM_DEBUG_LIMIT(level, lock, "###");
1455                         if (!(level & D_CANTMASK) &&
1456                             ++granted > ldlm_dump_granted_max) {
1457                                 CDEBUG(level, "only dumping %d granted locks "
1458                                        "to avoid a DoS.\n", granted);
1459                                 break;
1460                         }
1461                 }
1462         }
1463         if (!list_empty(&res->lr_converting)) {
1464                 CDEBUG(level, "Converting locks:\n");
1465                 list_for_each_entry(lock, &res->lr_converting, l_res_link)
1466                         LDLM_DEBUG_LIMIT(level, lock, "###");
1467         }
1468         if (!list_empty(&res->lr_waiting)) {
1469                 CDEBUG(level, "Waiting locks:\n");
1470                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1471                         LDLM_DEBUG_LIMIT(level, lock, "###");
1472         }
1473 }
1474 EXPORT_SYMBOL(ldlm_resource_dump);