 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 * lustre/ldlm/ldlm_resource.c
 *
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Peter Braam <braam@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <lustre_dlm.h>
#else
# include <liblustre.h>
#endif

#include <lustre_fid.h>
#include <obd_class.h>
#include "ldlm_internal.h"
cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;

cfs_atomic_t ldlm_srv_namespace_nr = CFS_ATOMIC_INIT(0);
cfs_atomic_t ldlm_cli_namespace_nr = CFS_ATOMIC_INIT(0);

struct mutex ldlm_srv_namespace_lock;
CFS_LIST_HEAD(ldlm_srv_namespace_list);

struct mutex ldlm_cli_namespace_lock;
CFS_LIST_HEAD(ldlm_cli_namespace_list);
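
/*
 * Server-side and client-side namespaces are tracked separately: each side
 * has its own counter, list head, and guarding mutex.  The
 * ldlm_namespace_register()/unregister() helpers below take the matching
 * ldlm_namespace_lock() before touching these lists.
 */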
cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL;

extern unsigned int ldlm_cancel_unused_locks_before_replay;

/* During a debug dump, print only a limited number of granted locks per
 * resource to avoid DDOS (see ldlm_resource_dump()). */
unsigned int ldlm_dump_granted_max = 256;
static int ldlm_proc_dump_ns(struct file *file, const char *buffer,
                             unsigned long count, void *data)
        ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
        ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);

int ldlm_proc_setup(void)
        struct lprocfs_vars list[] = {
                { "dump_namespaces", NULL, ldlm_proc_dump_ns, NULL },
                { "dump_granted_max",
                  lprocfs_rd_uint, lprocfs_wr_uint,
                  &ldlm_dump_granted_max, NULL },
                { "cancel_unused_locks_before_replay",
                  lprocfs_rd_uint, lprocfs_wr_uint,
                  &ldlm_cancel_unused_locks_before_replay, NULL },
        LASSERT(ldlm_ns_proc_dir == NULL);

        ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
        if (IS_ERR(ldlm_type_proc_dir)) {
                CERROR("LProcFS failed in ldlm-init\n");
                rc = PTR_ERR(ldlm_type_proc_dir);

        ldlm_ns_proc_dir = lprocfs_register("namespaces",
        if (IS_ERR(ldlm_ns_proc_dir)) {
                CERROR("LProcFS failed in ldlm-init\n");
                rc = PTR_ERR(ldlm_ns_proc_dir);

        ldlm_svc_proc_dir = lprocfs_register("services",
        if (IS_ERR(ldlm_svc_proc_dir)) {
                CERROR("LProcFS failed in ldlm-init\n");
                rc = PTR_ERR(ldlm_svc_proc_dir);

        rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);

        lprocfs_remove(&ldlm_ns_proc_dir);
        lprocfs_remove(&ldlm_type_proc_dir);
        ldlm_svc_proc_dir = NULL;

void ldlm_proc_cleanup(void)
        if (ldlm_svc_proc_dir)
                lprocfs_remove(&ldlm_svc_proc_dir);

        if (ldlm_ns_proc_dir)
                lprocfs_remove(&ldlm_ns_proc_dir);

        if (ldlm_type_proc_dir)
                lprocfs_remove(&ldlm_type_proc_dir);
static int lprocfs_rd_ns_resources(char *page, char **start, off_t off,
                                   int count, int *eof, void *data)
        struct ldlm_namespace *ns = data;

        /* result is not strictly consistent */
        cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
                res += cfs_hash_bd_count_get(&bd);
        return lprocfs_rd_u64(page, start, off, count, eof, &res);
static int lprocfs_rd_ns_locks(char *page, char **start, off_t off,
                               int count, int *eof, void *data)
        struct ldlm_namespace *ns = data;

        locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
                                        LPROCFS_FIELDS_FLAGS_SUM);
        return lprocfs_rd_u64(page, start, off, count, eof, &locks);

static int lprocfs_rd_lru_size(char *page, char **start, off_t off,
                               int count, int *eof, void *data)
        struct ldlm_namespace *ns = data;
        __u32 *nr = &ns->ns_max_unused;

        if (ns_connect_lru_resize(ns))
                nr = &ns->ns_nr_unused;
        return lprocfs_rd_uint(page, start, off, count, eof, nr);
static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
                               unsigned long count, void *data)
        struct ldlm_namespace *ns = data;
        char dummy[MAX_STRING_SIZE + 1], *end;

        dummy[MAX_STRING_SIZE] = '\0';
        if (cfs_copy_from_user(dummy, buffer, MAX_STRING_SIZE))

        if (strncmp(dummy, "clear", 5) == 0) {
                       "dropping all unused locks from namespace %s\n",
                if (ns_connect_lru_resize(ns)) {
                        int canceled, unused = ns->ns_nr_unused;

                        /* Try to cancel all @ns_nr_unused locks. */
                        canceled = ldlm_cancel_lru(ns, unused, LDLM_SYNC,
                        if (canceled < unused) {
                                       "not all requested locks are canceled, "
                                       "requested: %d, canceled: %d\n", unused,
                        tmp = ns->ns_max_unused;
                        ns->ns_max_unused = 0;
                        ldlm_cancel_lru(ns, 0, LDLM_SYNC, LDLM_CANCEL_PASSED);
                        ns->ns_max_unused = tmp;

        tmp = simple_strtoul(dummy, &end, 0);
                CERROR("invalid value written\n");
        lru_resize = (tmp == 0);

        if (ns_connect_lru_resize(ns)) {
                        ns->ns_max_unused = (unsigned int)tmp;

                if (tmp > ns->ns_nr_unused)
                        tmp = ns->ns_nr_unused;
                tmp = ns->ns_nr_unused - tmp;

                       "changing namespace %s unused locks from %u to %u\n",
                       ldlm_ns_name(ns), ns->ns_nr_unused,
                ldlm_cancel_lru(ns, tmp, LDLM_ASYNC, LDLM_CANCEL_PASSED);

                               "disable lru_resize for namespace %s\n",
                        ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;

                       "changing namespace %s max_unused from %u to %u\n",
                       ldlm_ns_name(ns), ns->ns_max_unused,
                ns->ns_max_unused = (unsigned int)tmp;
                ldlm_cancel_lru(ns, 0, LDLM_ASYNC, LDLM_CANCEL_PASSED);

                /* Make sure that lru resize was originally supported before
                 * turning it on here. */
                   (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
                               "enable lru_resize for namespace %s\n",
                        ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
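
/*
 * Usage sketch for the lru_size tunable handled above (the path shown is the
 * usual lprocfs location and is an assumption here, not taken from this file):
 *
 *   # drop all unused locks in the namespace
 *   echo clear > /proc/fs/lustre/ldlm/namespaces/<namespace>/lru_size
 *
 *   # a value of 0 enables LRU resizing (if the server supports it);
 *   # a non-zero value disables it and caps the number of unused locks
 *   echo 0   > /proc/fs/lustre/ldlm/namespaces/<namespace>/lru_size
 *   echo 800 > /proc/fs/lustre/ldlm/namespaces/<namespace>/lru_size
 */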
static int lprocfs_rd_elc(char *page, char **start, off_t off,
                          int count, int *eof, void *data)
        struct ldlm_namespace *ns = data;
        unsigned int supp = ns_connect_cancelset(ns);

        return lprocfs_rd_uint(page, start, off, count, eof, &supp);

static int lprocfs_wr_elc(struct file *file, const char *buffer,
                          unsigned long count, void *data)
        struct ldlm_namespace *ns = data;
        unsigned int supp = -1;

        rc = lprocfs_wr_uint(file, buffer, count, &supp);

                ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
        else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
                ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
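
/*
 * The early_lock_cancel (elc) tunable above toggles OBD_CONNECT_CANCELSET:
 * writing 0 disables early lock cancellation for the namespace, while a
 * non-zero value re-enables it only if it was supported at connect time
 * (ns_orig_connect_flags).
 */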
void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
        struct proc_dir_entry *dir;

        dir = lprocfs_srch(ldlm_ns_proc_dir, ldlm_ns_name(ns));
                CERROR("dlm namespace %s has no procfs dir?\n",
                lprocfs_remove(&dir);

        if (ns->ns_stats != NULL)
                lprocfs_free_stats(&ns->ns_stats);

int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
        struct lprocfs_vars lock_vars[2];
        char lock_name[MAX_STRING_SIZE + 1];

        LASSERT(ns->ns_rs_hash != NULL);

        ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
        if (ns->ns_stats == NULL)

        lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
                             LPROCFS_CNTR_AVGMINMAX, "locks", "locks");

        lock_name[MAX_STRING_SIZE] = '\0';

        memset(lock_vars, 0, sizeof(lock_vars));
        lock_vars[0].name = lock_name;

        snprintf(lock_name, MAX_STRING_SIZE, "%s/resource_count",
        lock_vars[0].data = ns;
        lock_vars[0].read_fptr = lprocfs_rd_ns_resources;
        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

        snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_count",
        lock_vars[0].data = ns;
        lock_vars[0].read_fptr = lprocfs_rd_ns_locks;
        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

        if (ns_is_client(ns)) {
                snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_unused_count",
                lock_vars[0].data = &ns->ns_nr_unused;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_size",
                lock_vars[0].data = ns;
                lock_vars[0].read_fptr = lprocfs_rd_lru_size;
                lock_vars[0].write_fptr = lprocfs_wr_lru_size;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_max_age",
                lock_vars[0].data = &ns->ns_max_age;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lock_vars[0].write_fptr = lprocfs_wr_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/early_lock_cancel",
                lock_vars[0].data = ns;
                lock_vars[0].read_fptr = lprocfs_rd_elc;
                lock_vars[0].write_fptr = lprocfs_wr_elc;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/ctime_age_limit",
                lock_vars[0].data = &ns->ns_ctime_age_limit;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lock_vars[0].write_fptr = lprocfs_wr_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_timeouts",
                lock_vars[0].data = &ns->ns_timeouts;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/max_nolock_bytes",
                lock_vars[0].data = &ns->ns_max_nolock_size;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lock_vars[0].write_fptr = lprocfs_wr_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/contention_seconds",
                lock_vars[0].data = &ns->ns_contention_time;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lock_vars[0].write_fptr = lprocfs_wr_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/contended_locks",
                lock_vars[0].data = &ns->ns_contended_locks;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lock_vars[0].write_fptr = lprocfs_wr_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

                snprintf(lock_name, MAX_STRING_SIZE, "%s/max_parallel_ast",
                lock_vars[0].data = &ns->ns_max_parallel_ast;
                lock_vars[0].read_fptr = lprocfs_rd_uint;
                lock_vars[0].write_fptr = lprocfs_wr_uint;
                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);

#undef MAX_STRING_SIZE

#define ldlm_namespace_proc_unregister(ns) ({;})
#define ldlm_namespace_proc_register(ns)   ({0;})
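
/*
 * When lprocfs support is compiled out, the proc register/unregister hooks
 * above collapse to these no-op stubs, so namespace setup and teardown work
 * the same way with or without /proc support.
 */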
static unsigned ldlm_res_hop_hash(cfs_hash_t *hs,
                                  const void *key, unsigned mask)
        const struct ldlm_res_id *id = key;

        for (i = 0; i < RES_NAME_SIZE; i++)

static unsigned ldlm_res_hop_fid_hash(cfs_hash_t *hs,
                                      const void *key, unsigned mask)
        const struct ldlm_res_id *id = key;

        fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
        fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
        fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);

        hash = fid_flatten32(&fid);
        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
        if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
                val = id->name[LUSTRE_RES_ID_HSH_OFF];
                hash += (val >> 5) + (val << 11);

        hash = cfs_hash_long(hash, hs->hs_bkt_bits);
        /* give me another random factor */
        hash -= cfs_hash_long((unsigned long)hs, val % 11 + 3);

        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
        hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);

static void *ldlm_res_hop_key(cfs_hlist_node_t *hnode)
        struct ldlm_resource *res;

        res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
        return &res->lr_name;

static int ldlm_res_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
        struct ldlm_resource *res;

        res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
        return ldlm_res_eq((const struct ldlm_res_id *)key,
                           (const struct ldlm_res_id *)&res->lr_name);

static void *ldlm_res_hop_object(cfs_hlist_node_t *hnode)
        return cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);

static void ldlm_res_hop_get_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
        struct ldlm_resource *res;

        res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
        ldlm_resource_getref(res);

static void ldlm_res_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
        struct ldlm_resource *res;

        res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
        /* cfs_hash_for_each_nolock is the only place this is called from */
        ldlm_resource_putref_locked(res);
static void ldlm_res_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
        struct ldlm_resource *res;

        res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
        ldlm_resource_putref(res);

cfs_hash_ops_t ldlm_ns_hash_ops = {
        .hs_hash = ldlm_res_hop_hash,
        .hs_key = ldlm_res_hop_key,
        .hs_keycmp = ldlm_res_hop_keycmp,
        .hs_object = ldlm_res_hop_object,
        .hs_get = ldlm_res_hop_get_locked,
        .hs_put_locked = ldlm_res_hop_put_locked,
        .hs_put = ldlm_res_hop_put

cfs_hash_ops_t ldlm_ns_fid_hash_ops = {
        .hs_hash = ldlm_res_hop_fid_hash,
        .hs_key = ldlm_res_hop_key,
        .hs_keycmp = ldlm_res_hop_keycmp,
        .hs_object = ldlm_res_hop_object,
        .hs_get = ldlm_res_hop_get_locked,
        .hs_put_locked = ldlm_res_hop_put_locked,
        .hs_put = ldlm_res_hop_put

        ldlm_ns_type_t nsd_type;
        /** hash bucket bits */
        unsigned nsd_bkt_bits;
        unsigned nsd_all_bits;
        /** hash operations */
        cfs_hash_ops_t *nsd_hops;
} ldlm_ns_hash_def_t;
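
/*
 * Table of per-namespace-type hash parameters: each entry pairs an
 * ldlm_ns_type_t with its bucket geometry and the hash operations to use
 * (FID-based hashing for MDC/MDT, the generic name hash otherwise).  The
 * entry with nsd_type == LDLM_NS_TYPE_UNKNOWN terminates the table.
 */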
ldlm_ns_hash_def_t ldlm_ns_hash_defs[] =
        .nsd_type = LDLM_NS_TYPE_MDC,
        .nsd_hops = &ldlm_ns_fid_hash_ops,

        .nsd_type = LDLM_NS_TYPE_MDT,
        .nsd_hops = &ldlm_ns_fid_hash_ops,

        .nsd_type = LDLM_NS_TYPE_OSC,
        .nsd_hops = &ldlm_ns_hash_ops,

        .nsd_type = LDLM_NS_TYPE_OST,
        .nsd_hops = &ldlm_ns_hash_ops,

        .nsd_type = LDLM_NS_TYPE_MGC,
        .nsd_hops = &ldlm_ns_hash_ops,

        .nsd_type = LDLM_NS_TYPE_MGT,
        .nsd_hops = &ldlm_ns_hash_ops,

        .nsd_type = LDLM_NS_TYPE_UNKNOWN,

struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
                                          ldlm_ns_type_t ns_type)
        struct ldlm_namespace *ns = NULL;
        struct ldlm_ns_bucket *nsb;
        ldlm_ns_hash_def_t *nsd;

        LASSERT(obd != NULL);

                CERROR("ldlm_get_ref failed: %d\n", rc);

        for (idx = 0; ; idx++) {
                nsd = &ldlm_ns_hash_defs[idx];
                if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
                        CERROR("Unknown type %d for ns %s\n", ns_type, name);

                if (nsd->nsd_type == ns_type)

        ns->ns_rs_hash = cfs_hash_create(name,
                                         nsd->nsd_all_bits, nsd->nsd_all_bits,
                                         nsd->nsd_bkt_bits, sizeof(*nsb),
                                         CFS_HASH_SPIN_BKTLOCK |
                                         CFS_HASH_NO_ITEMREF);
        if (ns->ns_rs_hash == NULL)

        cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
                nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
                at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
                nsb->nsb_namespace = ns;

        ns->ns_appetite = apt;
        ns->ns_client = client;

        CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
        CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
        spin_lock_init(&ns->ns_lock);
        cfs_atomic_set(&ns->ns_bref, 0);
        cfs_waitq_init(&ns->ns_waitq);

        ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
        ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
        ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;

        ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
        ns->ns_nr_unused = 0;
        ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
        ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
        ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;

        ns->ns_orig_connect_flags = 0;
        ns->ns_connect_flags = 0;

        rc = ldlm_namespace_proc_register(ns);
                CERROR("Can't initialize ns proc, rc %d\n", rc);

        idx = cfs_atomic_read(ldlm_namespace_nr(client));
        rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
                CERROR("Can't initialize lock pool, rc %d\n", rc);

        ldlm_namespace_register(ns, client);

        ldlm_namespace_proc_unregister(ns);
        ldlm_namespace_cleanup(ns, 0);
        cfs_hash_putref(ns->ns_rs_hash);

EXPORT_SYMBOL(ldlm_namespace_new);
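
/*
 * Illustrative sketch of the expected create/destroy pairing (not from the
 * original file; the name and argument values below are hypothetical, and
 * the elided middle parameters of ldlm_namespace_new() are left as "..."):
 *
 *      struct ldlm_namespace *ns;
 *
 *      ns = ldlm_namespace_new(obd, "example-ns", ..., LDLM_NS_TYPE_MDC);
 *      if (ns == NULL)
 *              return -ENOMEM;
 *      ...
 *      ldlm_namespace_free(ns, imp, 1);
 */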
extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
/* If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
 * cleanup.  This is currently only used for recovery, and we make certain
 * assumptions as a result; notably, that we shouldn't cancel locks with
 * refs. -phil */
static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
        int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
        bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);

                struct ldlm_lock *lock = NULL;

                /* First, look for a lock that has not been cleaned up yet;
                 * all cleaned locks are marked by the CLEANED flag. */
                cfs_list_for_each(tmp, q) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                        if (lock->l_flags & LDLM_FL_CLEANED) {

                        lock->l_flags |= LDLM_FL_CLEANED;

                /* Set CBPENDING so nothing in the cancellation path
                 * can match this lock */
                lock->l_flags |= LDLM_FL_CBPENDING;
                lock->l_flags |= LDLM_FL_FAILED;
                lock->l_flags |= flags;

                /* ... without sending a CANCEL message for local_only. */
                        lock->l_flags |= LDLM_FL_LOCAL_ONLY;

                if (local_only && (lock->l_readers || lock->l_writers)) {
                        /* This is a little bit gross, but much better than the
                         * alternative: pretend that we got a blocking AST from
                         * the server, so that when the lock is decref'd, it
                         * will go away ... */
                        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);
                        LDLM_LOCK_RELEASE(lock);

                        struct lustre_handle lockh;

                        ldlm_lock2handle(lock, &lockh);
                        rc = ldlm_cli_cancel(&lockh);
                                CERROR("ldlm_cli_cancel: %d\n", rc);

                        ldlm_resource_unlink_lock(lock);
                        LDLM_DEBUG(lock, "Freeing a lock still held by a "
                        ldlm_lock_destroy(lock);
                LDLM_LOCK_RELEASE(lock);

static int ldlm_resource_clean(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                               cfs_hlist_node_t *hnode, void *arg)
        struct ldlm_resource *res = cfs_hash_object(hs, hnode);
        __u64 flags = *(__u64 *)arg;

        cleanup_resource(res, &res->lr_granted, flags);
        cleanup_resource(res, &res->lr_converting, flags);
        cleanup_resource(res, &res->lr_waiting, flags);

static int ldlm_resource_complain(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                                  cfs_hlist_node_t *hnode, void *arg)
        struct ldlm_resource *res = cfs_hash_object(hs, hnode);

        CERROR("Namespace %s resource refcount nonzero "
               "(%d) after lock cleanup; forcing "
               ldlm_ns_name(ldlm_res_to_ns(res)),
               cfs_atomic_read(&res->lr_refcount) - 1);

        CERROR("Resource: %p ("LPU64"/"LPU64"/"LPU64"/"
               LPU64") (rc: %d)\n", res,
               res->lr_name.name[0], res->lr_name.name[1],
               res->lr_name.name[2], res->lr_name.name[3],
               cfs_atomic_read(&res->lr_refcount) - 1);

int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
                CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");

        cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
        cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);

EXPORT_SYMBOL(ldlm_namespace_cleanup);

static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
        /* At shutdown time, don't call the cancellation callback */
        ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);

        if (cfs_atomic_read(&ns->ns_bref) > 0) {
                struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
                       "dlm namespace %s free waiting on refcount %d\n",
                       ldlm_ns_name(ns), cfs_atomic_read(&ns->ns_bref));

                        lwi = LWI_TIMEOUT(obd_timeout * CFS_HZ / 4, NULL, NULL);

                rc = l_wait_event(ns->ns_waitq,
                                  cfs_atomic_read(&ns->ns_bref) == 0, &lwi);

                /* Forced cleanups should be able to reclaim all references,
                 * so it's safe to wait forever... we can't leak locks... */
                if (force && rc == -ETIMEDOUT) {
                        LCONSOLE_ERROR("Forced cleanup waiting for %s "
                                       "namespace with %d resources in use, "
                                       "(rc=%d)\n", ldlm_ns_name(ns),
                                       cfs_atomic_read(&ns->ns_bref), rc);
                        GOTO(force_wait, rc);

                if (cfs_atomic_read(&ns->ns_bref)) {
                        LCONSOLE_ERROR("Cleanup waiting for %s namespace "
                                       "with %d resources in use, (rc=%d)\n",
                                       cfs_atomic_read(&ns->ns_bref), rc);
                        RETURN(ELDLM_NAMESPACE_EXISTS);

                CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
 * Performs various cleanups for the passed \a ns to make it drop its refc and
 * be ready for freeing.  Waits for refc == 0.
 *
 * The following is done:
 * (0) Unregister \a ns from its list to make it inaccessible to potential
 *     users like the pools thread and others;
 * (1) Clear all locks in \a ns.
 */
void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
                               struct obd_import *imp,
        spin_lock(&ns->ns_lock);
        spin_unlock(&ns->ns_lock);

        /*
         * Can fail with -EINTR when force == 0 in which case try harder.
         */
        rc = __ldlm_namespace_free(ns, force);
        if (rc != ELDLM_OK) {
                ptlrpc_disconnect_import(imp, 0);
                ptlrpc_invalidate_import(imp);

                /*
                 * With all requests dropped and the import inactive
                 * we are guaranteed all references will be dropped.
                 */
                rc = __ldlm_namespace_free(ns, 1);
 * Frees memory structures related to \a ns.  This is only done when
 * ldlm_namespace_free_prior() successfully removed all resources
 * referencing \a ns and its refc == 0.
 */
void ldlm_namespace_free_post(struct ldlm_namespace *ns)
        /*
         * Make sure that nobody can find this ns in its list.
         */
        ldlm_namespace_unregister(ns, ns->ns_client);
        /*
         * Fini the pool _before_ the parent proc dir is removed.  This is
         * important, as ldlm_pool_fini() removes its own proc dir, which is a
         * child of @dir; removing it after @dir may cause an oops.
         */
        ldlm_pool_fini(&ns->ns_pool);

        ldlm_namespace_proc_unregister(ns);
        cfs_hash_putref(ns->ns_rs_hash);
        /*
         * Namespace \a ns should not be on the list at this time; otherwise
         * that would cause issues related to using a freed \a ns in the pools
         * thread.
         */
        LASSERT(cfs_list_empty(&ns->ns_list_chain));
/* Cleanup the resource, and free the namespace.
 *
 * Deadlock issue:
 * proc1: destroy import
 *        class_disconnect_export(grab cl_sem) ->
 *              -> ldlm_namespace_free ->
 *              -> lprocfs_remove(grab _lprocfs_lock).
 * proc2: read proc info
 *        lprocfs_fops_read(grab _lprocfs_lock) ->
 *              -> osc_rd_active, etc(grab cl_sem).
 *
 * So ldlm_namespace_free had to be split into two parts: the first part,
 * ldlm_namespace_free_prior, cleans up the resources that are being used;
 * the second part, ldlm_namespace_free_post, unregisters the lprocfs
 * entries and then frees the memory.  It will be called w/o cli->cl_sem.
 */
void ldlm_namespace_free(struct ldlm_namespace *ns,
                         struct obd_import *imp,
        ldlm_namespace_free_prior(ns, imp, force);
        ldlm_namespace_free_post(ns);
EXPORT_SYMBOL(ldlm_namespace_free);

void ldlm_namespace_get(struct ldlm_namespace *ns)
        cfs_atomic_inc(&ns->ns_bref);
EXPORT_SYMBOL(ldlm_namespace_get);

void ldlm_namespace_put(struct ldlm_namespace *ns)
        if (cfs_atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
                cfs_waitq_signal(&ns->ns_waitq);
                spin_unlock(&ns->ns_lock);
EXPORT_SYMBOL(ldlm_namespace_put);

/* Register @ns in the list of namespaces */
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
        mutex_lock(ldlm_namespace_lock(client));
        LASSERT(cfs_list_empty(&ns->ns_list_chain));
        cfs_list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
        cfs_atomic_inc(ldlm_namespace_nr(client));
        mutex_unlock(ldlm_namespace_lock(client));

/* Unregister @ns from the list of namespaces */
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
        mutex_lock(ldlm_namespace_lock(client));
        LASSERT(!cfs_list_empty(&ns->ns_list_chain));
        /*
         * Some asserts and possibly other parts of the code are still using
         * list_empty(&ns->ns_list_chain); this is why it is important to use
         * list_del_init() here.
         */
        cfs_list_del_init(&ns->ns_list_chain);
        cfs_atomic_dec(ldlm_namespace_nr(client));
        mutex_unlock(ldlm_namespace_lock(client));

/* Should be called with ldlm_namespace_lock(client) taken */
void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client)
        LASSERT(!cfs_list_empty(&ns->ns_list_chain));
        LASSERT_MUTEX_LOCKED(ldlm_namespace_lock(client));
        cfs_list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));

/* Should be called with ldlm_namespace_lock(client) taken */
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
        LASSERT_MUTEX_LOCKED(ldlm_namespace_lock(client));
        LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
        return container_of(ldlm_namespace_list(client)->next,
                            struct ldlm_namespace, ns_list_chain);

static struct ldlm_resource *ldlm_resource_new(void)
        struct ldlm_resource *res;

        OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, CFS_ALLOC_IO);

        CFS_INIT_LIST_HEAD(&res->lr_granted);
        CFS_INIT_LIST_HEAD(&res->lr_converting);
        CFS_INIT_LIST_HEAD(&res->lr_waiting);

        /* initialize interval trees for each lock mode */
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                res->lr_itree[idx].lit_size = 0;
                res->lr_itree[idx].lit_mode = 1 << idx;
                res->lr_itree[idx].lit_root = NULL;

        cfs_atomic_set(&res->lr_refcount, 1);
        spin_lock_init(&res->lr_lock);
        lu_ref_init(&res->lr_reference);
        /* The one who creates the resource must unlock the mutex after LVB
         * initialization. */
        mutex_init(&res->lr_lvb_mutex);
        mutex_lock(&res->lr_lvb_mutex);
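
/*
 * Note that ldlm_resource_new() returns with lr_lvb_mutex held; the creator
 * in ldlm_resource_get() drops it only once lvbo_init() has run, so a
 * concurrent lookup that finds the resource blocks on the mutex until the
 * LVB is fully set up.
 */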
/* Args: unlocked namespace
 * Locks: takes and releases NS hash-lock and res->lr_lock
 * Returns: referenced, unlocked ldlm_resource or NULL */
struct ldlm_resource *
ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                  const struct ldlm_res_id *name, ldlm_type_t type, int create)
        cfs_hlist_node_t *hnode;
        struct ldlm_resource *res;

        LASSERT(ns != NULL);
        LASSERT(parent == NULL);
        LASSERT(ns->ns_rs_hash != NULL);
        LASSERT(name->name[0] != 0);

        cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
        hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
        if (hnode != NULL) {
                cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
                res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
                /* synchronize WRT resource creation */
                if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
                        mutex_lock(&res->lr_lvb_mutex);
                        mutex_unlock(&res->lr_lvb_mutex);

        version = cfs_hash_bd_version_get(&bd);
        cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);

        LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
                 "type: %d\n", type);
        res = ldlm_resource_new();

        res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
        res->lr_name = *name;
        res->lr_type = type;
        res->lr_most_restr = LCK_NL;

        cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
        hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
                cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);

        if (hnode != NULL) {
                /* someone won the race and added the resource before */
                cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
                /* clean lu_ref for failed resource */
                lu_ref_fini(&res->lr_reference);
                /* We have taken lr_lvb_mutex. Drop it. */
                mutex_unlock(&res->lr_lvb_mutex);
                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);

                res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
                /* synchronize WRT resource creation */
                if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
                        mutex_lock(&res->lr_lvb_mutex);
                        mutex_unlock(&res->lr_lvb_mutex);

        /* we won! let's add the resource */
        cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
        if (cfs_hash_bd_count_get(&bd) == 1)
                ldlm_namespace_get(ns);

        cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
        if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {

                OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
                rc = ns->ns_lvbo->lvbo_init(res);
                        CERROR("lvbo_init failed for resource "
                               LPU64": rc %d\n", name->name[0], rc);

        /* we create resource with locked lr_lvb_mutex */
        mutex_unlock(&res->lr_lvb_mutex);

EXPORT_SYMBOL(ldlm_resource_get);
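
/*
 * Illustrative sketch of the intended get/put pairing (not from the original
 * file; the resource id value below is hypothetical):
 *
 *      struct ldlm_resource *res;
 *
 *      res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
 *      if (res != NULL) {
 *              ... use the resource, taking lock_res(res) where needed ...
 *              ldlm_resource_putref(res);
 *      }
 */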
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
        LASSERT(res != NULL);
        LASSERT(res != LP_POISON);
        cfs_atomic_inc(&res->lr_refcount);
        CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
               cfs_atomic_read(&res->lr_refcount));

static void __ldlm_resource_putref_final(cfs_hash_bd_t *bd,
                                         struct ldlm_resource *res)
        struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;

        if (!cfs_list_empty(&res->lr_granted)) {
                ldlm_resource_dump(D_ERROR, res);

        if (!cfs_list_empty(&res->lr_converting)) {
                ldlm_resource_dump(D_ERROR, res);

        if (!cfs_list_empty(&res->lr_waiting)) {
                ldlm_resource_dump(D_ERROR, res);

        cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
        lu_ref_fini(&res->lr_reference);
        if (cfs_hash_bd_count_get(bd) == 0)
                ldlm_namespace_put(nsb->nsb_namespace);

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref(struct ldlm_resource *res)
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);

        LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
        CDEBUG(D_INFO, "putref res: %p count: %d\n",
               res, cfs_atomic_read(&res->lr_refcount) - 1);

        cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
        if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
                __ldlm_resource_putref_final(&bd, res);
                cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
                if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
                        ns->ns_lvbo->lvbo_free(res);
                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);

EXPORT_SYMBOL(ldlm_resource_putref);

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref_locked(struct ldlm_resource *res)
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);

        LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
        CDEBUG(D_INFO, "putref res: %p count: %d\n",
               res, cfs_atomic_read(&res->lr_refcount) - 1);

        if (cfs_atomic_dec_and_test(&res->lr_refcount)) {
                cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
                                &res->lr_name, &bd);
                __ldlm_resource_putref_final(&bd, res);
                cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
                /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF, so we
                 * should never get here via cfs_hash_del();
                 * cfs_hash_for_each_nolock() is the only path that can reach
                 * this point, and in that case it is safe to release
                 * cfs_hash_bd_lock.
                 */
                if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
                        ns->ns_lvbo->lvbo_free(res);
                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
                cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);

void ldlm_resource_add_lock(struct ldlm_resource *res, cfs_list_t *head,
                            struct ldlm_lock *lock)
        check_res_locked(res);

        LDLM_DEBUG(lock, "About to add this lock:\n");

        if (lock->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");

        LASSERT(cfs_list_empty(&lock->l_res_link));

        cfs_list_add_tail(&lock->l_res_link, head);

void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
                                     struct ldlm_lock *new)
        struct ldlm_resource *res = original->l_resource;

        check_res_locked(res);

        ldlm_resource_dump(D_INFO, res);
        LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);

        if (new->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");

        LASSERT(cfs_list_empty(&new->l_res_link));

        cfs_list_add(&new->l_res_link, &original->l_res_link);

void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
        int type = lock->l_resource->lr_type;

        check_res_locked(lock->l_resource);
        if (type == LDLM_IBITS || type == LDLM_PLAIN)
                ldlm_unlink_lock_skiplist(lock);
        else if (type == LDLM_EXTENT)
                ldlm_extent_unlink_lock(lock);
        cfs_list_del_init(&lock->l_res_link);
EXPORT_SYMBOL(ldlm_resource_unlink_lock);

void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
        desc->lr_type = res->lr_type;
        desc->lr_name = res->lr_name;

void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
        if (!((libcfs_debug | D_ERROR) & level))

        mutex_lock(ldlm_namespace_lock(client));

        cfs_list_for_each(tmp, ldlm_namespace_list(client)) {
                struct ldlm_namespace *ns;

                ns = cfs_list_entry(tmp, struct ldlm_namespace, ns_list_chain);
                ldlm_namespace_dump(level, ns);

        mutex_unlock(ldlm_namespace_lock(client));
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
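
/*
 * These dumps are gated on the debug mask: unless the requested level is
 * D_ERROR or is enabled in libcfs_debug, ldlm_namespace_dump() and
 * ldlm_resource_dump() return without printing anything.  Writing to the
 * dump_namespaces proc file (see ldlm_proc_dump_ns() above) requests a
 * D_DLMTRACE dump of both the server-side and client-side namespaces.
 */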
static int ldlm_res_hash_dump(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                              cfs_hlist_node_t *hnode, void *arg)
        struct ldlm_resource *res = cfs_hash_object(hs, hnode);
        int level = (int)(unsigned long)arg;

        ldlm_resource_dump(level, res);

void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
        if (!((libcfs_debug | D_ERROR) & level))

        CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
               ldlm_ns_name(ns), cfs_atomic_read(&ns->ns_bref),
               ns_is_client(ns) ? "client" : "server");

        if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))

        cfs_hash_for_each_nolock(ns->ns_rs_hash,
                                 (void *)(unsigned long)level);
        spin_lock(&ns->ns_lock);
        ns->ns_next_dump = cfs_time_shift(10);
        spin_unlock(&ns->ns_lock);
EXPORT_SYMBOL(ldlm_namespace_dump);

void ldlm_resource_dump(int level, struct ldlm_resource *res)
        struct ldlm_lock *lock;
        unsigned int granted = 0;

        CLASSERT(RES_NAME_SIZE == 4);

        if (!((libcfs_debug | D_ERROR) & level))

        CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
               ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
               res->lr_name.name[2], res->lr_name.name[3],
               cfs_atomic_read(&res->lr_refcount));

        if (!cfs_list_empty(&res->lr_granted)) {
                CDEBUG(level, "Granted locks (in reverse order):\n");
                cfs_list_for_each_entry_reverse(lock, &res->lr_granted,
                        LDLM_DEBUG_LIMIT(level, lock, "###");
                        if (!(level & D_CANTMASK) &&
                            ++granted > ldlm_dump_granted_max) {
                                CDEBUG(level, "only dump %d granted locks to "
                                       "avoid DDOS.\n", granted);

        if (!cfs_list_empty(&res->lr_converting)) {
                CDEBUG(level, "Converting locks:\n");
                cfs_list_for_each_entry(lock, &res->lr_converting, l_res_link)
                        LDLM_DEBUG_LIMIT(level, lock, "###");

        if (!cfs_list_empty(&res->lr_waiting)) {
                CDEBUG(level, "Waiting locks:\n");
                cfs_list_for_each_entry(lock, &res->lr_waiting, l_res_link)
                        LDLM_DEBUG_LIMIT(level, lock, "###");