1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5 * Author: Peter Braam <braam@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
8 * This file is part of the Lustre file system, http://www.lustre.org
9 * Lustre is a trademark of Cluster File Systems, Inc.
11 * You may have signed or agreed to another license before downloading
12 * this software. If so, you are bound by the terms and conditions
13 * of that agreement, and the following does not apply to you. See the
14 * LICENSE file included with this distribution for more information.
16 * If you did not agree to a different license, then this copy of Lustre
17 * is open source software; you can redistribute it and/or modify it
18 * under the terms of version 2 of the GNU General Public License as
19 * published by the Free Software Foundation.
21 * In either case, Lustre is distributed in the hope that it will be
22 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
23 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * license text for more details.
27 #define DEBUG_SUBSYSTEM S_LDLM
30 # include <libcfs/libcfs.h>
31 # include <linux/lustre_intent.h>
33 # include <liblustre.h>
34 # include <libcfs/kp30.h>
37 #include <obd_class.h>
38 #include "ldlm_internal.h"
40 //struct lustre_lock ldlm_everything_lock;
43 char *ldlm_lockname[] = {
54 char *ldlm_typename[] = {
61 char *ldlm_it2str(int it)
68 case (IT_OPEN | IT_CREAT):
81 CERROR("Unknown intent %d\n", it);
86 extern cfs_mem_cache_t *ldlm_lock_slab;
88 static ldlm_processing_policy ldlm_processing_policy_table[] = {
89 [LDLM_PLAIN] ldlm_process_plain_lock,
90 [LDLM_EXTENT] ldlm_process_extent_lock,
92 [LDLM_FLOCK] ldlm_process_flock_lock,
94 [LDLM_IBITS] ldlm_process_inodebits_lock,
97 ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
99 return ldlm_processing_policy_table[res->lr_type];
102 void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
108 * REFCOUNTED LOCK OBJECTS
113 * Lock refcounts, during creation:
114 * - one special one for allocation, dec'd only once in destroy
115 * - one for being a lock that's in-use
116 * - one for the addref associated with a new lock
118 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
120 atomic_inc(&lock->l_refc);
124 void ldlm_lock_put(struct ldlm_lock *lock)
128 LASSERT(lock->l_resource != LP_POISON);
129 LASSERT(atomic_read(&lock->l_refc) > 0);
130 if (atomic_dec_and_test(&lock->l_refc)) {
131 struct ldlm_resource *res;
134 "final lock_put on destroyed lock, freeing it.\n");
136 lock_res_and_lock(lock);
137 res = lock->l_resource;
138 LASSERT(lock->l_destroyed);
139 LASSERT(list_empty(&lock->l_res_link));
142 LDLM_LOCK_PUT(lock->l_parent);
143 unlock_res_and_lock(lock);
145 atomic_dec(&res->lr_namespace->ns_locks);
146 ldlm_resource_putref(res);
147 lock->l_resource = NULL;
149 class_export_put(lock->l_export);
151 if (lock->l_lvb_data != NULL)
152 OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);
154 OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
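/*
 * Usage sketch (illustrative, not part of the original file; the helper name
 * is hypothetical): every LDLM_LOCK_GET() must be balanced by an
 * LDLM_LOCK_PUT(); the final put on an already-destroyed lock is what
 * actually frees it in ldlm_lock_put() above.
 */
#if 0
static void ldlm_lock_refcount_sketch(struct ldlm_lock *lock)
{
        LDLM_LOCK_GET(lock);    /* pin the lock while we work on it */
        /* ... inspect or update the lock ... */
        LDLM_LOCK_PUT(lock);    /* may free the lock if this was the last
                                 * reference and the lock was destroyed */
}
#endif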
160 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
163 if (!list_empty(&lock->l_lru)) {
164 LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
165 list_del_init(&lock->l_lru);
166 lock->l_resource->lr_namespace->ns_nr_unused--;
167 LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0);
173 int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
177 spin_lock(&lock->l_resource->lr_namespace->ns_unused_lock);
178 rc = ldlm_lock_remove_from_lru_nolock(lock);
179 spin_unlock(&lock->l_resource->lr_namespace->ns_unused_lock);
184 /* This used to have a 'strict' flag, which recovery would use to mark an
185 * in-use lock as needing-to-die. Lest I am ever tempted to put it back, I
186 * shall explain why it's gone: with the new hash table scheme, once you call
187 * ldlm_lock_destroy, you can never drop your final references on this lock.
188 * Because it's not in the hash table anymore. -phil */
189 int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
193 if (!list_empty(&lock->l_children)) {
194 LDLM_ERROR(lock, "still has children (%p)!",
195 lock->l_children.next);
196 ldlm_lock_dump(D_ERROR, lock, 0);
199 if (lock->l_readers || lock->l_writers) {
200 LDLM_ERROR(lock, "lock still has references");
201 ldlm_lock_dump(D_ERROR, lock, 0);
205 if (!list_empty(&lock->l_res_link)) {
206 LDLM_ERROR(lock, "lock still on resource");
207 ldlm_lock_dump(D_ERROR, lock, 0);
211 if (lock->l_destroyed) {
212 LASSERT(list_empty(&lock->l_lru));
216 lock->l_destroyed = 1;
219 spin_lock(&lock->l_export->exp_ldlm_data.led_lock);
220 list_del_init(&lock->l_export_chain);
222 spin_unlock(&lock->l_export->exp_ldlm_data.led_lock);
224 ldlm_lock_remove_from_lru(lock);
225 class_handle_unhash(&lock->l_handle);
228 /* Wake anyone waiting for this lock */
229 /* FIXME: I should probably add yet another flag, instead of using
230 * l_export to only call this on clients */
231 if (lock->l_export)
232 class_export_put(lock->l_export);
233 lock->l_export = NULL;
234 if (lock->l_export && lock->l_completion_ast)
235 lock->l_completion_ast(lock, 0);
241 void ldlm_lock_destroy(struct ldlm_lock *lock)
245 lock_res_and_lock(lock);
246 first = ldlm_lock_destroy_internal(lock);
247 unlock_res_and_lock(lock);
249 /* drop reference from hashtable only for first destroy */
255 void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
259 first = ldlm_lock_destroy_internal(lock);
260 /* drop reference from hashtable only for first destroy */
266 /* this is called by portals_handle2object with the handle lock taken */
267 static void lock_handle_addref(void *lock)
269 LDLM_LOCK_GET((struct ldlm_lock *)lock);
273 * usage: pass in a resource on which you have done ldlm_resource_get
274 * pass in a parent lock on which you have done an ldlm_lock_get
275 * after return, ldlm_*_put the resource and parent
276 * returns: lock with refcount 2 - one for current caller and one for remote
278 static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
279 struct ldlm_resource *resource)
281 struct ldlm_lock *lock;
284 if (resource == NULL)
287 OBD_SLAB_ALLOC(lock, ldlm_lock_slab, CFS_ALLOC_IO, sizeof(*lock));
291 lock->l_resource = ldlm_resource_getref(resource);
293 atomic_set(&lock->l_refc, 2);
294 CFS_INIT_LIST_HEAD(&lock->l_children);
295 CFS_INIT_LIST_HEAD(&lock->l_res_link);
296 CFS_INIT_LIST_HEAD(&lock->l_lru);
297 CFS_INIT_LIST_HEAD(&lock->l_export_chain);
298 CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
299 CFS_INIT_LIST_HEAD(&lock->l_tmp);
300 CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
301 CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
302 cfs_waitq_init(&lock->l_waitq);
303 lock->l_blocking_lock = NULL;
306 atomic_inc(&resource->lr_namespace->ns_locks);
308 if (parent != NULL) {
309 spin_lock(&resource->lr_namespace->ns_hash_lock);
310 lock->l_parent = LDLM_LOCK_GET(parent);
311 list_add(&lock->l_childof, &parent->l_children);
312 spin_unlock(&resource->lr_namespace->ns_hash_lock);
315 CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
316 class_handle_hash(&lock->l_handle, lock_handle_addref);
321 int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
322 struct ldlm_res_id new_resid)
324 struct ldlm_resource *oldres = lock->l_resource;
325 struct ldlm_resource *newres;
329 LASSERT(ns->ns_client != 0);
331 lock_res_and_lock(lock);
332 if (memcmp(&new_resid, &lock->l_resource->lr_name,
333 sizeof(lock->l_resource->lr_name)) == 0) {
335 unlock_res_and_lock(lock);
339 LASSERT(new_resid.name[0] != 0);
341 /* This function assumes that the lock isn't on any lists */
342 LASSERT(list_empty(&lock->l_res_link));
344 type = oldres->lr_type;
345 unlock_res_and_lock(lock);
347 newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
348 if (newres == NULL) {
353 lock_res_and_lock(lock);
354 LASSERT(memcmp(&new_resid, &lock->l_resource->lr_name,
355 sizeof(lock->l_resource->lr_name)) != 0);
357 lock->l_resource = newres;
360 unlock_bitlock(lock);
362 /* ...and the flowers are still standing! */
363 ldlm_resource_putref(oldres);
372 void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
374 lockh->cookie = lock->l_handle.h_cookie;
377 /* If 'flags' is nonzero: atomically get the lock and set the given flags.
378 * Return NULL if any of those flags was already set on the lock.
381 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
384 struct ldlm_namespace *ns;
385 struct ldlm_lock *lock = NULL, *retval = NULL;
390 lock = class_handle2object(handle->cookie);
394 LASSERT(lock->l_resource != NULL);
395 ns = lock->l_resource->lr_namespace;
398 lock_res_and_lock(lock);
400 /* It's unlikely but possible that someone marked the lock as
401 * destroyed after we did handle2object on it */
402 if (lock->l_destroyed) {
403 unlock_res_and_lock(lock);
404 CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
409 if (flags && (lock->l_flags & flags)) {
410 unlock_res_and_lock(lock);
416 lock->l_flags |= flags;
418 unlock_res_and_lock(lock);
425 struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
426 const struct lustre_handle *handle)
428 struct ldlm_lock *retval = NULL;
429 retval = __ldlm_handle2lock(handle, 0);
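/*
 * Usage sketch (illustrative, hypothetical helper): converting between a lock
 * pointer and a wire-safe lustre_handle.  __ldlm_handle2lock() returns a
 * referenced lock (or NULL), so the caller must drop that reference with
 * LDLM_LOCK_PUT() when done.
 */
#if 0
static void ldlm_handle_roundtrip_sketch(struct ldlm_lock *lock)
{
        struct lustre_handle lockh;
        struct ldlm_lock *found;

        ldlm_lock2handle(lock, &lockh);         /* lock pointer -> cookie */
        found = __ldlm_handle2lock(&lockh, 0);  /* cookie -> referenced lock */
        if (found != NULL) {
                LASSERT(found == lock);
                LDLM_LOCK_PUT(found);           /* drop the lookup reference */
        }
}
#endif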
433 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
435 struct obd_export *exp = lock->l_export?:lock->l_conn_export;
436 /* INODEBITS_INTEROP: If the other side does not support
437 * inodebits, reply with a plain lock descriptor.
439 if ((lock->l_resource->lr_type == LDLM_IBITS) &&
440 (exp && !(exp->exp_connect_flags & OBD_CONNECT_IBITS))) {
441 struct ldlm_resource res = *lock->l_resource;
443 /* Make sure all the right bits are set in this lock we
444 are going to pass to client */
445 LASSERTF(lock->l_policy_data.l_inodebits.bits ==
446 (MDS_INODELOCK_LOOKUP|MDS_INODELOCK_UPDATE),
447 "Inappropriate inode lock bits during "
448 "conversion " LPU64 "\n",
449 lock->l_policy_data.l_inodebits.bits);
450 res.lr_type = LDLM_PLAIN;
451 ldlm_res2desc(&res, &desc->l_resource);
452 /* Convert "new" lock mode to something an old client can understand */
454 if ((lock->l_req_mode == LCK_CR) ||
455 (lock->l_req_mode == LCK_CW))
456 desc->l_req_mode = LCK_PR;
458 desc->l_req_mode = lock->l_req_mode;
459 if ((lock->l_granted_mode == LCK_CR) ||
460 (lock->l_granted_mode == LCK_CW)) {
461 desc->l_granted_mode = LCK_PR;
463 /* We never grant PW/EX locks to clients */
464 LASSERT((lock->l_granted_mode != LCK_PW) &&
465 (lock->l_granted_mode != LCK_EX));
466 desc->l_granted_mode = lock->l_granted_mode;
469 /* We do not copy policy here, because there is no
470 policy for plain locks */
472 ldlm_res2desc(lock->l_resource, &desc->l_resource);
473 desc->l_req_mode = lock->l_req_mode;
474 desc->l_granted_mode = lock->l_granted_mode;
475 desc->l_policy_data = lock->l_policy_data;
479 void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
480 struct list_head *work_list)
482 if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
483 LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
484 lock->l_flags |= LDLM_FL_AST_SENT;
485 /* If the enqueuing client said so, tell the AST recipient to
486 * discard dirty data, rather than writing back. */
487 if (new->l_flags & LDLM_AST_DISCARD_DATA)
488 lock->l_flags |= LDLM_FL_DISCARD_DATA;
489 LASSERT(list_empty(&lock->l_bl_ast));
490 list_add(&lock->l_bl_ast, work_list);
492 LASSERT(lock->l_blocking_lock == NULL);
493 lock->l_blocking_lock = LDLM_LOCK_GET(new);
497 void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
499 if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
500 lock->l_flags |= LDLM_FL_CP_REQD;
501 LDLM_DEBUG(lock, "lock granted; sending completion AST.");
502 LASSERT(list_empty(&lock->l_cp_ast));
503 list_add(&lock->l_cp_ast, work_list);
508 /* must be called with lr_lock held */
509 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
510 struct list_head *work_list)
513 check_res_locked(lock->l_resource);
515 ldlm_add_bl_work_item(lock, new, work_list);
517 ldlm_add_cp_work_item(lock, work_list);
521 void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
523 struct ldlm_lock *lock;
525 lock = ldlm_handle2lock(lockh);
526 LASSERT(lock != NULL);
527 ldlm_lock_addref_internal(lock, mode);
531 void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
533 ldlm_lock_remove_from_lru(lock);
534 if (mode & (LCK_NL | LCK_CR | LCK_PR))
536 if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP))
538 lock->l_last_used = cfs_time_current();
540 LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
543 /* only called for local locks */
544 void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
546 lock_res_and_lock(lock);
547 ldlm_lock_addref_internal_nolock(lock, mode);
548 unlock_res_and_lock(lock);
551 void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
553 struct ldlm_namespace *ns;
556 lock_res_and_lock(lock);
558 ns = lock->l_resource->lr_namespace;
560 LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
561 if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
562 LASSERT(lock->l_readers > 0);
565 if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) {
566 LASSERT(lock->l_writers > 0);
570 if (lock->l_flags & LDLM_FL_LOCAL &&
571 !lock->l_readers && !lock->l_writers) {
572 /* If this is a local lock on a server namespace and this was
573 * the last reference, cancel the lock. */
574 CDEBUG(D_INFO, "forcing cancel of local lock\n");
575 lock->l_flags |= LDLM_FL_CBPENDING;
578 if (!lock->l_readers && !lock->l_writers &&
579 (lock->l_flags & LDLM_FL_CBPENDING)) {
580 /* If we received a blocking AST and this was the last reference,
581 * run the callback. */
582 if (ns->ns_client == LDLM_NAMESPACE_SERVER && lock->l_export)
583 CERROR("FL_CBPENDING set on non-local lock--just a "
586 LDLM_DEBUG(lock, "final decref done on cbpending lock");
588 LDLM_LOCK_GET(lock); /* dropped by bl thread */
589 ldlm_lock_remove_from_lru(lock);
590 unlock_res_and_lock(lock);
591 if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
592 ldlm_bl_to_thread(ns, NULL, lock) != 0)
593 ldlm_handle_bl_callback(ns, NULL, lock);
594 } else if (ns->ns_client == LDLM_NAMESPACE_CLIENT &&
595 !lock->l_readers && !lock->l_writers &&
596 !(lock->l_flags & LDLM_FL_NO_LRU)) {
597 /* If this is a client-side namespace and this was the last
598 * reference, put it on the LRU. */
599 LASSERT(list_empty(&lock->l_lru));
600 LASSERT(ns->ns_nr_unused >= 0);
601 spin_lock(&ns->ns_unused_lock);
602 list_add_tail(&lock->l_lru, &ns->ns_unused_list);
604 spin_unlock(&ns->ns_unused_lock);
605 unlock_res_and_lock(lock);
606 ldlm_cancel_lru(ns, LDLM_ASYNC);
608 unlock_res_and_lock(lock);
611 LDLM_LOCK_PUT(lock); /* matches the ldlm_lock_get in addref */
616 void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
618 struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
619 LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
620 ldlm_lock_decref_internal(lock, mode);
624 /* This will drop a lock reference and mark it for destruction, but will not
625 * necessarily cancel the lock before returning. */
626 void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
628 struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
631 LASSERT(lock != NULL);
633 LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
634 lock_res_and_lock(lock);
635 lock->l_flags |= LDLM_FL_CBPENDING;
636 unlock_res_and_lock(lock);
637 ldlm_lock_decref_internal(lock, mode);
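/*
 * Usage sketch (illustrative, hypothetical helper): reader/writer references
 * are taken and dropped through the handle-based API, and the mode passed to
 * ldlm_lock_decref() must match the one used in ldlm_lock_addref().
 */
#if 0
static void ldlm_use_count_sketch(struct lustre_handle *lockh)
{
        ldlm_lock_addref(lockh, LCK_PR);        /* pin a read lock in use */
        /* ... access data protected by the lock ... */
        ldlm_lock_decref(lockh, LCK_PR);        /* the last decref may put the
                                                 * lock on the LRU or run the
                                                 * blocking callback */
}
#endif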
641 /* Called by:
642 * - ldlm_lock_enqueue
643 * - ldlm_reprocess_queue
644 * - ldlm_lock_convert
646 * must be called with lr_lock held
648 void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
650 struct ldlm_resource *res = lock->l_resource;
653 check_res_locked(res);
655 lock->l_granted_mode = lock->l_req_mode;
656 ldlm_resource_add_lock(res, &res->lr_granted, lock);
658 if (lock->l_granted_mode < res->lr_most_restr)
659 res->lr_most_restr = lock->l_granted_mode;
661 if (work_list && lock->l_completion_ast != NULL)
662 ldlm_add_ast_work_item(lock, NULL, work_list);
667 /* returns a referenced lock or NULL. See the flag descriptions below, in the
668 * comment above ldlm_lock_match */
669 static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
670 ldlm_policy_data_t *policy,
671 struct ldlm_lock *old_lock, int flags)
673 struct ldlm_lock *lock;
674 struct list_head *tmp;
676 list_for_each(tmp, queue) {
677 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
679 if (lock == old_lock)
682 /* llite sometimes wants to match locks that will be
683 * canceled when their users drop, but we allow it to match
684 * if it passes in CBPENDING and the lock still has users.
685 * this is generally only going to be used by children
686 * whose parents already hold a lock so forward progress
687 * can still happen. */
688 if (lock->l_flags & LDLM_FL_CBPENDING &&
689 !(flags & LDLM_FL_CBPENDING))
691 if (lock->l_flags & LDLM_FL_CBPENDING &&
692 lock->l_readers == 0 && lock->l_writers == 0)
695 if (!(lock->l_req_mode & mode))
698 if (lock->l_resource->lr_type == LDLM_EXTENT &&
699 (lock->l_policy_data.l_extent.start >
700 policy->l_extent.start ||
701 lock->l_policy_data.l_extent.end < policy->l_extent.end))
704 if (unlikely(mode == LCK_GROUP) &&
705 lock->l_resource->lr_type == LDLM_EXTENT &&
706 lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
709 /* We match if we have an existing lock with the same or a wider
710 * set of bits. */
711 if (lock->l_resource->lr_type == LDLM_IBITS &&
712 ((lock->l_policy_data.l_inodebits.bits &
713 policy->l_inodebits.bits) !=
714 policy->l_inodebits.bits))
717 if (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED))
720 if ((flags & LDLM_FL_LOCAL_ONLY) &&
721 !(lock->l_flags & LDLM_FL_LOCAL))
724 if (flags & LDLM_FL_TEST_LOCK)
727 ldlm_lock_addref_internal_nolock(lock, mode);
734 void ldlm_lock_allow_match(struct ldlm_lock *lock)
736 lock_res_and_lock(lock);
737 lock->l_flags |= LDLM_FL_CAN_MATCH;
738 cfs_waitq_signal(&lock->l_waitq);
739 unlock_res_and_lock(lock);
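/*
 * Usage sketch (illustrative, hypothetical helper): a caller that has
 * finished initialising whatever state the new lock protects calls
 * ldlm_lock_allow_match(), which sets LDLM_FL_CAN_MATCH and wakes the
 * l_wait_event() in ldlm_lock_match() below.
 */
#if 0
static void ldlm_enqueue_finished_sketch(struct ldlm_lock *lock)
{
        /* ... process the enqueue reply, e.g. copy l_lvb_data ... */
        ldlm_lock_allow_match(lock);    /* now visible to ldlm_lock_match() */
}
#endif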
742 /* Can be called in two ways:
744 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
745 * for a duplicate of.
747 * Otherwise, all of the fields must be filled in, to match against.
749 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
750 * server (ie, connh is NULL)
751 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
752 * list will be considered
753 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
754 * to be canceled can still be matched as long as they still have reader
755 * or writer references
756 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
757 * just tell us if we would have matched.
759 * Returns 1 if it finds an already-existing lock that is compatible; in this
760 * case, lockh is filled in with an addref()ed lock
762 * We also check the security context; if that fails we simply return 0 (to keep
763 * caller code unchanged) and the context failure will be discovered by the caller
766 int ldlm_lock_match(struct ldlm_namespace *ns, int flags,
767 struct ldlm_res_id *res_id, ldlm_type_t type,
768 ldlm_policy_data_t *policy, ldlm_mode_t mode,
769 struct lustre_handle *lockh)
771 struct ldlm_resource *res;
772 struct ldlm_lock *lock, *old_lock = NULL;
777 old_lock = ldlm_handle2lock(lockh);
780 ns = old_lock->l_resource->lr_namespace;
781 res_id = &old_lock->l_resource->lr_name;
782 type = old_lock->l_resource->lr_type;
783 mode = old_lock->l_req_mode;
786 res = ldlm_resource_get(ns, NULL, *res_id, type, 0);
788 LASSERT(old_lock == NULL);
794 lock = search_queue(&res->lr_granted, mode, policy, old_lock, flags);
797 if (flags & LDLM_FL_BLOCK_GRANTED)
799 lock = search_queue(&res->lr_converting, mode, policy, old_lock, flags);
802 lock = search_queue(&res->lr_waiting, mode, policy, old_lock, flags);
809 ldlm_resource_putref(res);
812 ldlm_lock2handle(lock, lockh);
813 if (!(lock->l_flags & LDLM_FL_CAN_MATCH)) {
814 struct l_wait_info lwi;
815 if (lock->l_completion_ast) {
816 int err = lock->l_completion_ast(lock,
817 LDLM_FL_WAIT_NOREPROC,
820 if (flags & LDLM_FL_TEST_LOCK)
823 ldlm_lock_decref_internal(lock, mode);
829 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout), NULL,
830 LWI_ON_SIGNAL_NOOP, NULL);
832 /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
833 l_wait_event(lock->l_waitq,
834 (lock->l_flags & LDLM_FL_CAN_MATCH), &lwi);
839 LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
840 (type == LDLM_PLAIN || type == LDLM_IBITS) ?
841 res_id->name[2] : policy->l_extent.start,
842 (type == LDLM_PLAIN || type == LDLM_IBITS) ?
843 res_id->name[3] : policy->l_extent.end);
845 /* check user's security context */
846 if (lock->l_conn_export &&
847 sptlrpc_import_check_ctx(
848 class_exp2cliimp(lock->l_conn_export))) {
849 if (!(flags & LDLM_FL_TEST_LOCK))
850 ldlm_lock_decref_internal(lock, mode);
854 if (flags & LDLM_FL_TEST_LOCK)
856 } else if (!(flags & LDLM_FL_TEST_LOCK)) { /* less verbose for test-only */
857 LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
858 LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
859 type, mode, res_id->name[0], res_id->name[1],
860 (type == LDLM_PLAIN || type == LDLM_IBITS) ?
861 res_id->name[2] : policy->l_extent.start,
862 (type == LDLM_PLAIN || type == LDLM_IBITS) ?
863 res_id->name[3] : policy->l_extent.end);
866 LDLM_LOCK_PUT(old_lock);
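/*
 * Usage sketch (illustrative, extent lock shown; the helper name, resource id
 * and extent are made up): a successful match fills 'lockh' with an
 * addref()ed lock, which the caller later releases with ldlm_lock_decref()
 * in the matched mode.  With LDLM_FL_TEST_LOCK no reference is taken.
 */
#if 0
static int ldlm_lock_match_sketch(struct ldlm_namespace *ns)
{
        struct ldlm_res_id res_id = { .name = { 0x2acf, 0 } };
        ldlm_policy_data_t policy = {
                .l_extent = { .start = 0, .end = OBD_OBJECT_EOF }
        };
        struct lustre_handle lockh;
        int matched;

        matched = ldlm_lock_match(ns, LDLM_FL_BLOCK_GRANTED, &res_id,
                                  LDLM_EXTENT, &policy, LCK_PR, &lockh);
        if (matched)
                ldlm_lock_decref(&lockh, LCK_PR); /* drop the match reference */
        return matched;
}
#endif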
871 /* Returns a referenced lock */
872 struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
873 const struct lustre_handle *parent_lock_handle,
874 const struct ldlm_res_id res_id,
877 ldlm_blocking_callback blocking,
878 ldlm_completion_callback completion,
879 ldlm_glimpse_callback glimpse,
880 void *data, __u32 lvb_len)
882 struct ldlm_resource *res, *parent_res = NULL;
883 struct ldlm_lock *lock, *parent_lock = NULL;
886 if (parent_lock_handle) {
887 parent_lock = ldlm_handle2lock(parent_lock_handle);
889 parent_res = parent_lock->l_resource;
892 res = ldlm_resource_get(ns, parent_res, res_id, type, 1);
896 lock = ldlm_lock_new(parent_lock, res);
897 ldlm_resource_putref(res);
898 if (parent_lock != NULL)
899 LDLM_LOCK_PUT(parent_lock);
904 lock->l_req_mode = mode;
905 lock->l_ast_data = data;
906 lock->l_blocking_ast = blocking;
907 lock->l_completion_ast = completion;
908 lock->l_glimpse_ast = glimpse;
909 lock->l_pid = cfs_curproc_pid();
912 lock->l_lvb_len = lvb_len;
913 OBD_ALLOC(lock->l_lvb_data, lvb_len);
914 if (lock->l_lvb_data == NULL) {
915 OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
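/*
 * Usage sketch (illustrative, hypothetical helper): creating a lock object.
 * The NULL/0 arguments (parent handle, glimpse callback, private data, LVB
 * length) and the plain/PR type and mode are placeholders; real callers pass
 * the returned, referenced lock on to ldlm_lock_enqueue().
 */
#if 0
static struct ldlm_lock *
ldlm_lock_create_sketch(struct ldlm_namespace *ns,
                        const struct ldlm_res_id res_id,
                        ldlm_blocking_callback blocking,
                        ldlm_completion_callback completion)
{
        return ldlm_lock_create(ns, NULL /* no parent lock */, res_id,
                                LDLM_PLAIN, LCK_PR, blocking, completion,
                                NULL /* glimpse */, NULL /* data */,
                                0 /* lvb_len */);
}
#endif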
923 ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
924 struct ldlm_lock **lockp,
925 void *cookie, int *flags)
927 struct ldlm_lock *lock = *lockp;
928 struct ldlm_resource *res = lock->l_resource;
929 int local = res->lr_namespace->ns_client;
930 ldlm_processing_policy policy;
931 ldlm_error_t rc = ELDLM_OK;
934 do_gettimeofday(&lock->l_enqueued_time);
935 /* policies are not executed on the client or during replay */
936 if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
937 && !local && ns->ns_policy) {
938 rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
940 if (rc == ELDLM_LOCK_REPLACED) {
941 /* The lock that was returned has already been granted,
942 * and placed into lockp. If it's not the same as the
943 * one we passed in, then destroy the old one and our
944 * work here is done. */
945 if (lock != *lockp) {
946 ldlm_lock_destroy(lock);
949 *flags |= LDLM_FL_LOCK_CHANGED;
951 } else if (rc != ELDLM_OK ||
952 (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
953 ldlm_lock_destroy(lock);
958 lock_res_and_lock(lock);
959 if (local && lock->l_req_mode == lock->l_granted_mode) {
960 /* The server returned a blocked lock, but it was granted before
961 * we got a chance to actually enqueue it. We don't need to do
963 *flags &= ~(LDLM_FL_BLOCK_GRANTED |
964 LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
968 /* Some flags from the enqueue want to make it into the AST, via the
970 lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;
972 /* This distinction between local lock trees is very important; a client
973 * namespace only has information about locks taken by that client, and
974 * thus doesn't have enough information to decide for itself if it can
975 * be granted (below). In this case, we do exactly what the server
976 * tells us to do, as dictated by the 'flags'.
978 * We do exactly the same thing during recovery, when the server is
979 * more or less trusting the clients not to lie.
981 * FIXME (bug 268): Detect obvious lies by checking compatibility in
982 * granted/converting queues. */
983 ldlm_resource_unlink_lock(lock);
985 if (*flags & LDLM_FL_BLOCK_CONV)
986 ldlm_resource_add_lock(res, &res->lr_converting, lock);
987 else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
988 ldlm_resource_add_lock(res, &res->lr_waiting, lock);
990 ldlm_grant_lock(lock, NULL);
992 } else if (*flags & LDLM_FL_REPLAY) {
993 if (*flags & LDLM_FL_BLOCK_CONV) {
994 ldlm_resource_add_lock(res, &res->lr_converting, lock);
996 } else if (*flags & LDLM_FL_BLOCK_WAIT) {
997 ldlm_resource_add_lock(res, &res->lr_waiting, lock);
999 } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
1000 ldlm_grant_lock(lock, NULL);
1001 GOTO(out, ELDLM_OK);
1003 /* If no flags, fall through to normal enqueue path. */
1006 policy = ldlm_processing_policy_table[res->lr_type];
1007 policy(lock, flags, 1, &rc, NULL);
1010 unlock_res_and_lock(lock);
1014 /* Must be called with the resource lock held; 'queue' is the waiting or converting list. */
1015 int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
1016 struct list_head *work_list)
1018 struct list_head *tmp, *pos;
1019 ldlm_processing_policy policy;
1021 int rc = LDLM_ITER_CONTINUE;
1025 check_res_locked(res);
1027 policy = ldlm_processing_policy_table[res->lr_type];
1030 list_for_each_safe(tmp, pos, queue) {
1031 struct ldlm_lock *pending;
1032 pending = list_entry(tmp, struct ldlm_lock, l_res_link);
1034 CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
1037 rc = policy(pending, &flags, 0, &err, work_list);
1038 if (rc != LDLM_ITER_CONTINUE)
1045 int ldlm_run_bl_ast_work(struct list_head *rpc_list)
1047 struct list_head *tmp, *pos;
1048 struct ldlm_lock_desc d;
1049 int rc = 0, retval = 0;
1052 list_for_each_safe(tmp, pos, rpc_list) {
1053 struct ldlm_lock *lock =
1054 list_entry(tmp, struct ldlm_lock, l_bl_ast);
1056 /* nobody should touch l_bl_ast */
1057 lock_res_and_lock(lock);
1058 list_del_init(&lock->l_bl_ast);
1060 LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
1061 LASSERT(lock->l_bl_ast_run == 0);
1062 LASSERT(lock->l_blocking_lock);
1063 lock->l_bl_ast_run++;
1064 unlock_res_and_lock(lock);
1066 ldlm_lock2desc(lock->l_blocking_lock, &d);
1068 LDLM_LOCK_PUT(lock->l_blocking_lock);
1069 lock->l_blocking_lock = NULL;
1070 rc = lock->l_blocking_ast(lock, &d, NULL, LDLM_CB_BLOCKING);
1072 if (rc == -ERESTART)
1075 CDEBUG(D_DLMTRACE, "Failed AST - should clean & "
1076 "disconnect client\n");
1077 LDLM_LOCK_PUT(lock);
1082 int ldlm_run_cp_ast_work(struct list_head *rpc_list)
1084 struct list_head *tmp, *pos;
1085 int rc = 0, retval = 0;
1088 /* It's possible to receive a completion AST before we've set
1089 * the l_completion_ast pointer: either because the AST arrived
1090 * before the reply, or simply because there's a small race
1091 * window between receiving the reply and finishing the local
1092 * enqueue. (bug 842)
1094 * This can't happen with the blocking_ast, however, because we
1095 * will never call the local blocking_ast until we drop our
1096 * reader/writer reference, which we won't do until we get the
1097 * reply and finish enqueueing. */
1099 list_for_each_safe(tmp, pos, rpc_list) {
1100 struct ldlm_lock *lock =
1101 list_entry(tmp, struct ldlm_lock, l_cp_ast);
1103 /* nobody should touch l_cp_ast */
1104 lock_res_and_lock(lock);
1105 list_del_init(&lock->l_cp_ast);
1106 LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
1107 lock->l_flags &= ~LDLM_FL_CP_REQD;
1108 unlock_res_and_lock(lock);
1110 if (lock->l_completion_ast != NULL)
1111 rc = lock->l_completion_ast(lock, 0, 0);
1112 if (rc == -ERESTART)
1115 CDEBUG(D_DLMTRACE, "Failed AST - should clean & "
1116 "disconnect client\n");
1117 LDLM_LOCK_PUT(lock);
1122 static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
1124 ldlm_reprocess_all(res);
1125 return LDLM_ITER_CONTINUE;
1128 void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
1130 struct list_head *tmp;
1137 spin_lock(&ns->ns_hash_lock);
1138 for (i = 0; i < RES_HASH_SIZE; i++) {
1139 tmp = ns->ns_hash[i].next;
1140 while (tmp != &(ns->ns_hash[i])) {
1141 struct ldlm_resource *res =
1142 list_entry(tmp, struct ldlm_resource, lr_hash);
1144 ldlm_resource_getref(res);
1145 spin_unlock(&ns->ns_hash_lock);
1147 rc = reprocess_one_queue(res, NULL);
1149 spin_lock(&ns->ns_hash_lock);
1151 ldlm_resource_putref_locked(res);
1153 if (rc == LDLM_ITER_STOP)
1158 spin_unlock(&ns->ns_hash_lock);
1162 void ldlm_reprocess_all(struct ldlm_resource *res)
1164 struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
1168 /* Local lock trees don't get reprocessed. */
1169 if (res->lr_namespace->ns_client) {
1176 rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
1177 if (rc == LDLM_ITER_CONTINUE)
1178 ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
1181 rc = ldlm_run_cp_ast_work(&rpc_list);
1182 if (rc == -ERESTART) {
1183 LASSERT(list_empty(&rpc_list));
1189 void ldlm_cancel_callback(struct ldlm_lock *lock)
1191 check_res_locked(lock->l_resource);
1192 if (!(lock->l_flags & LDLM_FL_CANCEL)) {
1193 lock->l_flags |= LDLM_FL_CANCEL;
1194 if (lock->l_blocking_ast) {
1195 // l_check_no_ns_lock(ns);
1196 unlock_res_and_lock(lock);
1197 lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
1199 lock_res_and_lock(lock);
1201 LDLM_DEBUG(lock, "no blocking ast");
1206 void ldlm_lock_cancel(struct ldlm_lock *lock)
1208 struct ldlm_resource *res;
1209 struct ldlm_namespace *ns;
1212 ldlm_del_waiting_lock(lock);
1213 lock_res_and_lock(lock);
1215 res = lock->l_resource;
1216 ns = res->lr_namespace;
1218 /* Please do not, no matter how tempting, remove this LBUG without
1219 * talking to me first. -phik */
1220 if (lock->l_readers || lock->l_writers) {
1221 LDLM_ERROR(lock, "lock still has references");
1225 ldlm_cancel_callback(lock);
1227 ldlm_resource_unlink_lock(lock);
1228 unlock_res_and_lock(lock);
1230 ldlm_lock_destroy(lock);
1235 int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
1237 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
1243 lock->l_ast_data = data;
1244 LDLM_LOCK_PUT(lock);
1248 void ldlm_cancel_locks_for_export(struct obd_export *exp)
1250 struct ldlm_lock *lock;
1251 struct ldlm_resource *res;
1253 spin_lock(&exp->exp_ldlm_data.led_lock);
1254 while (!list_empty(&exp->exp_ldlm_data.led_held_locks)) {
1255 lock = list_entry(exp->exp_ldlm_data.led_held_locks.next,
1256 struct ldlm_lock, l_export_chain);
1257 res = ldlm_resource_getref(lock->l_resource);
1258 LDLM_LOCK_GET(lock);
1259 spin_unlock(&exp->exp_ldlm_data.led_lock);
1261 LDLM_DEBUG(lock, "export %p", exp);
1262 ldlm_lock_cancel(lock);
1263 ldlm_reprocess_all(res);
1265 ldlm_resource_putref(res);
1266 LDLM_LOCK_PUT(lock);
1267 spin_lock(&exp->exp_ldlm_data.led_lock);
1269 spin_unlock(&exp->exp_ldlm_data.led_lock);
1272 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
1275 struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
1276 struct ldlm_resource *res;
1277 struct ldlm_namespace *ns;
1283 if (new_mode == lock->l_granted_mode) { // No changes? Just return.
1284 *flags |= LDLM_FL_BLOCK_GRANTED;
1285 RETURN(lock->l_resource);
1288 LASSERTF(new_mode == LCK_PW && lock->l_granted_mode == LCK_PR,
1289 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
1291 lock_res_and_lock(lock);
1293 res = lock->l_resource;
1294 ns = res->lr_namespace;
1296 old_mode = lock->l_req_mode;
1297 lock->l_req_mode = new_mode;
1298 ldlm_resource_unlink_lock(lock);
1300 /* If this is a local resource, put it on the appropriate list. */
1301 if (res->lr_namespace->ns_client) {
1302 if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
1303 ldlm_resource_add_lock(res, &res->lr_converting, lock);
1305 /* This should never happen, because of the way the
1306 * server handles conversions. */
1307 LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
1311 ldlm_grant_lock(lock, &rpc_list);
1313 /* FIXME: completion handling not with ns_lock held ! */
1314 if (lock->l_completion_ast)
1315 lock->l_completion_ast(lock, 0, NULL);
1319 ldlm_processing_policy policy;
1320 policy = ldlm_processing_policy_table[res->lr_type];
1321 rc = policy(lock, &pflags, 0, &err, &rpc_list);
1322 if (rc == LDLM_ITER_STOP) {
1323 lock->l_req_mode = old_mode;
1324 ldlm_resource_add_lock(res, &res->lr_granted, lock);
1327 *flags |= LDLM_FL_BLOCK_GRANTED;
1331 unlock_res_and_lock(lock);
1334 ldlm_run_cp_ast_work(&rpc_list);
1338 void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
1340 struct obd_device *obd = NULL;
1342 if (!((libcfs_debug | D_ERROR) & level))
1346 CDEBUG(level, " NULL LDLM lock\n");
1350 CDEBUG(level," -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
1351 lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1353 if (lock->l_conn_export != NULL)
1354 obd = lock->l_conn_export->exp_obd;
1355 if (lock->l_export && lock->l_export->exp_connection) {
1356 CDEBUG(level, " Node: NID %s (rhandle: "LPX64")\n",
1357 libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid),
1358 lock->l_remote_handle.cookie);
1359 } else if (obd == NULL) {
1360 CDEBUG(level, " Node: local\n");
1362 struct obd_import *imp = obd->u.cli.cl_import;
1363 CDEBUG(level, " Node: NID %s (rhandle: "LPX64")\n",
1364 libcfs_nid2str(imp->imp_connection->c_peer.nid),
1365 lock->l_remote_handle.cookie);
1367 CDEBUG(level, " Resource: %p ("LPU64"/"LPU64"/"LPU64")\n",
1369 lock->l_resource->lr_name.name[0],
1370 lock->l_resource->lr_name.name[1],
1371 lock->l_resource->lr_name.name[2]);
1372 CDEBUG(level, " Req mode: %s, grant mode: %s, rc: %u, read: %d, "
1373 "write: %d flags: %#x\n", ldlm_lockname[lock->l_req_mode],
1374 ldlm_lockname[lock->l_granted_mode],
1375 atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
1377 if (lock->l_resource->lr_type == LDLM_EXTENT)
1378 CDEBUG(level, " Extent: "LPU64" -> "LPU64
1379 " (req "LPU64"-"LPU64")\n",
1380 lock->l_policy_data.l_extent.start,
1381 lock->l_policy_data.l_extent.end,
1382 lock->l_req_extent.start, lock->l_req_extent.end);
1383 else if (lock->l_resource->lr_type == LDLM_FLOCK)
1384 CDEBUG(level, " Pid: %d Extent: "LPU64" -> "LPU64"\n",
1385 lock->l_policy_data.l_flock.pid,
1386 lock->l_policy_data.l_flock.start,
1387 lock->l_policy_data.l_flock.end);
1388 else if (lock->l_resource->lr_type == LDLM_IBITS)
1389 CDEBUG(level, " Bits: "LPX64"\n",
1390 lock->l_policy_data.l_inodebits.bits);
1393 void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
1395 struct ldlm_lock *lock;
1397 lock = ldlm_handle2lock(lockh);
1401 ldlm_lock_dump(D_OTHER, lock, 0);
1403 LDLM_LOCK_PUT(lock);
1406 void cdebug_va(cfs_debug_limit_state_t *cdls, __u32 mask,
1407 const char *file, const char *func, const int line,
1408 const char *fmt, va_list args);
1409 void cdebug(cfs_debug_limit_state_t *cdls, __u32 mask,
1410 const char *file, const char *func, const int line,
1411 const char *fmt, ...);
1414 ldlm_lock_debug(cfs_debug_limit_state_t *cdls,
1415 __u32 level, struct ldlm_lock *lock,
1416 const char *file, const char *func, const int line,
1421 va_start(args, fmt);
1422 cdebug_va(cdls, level, file, func, line, fmt, args);
1425 if (lock->l_resource == NULL) {
1426 cdebug(cdls, level, file, func, line,
1427 " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1428 "res: \?\? rrc=\?\? type: \?\?\? flags: %x remote: "
1429 LPX64" expref: %d pid: %u\n", lock,
1430 lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1431 lock->l_readers, lock->l_writers,
1432 ldlm_lockname[lock->l_granted_mode],
1433 ldlm_lockname[lock->l_req_mode],
1434 lock->l_flags, lock->l_remote_handle.cookie,
1436 atomic_read(&lock->l_export->exp_refcount) : -99,
1441 switch (lock->l_resource->lr_type) {
1443 cdebug(cdls, level, file, func, line,
1444 " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1445 "res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
1446 "] (req "LPU64"->"LPU64") flags: %x remote: "LPX64
1447 " expref: %d pid: %u\n",
1448 lock->l_resource->lr_namespace->ns_name, lock,
1449 lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1450 lock->l_readers, lock->l_writers,
1451 ldlm_lockname[lock->l_granted_mode],
1452 ldlm_lockname[lock->l_req_mode],
1453 lock->l_resource->lr_name.name[0],
1454 lock->l_resource->lr_name.name[1],
1455 atomic_read(&lock->l_resource->lr_refcount),
1456 ldlm_typename[lock->l_resource->lr_type],
1457 lock->l_policy_data.l_extent.start,
1458 lock->l_policy_data.l_extent.end,
1459 lock->l_req_extent.start, lock->l_req_extent.end,
1460 lock->l_flags, lock->l_remote_handle.cookie,
1462 atomic_read(&lock->l_export->exp_refcount) : -99,
1467 cdebug(cdls, level, file, func, line,
1468 " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1469 "res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
1470 "["LPU64"->"LPU64"] flags: %x remote: "LPX64
1471 " expref: %d pid: %u\n",
1472 lock->l_resource->lr_namespace->ns_name, lock,
1473 lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1474 lock->l_readers, lock->l_writers,
1475 ldlm_lockname[lock->l_granted_mode],
1476 ldlm_lockname[lock->l_req_mode],
1477 lock->l_resource->lr_name.name[0],
1478 lock->l_resource->lr_name.name[1],
1479 atomic_read(&lock->l_resource->lr_refcount),
1480 ldlm_typename[lock->l_resource->lr_type],
1481 lock->l_policy_data.l_flock.pid,
1482 lock->l_policy_data.l_flock.start,
1483 lock->l_policy_data.l_flock.end,
1484 lock->l_flags, lock->l_remote_handle.cookie,
1486 atomic_read(&lock->l_export->exp_refcount) : -99,
1491 cdebug(cdls, level, file, func, line,
1492 " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1493 "res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
1494 "flags: %x remote: "LPX64" expref: %d "
1496 lock->l_resource->lr_namespace->ns_name,
1497 lock, lock->l_handle.h_cookie,
1498 atomic_read (&lock->l_refc),
1499 lock->l_readers, lock->l_writers,
1500 ldlm_lockname[lock->l_granted_mode],
1501 ldlm_lockname[lock->l_req_mode],
1502 lock->l_resource->lr_name.name[0],
1503 lock->l_resource->lr_name.name[1],
1504 lock->l_policy_data.l_inodebits.bits,
1505 atomic_read(&lock->l_resource->lr_refcount),
1506 ldlm_typename[lock->l_resource->lr_type],
1507 lock->l_flags, lock->l_remote_handle.cookie,
1509 atomic_read(&lock->l_export->exp_refcount) : -99,
1514 cdebug(cdls, level, file, func, line,
1515 " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1516 "res: "LPU64"/"LPU64" rrc: %d type: %s flags: %x "
1517 "remote: "LPX64" expref: %d pid: %u\n",
1518 lock->l_resource->lr_namespace->ns_name,
1519 lock, lock->l_handle.h_cookie,
1520 atomic_read (&lock->l_refc),
1521 lock->l_readers, lock->l_writers,
1522 ldlm_lockname[lock->l_granted_mode],
1523 ldlm_lockname[lock->l_req_mode],
1524 lock->l_resource->lr_name.name[0],
1525 lock->l_resource->lr_name.name[1],
1526 atomic_read(&lock->l_resource->lr_refcount),
1527 ldlm_typename[lock->l_resource->lr_type],
1528 lock->l_flags, lock->l_remote_handle.cookie,
1530 atomic_read(&lock->l_export->exp_refcount) : -99,
1535 EXPORT_SYMBOL(ldlm_lock_debug);