/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lock.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>
#include <obd_class.h>

#include "ldlm_internal.h"
char *ldlm_lockname[] = {
        [LCK_MINMODE]   = "--",
        [LCK_EX]        = "EX",
        [LCK_PW]        = "PW",
        [LCK_PR]        = "PR",
        [LCK_CW]        = "CW",
        [LCK_CR]        = "CR",
        [LCK_NL]        = "NL",
        [LCK_GROUP]     = "GROUP",
        [LCK_COS]       = "COS",
};
EXPORT_SYMBOL(ldlm_lockname);
char *ldlm_typename[] = {
        [LDLM_PLAIN]    = "PLN",
        [LDLM_EXTENT]   = "EXT",
        [LDLM_FLOCK]    = "FLK",
        [LDLM_IBITS]    = "IBT",
};
static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
        [LDLM_PLAIN - LDLM_MIN_TYPE]    = ldlm_plain_policy_wire_to_local,
        [LDLM_EXTENT - LDLM_MIN_TYPE]   = ldlm_extent_policy_wire_to_local,
        [LDLM_FLOCK - LDLM_MIN_TYPE]    = ldlm_flock_policy_wire_to_local,
        [LDLM_IBITS - LDLM_MIN_TYPE]    = ldlm_ibits_policy_wire_to_local,
};

static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
        [LDLM_PLAIN - LDLM_MIN_TYPE]    = ldlm_plain_policy_local_to_wire,
        [LDLM_EXTENT - LDLM_MIN_TYPE]   = ldlm_extent_policy_local_to_wire,
        [LDLM_FLOCK - LDLM_MIN_TYPE]    = ldlm_flock_policy_local_to_wire,
        [LDLM_IBITS - LDLM_MIN_TYPE]    = ldlm_ibits_policy_local_to_wire,
};
/**
 * Converts lock policy from local format to on the wire lock_desc format
 */
void ldlm_convert_policy_to_wire(ldlm_type_t type,
                                 const ldlm_policy_data_t *lpolicy,
                                 ldlm_wire_policy_data_t *wpolicy)
{
        ldlm_policy_local_to_wire_t convert;

        convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];
        convert(lpolicy, wpolicy);
}
/**
 * Converts lock policy from on the wire lock_desc format to local format
 */
void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
                                  const ldlm_wire_policy_data_t *wpolicy,
                                  ldlm_policy_data_t *lpolicy)
{
        ldlm_policy_wire_to_local_t convert;

        convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];
        convert(wpolicy, lpolicy);
}
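
/*
 * Usage sketch (hypothetical caller, not part of the original file): both
 * converters are table-driven, indexed by the lock type, so a caller never
 * dispatches on the type itself, e.g. when filling a wire descriptor:
 *
 *      ldlm_wire_policy_data_t wpolicy;
 *
 *      ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
 *                                  &lock->l_policy_data, &wpolicy);
 */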
char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_LOOKUP:
                return "lookup";
        case IT_UNLINK:
                return "unlink";
        case IT_GETXATTR:
                return "getxattr";
        case IT_LAYOUT:
                return "layout";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}
EXPORT_SYMBOL(ldlm_it2str);
extern struct kmem_cache *ldlm_lock_slab;

#ifdef HAVE_SERVER_SUPPORT
static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN]    = ldlm_process_plain_lock,
        [LDLM_EXTENT]   = ldlm_process_extent_lock,
        [LDLM_FLOCK]    = ldlm_process_flock_lock,
        [LDLM_IBITS]    = ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
        return ldlm_processing_policy_table[res->lr_type];
}
EXPORT_SYMBOL(ldlm_get_processing_policy);
#endif /* HAVE_SERVER_SUPPORT */
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
        ns->ns_policy = arg;
}
EXPORT_SYMBOL(ldlm_register_intent);
/*
 * REFCOUNTED LOCK OBJECTS
 */
/**
 * Get a reference on a lock.
 *
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        atomic_inc(&lock->l_refc);
        return lock;
}
EXPORT_SYMBOL(ldlm_lock_get);
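
/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * ldlm_lock_get() is normally used through the LDLM_LOCK_GET()/
 * LDLM_LOCK_PUT() pair; every GET must be balanced by a PUT or the lock
 * can never be freed:
 *
 *      struct ldlm_lock *lock = LDLM_LOCK_GET(some_lock);
 *
 *      inspect_lock(lock);             (inspect_lock is hypothetical)
 *      LDLM_LOCK_PUT(lock);
 */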
/**
 * Release lock reference.
 *
 * Also frees the lock if it was last reference.
 */
void ldlm_lock_put(struct ldlm_lock *lock)
{
        ENTRY;

        LASSERT(lock->l_resource != LP_POISON);
        LASSERT(atomic_read(&lock->l_refc) > 0);
        if (atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_resource *res;

                LDLM_DEBUG(lock,
                           "final lock_put on destroyed lock, freeing it.");

                res = lock->l_resource;
                LASSERT(ldlm_is_destroyed(lock));
                LASSERT(list_empty(&lock->l_res_link));
                LASSERT(list_empty(&lock->l_pending_chain));

                lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
                                     LDLM_NSS_LOCKS);
                lu_ref_del(&res->lr_reference, "lock", lock);
                ldlm_resource_putref(res);
                lock->l_resource = NULL;
                if (lock->l_export) {
                        class_export_lock_put(lock->l_export, lock);
                        lock->l_export = NULL;
                }

                if (lock->l_lvb_data != NULL)
                        OBD_FREE_LARGE(lock->l_lvb_data, lock->l_lvb_len);

                ldlm_interval_free(ldlm_interval_detach(lock));
                lu_ref_fini(&lock->l_reference);
                OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle);
        }

        EXIT;
}
EXPORT_SYMBOL(ldlm_lock_put);
/**
 * Removes LDLM lock \a lock from LRU. Assumes LRU is already locked.
 */
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
        int rc = 0;

        if (!list_empty(&lock->l_lru)) {
                struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                list_del_init(&lock->l_lru);
                LASSERT(ns->ns_nr_unused > 0);
                ns->ns_nr_unused--;
                rc = 1;
        }
        return rc;
}
/**
 * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
 */
int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
        int rc;

        ENTRY;
        if (ldlm_is_ns_srv(lock)) {
                /* these locks are not in any LRU */
                LASSERT(list_empty(&lock->l_lru));
                RETURN(0);
        }

        spin_lock(&ns->ns_lock);
        rc = ldlm_lock_remove_from_lru_nolock(lock);
        spin_unlock(&ns->ns_lock);
        RETURN(rc);
}
/**
 * Adds LDLM lock \a lock to namespace LRU. Assumes LRU is already locked.
 */
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        lock->l_last_used = cfs_time_current();
        LASSERT(list_empty(&lock->l_lru));
        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
        list_add_tail(&lock->l_lru, &ns->ns_unused_list);
        ldlm_clear_skipped(lock);
        LASSERT(ns->ns_nr_unused >= 0);
        ns->ns_nr_unused++;
}
/**
 * Adds LDLM lock \a lock to namespace LRU. Obtains necessary LRU locks
 * first.
 */
void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        ENTRY;
        spin_lock(&ns->ns_lock);
        ldlm_lock_add_to_lru_nolock(lock);
        spin_unlock(&ns->ns_lock);
        EXIT;
}
/**
 * Moves LDLM lock \a lock that is already in namespace LRU to the tail of
 * the LRU. Performs necessary LRU locking.
 */
void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        ENTRY;
        if (ldlm_is_ns_srv(lock)) {
                /* these locks are not in any LRU */
                LASSERT(list_empty(&lock->l_lru));
                EXIT;
                return;
        }

        spin_lock(&ns->ns_lock);
        if (!list_empty(&lock->l_lru)) {
                ldlm_lock_remove_from_lru_nolock(lock);
                ldlm_lock_add_to_lru_nolock(lock);
        }
        spin_unlock(&ns->ns_lock);
        EXIT;
}
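
/*
 * Design note with a short sketch (not from the original source): the
 * _nolock variants above exist so a path that already holds ns->ns_lock can
 * batch several LRU operations under one acquisition, which is exactly how
 * "move to tail" is built in ldlm_lock_touch_in_lru():
 *
 *      spin_lock(&ns->ns_lock);
 *      ldlm_lock_remove_from_lru_nolock(lock);
 *      ldlm_lock_add_to_lru_nolock(lock);
 *      spin_unlock(&ns->ns_lock);
 */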
/**
 * Helper to destroy a locked lock.
 *
 * Used by ldlm_lock_destroy and ldlm_lock_destroy_nolock.
 * Must be called with l_lock and lr_lock held.
 *
 * Does not actually free the lock data, but rather marks the lock as
 * destroyed by setting l_destroyed field in the lock to 1. Destroys the
 * handle->lock association too, so that the lock can no longer be found,
 * and removes the lock from the LRU list. Actual lock freeing occurs when
 * the last lock reference goes away.
 *
 * Original comment (of some historical value):
 * This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die. Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock.
 * Because it's not in the hash table anymore. -phil
 */
static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
        ENTRY;

        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                LBUG();
        }

        if (ldlm_is_destroyed(lock)) {
                LASSERT(list_empty(&lock->l_lru));
                EXIT;
                return 0;
        }
        ldlm_set_destroyed(lock);

        if (lock->l_export && lock->l_export->exp_lock_hash) {
                /* NB: it's safe to call cfs_hash_del() even if the lock isn't
                 * in exp_lock_hash. */
                /* In the function below, .hs_keycmp resolves to
                 * ldlm_export_lock_keycmp() */
                /* coverity[overrun-buffer-val] */
                cfs_hash_del(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle, &lock->l_exp_hash);
        }

        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export)
                class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif
        EXIT;
        return 1;
}
/**
 * Destroys a LDLM lock \a lock. Performs necessary locking first.
 */
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        int first;

        ENTRY;
        lock_res_and_lock(lock);
        first = ldlm_lock_destroy_internal(lock);
        unlock_res_and_lock(lock);

        /* drop reference from hashtable only for first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}
/**
 * Destroys a LDLM lock \a lock that is already locked.
 */
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
        int first;

        ENTRY;
        first = ldlm_lock_destroy_internal(lock);
        /* drop reference from hashtable only for first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}
/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}

static void lock_handle_free(void *lock, int size)
{
        LASSERT(size == sizeof(struct ldlm_lock));
        OBD_SLAB_FREE(lock, ldlm_lock_slab, size);
}

static struct portals_handle_ops lock_handle_ops = {
        .hop_addref = lock_handle_addref,
        .hop_free   = lock_handle_free,
};
/**
 * Allocate and initialize new lock structure.
 *
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        new lock will take over the refcount.
 * returns: lock with refcount 2 - one for current caller and one for remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, GFP_NOFS);
        if (lock == NULL)
                RETURN(NULL);

        spin_lock_init(&lock->l_lock);
        lock->l_resource = resource;
        lu_ref_add(&resource->lr_reference, "lock", lock);

        atomic_set(&lock->l_refc, 2);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_lru);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        INIT_LIST_HEAD(&lock->l_bl_ast);
        INIT_LIST_HEAD(&lock->l_cp_ast);
        INIT_LIST_HEAD(&lock->l_rk_ast);
        init_waitqueue_head(&lock->l_waitq);
        lock->l_blocking_lock = NULL;
        INIT_LIST_HEAD(&lock->l_sl_mode);
        INIT_LIST_HEAD(&lock->l_sl_policy);
        INIT_HLIST_NODE(&lock->l_exp_hash);
        INIT_HLIST_NODE(&lock->l_exp_flock_hash);

        lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
                             LDLM_NSS_LOCKS);
        INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, &lock_handle_ops);

        lu_ref_init(&lock->l_reference);
        lu_ref_add(&lock->l_reference, "hash", lock);
        lock->l_callback_timeout = 0;

#if LUSTRE_TRACKS_LOCK_EXP_REFS
        INIT_LIST_HEAD(&lock->l_exp_refs_link);
        lock->l_exp_refs_nr = 0;
        lock->l_exp_refs_target = NULL;
#endif
        INIT_LIST_HEAD(&lock->l_exp_list);

        RETURN(lock);
}
/**
 * Moves LDLM lock \a lock to another resource.
 * This is used on the client when the server returns some other lock than
 * requested (typically as a result of an intent operation).
 */
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              const struct ldlm_res_id *new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        struct ldlm_resource *newres;
        int type;
        ENTRY;

        LASSERT(ns_is_client(ns));

        lock_res_and_lock(lock);
        if (memcmp(new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        LASSERT(new_resid->name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        type = oldres->lr_type;
        unlock_res_and_lock(lock);

        newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (IS_ERR(newres))
                RETURN(PTR_ERR(newres));

        lu_ref_add(&newres->lr_reference, "lock", lock);
        /*
         * To flip the lock from the old to the new resource, lock, oldres and
         * newres have to be locked. Resource spin-locks are nested within
         * lock->l_lock, and are taken in the memory address order to avoid
         * dead-locks.
         */
        spin_lock(&lock->l_lock);
        oldres = lock->l_resource;
        if (oldres < newres) {
                lock_res(oldres);
                lock_res_nested(newres, LRT_NEW);
        } else {
                lock_res(newres);
                lock_res_nested(oldres, LRT_NEW);
        }
        LASSERT(memcmp(new_resid, &oldres->lr_name,
                       sizeof oldres->lr_name) != 0);
        lock->l_resource = newres;
        unlock_res(oldres);
        unlock_res_and_lock(lock);

        /* ...and the flowers are still standing! */
        lu_ref_del(&oldres->lr_reference, "lock", lock);
        ldlm_resource_putref(oldres);

        RETURN(0);
}
/** \defgroup ldlm_handles LDLM HANDLES
 * Ways to get hold of locks without any addresses.
 * @{
 */

/**
 * Fills in handle for LDLM lock \a lock into supplied \a lockh
 * Does not take any references.
 */
void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}
EXPORT_SYMBOL(ldlm_lock2handle);
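
/*
 * Usage sketch (hypothetical caller, not part of the original file): a
 * handle is a cookie that can safely cross the network; resolving it back
 * yields a referenced lock, or NULL if the lock is gone, so the reference
 * must be dropped afterwards:
 *
 *      struct lustre_handle lockh;
 *      struct ldlm_lock *lock;
 *
 *      ldlm_lock2handle(lock0, &lockh);        (lock0: some existing lock)
 *      lock = ldlm_handle2lock(&lockh);
 *      if (lock != NULL) {
 *              use_the_lock(lock);             (use_the_lock is hypothetical)
 *              LDLM_LOCK_PUT(lock);
 *      }
 */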
/**
 * Obtain a lock reference by handle.
 *
 * if \a flags: atomically get the lock and set the flags.
 *              Return NULL if flag already set
 */
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                                     __u64 flags)
{
        struct ldlm_lock *lock;
        ENTRY;

        LASSERT(handle);

        lock = class_handle2object(handle->cookie, NULL);
        if (lock == NULL)
                RETURN(NULL);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if ((flags == 0) && !ldlm_is_destroyed(lock)) {
                lu_ref_add(&lock->l_reference, "handle", current);
                RETURN(lock);
        }

        lock_res_and_lock(lock);

        LASSERT(lock->l_resource != NULL);

        lu_ref_add_atomic(&lock->l_reference, "handle", current);
        if (unlikely(ldlm_is_destroyed(lock))) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                RETURN(NULL);
        }

        /* If we're setting flags, make sure none of them are already set. */
        if (flags != 0) {
                if ((lock->l_flags & flags) != 0) {
                        unlock_res_and_lock(lock);
                        LDLM_LOCK_PUT(lock);
                        RETURN(NULL);
                }

                lock->l_flags |= flags;
        }

        unlock_res_and_lock(lock);
        RETURN(lock);
}
EXPORT_SYMBOL(__ldlm_handle2lock);
/** @} ldlm_handles */
/**
 * Fill in "on the wire" representation for given LDLM lock into supplied
 * lock descriptor \a desc structure.
 */
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
                                    &lock->l_policy_data,
                                    &desc->l_policy_data);
}
/**
 * Add a lock to list of conflicting locks to send AST to.
 *
 * Only add if we have not sent a blocking AST to the lock yet.
 */
static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                                  struct list_head *work_list)
{
        if (!ldlm_is_ast_sent(lock)) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                ldlm_set_ast_sent(lock);
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (ldlm_is_ast_discard_data(new))
                        ldlm_set_discard_data(lock);
                LASSERT(list_empty(&lock->l_bl_ast));
                list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock);
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}
/**
 * Add a lock to list of just granted locks to send completion AST to.
 */
static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
                                  struct list_head *work_list)
{
        if (!ldlm_is_cp_reqd(lock)) {
                ldlm_set_cp_reqd(lock);
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                LASSERT(list_empty(&lock->l_cp_ast));
                list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}
/**
 * Aggregator function to add AST work items into a list. Determines
 * what sort of an AST work needs to be done and calls the proper
 * adding function.
 * Must be called with lr_lock held.
 */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            struct list_head *work_list)
{
        ENTRY;
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
        EXIT;
}
/**
 * Add specified reader/writer reference to LDLM lock with handle \a lockh.
 * r/w reference type is determined by \a mode
 * Calls ldlm_lock_addref_internal.
 */
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_addref);
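
/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * reader/writer references pin a lock in use; each ldlm_lock_addref() must
 * be matched by an ldlm_lock_decref() with the same mode:
 *
 *      ldlm_lock_addref(&lockh, LCK_PR);
 *      read_protected_data();          (read_protected_data is hypothetical)
 *      ldlm_lock_decref(&lockh, LCK_PR);
 */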
/**
 * Helper function.
 * Add specified reader/writer reference to LDLM lock \a lock.
 * r/w reference type is determined by \a mode
 * Removes lock from LRU if it is there.
 * Assumes the LDLM lock is already locked.
 */
void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                lock->l_readers++;
                lu_ref_add_atomic(&lock->l_reference, "reader", lock);
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                lock->l_writers++;
                lu_ref_add_atomic(&lock->l_reference, "writer", lock);
        }
        LDLM_LOCK_GET(lock);
        lu_ref_add_atomic(&lock->l_reference, "user", lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}
/**
 * Attempts to add reader/writer reference to a lock with handle \a lockh, and
 * fails if lock is already LDLM_FL_CBPENDING or destroyed.
 *
 * \retval 0 success, lock was addref-ed
 *
 * \retval -EAGAIN lock is being canceled.
 */
int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;
        int result;

        result = -EAGAIN;
        lock = ldlm_handle2lock(lockh);
        if (lock != NULL) {
                lock_res_and_lock(lock);
                if (lock->l_readers != 0 || lock->l_writers != 0 ||
                    !ldlm_is_cbpending(lock)) {
                        ldlm_lock_addref_internal_nolock(lock, mode);
                        result = 0;
                }
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
        }
        return result;
}
EXPORT_SYMBOL(ldlm_lock_addref_try);
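
/*
 * Usage sketch (hypothetical caller, not part of the original file): unlike
 * ldlm_lock_addref(), the _try variant refuses locks that are already being
 * cancelled, so a caller can safely probe a cached handle:
 *
 *      if (ldlm_lock_addref_try(&lockh, LCK_PR) == 0) {
 *              use_cached_lock();      (use_cached_lock is hypothetical)
 *              ldlm_lock_decref(&lockh, LCK_PR);
 *      } else {
 *              the lock is being cancelled (-EAGAIN); re-enqueue instead
 *      }
 */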
/**
 * Add specified reader/writer reference to LDLM lock \a lock.
 * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
 * Only called for local locks.
 */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        lock_res_and_lock(lock);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res_and_lock(lock);
}
/**
 * Removes reader/writer reference for LDLM lock \a lock.
 * Assumes LDLM lock is already locked.
 * Only called in ldlm_flock_destroy and for local locks.
 * Does NOT add lock to LRU if no r/w references are left, to accommodate
 * flock locks that cannot be placed in LRU.
 */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lu_ref_del(&lock->l_reference, "reader", lock);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                LASSERT(lock->l_writers > 0);
                lu_ref_del(&lock->l_reference, "writer", lock);
                lock->l_writers--;
        }

        lu_ref_del(&lock->l_reference, "user", lock);
        LDLM_LOCK_RELEASE(lock);    /* matches the LDLM_LOCK_GET() in addref */
}
/**
 * Removes reader/writer reference for LDLM lock \a lock.
 * Locks LDLM lock first.
 * If the lock is determined to be client lock on a client and r/w refcount
 * drops to zero and the lock is not blocked, the lock is added to LRU lock
 * list.
 * For blocked LDLM locks, if r/w count drops to zero, blocking_ast is called.
 */
void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        ns = ldlm_lock_to_ns(lock);

        ldlm_lock_decref_internal_nolock(lock, mode);

        if (ldlm_is_local(lock) &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                ldlm_set_cbpending(lock);
        }

        if (!lock->l_readers && !lock->l_writers &&
            (ldlm_is_cbpending(lock) || lock->l_req_mode == LCK_GROUP)) {
                /* If we received a blocking AST and this was the last
                 * reference, run the callback.
                 * Group locks are special:
                 * They must not go in LRU, but they are not called back
                 * like non-group locks, instead they are manually released.
                 * They have an l_writers reference which they keep until
                 * they are manually released, so we remove them when they have
                 * no more reader or writer references. - LU-6368 */
                if (ldlm_is_ns_srv(lock) && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res_and_lock(lock);

                if (ldlm_is_fail_loc(lock))
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                if (ldlm_is_atomic_cb(lock) ||
                    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns_is_client(ns) &&
                   !lock->l_readers && !lock->l_writers &&
                   !ldlm_is_no_lru(lock) &&
                   !ldlm_is_bl_ast(lock)) {

                LDLM_DEBUG(lock, "add lock into lru list");

                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                ldlm_lock_add_to_lru(lock);
                unlock_res_and_lock(lock);

                if (ldlm_is_fail_loc(lock))
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
                 * are not supported by the server, otherwise, it is done on
                 * enqueue. */
                if (!exp_connect_cancelset(lock->l_conn_export) &&
                    !ns_connect_lru_resize(ns))
                        ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
        } else {
                LDLM_DEBUG(lock, "do not add lock into lru list");
                unlock_res_and_lock(lock);
        }

        EXIT;
}
/**
 * Decrease reader/writer refcount for LDLM lock with handle \a lockh
 */
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_decref);
/**
 * Decrease reader/writer refcount for LDLM lock with handle
 * \a lockh and mark it for subsequent cancellation once r/w refcount
 * drops to zero instead of putting into LRU.
 */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        ldlm_set_cbpending(lock);
        unlock_res_and_lock(lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
struct sl_insert_point {
        struct list_head *res_link;
        struct list_head *mode_link;
        struct list_head *policy_link;
};
/**
 * Finds a position to insert the new lock into granted lock list.
 *
 * Used for locks eligible for skiplist optimization.
 *
 * Parameters:
 *      queue [input]:  the granted list where search acts on;
 *      req [input]:    the lock whose position to be located;
 *      prev [output]:  positions within 3 lists to insert \a req to
 *
 * NOTE: called by
 *  - ldlm_grant_lock_with_skiplist
 */
static void search_granted_lock(struct list_head *queue,
                                struct ldlm_lock *req,
                                struct sl_insert_point *prev)
{
        struct list_head *tmp;
        struct ldlm_lock *lock, *mode_end, *policy_end;
        ENTRY;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                mode_end = list_entry(lock->l_sl_mode.prev,
                                      struct ldlm_lock, l_sl_mode);

                if (lock->l_req_mode != req->l_req_mode) {
                        /* jump to last lock of mode group */
                        tmp = &mode_end->l_res_link;
                        continue;
                }

                /* suitable mode group is found */
                if (lock->l_resource->lr_type == LDLM_PLAIN) {
                        /* insert point is last lock of the mode group */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else if (lock->l_resource->lr_type == LDLM_IBITS) {
                        for (;;) {
                                policy_end =
                                        list_entry(lock->l_sl_policy.prev,
                                                   struct ldlm_lock,
                                                   l_sl_policy);

                                if (lock->l_policy_data.l_inodebits.bits ==
                                    req->l_policy_data.l_inodebits.bits) {
                                        /* insert point is last lock of
                                         * the policy group */
                                        prev->res_link =
                                                &policy_end->l_res_link;
                                        prev->mode_link =
                                                &policy_end->l_sl_mode;
                                        prev->policy_link =
                                                &policy_end->l_sl_policy;
                                        EXIT;
                                        return;
                                }

                                if (policy_end == mode_end)
                                        /* done with mode group */
                                        break;

                                /* go to next policy group within mode group */
                                tmp = policy_end->l_res_link.next;
                                lock = list_entry(tmp, struct ldlm_lock,
                                                  l_res_link);
                        } /* loop over policy groups within the mode group */

                        /* insert point is last lock of the mode group,
                         * new policy group is started */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else {
                        LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
                        LBUG();
                }
        }

        /* insert point is last lock on the queue,
         * new mode group and new policy group are started */
        prev->res_link = queue->prev;
        prev->mode_link = &req->l_sl_mode;
        prev->policy_link = &req->l_sl_policy;
        EXIT;
}
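
/*
 * Illustration (not from the original source): the granted list is chained
 * into groups by l_sl_mode/l_sl_policy so the search above can skip a whole
 * group in one step. For example, with inodebits locks:
 *
 *      granted: [PR,bits=A] [PR,bits=A] [PR,bits=B] [PW,bits=A]
 *
 * l_sl_mode links the first and last lock of each mode group (the PR run,
 * then PW), and l_sl_policy links the first and last lock of each policy
 * group within a mode group (the two bits=A locks, then bits=B).
 */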
/**
 * Add a lock into resource granted list after a position described by
 * \a prev.
 */
static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
                                       struct sl_insert_point *prev)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        ldlm_resource_dump(D_INFO, res);
        LDLM_DEBUG(lock, "About to add lock:");

        if (ldlm_is_destroyed(lock)) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                return;
        }

        LASSERT(list_empty(&lock->l_res_link));
        LASSERT(list_empty(&lock->l_sl_mode));
        LASSERT(list_empty(&lock->l_sl_policy));

        /*
         * lock->link == prev->link means lock is first starting the group.
         * Don't re-add to itself to suppress kernel warnings.
         */
        if (&lock->l_res_link != prev->res_link)
                list_add(&lock->l_res_link, prev->res_link);
        if (&lock->l_sl_mode != prev->mode_link)
                list_add(&lock->l_sl_mode, prev->mode_link);
        if (&lock->l_sl_policy != prev->policy_link)
                list_add(&lock->l_sl_policy, prev->policy_link);

        EXIT;
}
/**
 * Add a lock to granted list on a resource maintaining skiplist
 * correctness.
 */
static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
        struct sl_insert_point prev;
        ENTRY;

        LASSERT(lock->l_req_mode == lock->l_granted_mode);

        search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
        ldlm_granted_list_add_lock(lock, &prev);
        EXIT;
}
/**
 * Perform lock granting bookkeeping.
 *
 * Includes putting the lock into granted list and updating lock mode.
 * NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        lock->l_granted_mode = lock->l_req_mode;

        if (work_list && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, work_list);

        /* We should not add locks to granted list in the following cases:
         * - this is an UNLOCK but not a real lock;
         * - this is a TEST lock;
         * - this is a F_CANCELLK lock (async flock has req_mode == 0)
         * - this is a deadlock (flock cannot be granted) */
        if (lock->l_req_mode == 0 ||
            lock->l_req_mode == LCK_NL ||
            ldlm_is_test_lock(lock) ||
            ldlm_is_flock_deadlock(lock))
                RETURN_EXIT;

        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
                ldlm_grant_lock_with_skiplist(lock);
        else if (res->lr_type == LDLM_EXTENT)
                ldlm_extent_add_lock(res, lock);
        else
                ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
        EXIT;
}
/**
 * Describe the overlap between two locks. itree_overlap_cb data.
 */
struct lock_match_data {
        struct ldlm_lock        *lmd_old;
        struct ldlm_lock        *lmd_lock;
        ldlm_mode_t             *lmd_mode;
        ldlm_policy_data_t      *lmd_policy;
        __u64                    lmd_flags;
        int                      lmd_unref;
};
/**
 * Check if the given @lock meets the criteria for a match.
 * A reference on the lock is taken if matched.
 *
 * \param lock     test-against this lock
 * \param data     parameters
 */
static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
{
        ldlm_policy_data_t *lpol = &lock->l_policy_data;
        ldlm_mode_t match;

        if (lock == data->lmd_old)
                return INTERVAL_ITER_STOP;

        /* Check if this lock can be matched.
         * Used by LU-2919(exclusive open) for open lease lock */
        if (ldlm_is_excl(lock))
                return INTERVAL_ITER_CONT;

        /* llite sometimes wants to match locks that will be
         * canceled when their users drop, but we allow it to match
         * if it passes in CBPENDING and the lock still has users.
         * this is generally only going to be used by children
         * whose parents already hold a lock so forward progress
         * can still happen. */
        if (ldlm_is_cbpending(lock) &&
            !(data->lmd_flags & LDLM_FL_CBPENDING))
                return INTERVAL_ITER_CONT;
        if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
            lock->l_readers == 0 && lock->l_writers == 0)
                return INTERVAL_ITER_CONT;

        if (!(lock->l_req_mode & *data->lmd_mode))
                return INTERVAL_ITER_CONT;
        match = lock->l_req_mode;

        switch (lock->l_resource->lr_type) {
        case LDLM_EXTENT:
                if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
                    lpol->l_extent.end < data->lmd_policy->l_extent.end)
                        return INTERVAL_ITER_CONT;

                if (unlikely(match == LCK_GROUP) &&
                    data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
                    lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
                        return INTERVAL_ITER_CONT;
                break;
        case LDLM_IBITS:
                /* We match if we have existing lock with same or wider set
                 * of bits. */
                if ((lpol->l_inodebits.bits &
                     data->lmd_policy->l_inodebits.bits) !=
                    data->lmd_policy->l_inodebits.bits)
                        return INTERVAL_ITER_CONT;
                break;
        default:
                break;
        }

        /* A lock that is being destroyed or cancelled ("GONE") can only be
         * matched if the caller explicitly asked for unref'd locks. */
        if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
                return INTERVAL_ITER_CONT;

        if ((data->lmd_flags & LDLM_FL_LOCAL_ONLY) &&
            !ldlm_is_local(lock))
                return INTERVAL_ITER_CONT;

        if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
                LDLM_LOCK_GET(lock);
                ldlm_lock_touch_in_lru(lock);
        } else {
                ldlm_lock_addref_internal_nolock(lock, match);
        }

        *data->lmd_mode = match;
        data->lmd_lock = lock;

        return INTERVAL_ITER_STOP;
}
static unsigned int itree_overlap_cb(struct interval_node *in, void *args)
{
        struct ldlm_interval *node = to_ldlm_interval(in);
        struct lock_match_data *data = args;
        struct ldlm_lock *lock;
        int rc;

        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                rc = lock_matches(lock, data);
                if (rc == INTERVAL_ITER_STOP)
                        return INTERVAL_ITER_STOP;
        }
        return INTERVAL_ITER_CONT;
}
/**
 * Search for a lock with given parameters in interval trees.
 *
 * \param res      search for a lock in this resource
 * \param data     parameters
 *
 * \retval a referenced lock or NULL.
 */
static struct ldlm_lock *search_itree(struct ldlm_resource *res,
                                      struct lock_match_data *data)
{
        struct interval_node_extent ext = {
                .start  = data->lmd_policy->l_extent.start,
                .end    = data->lmd_policy->l_extent.end
        };
        int idx;

        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                struct ldlm_interval_tree *tree = &res->lr_itree[idx];

                if (tree->lit_root == NULL)
                        continue;

                if (!(tree->lit_mode & *data->lmd_mode))
                        continue;

                interval_search(tree->lit_root, &ext,
                                itree_overlap_cb, data);
        }
        return data->lmd_lock;
}
/**
 * Search for a lock with given properties in a queue.
 *
 * \param queue    search for a lock in this queue
 * \param data     parameters
 *
 * \retval a referenced lock or NULL.
 */
static struct ldlm_lock *search_queue(struct list_head *queue,
                                      struct lock_match_data *data)
{
        struct ldlm_lock *lock;
        int rc;

        list_for_each_entry(lock, queue, l_res_link) {
                rc = lock_matches(lock, data);
                if (rc == INTERVAL_ITER_STOP)
                        return data->lmd_lock;
        }
        return NULL;
}
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
{
        if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
                lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
                wake_up_all(&lock->l_waitq);
        }
}
EXPORT_SYMBOL(ldlm_lock_fail_match_locked);

void ldlm_lock_fail_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        ldlm_lock_fail_match_locked(lock);
        unlock_res_and_lock(lock);
}
EXPORT_SYMBOL(ldlm_lock_fail_match);
/**
 * Mark lock as "matchable" by OST.
 *
 * Used to prevent certain races in LOV/OSC where the lock is granted, but LVB
 * is not yet valid.
 * Assumes LDLM lock is already locked.
 */
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
        ldlm_set_lvb_ready(lock);
        wake_up_all(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);

/**
 * Mark lock as "matchable" by OST.
 * Locks the lock and then \see ldlm_lock_allow_match_locked
 */
void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        ldlm_lock_allow_match_locked(lock);
        unlock_res_and_lock(lock);
}
EXPORT_SYMBOL(ldlm_lock_allow_match);
/**
 * Attempt to find a lock with specified properties.
 *
 * Typically returns a reference to matched lock unless LDLM_FL_TEST_LOCK is
 * set in \a flags.
 *
 * Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (ie, connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * \retval 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock
 *
 * We also check security context; if that check fails, we simply return 0 (to
 * keep caller code unchanged), and the context failure will be discovered by
 * the caller sometime later.
 */
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                            const struct ldlm_res_id *res_id, ldlm_type_t type,
                            ldlm_policy_data_t *policy, ldlm_mode_t mode,
                            struct lustre_handle *lockh, int unref)
{
        struct lock_match_data data = {
                .lmd_old        = NULL,
                .lmd_lock       = NULL,
                .lmd_mode       = &mode,
                .lmd_policy     = policy,
                .lmd_flags      = flags,
                .lmd_unref      = unref,
        };
        struct ldlm_resource *res;
        struct ldlm_lock *lock;
        int rc = 0;
        ENTRY;

        if (ns == NULL) {
                data.lmd_old = ldlm_handle2lock(lockh);
                LASSERT(data.lmd_old != NULL);

                ns = ldlm_lock_to_ns(data.lmd_old);
                res_id = &data.lmd_old->l_resource->lr_name;
                type = data.lmd_old->l_resource->lr_type;
                *data.lmd_mode = data.lmd_old->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, res_id, type, 0);
        if (IS_ERR(res)) {
                LASSERT(data.lmd_old == NULL);
                RETURN(0);
        }

        LDLM_RESOURCE_ADDREF(res);
        lock_res(res);

        if (res->lr_type == LDLM_EXTENT)
                lock = search_itree(res, &data);
        else
                lock = search_queue(&res->lr_granted, &data);
        if (lock != NULL)
                GOTO(out, rc = 1);
        if (flags & LDLM_FL_BLOCK_GRANTED)
                GOTO(out, rc = 0);
        lock = search_queue(&res->lr_converting, &data);
        if (lock != NULL)
                GOTO(out, rc = 1);
        lock = search_queue(&res->lr_waiting, &data);
        if (lock != NULL)
                GOTO(out, rc = 1);

        EXIT;
 out:
        unlock_res(res);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if ((flags & LDLM_FL_LVB_READY) &&
                    (!ldlm_is_lvb_ready(lock))) {
                        __u64 wait_flags = LDLM_FL_LVB_READY |
                                LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
                        struct l_wait_info lwi;

                        if (lock->l_completion_ast) {
                                int err = lock->l_completion_ast(lock,
                                                        LDLM_FL_WAIT_NOREPROC,
                                                        NULL);
                                if (err) {
                                        if (flags & LDLM_FL_TEST_LOCK)
                                                LDLM_LOCK_RELEASE(lock);
                                        else
                                                ldlm_lock_decref_internal(lock,
                                                                          mode);
                                        rc = 0;
                                        goto out2;
                                }
                        }

                        lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
                                               NULL, LWI_ON_SIGNAL_NOOP, NULL);

                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                        l_wait_event(lock->l_waitq,
                                     lock->l_flags & wait_flags,
                                     &lwi);
                        if (!ldlm_is_lvb_ready(lock)) {
                                if (flags & LDLM_FL_TEST_LOCK)
                                        LDLM_LOCK_RELEASE(lock);
                                else
                                        ldlm_lock_decref_internal(lock, mode);
                                rc = 0;
                        }
                }
        }
 out2:
        if (rc) {
                LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[2] : policy->l_extent.start,
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[3] : policy->l_extent.end);

                /* check user's security context */
                if (lock->l_conn_export &&
                    sptlrpc_import_check_ctx(
                                class_exp2cliimp(lock->l_conn_export))) {
                        if (!(flags & LDLM_FL_TEST_LOCK))
                                ldlm_lock_decref_internal(lock, mode);
                        rc = 0;
                }

                if (flags & LDLM_FL_TEST_LOCK)
                        LDLM_LOCK_RELEASE(lock);

        } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
                                  type, mode, res_id->name[0], res_id->name[1],
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[2] : policy->l_extent.start,
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[3] : policy->l_extent.end);
        }
        if (data.lmd_old != NULL)
                LDLM_LOCK_PUT(data.lmd_old);

        return rc ? mode : 0;
}
EXPORT_SYMBOL(ldlm_lock_match);
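
/*
 * Usage sketch (hypothetical caller, not part of the original file): look up
 * a cached PR/PW extent lock covering a whole object; on a hit the matched
 * mode comes back and lockh holds an addref'ed lock that must later be
 * dropped with ldlm_lock_decref():
 *
 *      ldlm_policy_data_t policy = {
 *              .l_extent = { .start = 0, .end = OBD_OBJECT_EOF },
 *      };
 *      struct lustre_handle lockh;
 *      ldlm_mode_t mode;
 *
 *      mode = ldlm_lock_match(ns, LDLM_FL_LVB_READY, res_id, LDLM_EXTENT,
 *                             &policy, LCK_PR | LCK_PW, &lockh, 0);
 *      if (mode != 0) {
 *              use_the_lock();         (use_the_lock is hypothetical)
 *              ldlm_lock_decref(&lockh, mode);
 *      }
 */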
ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
                                        __u64 *bits)
{
        struct ldlm_lock *lock;
        ldlm_mode_t mode = 0;
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        if (lock != NULL) {
                lock_res_and_lock(lock);
                if (LDLM_HAVE_MASK(lock, GONE))
                        GOTO(out, mode);

                if (ldlm_is_cbpending(lock) &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        GOTO(out, mode);

                if (bits)
                        *bits = lock->l_policy_data.l_inodebits.bits;
                mode = lock->l_granted_mode;
                ldlm_lock_addref_internal_nolock(lock, mode);
        }

        EXIT;

out:
        if (lock != NULL) {
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
        }
        return mode;
}
EXPORT_SYMBOL(ldlm_revalidate_lock_handle);
/** The caller must guarantee that the buffer is large enough. */
int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
                  enum req_location loc, void *data, int size)
{
        void *lvb;
        ENTRY;

        LASSERT(data != NULL);
        LASSERT(size >= 0);

        switch (lock->l_lvb_type) {
        case LVB_T_OST:
                if (size == sizeof(struct ost_lvb)) {
                        if (loc == RCL_CLIENT)
                                lvb = req_capsule_client_swab_get(pill,
                                                &RMF_DLM_LVB,
                                                lustre_swab_ost_lvb);
                        else
                                lvb = req_capsule_server_swab_get(pill,
                                                &RMF_DLM_LVB,
                                                lustre_swab_ost_lvb);
                        if (unlikely(lvb == NULL)) {
                                LDLM_ERROR(lock, "no LVB");
                                RETURN(-EPROTO);
                        }

                        memcpy(data, lvb, size);
                } else if (size == sizeof(struct ost_lvb_v1)) {
                        struct ost_lvb *olvb = data;

                        if (loc == RCL_CLIENT)
                                lvb = req_capsule_client_swab_get(pill,
                                                &RMF_DLM_LVB,
                                                lustre_swab_ost_lvb_v1);
                        else
                                lvb = req_capsule_server_sized_swab_get(pill,
                                                &RMF_DLM_LVB, size,
                                                lustre_swab_ost_lvb_v1);
                        if (unlikely(lvb == NULL)) {
                                LDLM_ERROR(lock, "no LVB");
                                RETURN(-EPROTO);
                        }

                        memcpy(data, lvb, size);
                        olvb->lvb_mtime_ns = 0;
                        olvb->lvb_atime_ns = 0;
                        olvb->lvb_ctime_ns = 0;
                } else {
                        LDLM_ERROR(lock, "Replied unexpected ost LVB size %d",
                                   size);
                        RETURN(-EINVAL);
                }
                break;
        case LVB_T_LQUOTA:
                if (size == sizeof(struct lquota_lvb)) {
                        if (loc == RCL_CLIENT)
                                lvb = req_capsule_client_swab_get(pill,
                                                &RMF_DLM_LVB,
                                                lustre_swab_lquota_lvb);
                        else
                                lvb = req_capsule_server_swab_get(pill,
                                                &RMF_DLM_LVB,
                                                lustre_swab_lquota_lvb);
                        if (unlikely(lvb == NULL)) {
                                LDLM_ERROR(lock, "no LVB");
                                RETURN(-EPROTO);
                        }

                        memcpy(data, lvb, size);
                } else {
                        LDLM_ERROR(lock,
                                   "Replied unexpected lquota LVB size %d",
                                   size);
                        RETURN(-EINVAL);
                }
                break;
        case LVB_T_LAYOUT:
                if (size == 0)
                        break;

                if (loc == RCL_CLIENT)
                        lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
                else
                        lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
                if (unlikely(lvb == NULL)) {
                        LDLM_ERROR(lock, "no LVB");
                        RETURN(-EPROTO);
                }

                memcpy(data, lvb, size);
                break;
        default:
                LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type);
                libcfs_debug_dumpstack(NULL);
                RETURN(-EINVAL);
        }

        RETURN(0);
}
/**
 * Create and fill in new LDLM lock with specified properties.
 * Returns a referenced lock
 */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   const struct ldlm_res_id *res_id,
                                   ldlm_type_t type,
                                   ldlm_mode_t mode,
                                   const struct ldlm_callback_suite *cbs,
                                   void *data, __u32 lvb_len,
                                   enum lvb_type lvb_type)
{
        struct ldlm_lock        *lock;
        struct ldlm_resource    *res;
        int                     rc;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, type, 1);
        if (IS_ERR(res))
                RETURN(ERR_CAST(res));

        lock = ldlm_lock_new(res);
        if (lock == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_pid = current_pid();
        if (ns_is_server(ns))
                ldlm_set_ns_srv(lock);
        if (cbs) {
                lock->l_blocking_ast = cbs->lcs_blocking;
                lock->l_completion_ast = cbs->lcs_completion;
                lock->l_glimpse_ast = cbs->lcs_glimpse;
        }

        lock->l_tree_node = NULL;
        /* if this is the extent lock, allocate the interval tree node */
        if (type == LDLM_EXTENT)
                if (ldlm_interval_alloc(lock) == NULL)
                        GOTO(out, rc = -ENOMEM);

        if (lvb_len) {
                lock->l_lvb_len = lvb_len;
                OBD_ALLOC_LARGE(lock->l_lvb_data, lvb_len);
                if (lock->l_lvb_data == NULL)
                        GOTO(out, rc = -ENOMEM);
        }

        lock->l_lvb_type = lvb_type;
        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
                GOTO(out, rc = -ENOENT);

        RETURN(lock);

out:
        ldlm_lock_destroy(lock);
        LDLM_LOCK_RELEASE(lock);
        RETURN(ERR_PTR(rc));
}
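
/*
 * Usage sketch (hypothetical client-side caller, not part of the original
 * file; ns, res_id and cbs are assumed to exist): a lock is created first
 * and then handed to ldlm_lock_enqueue(), which grants it or parks it on a
 * waiting queue according to *flags:
 *
 *      struct ldlm_lock *lock;
 *      __u64 flags = 0;
 *
 *      lock = ldlm_lock_create(ns, res_id, LDLM_EXTENT, LCK_PR, &cbs,
 *                              NULL, 0, LVB_T_NONE);
 *      if (!IS_ERR(lock))
 *              ldlm_lock_enqueue(ns, &lock, NULL, &flags);
 */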
/**
 * Enqueue (request) a lock.
 *
 * Does not block. As a result of enqueue the lock would be put
 * into granted or waiting list.
 *
 * If namespace has intent policy set and the lock has LDLM_FL_HAS_INTENT flag
 * set, skip all the enqueueing and delegate lock processing to intent policy
 * function.
 */
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, __u64 *flags)
{
        struct ldlm_lock *lock = *lockp;
        struct ldlm_resource *res = lock->l_resource;
        int local = ns_is_client(ldlm_res_to_ns(res));
#ifdef HAVE_SERVER_SUPPORT
        ldlm_processing_policy policy;
#endif
        ldlm_error_t rc = ELDLM_OK;
        struct ldlm_interval *node = NULL;
        ENTRY;

        /* policies are not executed on the client or during replay */
        if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
            && !local && ns->ns_policy) {
                rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
                                   NULL);
                if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp. If it's not the same as the
                         * one we passed in, then destroy the old one and our
                         * work here is done. */
                        if (lock != *lockp) {
                                ldlm_lock_destroy(lock);
                                LDLM_LOCK_RELEASE(lock);
                        }
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        RETURN(0);
                } else if (rc != ELDLM_OK ||
                           (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
        }

        if (*flags & LDLM_FL_RESENT) {
                /* Reconstruct LDLM_FL_SRV_ENQ_MASK @flags for reply.
                 * Set LOCK_CHANGED always.
                 * Check if the lock is granted for BLOCK_GRANTED.
                 * Take NO_TIMEOUT from the lock as it is inherited through
                 * LDLM_FL_INHERIT_MASK */
                *flags |= LDLM_FL_LOCK_CHANGED;
                if (lock->l_req_mode != lock->l_granted_mode)
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                *flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
                RETURN(ELDLM_OK);
        }

        /* For a replaying lock, it might be already in granted list. So
         * unlinking the lock will cause the interval node to be freed, we
         * have to allocate the interval node early otherwise we can't regrant
         * this lock in the future. - jay */
        if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
                OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);

        lock_res_and_lock(lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted
                 * before we got a chance to actually enqueue it. We don't
                 * need to do anything else. */
                *flags &= ~LDLM_FL_BLOCKED_MASK;
                GOTO(out, rc = ELDLM_OK);
        }

        ldlm_resource_unlink_lock(lock);
        if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
                if (node == NULL) {
                        ldlm_lock_destroy_nolock(lock);
                        GOTO(out, rc = -ENOMEM);
                }

                INIT_LIST_HEAD(&node->li_group);
                ldlm_interval_attach(node, lock);
                node = NULL;
        }

        /* Some flags from the enqueue want to make it into the AST, via the
         * lock's l_flags. */
        if (*flags & LDLM_FL_AST_DISCARD_DATA)
                ldlm_set_ast_discard_data(lock);
        if (*flags & LDLM_FL_TEST_LOCK)
                ldlm_set_test_lock(lock);

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below). In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL);
                GOTO(out, rc = ELDLM_OK);
#ifdef HAVE_SERVER_SUPPORT
        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                        GOTO(out, rc = ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                        GOTO(out, rc = ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL);
                        GOTO(out, rc = ELDLM_OK);
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        policy = ldlm_processing_policy_table[res->lr_type];
        policy(lock, flags, 1, &rc, NULL);
        GOTO(out, rc);
#else
        } else {
                CERROR("This is client-side-only module, cannot handle "
                       "LDLM_NAMESPACE_SERVER resource type lock.\n");
                LBUG();
        }
#endif

out:
        unlock_res_and_lock(lock);
        if (node)
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        RETURN(rc);
}
#ifdef HAVE_SERVER_SUPPORT
/**
 * Iterate through all waiting locks on a given resource queue and attempt to
 * grant them.
 *
 * Must be called with resource lock held.
 */
int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
                         struct list_head *work_list)
{
        struct list_head *tmp, *pos;
        ldlm_processing_policy policy;
        __u64 flags;
        int rc = LDLM_ITER_CONTINUE;
        ldlm_error_t err;
        ENTRY;

        check_res_locked(res);

        policy = ldlm_processing_policy_table[res->lr_type];
        LASSERT(policy);

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;

                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                flags = 0;
                rc = policy(pending, &flags, 0, &err, work_list);
                if (rc != LDLM_ITER_CONTINUE)
                        break;
        }

        RETURN(rc);
}
/**
 * Process a call to blocking AST callback for a lock in ast_work list
 */
static int
ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
        struct ldlm_cb_set_arg *arg = opaq;
        struct ldlm_lock_desc d;
        int rc;
        struct ldlm_lock *lock;
        ENTRY;

        if (list_empty(arg->list))
                RETURN(-ENOENT);

        lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);

        /* nobody should touch l_bl_ast */
        lock_res_and_lock(lock);
        list_del_init(&lock->l_bl_ast);

        LASSERT(ldlm_is_ast_sent(lock));
        LASSERT(lock->l_bl_ast_run == 0);
        LASSERT(lock->l_blocking_lock);
        lock->l_bl_ast_run++;
        unlock_res_and_lock(lock);

        ldlm_lock2desc(lock->l_blocking_lock, &d);

        rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING);
        LDLM_LOCK_RELEASE(lock->l_blocking_lock);
        lock->l_blocking_lock = NULL;
        LDLM_LOCK_RELEASE(lock);

        RETURN(rc);
}
/**
 * Process a call to completion AST callback for a lock in ast_work list
 */
static int
ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
        struct ldlm_cb_set_arg *arg = opaq;
        int rc = 0;
        struct ldlm_lock *lock;
        ldlm_completion_callback completion_callback;
        ENTRY;

        if (list_empty(arg->list))
                RETURN(-ENOENT);

        lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);

        /* It's possible to receive a completion AST before we've set
         * the l_completion_ast pointer: either because the AST arrived
         * before the reply, or simply because there's a small race
         * window between receiving the reply and finishing the local
         * enqueue. (bug 842)
         *
         * This can't happen with the blocking_ast, however, because we
         * will never call the local blocking_ast until we drop our
         * reader/writer reference, which we won't do until we get the
         * reply and finish enqueueing. */

        /* nobody should touch l_cp_ast */
        lock_res_and_lock(lock);
        list_del_init(&lock->l_cp_ast);
        LASSERT(ldlm_is_cp_reqd(lock));
        /* save l_completion_ast since it can be changed by
         * mds_intent_policy(), see bug 14225 */
        completion_callback = lock->l_completion_ast;
        ldlm_clear_cp_reqd(lock);
        unlock_res_and_lock(lock);

        if (completion_callback != NULL)
                rc = completion_callback(lock, 0, (void *)arg);
        LDLM_LOCK_RELEASE(lock);

        RETURN(rc);
}
/**
 * Process a call to revocation AST callback for a lock in ast_work list
 */
static int
ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
        struct ldlm_cb_set_arg *arg = opaq;
        struct ldlm_lock_desc desc;
        int rc;
        struct ldlm_lock *lock;
        ENTRY;

        if (list_empty(arg->list))
                RETURN(-ENOENT);

        lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
        list_del_init(&lock->l_rk_ast);

        /* the desc just pretends to be exclusive */
        ldlm_lock2desc(lock, &desc);
        desc.l_req_mode = LCK_EX;
        desc.l_granted_mode = 0;

        rc = lock->l_blocking_ast(lock, &desc, (void *)arg, LDLM_CB_BLOCKING);
        LDLM_LOCK_RELEASE(lock);

        RETURN(rc);
}
/**
 * Process a call to glimpse AST callback for a lock in ast_work list
 */
int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
        struct ldlm_cb_set_arg          *arg = opaq;
        struct ldlm_glimpse_work        *gl_work;
        struct ldlm_lock                *lock;
        int                              rc = 0;
        ENTRY;

        if (list_empty(arg->list))
                RETURN(-ENOENT);

        gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
                             gl_list);
        list_del_init(&gl_work->gl_list);

        lock = gl_work->gl_lock;

        /* transfer the glimpse descriptor to ldlm_cb_set_arg */
        arg->gl_desc = gl_work->gl_desc;

        /* invoke the actual glimpse callback */
        if (lock->l_glimpse_ast(lock, (void *)arg) == 0)
                rc = 1;

        LDLM_LOCK_RELEASE(lock);

        if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0)
                OBD_FREE_PTR(gl_work);

        RETURN(rc);
}
/**
 * Process list of locks in need of ASTs being sent.
 *
 * Used on server to send multiple ASTs together instead of sending one by
 * one.
 */
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
                      ldlm_desc_ast_t ast_type)
{
        struct ldlm_cb_set_arg *arg;
        set_producer_func       work_ast_lock;
        int                     rc;

        if (list_empty(rpc_list))
                RETURN(0);

        OBD_ALLOC_PTR(arg);
        if (arg == NULL)
                RETURN(-ENOMEM);

        atomic_set(&arg->restart, 0);
        arg->list = rpc_list;

        switch (ast_type) {
        case LDLM_WORK_BL_AST:
                arg->type = LDLM_BL_CALLBACK;
                work_ast_lock = ldlm_work_bl_ast_lock;
                break;
        case LDLM_WORK_CP_AST:
                arg->type = LDLM_CP_CALLBACK;
                work_ast_lock = ldlm_work_cp_ast_lock;
                break;
        case LDLM_WORK_REVOKE_AST:
                arg->type = LDLM_BL_CALLBACK;
                work_ast_lock = ldlm_work_revoke_ast_lock;
                break;
        case LDLM_WORK_GL_AST:
                arg->type = LDLM_GL_CALLBACK;
                work_ast_lock = ldlm_work_gl_ast_lock;
                break;
        default:
                LBUG();
        }

        /* We create a ptlrpc request set with flow control extension.
         * This request set will use the work_ast_lock function to produce new
         * requests and will send a new request each time one completes, in
         * order to cap the number of requests in flight at
         * ns_max_parallel_ast. */
        arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
                                     work_ast_lock, arg);
        if (arg->set == NULL)
                GOTO(out, rc = -ENOMEM);

        ptlrpc_set_wait(arg->set);
        ptlrpc_set_destroy(arg->set);

        rc = atomic_read(&arg->restart) ? -ERESTART : 0;
        GOTO(out, rc);
out:
        OBD_FREE_PTR(arg);
        return rc;
}
static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
{
        ldlm_reprocess_all(res);
        return LDLM_ITER_CONTINUE;
}

static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                              struct hlist_node *hnode, void *arg)
{
        struct ldlm_resource *res = cfs_hash_object(hs, hnode);
        int rc;

        rc = reprocess_one_queue(res, arg);

        return rc == LDLM_ITER_STOP;
}

/**
 * Iterate through all resources on a namespace attempting to grant waiting
 * locks.
 */
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
        ENTRY;

        if (ns != NULL) {
                cfs_hash_for_each_nolock(ns->ns_rs_hash,
                                         ldlm_reprocess_res, NULL);
        }
        EXIT;
}
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
/**
 * Try to grant all waiting locks on a resource.
 *
 * Calls ldlm_reprocess_queue on converting and waiting queues.
 *
 * Typically called after some resource locks are cancelled to see
 * if anything could be granted as a result of the cancellation.
 */
void ldlm_reprocess_all(struct ldlm_resource *res)
{
        struct list_head rpc_list;
#ifdef HAVE_SERVER_SUPPORT
        int rc;
        ENTRY;

        INIT_LIST_HEAD(&rpc_list);
        /* Local lock trees don't get reprocessed. */
        if (ns_is_client(ldlm_res_to_ns(res))) {
                EXIT;
                return;
        }

restart:
        lock_res(res);
        rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
        if (rc == LDLM_ITER_CONTINUE)
                ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
        unlock_res(res);

        rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
                               LDLM_WORK_CP_AST);
        if (rc == -ERESTART) {
                LASSERT(list_empty(&rpc_list));
                goto restart;
        }
#else
        ENTRY;

        INIT_LIST_HEAD(&rpc_list);
        if (!ns_is_client(ldlm_res_to_ns(res))) {
                CERROR("This is client-side-only module, cannot handle "
                       "LDLM_NAMESPACE_SERVER resource type lock.\n");
                LBUG();
        }
#endif
        EXIT;
}
EXPORT_SYMBOL(ldlm_reprocess_all);
/**
 * Helper function to call blocking AST for LDLM lock \a lock in a
 * "cancelling" mode.
 */
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
        check_res_locked(lock->l_resource);
        if (!ldlm_is_cancel(lock)) {
                ldlm_set_cancel(lock);
                if (lock->l_blocking_ast) {
                        unlock_res_and_lock(lock);
                        lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
                                             LDLM_CB_CANCELING);
                        lock_res_and_lock(lock);
                } else {
                        LDLM_DEBUG(lock, "no blocking ast");
                }
        }
        ldlm_set_bl_done(lock);
}
/**
 * Remove skiplist-enabled LDLM lock \a req from granted list
 */
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
{
        if (req->l_resource->lr_type != LDLM_PLAIN &&
            req->l_resource->lr_type != LDLM_IBITS)
                return;

        list_del_init(&req->l_sl_policy);
        list_del_init(&req->l_sl_mode);
}
/**
 * Attempts to cancel LDLM lock \a lock that has no reader/writer references.
 */
void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        res = lock->l_resource;
        ns  = ldlm_res_to_ns(res);

        /* Please do not, no matter how tempting, remove this LBUG without
         * talking to me first. -phik */
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                LBUG();
        }

        if (ldlm_is_waited(lock))
                ldlm_del_waiting_lock(lock);

        /* Releases cancel callback. */
        ldlm_cancel_callback(lock);

        LASSERT(!ldlm_is_waited(lock));

        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy_nolock(lock);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_pool_del(&ns->ns_pool, lock);

        /* Make sure we will not be called again for the same lock, which is
         * possible if we do not zero out lock->l_granted_mode. */
        lock->l_granted_mode = LCK_MINMODE;
        unlock_res_and_lock(lock);

        EXIT;
}
EXPORT_SYMBOL(ldlm_lock_cancel);
/**
 * Set opaque data into the lock that only makes sense to upper layer.
 */
int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        int rc = -EINVAL;
        ENTRY;

        if (lock) {
                if (lock->l_ast_data == NULL)
                        lock->l_ast_data = data;
                if (lock->l_ast_data == data)
                        rc = 0;
                LDLM_LOCK_PUT(lock);
        }
        RETURN(rc);
}
EXPORT_SYMBOL(ldlm_lock_set_data);
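
/*
 * Usage sketch (hypothetical caller, not part of the original file): attach
 * a private pointer, e.g. an inode, to a lock; the call only succeeds if no
 * different data is already set:
 *
 *      if (ldlm_lock_set_data(&lockh, my_inode) != 0)
 *              handle_conflicting_ast_data();
 *
 * (my_inode and handle_conflicting_ast_data are hypothetical.)
 */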
struct export_cl_data {
        struct obd_export       *ecl_exp;
        int                      ecl_loop;
};
/**
 * Iterator function for ldlm_cancel_locks_for_export.
 * Cancels passed locks.
 */
static int
ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                struct hlist_node *hnode, void *data)
{
        struct export_cl_data   *ecl = (struct export_cl_data *)data;
        struct obd_export       *exp = ecl->ecl_exp;
        struct ldlm_lock        *lock = cfs_hash_object(hs, hnode);
        struct ldlm_resource    *res;

        res = ldlm_resource_getref(lock->l_resource);
        LDLM_LOCK_GET(lock);

        LDLM_DEBUG(lock, "export %p", exp);
        ldlm_res_lvbo_update(res, NULL, 1);
        ldlm_lock_cancel(lock);
        ldlm_reprocess_all(res);
        ldlm_resource_putref(res);
        LDLM_LOCK_RELEASE(lock);

        ecl->ecl_loop++;
        if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
                CDEBUG(D_INFO,
                       "Cancel lock %p for export %p (loop %d), still have "
                       "%d locks left on hash table.\n",
                       lock, exp, ecl->ecl_loop,
                       atomic_read(&hs->hs_count));
        }

        return 0;
}
/**
 * Cancel all locks for given export.
 *
 * Typically called on client disconnection/eviction.
 */
void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
	struct export_cl_data	ecl = {
		.ecl_exp	= exp,
		.ecl_loop	= 0,
	};

	cfs_hash_for_each_empty(exp->exp_lock_hash,
				ldlm_cancel_locks_for_export_cb, &ecl);
}
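
/*
 * Note: cfs_hash_for_each_empty() re-walks exp_lock_hash until the table
 * is empty, so locks that race in while cancellation is in progress are
 * also cancelled before this function returns.
 */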
/**
 * Downgrade an exclusive lock.
 *
 * A fast variant of ldlm_lock_convert() for conversion of exclusive
 * locks. The conversion is always successful.
 * Used by Commit on Sharing (COS) code.
 *
 * \param lock		A lock to convert
 * \param new_mode	new lock mode
 */
void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
{
	ENTRY;

	LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
	LASSERT(new_mode == LCK_COS);

	lock_res_and_lock(lock);
	ldlm_resource_unlink_lock(lock);
	/*
	 * Remove the lock from pool as it will be added again in
	 * ldlm_grant_lock() called below.
	 */
	ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);

	lock->l_req_mode = new_mode;
	ldlm_grant_lock(lock, NULL);
	unlock_res_and_lock(lock);
	ldlm_reprocess_all(lock->l_resource);

	EXIT;
}
EXPORT_SYMBOL(ldlm_lock_downgrade);
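
/*
 * Example (server-side Commit-on-Sharing path, hypothetical caller):
 * once the transaction a PW/EX lock protected has committed, the lock
 * can be demoted in place instead of being cancelled:
 *
 *	ldlm_lock_downgrade(lock, LCK_COS);
 */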
/**
 * Attempt to convert already granted lock to a different mode.
 *
 * While lock conversion is not currently used, future client-side
 * optimizations could take advantage of it to avoid discarding cached
 * pages on a file.
 */
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
					__u32 *flags)
{
	struct list_head rpc_list;
	struct ldlm_resource *res;
	struct ldlm_namespace *ns;
	int granted = 0;
#ifdef HAVE_SERVER_SUPPORT
	int old_mode;
	struct sl_insert_point prev;
#endif
	struct ldlm_interval *node;
	ENTRY;

	INIT_LIST_HEAD(&rpc_list);
	/* Just return if mode is unchanged. */
	if (new_mode == lock->l_granted_mode) {
		*flags |= LDLM_FL_BLOCK_GRANTED;
		RETURN(lock->l_resource);
	}
	/* I can't check the type of lock here because the bitlock of lock
	 * is not held here, so do the allocation blindly. -jay */
	OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
	if (node == NULL)  /* Actually, this causes EDEADLOCK to be returned */
		RETURN(NULL);

	LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
		 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);

	lock_res_and_lock(lock);

	res = lock->l_resource;
	ns  = ldlm_res_to_ns(res);

#ifdef HAVE_SERVER_SUPPORT
	old_mode = lock->l_req_mode;
#endif
	lock->l_req_mode = new_mode;
	if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
#ifdef HAVE_SERVER_SUPPORT
		/* remember the lock position where the lock might be
		 * added back to the granted list later and also
		 * remember the join mode for skiplist fixing. */
		prev.res_link = lock->l_res_link.prev;
		prev.mode_link = lock->l_sl_mode.prev;
		prev.policy_link = lock->l_sl_policy.prev;
#endif
		ldlm_resource_unlink_lock(lock);
	} else {
		ldlm_resource_unlink_lock(lock);
		if (res->lr_type == LDLM_EXTENT) {
			/* FIXME: ugly code, I have to attach the lock to an
			 * interval node again since perhaps it will be granted
			 * soon */
			INIT_LIST_HEAD(&node->li_group);
			ldlm_interval_attach(node, lock);
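			/* The interval node is now owned by (and freed with)
			 * the lock, so it must not be freed again on the
			 * cleanup path at the end of this function. */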
			node = NULL;
		}
	}

	/*
	 * Remove old lock from the pool before adding the lock with new
	 * mode below in ->policy()
	 */
	ldlm_pool_del(&ns->ns_pool, lock);

	/* If this is a local resource, put it on the appropriate list. */
	if (ns_is_client(ldlm_res_to_ns(res))) {
		if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
			ldlm_resource_add_lock(res, &res->lr_converting, lock);
		} else {
			/* This should never happen, because of the way the
			 * server handles conversions. */
			LDLM_ERROR(lock, "Erroneous flags %x on local lock\n",
				   *flags);
			LBUG();

			ldlm_grant_lock(lock, &rpc_list);
			granted = 1;
			/* FIXME: completion handling not with lr_lock held! */
			if (lock->l_completion_ast)
				lock->l_completion_ast(lock, 0, NULL);
		}
#ifdef HAVE_SERVER_SUPPORT
	} else {
		int rc;
		ldlm_error_t err;
		__u64 pflags = 0;
		ldlm_processing_policy policy;
		policy = ldlm_processing_policy_table[res->lr_type];
		rc = policy(lock, &pflags, 0, &err, &rpc_list);
		if (rc == LDLM_ITER_STOP) {
			lock->l_req_mode = old_mode;
			if (res->lr_type == LDLM_EXTENT)
				ldlm_extent_add_lock(res, lock);
			else
				ldlm_granted_list_add_lock(lock, &prev);

			res = NULL;
		} else {
			*flags |= LDLM_FL_BLOCK_GRANTED;
			granted = 1;
		}
	}
#else
	} else {
		CERROR("This is a client-side-only module, cannot handle "
		       "LDLM_NAMESPACE_SERVER resource type lock.\n");
		LBUG();
	}
#endif
	unlock_res_and_lock(lock);

	if (granted)
		ldlm_run_ast_work(ns, &rpc_list, LDLM_WORK_CP_AST);
	if (node)
		OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
	RETURN(res);
}
EXPORT_SYMBOL(ldlm_lock_convert);
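
/*
 * Example (hypothetical caller; conversion is currently unused): promote
 * a granted PR lock to PW, the only pairing the LASSERTF above allows:
 *
 *	__u32 flags = 0;
 *	struct ldlm_resource *res;
 *
 *	res = ldlm_lock_convert(lock, LCK_PW, &flags);
 *	if (res == NULL)
 *		rc = -EDEADLOCK;	(allocation-failure path)
 */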
/**
 * Print lock with lock handle \a lockh description into debug log.
 *
 * Used when printing all locks on a resource for debug purposes.
 */
void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
{
	struct ldlm_lock *lock;

	if (!((libcfs_debug | D_ERROR) & level))
		return;

	lock = ldlm_handle2lock(lockh);
	if (lock == NULL)
		return;

	LDLM_DEBUG_LIMIT(level, lock, "###");

	LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_dump_handle);
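
/*
 * Example (hypothetical caller): dump a possibly-stale handle at the
 * DLM trace level; the handle is resolved and released internally:
 *
 *	ldlm_lock_dump_handle(D_DLMTRACE, &lockh);
 */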
/**
 * Print lock information with custom message into debug log.
 * Helper function.
 */
void _ldlm_lock_debug(struct ldlm_lock *lock,
		      struct libcfs_debug_msg_data *msgdata,
		      const char *fmt, ...)
{
	va_list args;
	struct obd_export *exp = lock->l_export;
	struct ldlm_resource *resource = lock->l_resource;
	char *nid = "local";

	va_start(args, fmt);

	if (exp && exp->exp_connection) {
		nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
	} else if (exp && exp->exp_obd != NULL) {
		struct obd_import *imp = exp->exp_obd->u.cli.cl_import;

		nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
	}

	if (resource == NULL) {
		libcfs_debug_vmsg2(msgdata, fmt, args,
			" ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
			"res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" nid: %s "
			"remote: "LPX64" expref: %d pid: %u timeout: %lu "
			"lvb_type: %d\n",
			lock,
			lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
			lock->l_readers, lock->l_writers,
			ldlm_lockname[lock->l_granted_mode],
			ldlm_lockname[lock->l_req_mode],
			lock->l_flags, nid, lock->l_remote_handle.cookie,
			exp ? atomic_read(&exp->exp_refcount) : -99,
			lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
		va_end(args);
		return;
	}
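	/* The lock is attached to a resource: print a description that
	 * includes the policy data specific to the resource lock type. */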
	switch (resource->lr_type) {
	case LDLM_EXTENT:
		libcfs_debug_vmsg2(msgdata, fmt, args,
			" ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
			"res: "DLDLMRES" rrc: %d type: %s ["LPU64"->"LPU64"] "
			"(req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote: "
			LPX64" expref: %d pid: %u timeout: %lu lvb_type: %d\n",
			ldlm_lock_to_ns_name(lock), lock,
			lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
			lock->l_readers, lock->l_writers,
			ldlm_lockname[lock->l_granted_mode],
			ldlm_lockname[lock->l_req_mode],
			PLDLMRES(resource),
			atomic_read(&resource->lr_refcount),
			ldlm_typename[resource->lr_type],
			lock->l_policy_data.l_extent.start,
			lock->l_policy_data.l_extent.end,
			lock->l_req_extent.start, lock->l_req_extent.end,
			lock->l_flags, nid, lock->l_remote_handle.cookie,
			exp ? atomic_read(&exp->exp_refcount) : -99,
			lock->l_pid, lock->l_callback_timeout,
			lock->l_lvb_type);
		break;

	case LDLM_FLOCK:
		libcfs_debug_vmsg2(msgdata, fmt, args,
			" ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
			"res: "DLDLMRES" rrc: %d type: %s pid: %d "
			"["LPU64"->"LPU64"] flags: "LPX64" nid: %s "
			"remote: "LPX64" expref: %d pid: %u timeout: %lu\n",
			ldlm_lock_to_ns_name(lock), lock,
			lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
			lock->l_readers, lock->l_writers,
			ldlm_lockname[lock->l_granted_mode],
			ldlm_lockname[lock->l_req_mode],
			PLDLMRES(resource),
			atomic_read(&resource->lr_refcount),
			ldlm_typename[resource->lr_type],
			lock->l_policy_data.l_flock.pid,
			lock->l_policy_data.l_flock.start,
			lock->l_policy_data.l_flock.end,
			lock->l_flags, nid, lock->l_remote_handle.cookie,
			exp ? atomic_read(&exp->exp_refcount) : -99,
			lock->l_pid, lock->l_callback_timeout);
		break;

	case LDLM_IBITS:
		libcfs_debug_vmsg2(msgdata, fmt, args,
			" ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
			"res: "DLDLMRES" bits "LPX64" rrc: %d type: %s "
			"flags: "LPX64" nid: %s remote: "LPX64" expref: %d "
			"pid: %u timeout: %lu lvb_type: %d\n",
			ldlm_lock_to_ns_name(lock),
			lock, lock->l_handle.h_cookie,
			atomic_read(&lock->l_refc),
			lock->l_readers, lock->l_writers,
			ldlm_lockname[lock->l_granted_mode],
			ldlm_lockname[lock->l_req_mode],
			PLDLMRES(resource),
			lock->l_policy_data.l_inodebits.bits,
			atomic_read(&resource->lr_refcount),
			ldlm_typename[resource->lr_type],
			lock->l_flags, nid, lock->l_remote_handle.cookie,
			exp ? atomic_read(&exp->exp_refcount) : -99,
			lock->l_pid, lock->l_callback_timeout,
			lock->l_lvb_type);
		break;

	default:
		libcfs_debug_vmsg2(msgdata, fmt, args,
			" ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
			"res: "DLDLMRES" rrc: %d type: %s flags: "LPX64" "
			"nid: %s remote: "LPX64" expref: %d pid: %u "
			"timeout: %lu lvb_type: %d\n",
			ldlm_lock_to_ns_name(lock),
			lock, lock->l_handle.h_cookie,
			atomic_read(&lock->l_refc),
			lock->l_readers, lock->l_writers,
			ldlm_lockname[lock->l_granted_mode],
			ldlm_lockname[lock->l_req_mode],
			PLDLMRES(resource),
			atomic_read(&resource->lr_refcount),
			ldlm_typename[resource->lr_type],
			lock->l_flags, nid, lock->l_remote_handle.cookie,
			exp ? atomic_read(&exp->exp_refcount) : -99,
			lock->l_pid, lock->l_callback_timeout,
			lock->l_lvb_type);
		break;
	}
	va_end(args);
}
EXPORT_SYMBOL(_ldlm_lock_debug);
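
/*
 * Note: callers do not normally invoke _ldlm_lock_debug() directly; the
 * LDLM_DEBUG()/LDLM_ERROR() macros in lustre_dlm.h supply the msgdata and
 * varargs, e.g. (hypothetical rc variable):
 *
 *	LDLM_DEBUG(lock, "enqueue returned: %d", rc);
 */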