// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>
#include <lustre_swab.h>
#include <obd_class.h>

#include "ldlm_internal.h"

struct kmem_cache *ldlm_glimpse_work_kmem;
EXPORT_SYMBOL(ldlm_glimpse_work_kmem);
char *ldlm_lockname[] = {
	[LCK_GROUP] = "GROUP",
};
EXPORT_SYMBOL(ldlm_lockname);

char *ldlm_typename[] = {
	[LDLM_EXTENT] = "EXT",
};
static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
	[LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
	[LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
	[LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire_to_local,
	[LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
};

static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
	[LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_local_to_wire,
	[LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_local_to_wire,
	[LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_local_to_wire,
	[LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_local_to_wire,
};
/**
 * Converts lock policy from local format to on-the-wire lock_desc format
 */
void ldlm_convert_policy_to_wire(enum ldlm_type type,
				 const union ldlm_policy_data *lpolicy,
				 union ldlm_wire_policy_data *wpolicy)
{
	ldlm_policy_local_to_wire_t convert;

	convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];
	convert(lpolicy, wpolicy);
}

/**
 * Converts lock policy from on-the-wire lock_desc format to local format
 */
void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
				  const union ldlm_wire_policy_data *wpolicy,
				  union ldlm_policy_data *lpolicy)
{
	ldlm_policy_wire_to_local_t convert;

	convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];
	convert(wpolicy, lpolicy);
}
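
/* Illustrative sketch (not in the original source): for an extent lock the
 * table dispatch above resolves to the extent handler, so
 *
 *	ldlm_convert_policy_to_wire(LDLM_EXTENT, &lpolicy, &wpolicy);
 *
 * behaves like a direct call to ldlm_extent_policy_local_to_wire(&lpolicy,
 * &wpolicy); subtracting LDLM_MIN_TYPE only rebases the enum so the first
 * lock type lands at array index 0.
 */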
const char *ldlm_it2str(enum ldlm_intent_flags it)
{
	switch (it) {
	case (IT_OPEN | IT_CREAT):
		return "open|creat";
	default:
		CERROR("Unknown intent 0x%08x\n", it);
		return "UNKNOWN";
	}
}
EXPORT_SYMBOL(ldlm_it2str);
#ifdef HAVE_SERVER_SUPPORT
static ldlm_processing_policy ldlm_processing_policy_table[] = {
	[LDLM_PLAIN] = ldlm_process_plain_lock,
	[LDLM_EXTENT] = ldlm_process_extent_lock,
	[LDLM_FLOCK] = ldlm_process_flock_lock,
	[LDLM_IBITS] = ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
	return ldlm_processing_policy_table[res->lr_type];
}
EXPORT_SYMBOL(ldlm_get_processing_policy);

static ldlm_reprocessing_policy ldlm_reprocessing_policy_table[] = {
	[LDLM_PLAIN] = ldlm_reprocess_queue,
	[LDLM_EXTENT] = ldlm_reprocess_queue,
	[LDLM_FLOCK] = ldlm_reprocess_queue,
	[LDLM_IBITS] = ldlm_reprocess_inodebits_queue,
};

ldlm_reprocessing_policy ldlm_get_reprocessing_policy(struct ldlm_resource *res)
{
	return ldlm_reprocessing_policy_table[res->lr_type];
}
#endif /* HAVE_SERVER_SUPPORT */
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
	ns->ns_policy = arg;
}
EXPORT_SYMBOL(ldlm_register_intent);
/* REFCOUNTED LOCK OBJECTS */

/**
 * Get a reference on a lock.
 *
 * Lock refcounts, during creation:
 * - one special one for allocation, dec'd only once in destroy
 * - one for being a lock that's in-use
 * - one for the addref associated with a new lock
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
	refcount_inc(&lock->l_handle.h_ref);
	return lock;
}
EXPORT_SYMBOL(ldlm_lock_get);
static void lock_handle_free(struct rcu_head *rcu)
{
	struct ldlm_lock *lock = container_of(rcu, struct ldlm_lock,
					      l_handle.h_rcu);

	OBD_FREE_PRE(lock, sizeof(*lock), "slab-freed");
	kmem_cache_free(ldlm_lock_slab, lock);
}
/**
 * Release lock reference.
 *
 * Also frees the lock if it was last reference.
 */
void ldlm_lock_put(struct ldlm_lock *lock)
{
	ENTRY;

	LASSERT(lock->l_resource != LP_POISON);
	LASSERT(refcount_read(&lock->l_handle.h_ref) > 0);
	if (refcount_dec_and_test(&lock->l_handle.h_ref)) {
		struct ldlm_resource *res;

		LDLM_DEBUG(lock,
			   "final lock_put on destroyed lock, freeing it.");

		res = lock->l_resource;
		LASSERT(ldlm_is_destroyed(lock));
		LASSERT(list_empty(&lock->l_exp_list));
		LASSERT(list_empty(&lock->l_res_link));
		LASSERT(list_empty(&lock->l_pending_chain));

		lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
				     LDLM_NSS_LOCKS);
		if (lock->l_export) {
			class_export_lock_put(lock->l_export, lock);
			lock->l_export = NULL;
		}

		if (lock->l_lvb_data != NULL)
			OBD_FREE_LARGE(lock->l_lvb_data, lock->l_lvb_len);

		if (res->lr_type == LDLM_IBITS && lock->l_ibits_node) {
			OBD_SLAB_FREE_PTR(lock->l_ibits_node,
					  ldlm_inodebits_slab);
		}
		ldlm_resource_putref(res);
		lock->l_resource = NULL;
		if (lock->l_flags & BIT(63))
			/* Performance testing - bypassing RCU removes overhead */
			lock_handle_free(&lock->l_handle.h_rcu);
		else
			call_rcu(&lock->l_handle.h_rcu, lock_handle_free);
	}

	EXIT;
}
EXPORT_SYMBOL(ldlm_lock_put);
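
/* Illustrative sketch (not in the original source): every ldlm_lock_get()
 * must be balanced by exactly one ldlm_lock_put(); the structure itself is
 * only freed (through RCU) once the lock was destroyed and the handle
 * refcount hits zero:
 *
 *	struct ldlm_lock *lock = ldlm_lock_get(some_lock);
 *
 *	... lock cannot be freed underneath us here ...
 *	ldlm_lock_put(lock);
 */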
/**
 * Removes LDLM lock \a lock from LRU. Assumes LRU is already locked.
 */
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
	int rc = 0;

	if (!list_empty(&lock->l_lru)) {
		struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

		LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
		if (ns->ns_last_pos == &lock->l_lru)
			ns->ns_last_pos = lock->l_lru.prev;
		list_del_init(&lock->l_lru);
		LASSERT(ns->ns_nr_unused > 0);
		ns->ns_nr_unused--;
		rc = 1;
	}
	return rc;
}
/**
 * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
 *
 * If \a last_use is non-zero, it will remove the lock from LRU only if
 * it matches lock's l_last_used.
 *
 * \retval 0 the lock was not in the LRU list, or \a last_use is non-zero
 *	     and does not match lock's l_last_used
 * \retval 1 the lock was in LRU list and removed
 */
int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, ktime_t last_use)
{
	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
	int rc = 0;

	if (ldlm_is_ns_srv(lock)) {
		LASSERT(list_empty(&lock->l_lru));
		return 0;
	}

	spin_lock(&ns->ns_lock);
	if (!ktime_compare(last_use, ktime_set(0, 0)) ||
	    !ktime_compare(last_use, lock->l_last_used))
		rc = ldlm_lock_remove_from_lru_nolock(lock);
	spin_unlock(&ns->ns_lock);

	return rc;
}
/* Adds LDLM lock \a lock to namespace LRU. Assumes LRU is already locked. */
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{
	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

	lock->l_last_used = ktime_get();
	LASSERT(list_empty(&lock->l_lru));
	LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
	list_add_tail(&lock->l_lru, &ns->ns_unused_list);
	LASSERT(ns->ns_nr_unused >= 0);
	ns->ns_nr_unused++;
}

/* Adds LDLM lock \a lock to namespace LRU. Obtains necessary LRU locks first */
static void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

	spin_lock(&ns->ns_lock);
	ldlm_lock_add_to_lru_nolock(lock);
	spin_unlock(&ns->ns_lock);
}
/**
 * Moves LDLM lock \a lock that is already in namespace LRU to the tail of
 * the LRU. Performs necessary LRU locking.
 */
void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

	if (ldlm_is_ns_srv(lock)) {
		LASSERT(list_empty(&lock->l_lru));
		return;
	}

	spin_lock(&ns->ns_lock);
	if (!list_empty(&lock->l_lru)) {
		ldlm_lock_remove_from_lru_nolock(lock);
		ldlm_lock_add_to_lru_nolock(lock);
	}
	spin_unlock(&ns->ns_lock);
}
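
/* Illustrative sketch (not in the original source): the *_nolock variants
 * above expect the caller to hold ns->ns_lock, which lets compound LRU
 * updates stay atomic under a single spinlock section, exactly as the
 * "touch" helper does:
 *
 *	spin_lock(&ns->ns_lock);
 *	ldlm_lock_remove_from_lru_nolock(lock);
 *	ldlm_lock_add_to_lru_nolock(lock);
 *	spin_unlock(&ns->ns_lock);
 */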
/**
 * Helper to destroy a locked lock.
 *
 * Used by ldlm_lock_destroy and ldlm_lock_destroy_nolock.
 * Must be called with l_lock and lr_lock held.
 *
 * Does not actually free the lock data, but rather marks the lock as
 * destroyed by setting l_destroyed field in the lock to 1. Destroys a
 * handle->lock association too, so that the lock can no longer be found
 * and removes the lock from LRU list. Actual lock freeing occurs when
 * last lock reference goes away.
 *
 * Original comment (of some historical value):
 * This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die. Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock.
 * Because it's not in the hash table anymore. -phil
 */
static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
	ENTRY;

	if (lock->l_readers || lock->l_writers) {
		LDLM_ERROR(lock, "lock still has references");
		LBUG();
	}

	if (!list_empty(&lock->l_res_link)) {
		LDLM_ERROR(lock, "lock still on resource");
		LBUG();
	}

	if (ldlm_is_destroyed(lock)) {
		LASSERT(list_empty(&lock->l_lru));
		RETURN(0);
	}
	ldlm_set_destroyed(lock);
	wake_up(&lock->l_waitq);

	if (lock->l_export && lock->l_export->exp_lock_hash) {
		/* Safe to call cfs_hash_del even if lock isn't in exp_lock_hash. */
		/* below, .hs_keycmp resolves to ldlm_export_lock_keycmp() */
		cfs_hash_del(lock->l_export->exp_lock_hash,
			     &lock->l_remote_handle, &lock->l_exp_hash);
	}

	ldlm_lock_remove_from_lru(lock);
	class_handle_unhash(&lock->l_handle);

	RETURN(1);
}
/* Destroys a LDLM lock \a lock. Performs necessary locking first. */
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
	int first;

	ENTRY;
	lock_res_and_lock(lock);
	first = ldlm_lock_destroy_internal(lock);
	unlock_res_and_lock(lock);

	/* drop reference from hashtable only for first destroy */
	if (first)
		ldlm_lock_put(lock);
	EXIT;
}

/* Destroys a LDLM lock \a lock that is already locked. */
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
	int first;

	ENTRY;
	first = ldlm_lock_destroy_internal(lock);
	/* drop reference from hashtable only for first destroy */
	if (first)
		ldlm_lock_put(lock);
	EXIT;
}

static const char lock_handle_owner[] = "ldlm";
/**
 * Allocate and initialize new lock structure.
 *
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        new lock will take over the refcount.
 * returns: lock with refcount 2 - one for current caller and one for remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
	struct ldlm_lock *lock;

	ENTRY;

	if (resource == NULL)
		LBUG();

	OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, GFP_NOFS);
	if (lock == NULL)
		RETURN(NULL);

	RCU_INIT_POINTER(lock->l_resource, resource);

	refcount_set(&lock->l_handle.h_ref, 2);
	INIT_LIST_HEAD(&lock->l_res_link);
	INIT_LIST_HEAD(&lock->l_lru);
	INIT_LIST_HEAD(&lock->l_pending_chain);
	INIT_LIST_HEAD(&lock->l_bl_ast);
	INIT_LIST_HEAD(&lock->l_cp_ast);
	INIT_LIST_HEAD(&lock->l_rk_ast);
	init_waitqueue_head(&lock->l_waitq);
	lock->l_blocking_lock = NULL;
	switch (resource->lr_type) {
	case LDLM_PLAIN:
	case LDLM_IBITS:
		INIT_LIST_HEAD(&lock->l_sl_mode);
		INIT_LIST_HEAD(&lock->l_sl_policy);
		break;
	case LDLM_FLOCK:
		INIT_HLIST_NODE(&lock->l_exp_flock_hash);
		RB_CLEAR_NODE(&lock->l_fl_rb);
		break;
	case LDLM_EXTENT:
		RB_CLEAR_NODE(&lock->l_rb);
		INIT_LIST_HEAD(&lock->l_same_extent);
		break;
	}
	INIT_HLIST_NODE(&lock->l_exp_hash);

	lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
			     LDLM_NSS_LOCKS);
	INIT_HLIST_NODE(&lock->l_handle.h_link);
	class_handle_hash(&lock->l_handle, lock_handle_owner);

	lock->l_callback_timestamp = 0;
	lock->l_activity = 0;

#if LUSTRE_TRACKS_LOCK_EXP_REFS
	INIT_LIST_HEAD(&lock->l_exp_refs_link);
	lock->l_exp_refs_nr = 0;
	lock->l_exp_refs_target = NULL;
#endif
	INIT_LIST_HEAD(&lock->l_exp_list);

	RETURN(lock);
}
struct ldlm_lock *ldlm_lock_new_testing(struct ldlm_resource *resource)
{
	struct ldlm_lock *lock = ldlm_lock_new(resource);
	int rc = 0;

	if (!lock)
		return NULL;

	lock->l_flags |= BIT(63);
	switch (resource->lr_type) {
	case LDLM_IBITS:
		rc = ldlm_inodebits_alloc_lock(lock);
		break;
	default:
		break;
	}

	if (!rc)
		return lock;
	ldlm_lock_destroy(lock);
	ldlm_lock_put(lock);
	return NULL;
}
EXPORT_SYMBOL(ldlm_lock_new_testing);
/**
 * Moves LDLM lock \a lock to another resource.
 * This is used on client when server returns some other lock than requested
 * (typically as a result of intent operation).
 */
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
			      const struct ldlm_res_id *new_resid)
{
	struct ldlm_resource *oldres;
	struct ldlm_resource *newres;
	int type;

	ENTRY;

	LASSERT(ns_is_client(ns));

	oldres = lock_res_and_lock(lock);
	if (memcmp(new_resid, &oldres->lr_name,
		   sizeof(oldres->lr_name)) == 0) {
		/* Nothing to do */
		unlock_res_and_lock(lock);
		RETURN(0);
	}

	LASSERT(new_resid->name[0] != 0);

	/* This function assumes that the lock isn't on any lists */
	LASSERT(list_empty(&lock->l_res_link));

	type = oldres->lr_type;
	unlock_res_and_lock(lock);

	newres = ldlm_resource_get(ns, new_resid, type, 1);
	if (IS_ERR(newres))
		RETURN(PTR_ERR(newres));

	/*
	 * To flip the lock from the old to the new resource, oldres
	 * and newres have to be locked. Resource spin-locks are taken
	 * in the memory address order to avoid deadlocks.
	 * As this is the only circumstance where ->l_resource
	 * can change, and this cannot race with itself, it is safe
	 * to access lock->l_resource without being careful about locking.
	 */
	oldres = lock->l_resource;
	if (oldres < newres) {
		lock_res(oldres);
		lock_res_nested(newres, LRT_NEW);
	} else {
		lock_res(newres);
		lock_res_nested(oldres, LRT_NEW);
	}
	LASSERT(memcmp(new_resid, &oldres->lr_name,
		       sizeof(oldres->lr_name)) != 0);
	rcu_assign_pointer(lock->l_resource, newres);
	unlock_res(oldres);
	unlock_res(newres);

	/* ...and the flowers are still standing! */
	ldlm_resource_putref(oldres);

	RETURN(0);
}
/** \defgroup ldlm_handles LDLM HANDLES
 * Ways to get hold of locks without any addresses.
 * @{
 */

/**
 * Fills in handle for LDLM lock \a lock into supplied \a lockh.
 * Does not take any references.
 */
void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
{
	lockh->cookie = lock->l_handle.h_cookie;
}
EXPORT_SYMBOL(ldlm_lock2handle);
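
/* Illustrative sketch (not in the original source): a handle is an opaque
 * cookie that can outlive any pointer to the lock, so the usual round trip
 * is handle out, handle back in, with a reference taken on the way back:
 *
 *	struct lustre_handle lockh;
 *
 *	ldlm_lock2handle(lock, &lockh);
 *	...
 *	lock = ldlm_handle2lock(&lockh);
 *	if (lock != NULL) {
 *		... the cookie was still valid ...
 *		ldlm_lock_put(lock);
 *	}
 */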
/**
 * Obtain a lock reference by handle.
 *
 * if \a flags: atomically get the lock and set the flags.
 *              Return NULL if flag already set
 */
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
				     __u64 flags)
{
	struct ldlm_lock *lock;

	ENTRY;

	LASSERT(handle);

	if (!lustre_handle_is_used(handle))
		RETURN(NULL);

	lock = class_handle2object(handle->cookie, lock_handle_owner);
	if (lock == NULL)
		RETURN(NULL);

	if (lock->l_export != NULL && lock->l_export->exp_failed) {
		CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
		       lock, lock->l_export);
		ldlm_lock_put(lock);
		RETURN(NULL);
	}

	/* It's unlikely but possible that someone marked the lock as
	 * destroyed after we did handle2object on it
	 */
	if ((flags == 0) && !ldlm_is_destroyed(lock))
		RETURN(lock);

	lock_res_and_lock(lock);

	LASSERT(lock->l_resource != NULL);

	if (unlikely(ldlm_is_destroyed(lock))) {
		unlock_res_and_lock(lock);
		CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
		ldlm_lock_put(lock);
		RETURN(NULL);
	}

	/* If we're setting flags, make sure none of them are already set. */
	if (flags != 0) {
		if ((lock->l_flags & flags) != 0) {
			unlock_res_and_lock(lock);
			ldlm_lock_put(lock);
			RETURN(NULL);
		}

		lock->l_flags |= flags;
	}

	unlock_res_and_lock(lock);
	RETURN(lock);
}
EXPORT_SYMBOL(__ldlm_handle2lock);
/** @} ldlm_handles */
/**
 * Fill in "on the wire" representation for given LDLM lock into supplied
 * lock descriptor \a desc structure.
 */
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
	ldlm_res2desc(lock->l_resource, &desc->l_resource);
	desc->l_req_mode = lock->l_req_mode;
	desc->l_granted_mode = lock->l_granted_mode;
	ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
				    &lock->l_policy_data,
				    &desc->l_policy_data);
}
/**
 * Add a lock to list of conflicting locks to send AST to.
 *
 * Only add if we have not sent a blocking AST to the lock yet.
 */
static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
				  struct list_head *work_list)
{
	if (!ldlm_is_ast_sent(lock)) {
		LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
		ldlm_set_ast_sent(lock);
		/* If the enqueuing client said so, tell the AST recipient to
		 * discard dirty data, rather than writing back.
		 */
		if (ldlm_is_ast_discard_data(new))
			ldlm_set_discard_data(lock);

		/* Lock can be converted from a blocking state back to granted
		 * after lock convert or COS downgrade but still be in an
		 * older bl_list because it is controlled only by
		 * ldlm_work_bl_ast_lock(), let it be processed there.
		 */
		if (list_empty(&lock->l_bl_ast)) {
			list_add(&lock->l_bl_ast, work_list);
			ldlm_lock_get(lock);
		}
		LASSERT(lock->l_blocking_lock == NULL);
		lock->l_blocking_lock = ldlm_lock_get(new);
	}
}
/* Add a lock to list of just granted locks to send completion AST to. */
static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
				  struct list_head *work_list)
{
	if (!ldlm_is_cp_reqd(lock)) {
		ldlm_set_cp_reqd(lock);
		LDLM_DEBUG(lock, "lock granted; sending completion AST.");
		LASSERT(list_empty(&lock->l_cp_ast));
		list_add(&lock->l_cp_ast, work_list);
		ldlm_lock_get(lock);
	}
}
/**
 * Aggregator function to add AST work items into a list. Determines
 * what sort of an AST work needs to be done and calls the proper
 * adding function.
 *
 * Must be called with lr_lock held.
 */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
			    struct list_head *work_list)
{
	check_res_locked(lock->l_resource);
	if (new)
		ldlm_add_bl_work_item(lock, new, work_list);
	else
		ldlm_add_cp_work_item(lock, work_list);
}
/**
 * Add specified reader/writer reference to LDLM lock with handle \a lockh.
 * r/w reference type is determined by \a mode.
 * Calls ldlm_lock_addref_internal.
 */
void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
	struct ldlm_lock *lock;

	lock = ldlm_handle2lock(lockh);
	LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
	ldlm_lock_addref_internal(lock, mode);
	ldlm_lock_put(lock);
}
EXPORT_SYMBOL(ldlm_lock_addref);
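
/* Illustrative sketch (not in the original source): r/w references pin a
 * lock in "used" state and keep it out of the LRU, so callers typically
 * bracket their I/O with matching addref/decref calls of the same mode:
 *
 *	ldlm_lock_addref(&lockh, LCK_PR);
 *	... read under the protection of the PR lock ...
 *	ldlm_lock_decref(&lockh, LCK_PR);
 */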
/**
 * Add specified reader/writer reference to LDLM lock \a lock.
 * r/w reference type is determined by \a mode.
 * Removes lock from LRU if it is there.
 * Assumes the LDLM lock is already locked.
 */
void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
				      enum ldlm_mode mode)
{
	ldlm_lock_remove_from_lru(lock);
	if (mode & (LCK_NL | LCK_CR | LCK_PR))
		lock->l_readers++;
	if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS | LCK_TXN))
		lock->l_writers++;
	ldlm_lock_get(lock);
	LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}
/**
 * Attempts to add reader/writer reference to a lock with handle \a lockh, and
 * fails if lock is already LDLM_FL_CBPENDING or destroyed.
 *
 * \retval 0 success, lock was addref-ed
 * \retval -EAGAIN lock is being canceled
 */
int ldlm_lock_addref_try(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
	struct ldlm_lock *lock;
	int result = -EAGAIN;

	lock = ldlm_handle2lock(lockh);
	if (lock != NULL) {
		lock_res_and_lock(lock);
		if (lock->l_readers != 0 || lock->l_writers != 0 ||
		    !ldlm_is_cbpending(lock)) {
			ldlm_lock_addref_internal_nolock(lock, mode);
			result = 0;
		}
		unlock_res_and_lock(lock);
		ldlm_lock_put(lock);
	}
	return result;
}
EXPORT_SYMBOL(ldlm_lock_addref_try);
/**
 * Add specified reader/writer reference to LDLM lock \a lock.
 * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
 * Only called for local locks.
 */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
	lock_res_and_lock(lock);
	ldlm_lock_addref_internal_nolock(lock, mode);
	unlock_res_and_lock(lock);
}
/**
 * Removes reader/writer reference for LDLM lock \a lock.
 * Assumes LDLM lock is already locked.
 * Only called in ldlm_flock_destroy and for local locks.
 * Does NOT add lock to LRU if no r/w references left to accommodate flock locks
 * that cannot be placed in LRU.
 */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
				      enum ldlm_mode mode)
{
	LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
	if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
		LASSERT(lock->l_readers > 0);
		lock->l_readers--;
	}
	if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS | LCK_TXN)) {
		LASSERT(lock->l_writers > 0);
		lock->l_writers--;
	}

	ldlm_lock_put(lock); /* matches the LDLM_LOCK_GET() in addref */
}
/**
 * Removes reader/writer reference for LDLM lock \a lock.
 * Locks LDLM lock first.
 * If the lock is determined to be client lock on a client and r/w refcount
 * drops to zero and the lock is not blocked, the lock is added to LRU lock
 * list.
 * For blocked LDLM locks if r/w count drops to zero, blocking_ast is called.
 */
void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
	struct ldlm_namespace *ns;

	ENTRY;

	lock_res_and_lock(lock);

	ns = ldlm_lock_to_ns(lock);

	ldlm_lock_decref_internal_nolock(lock, mode);

	if ((ldlm_is_local(lock) || lock->l_req_mode == LCK_GROUP) &&
	    !lock->l_readers && !lock->l_writers) {
		/* If this is a local lock on a server namespace and this was
		 * the last reference, cancel the lock.
		 *
		 * Group locks are special:
		 * They must not go in LRU, but they are not called back
		 * like non-group locks, instead they are manually released.
		 * They have an l_writers reference which they keep until
		 * they are manually released, so we remove them when they have
		 * no more reader or writer references. - LU-6368
		 */
		ldlm_set_cbpending(lock);
	}

	if (!lock->l_readers && !lock->l_writers && ldlm_is_cbpending(lock)) {
		unsigned int mask = D_DLMTRACE;

		/* If we received a blocked AST and this was the last reference,
		 * run the callback.
		 */
		if (ldlm_is_ns_srv(lock) && lock->l_export)
			mask |= D_WARNING;
		LDLM_DEBUG_LIMIT(mask, lock,
				 "final decref done on %sCBPENDING lock",
				 mask & D_WARNING ? "non-local " : "");

		ldlm_lock_get(lock); /* dropped by bl thread */
		ldlm_lock_remove_from_lru(lock);
		unlock_res_and_lock(lock);

		if (ldlm_is_fail_loc(lock))
			CFS_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

		if (ldlm_is_atomic_cb(lock) || ldlm_is_local(lock) ||
		    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
			ldlm_handle_bl_callback(ns, NULL, lock);
	} else if (ns_is_client(ns) &&
		   !lock->l_readers && !lock->l_writers &&
		   !ldlm_is_no_lru(lock) &&
		   !ldlm_is_bl_ast(lock) &&
		   !ldlm_is_converting(lock)) {
		/* If this is a client-side namespace and this was the last
		 * reference, put it on the LRU.
		 */
		ldlm_lock_add_to_lru(lock);
		unlock_res_and_lock(lock);
		LDLM_DEBUG(lock, "add lock into lru list");

		if (ldlm_is_fail_loc(lock))
			CFS_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

		ldlm_pool_recalc(&ns->ns_pool, true);
	} else {
		LDLM_DEBUG(lock, "do not add lock into lru list");
		unlock_res_and_lock(lock);
	}

	EXIT;
}
/**
 * Decrease reader/writer refcount for LDLM lock with handle \a lockh.
 */
void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
	struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

	LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
	ldlm_lock_decref_internal(lock, mode);
	ldlm_lock_put(lock);
}
EXPORT_SYMBOL(ldlm_lock_decref);
/**
 * Decrease reader/writer refcount for LDLM lock with handle
 * \a lockh and mark it for subsequent cancellation once r/w refcount
 * drops to zero instead of putting into LRU.
 */
void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
				 enum ldlm_mode mode)
{
	struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

	ENTRY;

	LASSERT(lock != NULL);

	LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
	lock_res_and_lock(lock);
	ldlm_set_cbpending(lock);
	unlock_res_and_lock(lock);
	ldlm_lock_decref_internal(lock, mode);
	ldlm_lock_put(lock);
	EXIT;
}
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
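
/* Illustrative sketch (not in the original source): the only difference
 * from a plain decref is the LDLM_FL_CBPENDING marking, so a caller that
 * wants the lock canceled once idle, rather than cached, writes
 *
 *	ldlm_lock_decref_and_cancel(&lockh, LCK_PW);
 *
 * where ldlm_lock_decref(&lockh, LCK_PW) would instead park the now-unused
 * lock in the namespace LRU.
 */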
struct sl_insert_point {
	struct list_head *res_link;
	struct list_head *mode_link;
	struct list_head *policy_link;
};

/**
 * Finds a position to insert the new lock into granted lock list.
 *
 * Used for locks eligible for skiplist optimization.
 *
 * Parameters:
 *	queue [input]: the granted list where search acts on;
 *	req [input]: the lock whose position to be located;
 *	prev [output]: positions within 3 lists to insert @req to
 *
 * \see ldlm_grant_lock_with_skiplist
 */
static void search_granted_lock(struct list_head *queue,
				struct ldlm_lock *req,
				struct sl_insert_point *prev)
{
	struct ldlm_lock *lock, *mode_end, *policy_end;

	ENTRY;

	list_for_each_entry(lock, queue, l_res_link) {
		mode_end = list_entry(lock->l_sl_mode.prev,
				      struct ldlm_lock, l_sl_mode);

		if (lock->l_req_mode != req->l_req_mode) {
			/* jump to last lock of mode group */
			lock = mode_end;
			continue;
		}

		/* suitable mode group is found */
		if (lock->l_resource->lr_type == LDLM_PLAIN) {
			/* insert point is last lock of the mode group */
			prev->res_link = &mode_end->l_res_link;
			prev->mode_link = &mode_end->l_sl_mode;
			prev->policy_link = &req->l_sl_policy;
			EXIT;
			return;
		} else if (lock->l_resource->lr_type == LDLM_IBITS) {
			while (1) {
				policy_end =
					list_entry(lock->l_sl_policy.prev,
						   struct ldlm_lock,
						   l_sl_policy);

				if (lock->l_policy_data.l_inodebits.bits ==
				    req->l_policy_data.l_inodebits.bits) {
					/* inserting last lock of policy grp */
					prev->res_link =
						&policy_end->l_res_link;
					prev->mode_link =
						&policy_end->l_sl_mode;
					prev->policy_link =
						&policy_end->l_sl_policy;
					EXIT;
					return;
				}

				if (policy_end == mode_end)
					/* done with mode group */
					break;

				/* go to next policy group within mode group */
				lock = list_next_entry(policy_end, l_res_link);
			} /* loop over policy groups within the mode group */

			/* insert point is last lock of the mode group,
			 * new policy group is started
			 */
			prev->res_link = &mode_end->l_res_link;
			prev->mode_link = &mode_end->l_sl_mode;
			prev->policy_link = &req->l_sl_policy;
			EXIT;
			return;
		} else {
			LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
			LBUG();
		}
	}

	/* insert point is last lock on the queue,
	 * new mode group and new policy group are started
	 */
	prev->res_link = queue->prev;
	prev->mode_link = &req->l_sl_mode;
	prev->policy_link = &req->l_sl_policy;
	EXIT;
}
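
/* Illustrative sketch (not in the original source): the granted list is
 * kept as consecutive mode groups, each subdivided into policy groups for
 * IBITS locks; l_sl_mode chains the boundaries of each mode group and
 * l_sl_policy the boundaries of each policy group, e.g.
 *
 *	granted:  [PR,b1] [PR,b1] [PR,b2]   [PW,b1] [PW,b1]
 *	          |<--- mode group PR --->| |<- mode PW ->|
 *	          |<- pol b1 ->| |<- b2 ->| |<-- pol b1 ->|
 *
 * so search_granted_lock() hops between group boundaries instead of
 * visiting every lock on the resource.
 */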
/**
 * Add a lock into resource granted list after a position described by
 * \a prev.
 */
static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
				       struct sl_insert_point *prev)
{
	struct ldlm_resource *res = lock->l_resource;

	ENTRY;

	check_res_locked(res);

	ldlm_resource_dump(D_INFO, res);
	LDLM_DEBUG(lock, "About to add lock:");

	if (ldlm_is_destroyed(lock)) {
		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
		return;
	}

	LASSERT(list_empty(&lock->l_res_link));
	LASSERT(list_empty(&lock->l_sl_mode));
	LASSERT(list_empty(&lock->l_sl_policy));

	/*
	 * lock->link == prev->link means lock is the first lock of the group.
	 * Don't re-add it to itself to suppress kernel warnings.
	 */
	if (&lock->l_res_link != prev->res_link)
		list_add(&lock->l_res_link, prev->res_link);
	if (&lock->l_sl_mode != prev->mode_link)
		list_add(&lock->l_sl_mode, prev->mode_link);
	if (&lock->l_sl_policy != prev->policy_link)
		list_add(&lock->l_sl_policy, prev->policy_link);

	EXIT;
}
/**
 * Add a lock to granted list on a resource maintaining skiplist
 * correctness.
 */
void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
	struct sl_insert_point prev;

	LASSERT(ldlm_is_granted(lock));

	search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
	ldlm_granted_list_add_lock(lock, &prev);
}
/**
 * Perform lock granting bookkeeping.
 *
 * Includes putting the lock into granted list and updating lock mode.
 *
 * NOTE: called by
 * - ldlm_lock_enqueue
 * - ldlm_reprocess_queue
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
	struct ldlm_resource *res = lock->l_resource;

	ENTRY;

	check_res_locked(res);

	lock->l_granted_mode = lock->l_req_mode;

	if (work_list && lock->l_completion_ast != NULL)
		ldlm_add_ast_work_item(lock, NULL, work_list);

	if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
		ldlm_grant_lock_with_skiplist(lock);
	else if (res->lr_type == LDLM_EXTENT)
		ldlm_extent_add_lock(res, lock);
	else if (res->lr_type == LDLM_FLOCK) {
		/* We should not add locks to granted list in the following
		 * cases:
		 * - this is an UNLOCK but not a real lock;
		 * - this is a TEST lock;
		 * - this is a F_CANCELLK lock (async flock has req_mode == 0)
		 * - this is a deadlock (flock cannot be granted)
		 */
		if (lock->l_req_mode == 0 ||
		    lock->l_req_mode == LCK_NL ||
		    ldlm_is_test_lock(lock) ||
		    ldlm_is_flock_deadlock(lock))
			RETURN_EXIT;
		ldlm_flock_add_lock(res, &res->lr_granted, lock);
	} else {
		LBUG();
	}

	ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
	EXIT;
}
/**
 * Check if the given @lock meets the criteria for a match.
 * A reference on the lock is taken if matched.
 *
 * @lock	test-against this lock
 * @data	parameters
 *
 * RETURN	returns true if @lock matches @data, false otherwise
 */
static bool lock_matches(struct ldlm_lock *lock, void *vdata)
{
	struct ldlm_match_data *data = vdata;
	union ldlm_policy_data *lpol = &lock->l_policy_data;
	enum ldlm_mode match = LCK_MINMODE;

	if (lock == data->lmd_old)
		return true;

	/* Check if this lock can be matched.
	 * Used by LU-2919(exclusive open) for open lease lock
	 */
	if (ldlm_is_excl(lock))
		return false;

	/* llite sometimes wants to match locks that will be
	 * canceled when their users drop, but we allow it to match
	 * if it passes in CBPENDING and the lock still has users.
	 * this is generally only going to be used by children
	 * whose parents already hold a lock so forward progress
	 * can still happen.
	 */
	if (ldlm_is_cbpending(lock) &&
	    !(data->lmd_flags & LDLM_FL_CBPENDING) &&
	    !(data->lmd_match & LDLM_MATCH_GROUP))
		return false;

	if (!(data->lmd_match & (LDLM_MATCH_UNREF | LDLM_MATCH_GROUP)) &&
	    ldlm_is_cbpending(lock) &&
	    lock->l_readers == 0 && lock->l_writers == 0)
		return false;

	if (!(lock->l_req_mode & *data->lmd_mode))
		return false;

	/* When we search for ast_data, we are not doing a traditional match,
	 * so we don't worry about IBITS or extent matching.
	 */
	if (data->lmd_match & (LDLM_MATCH_AST | LDLM_MATCH_AST_ANY)) {
		if (!lock->l_ast_data)
			return false;

		if (data->lmd_match & LDLM_MATCH_AST_ANY)
			goto matched;
	}

	match = lock->l_req_mode;

	switch (lock->l_resource->lr_type) {
	case LDLM_EXTENT:
		if (!(data->lmd_match & LDLM_MATCH_RIGHT) &&
		    (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
		     lpol->l_extent.end < data->lmd_policy->l_extent.end))
			return false;

		if (unlikely(match == LCK_GROUP) &&
		    data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
		    lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
			return false;
		break;
	case LDLM_IBITS:
		/* We match with existing lock with same or wider set of bits */
		if ((lpol->l_inodebits.bits &
		     data->lmd_policy->l_inodebits.bits) !=
		    data->lmd_policy->l_inodebits.bits)
			return false;

		if (unlikely(match == LCK_GROUP) &&
		    data->lmd_policy->l_inodebits.li_gid != LDLM_GID_ANY &&
		    lpol->l_inodebits.li_gid !=
		    data->lmd_policy->l_inodebits.li_gid)
			return false;
		break;
	default:
		break;
	}

	/* We match if we have existing lock with same or wider set of bits. */
	if (!(data->lmd_match & LDLM_MATCH_UNREF) && LDLM_HAVE_MASK(lock, GONE))
		return false;

	if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock)))
		return false;

	/* Filter locks by skipping flags */
	if (data->lmd_skip_flags & lock->l_flags)
		return false;

matched:
	/*
	 * In case the lock is a CBPENDING grouplock, just pin it and return,
	 * we need to wait until it gets to DESTROYED.
	 */
	if ((data->lmd_flags & LDLM_FL_TEST_LOCK) ||
	    (ldlm_is_cbpending(lock) && (data->lmd_match & LDLM_MATCH_GROUP))) {
		ldlm_lock_get(lock);
		ldlm_lock_touch_in_lru(lock);
	} else {
		ldlm_lock_addref_internal_nolock(lock, match);
	}

	*data->lmd_mode = match;
	data->lmd_lock = lock;

	return true;
}
/**
 * Search for a lock with given parameters in interval trees.
 *
 * \param res	search for a lock in this resource
 * \param data	parameters
 *
 * \retval	a referenced lock or NULL
 */
struct ldlm_lock *search_itree(struct ldlm_resource *res,
			       struct ldlm_match_data *data)
{
	int idx;
	__u64 end = data->lmd_policy->l_extent.end;

	data->lmd_lock = NULL;

	if (data->lmd_match & LDLM_MATCH_RIGHT)
		end = OBD_OBJECT_EOF;

	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		struct ldlm_interval_tree *tree = &res->lr_itree[idx];

		if (INTERVAL_TREE_EMPTY(&tree->lit_root))
			continue;

		if (!(tree->lit_mode & *data->lmd_mode))
			continue;

		ldlm_extent_search(&tree->lit_root,
				   data->lmd_policy->l_extent.start,
				   end,
				   lock_matches, data);
		if (data->lmd_lock)
			return data->lmd_lock;
	}

	return NULL;
}
EXPORT_SYMBOL(search_itree);
/**
 * Search for a lock with given properties in a queue.
 *
 * \param queue	search for a lock in this queue
 * \param data	parameters
 *
 * \retval	a referenced lock or NULL
 */
static struct ldlm_lock *search_queue(struct list_head *queue,
				      struct ldlm_match_data *data)
{
	struct ldlm_lock *lock;

	data->lmd_lock = NULL;

	list_for_each_entry(lock, queue, l_res_link)
		if (lock_matches(lock, data))
			return data->lmd_lock;

	return NULL;
}
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
{
	if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
		lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
		wake_up(&lock->l_waitq);
	}
}
EXPORT_SYMBOL(ldlm_lock_fail_match_locked);

void ldlm_lock_fail_match(struct ldlm_lock *lock)
{
	lock_res_and_lock(lock);
	ldlm_lock_fail_match_locked(lock);
	unlock_res_and_lock(lock);
}

/**
 * Mark lock as "matchable" by OST.
 *
 * Used to prevent certain races in LOV/OSC where the lock is granted, but LVB
 * is not yet valid.
 * Assumes LDLM lock is already locked.
 */
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
	ldlm_set_lvb_ready(lock);
	wake_up(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);

/**
 * Mark lock as "matchable" by OST.
 * Locks the lock and then \see ldlm_lock_allow_match_locked.
 */
void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
	lock_res_and_lock(lock);
	ldlm_lock_allow_match_locked(lock);
	unlock_res_and_lock(lock);
}
EXPORT_SYMBOL(ldlm_lock_allow_match);
/**
 * Attempt to find a lock with specified properties.
 *
 * Typically returns a reference to matched lock unless LDLM_FL_TEST_LOCK is
 * set in \a flags.
 *
 * Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (ie, connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * \retval 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with a addref()ed lock
 *
 * We also check security context, and if that fails we simply return 0 (to
 * keep caller code unchanged); the context failure will be discovered by
 * the caller sometime later.
 */
enum ldlm_mode ldlm_lock_match_with_skip(struct ldlm_namespace *ns,
					 __u64 flags, __u64 skip_flags,
					 const struct ldlm_res_id *res_id,
					 enum ldlm_type type,
					 union ldlm_policy_data *policy,
					 enum ldlm_mode mode,
					 struct lustre_handle *lockh,
					 enum ldlm_match_flags match_flags)
{
	struct ldlm_match_data data = {
		.lmd_old = NULL,
		.lmd_lock = NULL,
		.lmd_mode = &mode,
		.lmd_policy = policy,
		.lmd_flags = flags,
		.lmd_skip_flags = skip_flags,
		.lmd_match = match_flags,
	};
	struct ldlm_resource *res;
	struct ldlm_lock *lock;
	struct ldlm_lock *group_lock;
	int matched;

	ENTRY;

	if (ns == NULL) {
		data.lmd_old = ldlm_handle2lock(lockh);
		LASSERT(data.lmd_old != NULL);

		ns = ldlm_lock_to_ns(data.lmd_old);
		res_id = &data.lmd_old->l_resource->lr_name;
		type = data.lmd_old->l_resource->lr_type;
		*data.lmd_mode = data.lmd_old->l_req_mode;
	}

again:
	res = ldlm_resource_get(ns, res_id, type, 0);
	if (IS_ERR(res)) {
		LASSERT(data.lmd_old == NULL);
		RETURN(0);
	}

	lock_res(res);
	if (res->lr_type == LDLM_EXTENT)
		lock = search_itree(res, &data);
	else
		lock = search_queue(&res->lr_granted, &data);
	if (!lock && !(flags & LDLM_FL_BLOCK_GRANTED))
		lock = search_queue(&res->lr_waiting, &data);
	matched = lock ? mode : 0;

	if (lock && ldlm_is_cbpending(lock) &&
	    (data.lmd_match & LDLM_MATCH_GROUP))
		group_lock = lock;
	else
		group_lock = NULL;
	unlock_res(res);

	if (group_lock) {
		l_wait_event_abortable(group_lock->l_waitq,
				       ldlm_is_destroyed(lock));
		ldlm_lock_put(lock);
		ldlm_resource_putref(res);
		goto again;
	}

	ldlm_resource_putref(res);

	if (lock) {
		ldlm_lock2handle(lock, lockh);
		if ((flags & LDLM_FL_LVB_READY) &&
		    (!ldlm_is_lvb_ready(lock))) {
			__u64 wait_flags = LDLM_FL_LVB_READY |
				LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;

			if (lock->l_completion_ast) {
				int err = lock->l_completion_ast(lock,
							LDLM_FL_WAIT_NOREPROC,
							NULL);
				if (err)
					GOTO(out_fail_match, matched = 0);
			}

			wait_event_idle_timeout(
				lock->l_waitq,
				lock->l_flags & wait_flags,
				cfs_time_seconds(obd_timeout));

			if (!ldlm_is_lvb_ready(lock))
				GOTO(out_fail_match, matched = 0);
		}

		/* check user's security context */
		if (lock->l_conn_export &&
		    sptlrpc_import_check_ctx(
				class_exp2cliimp(lock->l_conn_export)))
			GOTO(out_fail_match, matched = 0);

		LDLM_DEBUG(lock, "matched (%llu %llu)",
			   (type == LDLM_PLAIN || type == LDLM_IBITS) ?
			   res_id->name[2] : policy->l_extent.start,
			   (type == LDLM_PLAIN || type == LDLM_IBITS) ?
			   res_id->name[3] : policy->l_extent.end);

out_fail_match:
		if (flags & LDLM_FL_TEST_LOCK)
			ldlm_lock_put(lock);
		else if (!matched)
			ldlm_lock_decref_internal(lock, mode);
	}

	/* less verbose for test-only */
	if (!matched && !(flags & LDLM_FL_TEST_LOCK)) {
		LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
				  "%llu/%llu (%llu %llu)", ns,
				  type, mode, res_id->name[0], res_id->name[1],
				  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
				  res_id->name[2] : policy->l_extent.start,
				  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
				  res_id->name[3] : policy->l_extent.end);
	}
	if (data.lmd_old != NULL)
		ldlm_lock_put(data.lmd_old);

	RETURN(matched);
}
EXPORT_SYMBOL(ldlm_lock_match_with_skip);
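
/* Illustrative sketch (not in the original source): a client-side read
 * path might probe the cache for a covering extent lock before issuing a
 * new enqueue; the values below are made up for illustration:
 *
 *	union ldlm_policy_data policy = {
 *		.l_extent = { .start = 0, .end = OBD_OBJECT_EOF },
 *	};
 *	struct lustre_handle lockh;
 *	enum ldlm_mode mode;
 *
 *	mode = ldlm_lock_match_with_skip(ns, LDLM_FL_LVB_READY, 0,
 *					 &res_id, LDLM_EXTENT, &policy,
 *					 LCK_PR | LCK_PW, &lockh, 0);
 *	if (mode != 0)
 *		... reuse the cached lock, drop it with ldlm_lock_decref() ...
 */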
enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
					   __u64 *bits)
{
	struct ldlm_lock *lock;
	enum ldlm_mode mode = 0;

	ENTRY;

	lock = ldlm_handle2lock(lockh);
	if (lock != NULL) {
		lock_res_and_lock(lock);
		if (LDLM_HAVE_MASK(lock, GONE))
			GOTO(out, mode);

		if (ldlm_is_cbpending(lock) &&
		    lock->l_readers == 0 && lock->l_writers == 0)
			GOTO(out, mode);

		if (bits)
			*bits = lock->l_policy_data.l_inodebits.bits;
		mode = lock->l_granted_mode;
		ldlm_lock_addref_internal_nolock(lock, mode);
	}

	EXIT;

out:
	if (lock != NULL) {
		unlock_res_and_lock(lock);
		ldlm_lock_put(lock);
	}
	return mode;
}
EXPORT_SYMBOL(ldlm_revalidate_lock_handle);
/** The caller must guarantee that the buffer is large enough. */
int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
		  enum req_location loc, void *data, int size)
{
	void *lvb;

	ENTRY;

	LASSERT(data != NULL);
	LASSERT(size >= 0);

	switch (lock->l_lvb_type) {
	case LVB_T_OST:
		if (size == sizeof(struct ost_lvb)) {
			if (loc == RCL_CLIENT)
				lvb = req_capsule_client_swab_get(pill,
								  &RMF_DLM_LVB,
								  lustre_swab_ost_lvb);
			else
				lvb = req_capsule_server_swab_get(pill,
								  &RMF_DLM_LVB,
								  lustre_swab_ost_lvb);
			if (unlikely(lvb == NULL)) {
				LDLM_ERROR(lock, "no LVB");
				RETURN(-EPROTO);
			}

			memcpy(data, lvb, size);
		} else if (size == sizeof(struct ost_lvb_v1)) {
			struct ost_lvb *olvb = data;

			if (loc == RCL_CLIENT)
				lvb = req_capsule_client_swab_get(pill,
								  &RMF_DLM_LVB,
								  lustre_swab_ost_lvb_v1);
			else
				lvb = req_capsule_server_sized_swab_get(pill,
						&RMF_DLM_LVB, size,
						lustre_swab_ost_lvb_v1);
			if (unlikely(lvb == NULL)) {
				LDLM_ERROR(lock, "no LVB");
				RETURN(-EPROTO);
			}

			memcpy(data, lvb, size);
			olvb->lvb_mtime_ns = 0;
			olvb->lvb_atime_ns = 0;
			olvb->lvb_ctime_ns = 0;
		} else {
			LDLM_ERROR(lock, "Replied unexpected ost LVB size %d",
				   size);
			RETURN(-EINVAL);
		}
		break;
	case LVB_T_LQUOTA:
		if (size == sizeof(struct lquota_lvb)) {
			if (loc == RCL_CLIENT)
				lvb = req_capsule_client_swab_get(pill,
								  &RMF_DLM_LVB,
								  lustre_swab_lquota_lvb);
			else
				lvb = req_capsule_server_swab_get(pill,
								  &RMF_DLM_LVB,
								  lustre_swab_lquota_lvb);
			if (unlikely(lvb == NULL)) {
				LDLM_ERROR(lock, "no LVB");
				RETURN(-EPROTO);
			}

			memcpy(data, lvb, size);
		} else {
			LDLM_ERROR(lock,
				   "Replied unexpected lquota LVB size %d",
				   size);
			RETURN(-EINVAL);
		}
		break;
	case LVB_T_LAYOUT:
		if (size == 0)
			break;

		if (loc == RCL_CLIENT)
			lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
		else
			lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
		if (unlikely(lvb == NULL)) {
			LDLM_ERROR(lock, "no LVB");
			RETURN(-EPROTO);
		}

		memcpy(data, lvb, size);
		break;
	default:
		LDLM_ERROR(lock, "Unknown LVB type: %d", lock->l_lvb_type);
		RETURN(-EINVAL);
	}

	RETURN(0);
}
/* Create and fill in new LDLM lock with specified properties.
 * Returns: a referenced lock
 */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
				   const struct ldlm_res_id *res_id,
				   enum ldlm_type type,
				   enum ldlm_mode mode,
				   const struct ldlm_callback_suite *cbs,
				   void *data, __u32 lvb_len,
				   enum lvb_type lvb_type)
{
	struct ldlm_lock *lock;
	struct ldlm_resource *res;
	int rc = 0;

	ENTRY;

	res = ldlm_resource_get(ns, res_id, type, 1);
	if (IS_ERR(res))
		RETURN(ERR_CAST(res));

	lock = ldlm_lock_new(res);
	if (!lock) {
		ldlm_resource_putref(res);
		RETURN(ERR_PTR(-ENOMEM));
	}

	lock->l_req_mode = mode;
	lock->l_ast_data = data;
	lock->l_pid = current->pid;
	if (ns_is_server(ns))
		ldlm_set_ns_srv(lock);
	if (cbs) {
		lock->l_blocking_ast = cbs->lcs_blocking;
		lock->l_completion_ast = cbs->lcs_completion;
		lock->l_glimpse_ast = cbs->lcs_glimpse;
	}

	if (type == LDLM_IBITS) {
		rc = ldlm_inodebits_alloc_lock(lock);
		if (rc)
			GOTO(out, rc);
	}

	if (lvb_len) {
		lock->l_lvb_len = lvb_len;
		OBD_ALLOC_LARGE(lock->l_lvb_data, lvb_len);
		if (lock->l_lvb_data == NULL)
			GOTO(out, rc = -ENOMEM);
	}

	lock->l_lvb_type = lvb_type;
	if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
		GOTO(out, rc = -ENOENT);

	RETURN(lock);

out:
	ldlm_lock_destroy(lock);
	ldlm_lock_put(lock);
	RETURN(ERR_PTR(rc));
}
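
/* Illustrative sketch (not in the original source): a local caller would
 * typically pair ldlm_lock_create() with ldlm_lock_enqueue(); the res_id,
 * callback suite 'cbs' and 'env' below are placeholders:
 *
 *	struct ldlm_lock *lock;
 *	__u64 flags = 0;
 *
 *	lock = ldlm_lock_create(ns, &res_id, LDLM_PLAIN, LCK_EX, &cbs,
 *				NULL, 0, LVB_T_NONE);
 *	if (IS_ERR(lock))
 *		return PTR_ERR(lock);
 *	err = ldlm_lock_enqueue(env, ns, &lock, NULL, &flags);
 *	... on success the lock sits on the granted or waiting list,
 *	    as reflected in 'flags' ...
 */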
#ifdef HAVE_SERVER_SUPPORT
static enum ldlm_error ldlm_lock_enqueue_helper(struct ldlm_lock *lock,
						__u64 *flags)
{
	struct ldlm_resource *res = lock->l_resource;
	enum ldlm_error rc = ELDLM_OK;
	LIST_HEAD(rpc_list);
	ldlm_processing_policy policy;

	ENTRY;

	policy = ldlm_get_processing_policy(res);
	policy(lock, flags, LDLM_PROCESS_ENQUEUE, &rc, &rpc_list);
	if (rc == ELDLM_OK && lock->l_granted_mode != lock->l_req_mode &&
	    res->lr_type != LDLM_FLOCK)
		rc = ldlm_handle_conflict_lock(lock, flags, &rpc_list);

	if (!list_empty(&rpc_list))
		ldlm_discard_bl_list(&rpc_list);

	RETURN(rc);
}
#endif /* HAVE_SERVER_SUPPORT */
/**
 * Enqueue (request) a lock.
 *
 * Does not block. As a result of enqueue the lock would be put
 * into granted or waiting list.
 *
 * If namespace has intent policy sent and the lock has LDLM_FL_HAS_INTENT flag
 * set, skip all the enqueueing and delegate lock processing to intent policy
 * function.
 */
enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
				  struct ldlm_namespace *ns,
				  struct ldlm_lock **lockp,
				  void *cookie, __u64 *flags)
{
	struct ldlm_lock *lock = *lockp;
	struct ldlm_resource *res;
	int local = ns_is_client(ns);
	enum ldlm_error rc = ELDLM_OK;
#ifdef HAVE_SERVER_SUPPORT
	bool reconstruct = false;
#endif

	ENTRY;

	/* policies are not executed on the client or during replay */
	if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
	    && !local && ns->ns_policy) {
		rc = ns->ns_policy(env, ns, lockp, cookie, lock->l_req_mode,
				   *flags, NULL);
		if (rc == ELDLM_LOCK_REPLACED) {
			/* The lock that was returned has already been granted,
			 * and placed into lockp. If it's not the same as the
			 * one we passed in, then destroy the old one and our
			 * work here is done.
			 */
			if (lock != *lockp) {
				ldlm_lock_destroy(lock);
				ldlm_lock_put(lock);
			}
			*flags |= LDLM_FL_LOCK_CHANGED;
			RETURN(0);
		} else if (rc != ELDLM_OK &&
			   ldlm_is_granted(lock)) {
			LASSERT(*flags & LDLM_FL_RESENT);
			/* It may happen that ns_policy returns an error in
			 * resend case, object may be unlinked or just some
			 * error occurs. It is unclear if lock reached the
			 * client in the original reply, just leave the lock on
			 * server, not returning it again to client. Due to
			 * LU-6529, the server will not OOM.
			 */
			RETURN(rc);
		} else if (rc != ELDLM_OK ||
			   (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
			ldlm_lock_destroy(lock);
			RETURN(rc);
		}
	}

	if (*flags & LDLM_FL_RESENT) {
		/* Reconstruct LDLM_FL_SRV_ENQ_MASK @flags for reply.
		 * Set LOCK_CHANGED always.
		 * Check if the lock is granted for BLOCK_GRANTED.
		 * Take NO_TIMEOUT from the lock as it is inherited through
		 * LDLM_FL_INHERIT_MASK
		 */
		*flags |= LDLM_FL_LOCK_CHANGED;
		if (!ldlm_is_granted(lock))
			*flags |= LDLM_FL_BLOCK_GRANTED;
		*flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
		RETURN(ELDLM_OK);
	}

#ifdef HAVE_SERVER_SUPPORT
	reconstruct = !local && lock->l_resource->lr_type == LDLM_FLOCK &&
		      !(*flags & LDLM_FL_TEST_LOCK);
	if (reconstruct) {
		rc = req_can_reconstruct(cookie, NULL);
		if (rc != 0) {
			if (rc == 1)
				rc = 0;
			RETURN(rc);
		}
	}

	if (!local && lock->l_resource->lr_type == LDLM_FLOCK) {
		struct ldlm_flock_node *fn = &lock->l_resource->lr_flock_node;

		if (lock->l_req_mode == LCK_NL) {
			atomic_inc(&fn->lfn_unlock_pending);
			res = lock_res_and_lock(lock);
			atomic_dec(&fn->lfn_unlock_pending);
		} else {
			res = lock_res_and_lock(lock);

			while (atomic_read(&fn->lfn_unlock_pending)) {
				unlock_res_and_lock(lock);
				lock_res_and_lock(lock);
			}
		}
	} else
#endif
	{
		res = lock_res_and_lock(lock);
	}

	if (local && ldlm_is_granted(lock)) {
		/* The server returned a blocked lock, but it was granted
		 * before we got a chance to actually enqueue it. We don't
		 * need to do anything else.
		 */
		*flags &= ~LDLM_FL_BLOCKED_MASK;
		GOTO(out, rc = ELDLM_OK);
	}

	ldlm_resource_unlink_lock(lock);

	/* Some flags from the enqueue want to make it into the AST, via the
	 * lock's l_flags.
	 */
	if (*flags & LDLM_FL_AST_DISCARD_DATA)
		ldlm_set_ast_discard_data(lock);
	if (*flags & LDLM_FL_TEST_LOCK)
		ldlm_set_test_lock(lock);

	/* This distinction between local lock trees is very important; a client
	 * namespace only has information about locks taken by that client, and
	 * thus doesn't have enough information to decide for itself if it can
	 * be granted (below). In this case, we do exactly what the server
	 * tells us to do, as dictated by the 'flags'.
	 *
	 * We do exactly the same thing during recovery, when the server is
	 * more or less trusting the clients not to lie.
	 *
	 * FIXME (bug 268): Detect obvious lies by checking compatibility in
	 * granted queue.
	 */
	if (local) {
		if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
			ldlm_resource_add_lock(res, &res->lr_waiting, lock);
		else
			ldlm_grant_lock(lock, NULL);
		GOTO(out, rc = ELDLM_OK);
#ifdef HAVE_SERVER_SUPPORT
	} else if (*flags & LDLM_FL_REPLAY) {
		if (*flags & LDLM_FL_BLOCK_WAIT) {
			ldlm_resource_add_lock(res, &res->lr_waiting, lock);
			GOTO(out, rc = ELDLM_OK);
		} else if (*flags & LDLM_FL_BLOCK_GRANTED) {
			ldlm_grant_lock(lock, NULL);
			GOTO(out, rc = ELDLM_OK);
		}
		/* If no flags, fall through to normal enqueue path. */
	}

	rc = ldlm_lock_enqueue_helper(lock, flags);
	GOTO(out, rc);
#else
	} else {
		CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
		LBUG();
	}
#endif

out:
	unlock_res_and_lock(lock);

#ifdef HAVE_SERVER_SUPPORT
	if (reconstruct) {
		struct ptlrpc_request *req = cookie;

		tgt_mk_reply_data(NULL, NULL,
				  &req->rq_export->exp_target_data,
				  req, 0, NULL, false, 0);
	}
#endif
	return rc;
}
#ifdef HAVE_SERVER_SUPPORT
/**
 * Iterate through all waiting locks on a given resource queue and attempt to
 * grant them.
 *
 * Must be called with resource lock held.
 */
int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
			 struct list_head *work_list,
			 enum ldlm_process_intention intention, __u64 hint)
{
	struct list_head *tmp, *pos;
	ldlm_processing_policy policy;
	__u64 flags;
	int rc = LDLM_ITER_CONTINUE;
	enum ldlm_error err;
	LIST_HEAD(bl_ast_list);

	ENTRY;

	check_res_locked(res);

	policy = ldlm_get_processing_policy(res);
	LASSERT(policy);
	LASSERT(intention == LDLM_PROCESS_RESCAN ||
		intention == LDLM_PROCESS_RECOVERY);

restart:
	list_for_each_safe(tmp, pos, queue) {
		struct ldlm_lock *pending;
		LIST_HEAD(rpc_list);

		pending = list_entry(tmp, struct ldlm_lock, l_res_link);

		CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

		flags = 0;
		rc = policy(pending, &flags, intention, &err, &rpc_list);
		if (pending->l_granted_mode == pending->l_req_mode ||
		    res->lr_type == LDLM_FLOCK) {
			list_splice(&rpc_list, work_list);
		} else {
			list_splice(&rpc_list, &bl_ast_list);
		}
		/*
		 * When this is called from recovery done, we always want
		 * to scan the whole list no matter what 'rc' is returned.
		 */
		if (rc != LDLM_ITER_CONTINUE &&
		    intention == LDLM_PROCESS_RESCAN)
			break;
	}

	if (!list_empty(&bl_ast_list)) {
		unlock_res(res);

		rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &bl_ast_list,
				       LDLM_WORK_BL_AST);

		lock_res(res);
		if (rc == -ERESTART)
			GOTO(restart, rc);
	}

	if (!list_empty(&bl_ast_list))
		ldlm_discard_bl_list(&bl_ast_list);

	RETURN(intention == LDLM_PROCESS_RESCAN ? rc : LDLM_ITER_CONTINUE);
}
/**
 * Conflicting locks are detected for a lock to be enqueued, add the lock
 * into waiting list and send blocking ASTs to the conflicting locks.
 *
 * \param[in] lock	The lock to be enqueued.
 * \param[out] flags	Lock flags for the lock to be enqueued.
 * \param[in] rpc_list	Conflicting locks list.
 *
 * \retval -ERESTART:	Some lock was instantly canceled while sending
 *			blocking ASTs, caller needs to re-check conflicting
 *			locks.
 * \retval -EAGAIN:	Lock was destroyed, caller should return error.
 * \retval 0:		Lock is successfully added in waiting list.
 */
int ldlm_handle_conflict_lock(struct ldlm_lock *lock, __u64 *flags,
			      struct list_head *rpc_list)
{
	struct ldlm_resource *res = lock->l_resource;
	int rc;

	ENTRY;

	check_res_locked(res);

	/* If either of the compat_queue()s returned failure, then we
	 * have ASTs to send and must go onto the waiting list.
	 *
	 * bug 2322: we used to unlink and re-add here, which was a
	 * terrible folly -- if we goto restart, we could get
	 * re-ordered! Causes deadlock, because ASTs aren't sent!
	 */
	if (list_empty(&lock->l_res_link))
		ldlm_resource_add_lock(res, &res->lr_waiting, lock);
	unlock_res(res);

	rc = ldlm_run_ast_work(ldlm_res_to_ns(res), rpc_list,
			       LDLM_WORK_BL_AST);

	if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
	    !ns_is_client(ldlm_res_to_ns(res)))
		class_fail_export(lock->l_export);

	if (rc == -ERESTART)
		ldlm_reprocess_all(res, 0);

	lock_res(res);
	if (rc == -ERESTART) {
		/* 15715: The lock was granted and destroyed after
		 * resource lock was dropped. Interval node was freed
		 * in ldlm_lock_destroy. Anyway, this always happens
		 * when a client is being evicted. So it would be
		 * ok to return an error. -jay
		 */
		if (ldlm_is_destroyed(lock))
			RETURN(-EAGAIN);

		/* lock was granted while resource was unlocked. */
		if (ldlm_is_granted(lock)) {
			/* bug 11300: if the lock has been granted,
			 * break earlier because otherwise, we will go
			 * to restart and ldlm_resource_unlink will be
			 * called and it causes the interval node to be
			 * freed. Then we will fail at
			 * ldlm_extent_add_lock()
			 */
			*flags &= ~LDLM_FL_BLOCKED_MASK;
			RETURN(0);
		}

		RETURN(rc);
	}
	*flags |= LDLM_FL_BLOCK_GRANTED;

	RETURN(0);
}
/**
 * Discard all AST work items from list.
 *
 * If for whatever reason we do not want to send ASTs to conflicting locks
 * anymore, disassemble the list with this function.
 */
void ldlm_discard_bl_list(struct list_head *bl_list)
{
	struct ldlm_lock *lock, *tmp;

	ENTRY;

	list_for_each_entry_safe(lock, tmp, bl_list, l_bl_ast) {
		LASSERT(!list_empty(&lock->l_bl_ast));
		list_del_init(&lock->l_bl_ast);
		ldlm_clear_ast_sent(lock);
		LASSERT(lock->l_bl_ast_run == 0);
		ldlm_clear_blocking_lock(lock);
		ldlm_lock_put(lock);
	}
	EXIT;
}
/* Process a call to blocking AST callback for a lock in ast_work list */
static int
ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
	struct ldlm_cb_set_arg *arg = opaq;
	struct ldlm_lock *lock;
	struct ldlm_lock_desc d;
	struct ldlm_bl_desc bld;
	int rc;

	ENTRY;

	if (list_empty(arg->list))
		RETURN(-ENOENT);

	lock = list_first_entry(arg->list, struct ldlm_lock, l_bl_ast);

	/* nobody should touch l_bl_ast but some locks in the list may become
	 * granted after lock convert or COS downgrade, these locks should be
	 * just skipped here and removed from the list.
	 */
	lock_res_and_lock(lock);
	list_del_init(&lock->l_bl_ast);

	/* lock is not a blocking lock anymore, but was kept in the list
	 * because it can be managed only here.
	 */
	if (!ldlm_is_ast_sent(lock)) {
		unlock_res_and_lock(lock);
		ldlm_lock_put(lock);
		RETURN(0);
	}

	LASSERT(lock->l_blocking_lock);
	ldlm_lock2desc(lock->l_blocking_lock, &d);
	/* copy blocking lock ibits in cancel_bits as well,
	 * new client may use them for lock convert and it is
	 * important to use new field to convert locks from
	 * new servers only
	 */
	d.l_policy_data.l_inodebits.cancel_bits =
		lock->l_blocking_lock->l_policy_data.l_inodebits.bits;

	/* Blocking lock is being destroyed here but some information about it
	 * may be needed inside l_blocking_ast() function below,
	 * e.g. in mdt_blocking_ast(). So save needed data in bl_desc.
	 */
	bld.bl_same_client = lock->l_client_cookie ==
			     lock->l_blocking_lock->l_client_cookie;
	/* if two locks are initiated from the same MDT, transactions are
	 * independent, or the request lock mode isn't EX|PW, no need to
	 * trigger CoS because current lock will be downgraded to TXN mode
	 * soon, then the blocking lock can be granted.
	 */
	if (lock->l_blocking_lock->l_policy_data.l_inodebits.li_initiator_id ==
	    lock->l_policy_data.l_inodebits.li_initiator_id ||
	    !(lock->l_blocking_lock->l_req_mode & (LCK_EX | LCK_PW)))
		bld.bl_txn_dependent = false;
	else
		bld.bl_txn_dependent = true;
	arg->bl_desc = &bld;

	LASSERT(ldlm_is_ast_sent(lock));
	LASSERT(lock->l_bl_ast_run == 0);
	lock->l_bl_ast_run++;
	ldlm_clear_blocking_lock(lock);
	unlock_res_and_lock(lock);

	rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING);

	ldlm_lock_put(lock);

	RETURN(rc);
}
/* Process a call to revocation AST callback for a lock in ast_work list */
static int
ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
	struct ldlm_cb_set_arg *arg = opaq;
	struct ldlm_lock_desc desc;
	int rc;
	struct ldlm_lock *lock;

	ENTRY;

	if (list_empty(arg->list))
		RETURN(-ENOENT);

	lock = list_first_entry(arg->list, struct ldlm_lock, l_rk_ast);
	list_del_init(&lock->l_rk_ast);

	/* the desc just pretends to be exclusive */
	ldlm_lock2desc(lock, &desc);
	desc.l_req_mode = LCK_EX;
	desc.l_granted_mode = 0;

	rc = lock->l_blocking_ast(lock, &desc, (void *)arg, LDLM_CB_BLOCKING);
	ldlm_lock_put(lock);

	RETURN(rc);
}
2198 int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
2200 struct ldlm_cb_set_arg *arg = opaq;
2201 struct ldlm_glimpse_work *gl_work;
2202 struct ldlm_lock *lock;
2207 if (list_empty(arg->list))
2210 gl_work = list_first_entry(arg->list, struct ldlm_glimpse_work,
2212 list_del_init(&gl_work->gl_list);
2214 lock = gl_work->gl_lock;
2216 /* transfer the glimpse descriptor to ldlm_cb_set_arg */
2217 arg->gl_desc = gl_work->gl_desc;
2218 arg->gl_interpret_reply = gl_work->gl_interpret_reply;
2219 arg->gl_interpret_data = gl_work->gl_interpret_data;
2221 /* invoke the actual glimpse callback */
2222 rc = lock->l_glimpse_ast(lock, (void *)arg);
2224 rc = 1; /* update LVB if this is server lock */
2225 else if (rc == -ELDLM_NO_LOCK_DATA)
2226 ldlm_lvbo_update(lock->l_resource, lock, NULL, 1);
2228 ldlm_lock_put(lock);
2229 if (gl_work->gl_flags & LDLM_GL_WORK_SLAB_ALLOCATED)
2230 OBD_SLAB_FREE_PTR(gl_work, ldlm_glimpse_work_kmem);
2232 OBD_FREE_PTR(gl_work);
/* Process a call to completion AST callback for a lock in ast_work list */
static int
ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
{
	struct ldlm_cb_set_arg *arg = opaq;
	struct ldlm_lock *lock;
	ldlm_completion_callback completion_callback;
	int rc = 0;

	ENTRY;

	if (list_empty(arg->list))
		RETURN(-ENOENT);

	lock = list_first_entry(arg->list, struct ldlm_lock, l_cp_ast);

	/* It's possible to receive a completion AST before we've set
	 * the l_completion_ast pointer: either because the AST arrived
	 * before the reply, or simply because there's a small race
	 * window between receiving the reply and finishing the local
	 * enqueue. (bug 842)
	 *
	 * This can't happen with the blocking_ast, however, because we
	 * will never call the local blocking_ast until we drop our
	 * reader/writer reference, which we won't do until we get the
	 * reply and finish enqueueing.
	 */

	/* nobody should touch l_cp_ast */
	lock_res_and_lock(lock);
	list_del_init(&lock->l_cp_ast);
	LASSERT(ldlm_is_cp_reqd(lock));
	/* save l_completion_ast since it can be changed by
	 * mds_intent_policy(), see bug 14225
	 */
	completion_callback = lock->l_completion_ast;
	ldlm_clear_cp_reqd(lock);
	unlock_res_and_lock(lock);

	if (completion_callback != NULL)
		rc = completion_callback(lock, 0, (void *)arg);
	ldlm_lock_put(lock);

	RETURN(rc);
}
/**
 * Process a list of locks in need of ASTs being sent.
 *
 * Used on the server to send multiple ASTs together instead of sending them
 * one by one.
 */
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
		      ldlm_desc_ast_t ast_type)
{
	struct ldlm_cb_set_arg *arg;
	set_producer_func work_ast_lock;
	int rc;

	if (list_empty(rpc_list))
		RETURN(0);

	OBD_ALLOC_PTR(arg);
	if (arg == NULL)
		RETURN(-ENOMEM);

	atomic_set(&arg->restart, 0);
	arg->list = rpc_list;

	switch (ast_type) {
	case LDLM_WORK_CP_AST:
		arg->type = LDLM_CP_CALLBACK;
		work_ast_lock = ldlm_work_cp_ast_lock;
		break;
#ifdef HAVE_SERVER_SUPPORT
	case LDLM_WORK_BL_AST:
		arg->type = LDLM_BL_CALLBACK;
		work_ast_lock = ldlm_work_bl_ast_lock;
		break;
	case LDLM_WORK_REVOKE_AST:
		arg->type = LDLM_BL_CALLBACK;
		work_ast_lock = ldlm_work_revoke_ast_lock;
		break;
	case LDLM_WORK_GL_AST:
		arg->type = LDLM_GL_CALLBACK;
		work_ast_lock = ldlm_work_gl_ast_lock;
		break;
#endif
	default:
		LBUG();
	}

	/* We create a ptlrpc request set with flow control extension.
	 * This request set will use the work_ast_lock function to produce new
	 * requests and will send a new request each time one completes in
	 * order to keep the number of requests in flight to
	 * ns_max_parallel_ast.
	 */
	arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
				     work_ast_lock, arg);
	if (arg->set == NULL)
		GOTO(out, rc = -ENOMEM);

	ptlrpc_set_wait(NULL, arg->set);
	ptlrpc_set_destroy(arg->set);

	rc = atomic_read(&arg->restart) ? -ERESTART : 0;
out:
	OBD_FREE_PTR(arg);
	return rc;
}
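/*
 * Illustrative usage (not from the original source): __ldlm_reprocess_all()
 * below is the canonical caller -- it fills rpc_list with completion-AST
 * work and retries on -ERESTART:
 *
 *	LIST_HEAD(rpc_list);
 *
 *	rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
 *			       LDLM_WORK_CP_AST);
 *	if (rc == -ERESTART)
 *		goto restart;
 */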
/**
 * Try to grant all waiting locks on a resource.
 *
 * Calls ldlm_reprocess_queue on the resource's waiting queue.
 *
 * Typically called after some resource locks are cancelled to see
 * if anything could be granted as a result of the cancellation.
 */
static void __ldlm_reprocess_all(struct ldlm_resource *res,
				 enum ldlm_process_intention intention,
				 __u64 hint)
{
	LIST_HEAD(rpc_list);
#ifdef HAVE_SERVER_SUPPORT
	ldlm_reprocessing_policy reprocess;
	struct obd_device *obd;
	int rc;

	ENTRY;

	/* Local lock trees don't get reprocessed. */
	if (ns_is_client(ldlm_res_to_ns(res))) {
		EXIT;
		return;
	}

	/* Disable reprocessing during the lock replay stage but allow it
	 * during the request replay stage.
	 */
	obd = ldlm_res_to_ns(res)->ns_obd;
	if (obd->obd_recovering &&
	    atomic_read(&obd->obd_req_replay_clients) == 0)
		RETURN_EXIT;
restart:
	lock_res(res);
	reprocess = ldlm_get_reprocessing_policy(res);
	reprocess(res, &res->lr_waiting, &rpc_list, intention, hint);
	unlock_res(res);

	rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
			       LDLM_WORK_CP_AST);
	if (rc == -ERESTART) {
		LASSERT(list_empty(&rpc_list));
		goto restart;
	}
#else
	ENTRY;

	if (!ns_is_client(ldlm_res_to_ns(res))) {
		CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
		LBUG();
	}
#endif
	EXIT;
}

void ldlm_reprocess_all(struct ldlm_resource *res, __u64 hint)
{
	__ldlm_reprocess_all(res, LDLM_PROCESS_RESCAN, hint);
}
EXPORT_SYMBOL(ldlm_reprocess_all);
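/*
 * Illustrative pattern (not from the original source): cancellation paths
 * pair a cancel with a reprocess so waiting locks get a chance to be
 * granted, as ldlm_cancel_lock_for_export() below does:
 *
 *	ldlm_lock_cancel(lock);
 *	ldlm_reprocess_all(res, lock->l_policy_data.l_inodebits.bits);
 */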
static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			      struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);

	/* This is only called once after recovery is done. LU-8306. */
	__ldlm_reprocess_all(res, LDLM_PROCESS_RECOVERY, 0);
	return 0;
}

/* Iterate over all resources in the namespace, attempting to grant waiting
 * locks.
 */
void ldlm_reprocess_recovery_done(struct ldlm_namespace *ns)
{
	ENTRY;

	if (ns != NULL) {
		cfs_hash_for_each_nolock(ns->ns_rs_hash,
					 ldlm_reprocess_res, NULL, 0);
	}
	EXIT;
}
/* Helper to call blocking AST for LDLM lock \a lock in a "cancelling" mode. */
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
	check_res_locked(lock->l_resource);
	if (!ldlm_is_cancel(lock)) {
		ldlm_set_cancel(lock);
		if (lock->l_blocking_ast) {
			unlock_res_and_lock(lock);
			lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
					     LDLM_CB_CANCELING);
			lock_res_and_lock(lock);
		} else {
			LDLM_DEBUG(lock, "no blocking ast");
		}

		/* only the canceller can set the bl_done bit */
		ldlm_set_bl_done(lock);
		wake_up(&lock->l_waitq);
	} else if (!ldlm_is_bl_done(lock)) {
		/* The lock is guaranteed to have been canceled once this
		 * function returns.
		 */
		unlock_res_and_lock(lock);
		wait_event_idle(lock->l_waitq, is_bl_done(lock));
		lock_res_and_lock(lock);
	}
}
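/*
 * Note (added for clarity, not in the original source): the bl_done flag and
 * l_waitq implement a small handshake -- the first caller (the canceller)
 * runs the blocking AST and sets bl_done, while any concurrent caller that
 * finds the cancel flag already set simply waits on l_waitq until bl_done
 * appears.
 */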
/* Remove skiplist-enabled LDLM lock \a req from granted list */
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
{
	if (req->l_resource->lr_type != LDLM_PLAIN &&
	    req->l_resource->lr_type != LDLM_IBITS)
		return;

	list_del_init(&req->l_sl_policy);
	list_del_init(&req->l_sl_mode);
}
/* Attempts to cancel LDLM lock \a lock that has no reader/writer references. */
void ldlm_lock_cancel(struct ldlm_lock *lock)
{
	struct ldlm_resource *res;
	struct ldlm_namespace *ns;

	ENTRY;

	lock_res_and_lock(lock);

	res = lock->l_resource;
	ns = ldlm_res_to_ns(res);

	/* Please do not remove this LBUG without talking to me first. -phik */
	if (lock->l_readers || lock->l_writers) {
		LDLM_ERROR(lock, "lock still has references");
		unlock_res_and_lock(lock);
		LBUG();
	}

	if (ldlm_is_waited(lock))
		ldlm_del_waiting_lock(lock);

	/* Releases cancel callback. */
	ldlm_cancel_callback(lock);

	/* Yes, second time, just in case it was added again while we were
	 * running with no res lock in ldlm_cancel_callback.
	 */
	if (ldlm_is_waited(lock))
		ldlm_del_waiting_lock(lock);

	ldlm_resource_unlink_lock(lock);
	ldlm_lock_destroy_nolock(lock);

	if (ldlm_is_granted(lock))
		ldlm_pool_del(&ns->ns_pool, lock);

	/* Zero out l_granted_mode so this function is not called again for
	 * the same lock.
	 */
	lock->l_granted_mode = LCK_MINMODE;
	unlock_res_and_lock(lock);

	EXIT;
}
EXPORT_SYMBOL(ldlm_lock_cancel);
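/*
 * Illustrative only (not from the original source): callers typically hold
 * their own reference across the cancel, obtained from a handle:
 *
 *	struct ldlm_lock *lock = ldlm_handle2lock(&lockh);
 *
 *	if (lock) {
 *		ldlm_lock_cancel(lock);
 *		ldlm_lock_put(lock);
 *	}
 *
 * The lock must have no reader/writer references left, or the LBUG() above
 * fires.
 */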
/* Set opaque data into the lock that only makes sense to the upper layer. */
int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data)
{
	struct ldlm_lock *lock = ldlm_handle2lock(lockh);
	int rc = -EINVAL;

	ENTRY;
	if (lock) {
		if (lock->l_ast_data == NULL)
			lock->l_ast_data = data;
		if (lock->l_ast_data == data)
			rc = 0;
		ldlm_lock_put(lock);
	}
	RETURN(rc);
}
EXPORT_SYMBOL(ldlm_lock_set_data);
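/*
 * Illustrative only (not from the original source; "inode" is a hypothetical
 * payload): the call succeeds if l_ast_data was unset or already equals the
 * passed pointer, so it is idempotent per pointer and fails otherwise:
 *
 *	if (ldlm_lock_set_data(&lockh, inode) != 0)
 *		CERROR("lock already owned by another object\n");
 */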
struct export_cl_data {
	const struct lu_env	*ecl_env;
	struct obd_export	*ecl_exp;
	int			 ecl_loop;
};

static void ldlm_cancel_lock_for_export(struct obd_export *exp,
					struct ldlm_lock *lock,
					struct export_cl_data *ecl)
{
	struct ldlm_resource *res;

	res = ldlm_resource_getref(lock->l_resource);

	ldlm_lvbo_update(res, lock, NULL, 1);
	ldlm_lock_cancel(lock);
	if (!exp->exp_obd->obd_stopping)
		ldlm_reprocess_all(res, lock->l_policy_data.l_inodebits.bits);
	ldlm_resource_putref(res);

	ecl->ecl_loop++;
	/* (x & -x) == x holds exactly when x is a power of two, so this logs
	 * at counts 1, 2, 4, 8, ... rather than on every cancel.
	 */
	if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
		CDEBUG(D_INFO, "Export %p, %d locks cancelled.\n",
		       exp, ecl->ecl_loop);
	}
}
/**
 * Iterator function for ldlm_export_cancel_locks.
 * Cancels passed locks.
 */
static int
ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				struct hlist_node *hnode, void *data)
{
	struct export_cl_data *ecl = (struct export_cl_data *)data;
	struct obd_export *exp = ecl->ecl_exp;
	struct ldlm_lock *lock = cfs_hash_object(hs, hnode);

	ldlm_lock_get(lock);
	ldlm_cancel_lock_for_export(exp, lock, ecl);
	ldlm_lock_put(lock);

	return 0;
}
/**
 * Cancel all blocked locks for given export.
 *
 * Typically called on client disconnection/eviction.
 */
int ldlm_export_cancel_blocked_locks(struct obd_export *exp)
{
	struct lu_env env;
	struct export_cl_data ecl = {
		.ecl_exp	= exp,
		.ecl_loop	= 0,
	};
	int rc;

	rc = lu_env_init(&env, LCT_DT_THREAD);
	if (rc)
		RETURN(rc);
	ecl.ecl_env = &env;

	while (!list_empty(&exp->exp_bl_list)) {
		struct ldlm_lock *lock;

		spin_lock_bh(&exp->exp_bl_list_lock);
		if (!list_empty(&exp->exp_bl_list)) {
			lock = list_first_entry(&exp->exp_bl_list,
						struct ldlm_lock, l_exp_list);
			ldlm_lock_get(lock);
			list_del_init(&lock->l_exp_list);
		} else {
			lock = NULL;
		}
		spin_unlock_bh(&exp->exp_bl_list_lock);

		if (lock == NULL)
			break;

		ldlm_cancel_lock_for_export(exp, lock, &ecl);
		ldlm_lock_put(lock);
	}

	lu_env_fini(&env);

	CDEBUG(D_DLMTRACE,
	       "Export %p, canceled %d locks, left on hash table %d.\n", exp,
	       ecl.ecl_loop, atomic_read(&exp->exp_lock_hash->hs_count));

	return ecl.ecl_loop;
}
/**
 * Cancel all locks for given export.
 *
 * Typically called after client disconnection/eviction.
 */
int ldlm_export_cancel_locks(struct obd_export *exp)
{
	struct export_cl_data ecl;
	struct lu_env env;
	int rc;

	rc = lu_env_init(&env, LCT_DT_THREAD);
	if (rc)
		RETURN(rc);
	ecl.ecl_env = &env;
	ecl.ecl_exp = exp;
	ecl.ecl_loop = 0;

	cfs_hash_for_each_empty(exp->exp_lock_hash,
				ldlm_cancel_locks_for_export_cb, &ecl);

	CDEBUG(D_DLMTRACE,
	       "Export %p, canceled %d locks, left on hash table %d.\n", exp,
	       ecl.ecl_loop, atomic_read(&exp->exp_lock_hash->hs_count));

	if (ecl.ecl_loop > 0 &&
	    atomic_read(&exp->exp_lock_hash->hs_count) == 0 &&
	    exp->exp_obd->obd_stopping)
		ldlm_reprocess_recovery_done(exp->exp_obd->obd_namespace);

	lu_env_fini(&env);

	return ecl.ecl_loop;
}
/**
 * Downgrade a PW/EX lock to COS, TXN or CR mode.
 *
 * A lock mode conversion from PW/EX mode to a less conflicting mode. The
 * conversion may fail if the lock was canceled before the downgrade, but
 * that does not indicate any problem, because such a lock has no reader or
 * writer and will be destroyed soon.
 *
 * Used by Commit on Sharing (COS) code to force object changes to commit in
 * case of conflict. A converted lock is considered a new lock and all
 * blocking AST state is cleared, so any pending or new blocked lock on that
 * lock will cause a new call to blocking_ast and force a resource object
 * commit.
 *
 * Also used by DNE to force a commit upon operation dependency.
 *
 * Also used by layout_change to replace an EX lock with a CR lock.
 *
 * \param lock		A lock to convert
 * \param new_mode	new lock mode
 */
void ldlm_lock_mode_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode)
{
#ifdef HAVE_SERVER_SUPPORT
	ENTRY;

	LASSERT(new_mode == LCK_COS || new_mode == LCK_TXN ||
		new_mode == LCK_CR);

	lock_res_and_lock(lock);

	if (!(lock->l_granted_mode & (LCK_PW | LCK_EX))) {
		unlock_res_and_lock(lock);

		LASSERT(lock->l_granted_mode == LCK_MINMODE);
		LDLM_DEBUG(lock, "lock was canceled before downgrade");
		RETURN_EXIT;
	}

	ldlm_resource_unlink_lock(lock);
	/* Remove the lock from pool as it will be added again in
	 * ldlm_grant_lock() called below.
	 */
	ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);

	/* Consider the downgraded lock as a new lock and clear all state
	 * related to a previous blocking AST processing.
	 */
	ldlm_clear_blocking_data(lock);

	lock->l_req_mode = new_mode;
	ldlm_grant_lock(lock, NULL);
	unlock_res_and_lock(lock);

	ldlm_reprocess_all(lock->l_resource,
			   lock->l_policy_data.l_inodebits.bits);

	EXIT;
#endif
}
EXPORT_SYMBOL(ldlm_lock_mode_downgrade);
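/*
 * Illustrative only (not from the original source): the Commit-on-Sharing
 * path keeps a granted PW/EX lock but downgrades it so the next conflict
 * triggers a fresh blocking AST (and thus a commit):
 *
 *	ldlm_lock_mode_downgrade(lock, LCK_COS);
 */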
/**
 * Print lock with lock handle \a lockh description into debug log.
 *
 * Used when printing all locks on a resource for debug purposes.
 */
void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh)
{
	struct ldlm_lock *lock;

	if (!((libcfs_debug | D_ERROR) & level))
		return;

	lock = ldlm_handle2lock(lockh);
	if (lock == NULL)
		return;

	LDLM_DEBUG_LIMIT(level, lock, "###");

	ldlm_lock_put(lock);
}
EXPORT_SYMBOL(ldlm_lock_dump_handle);
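/*
 * Illustrative only (not from the original source):
 *
 *	ldlm_lock_dump_handle(D_DLMTRACE, &lockh);
 *
 * dumps the lock behind lockh at the D_DLMTRACE debug level via the
 * rate-limited LDLM_DEBUG_LIMIT() path above.
 */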
/**
 * Print lock information with custom message into debug log.
 * Helper function.
 */
void _ldlm_lock_debug(struct ldlm_lock *lock,
		      struct libcfs_debug_msg_data *msgdata,
		      const char *fmt, ...)
{
	va_list args;
	struct obd_export *exp = lock->l_export;
	struct ldlm_resource *resource = NULL;
	struct va_format vaf;
	char *nid = "local";

	rcu_read_lock();
	resource = rcu_dereference(lock->l_resource);
	if (resource && !refcount_inc_not_zero(&resource->lr_refcount))
		resource = NULL;
	rcu_read_unlock();

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (exp && exp->exp_connection) {
		nid = obd_export_nid2str(exp);
	} else if (exp && exp->exp_obd != NULL) {
		struct obd_import *imp = exp->exp_obd->u.cli.cl_import;

		nid = obd_import_nid2str(imp);
	}

	if (resource == NULL) {
		libcfs_debug_msg(msgdata,
				 "%pV ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
				 &vaf,
				 lock,
				 lock->l_handle.h_cookie,
				 refcount_read(&lock->l_handle.h_ref),
				 lock->l_readers, lock->l_writers,
				 ldlm_lockname[lock->l_granted_mode],
				 ldlm_lockname[lock->l_req_mode],
				 lock->l_flags, nid,
				 lock->l_remote_handle.cookie,
				 exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
				 lock->l_pid, lock->l_callback_timestamp,
				 lock->l_lvb_type);
		va_end(args);
		return;
	}

	switch (resource->lr_type) {
	case LDLM_EXTENT:
		libcfs_debug_msg(msgdata,
				 "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) gid %llu flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
				 &vaf,
				 ldlm_lock_to_ns_name(lock), lock,
				 lock->l_handle.h_cookie,
				 refcount_read(&lock->l_handle.h_ref),
				 lock->l_readers, lock->l_writers,
				 ldlm_lockname[lock->l_granted_mode],
				 ldlm_lockname[lock->l_req_mode],
				 PLDLMRES(resource),
				 refcount_read(&resource->lr_refcount),
				 ldlm_typename[resource->lr_type],
				 lock->l_policy_data.l_extent.start,
				 lock->l_policy_data.l_extent.end,
				 lock->l_req_extent.start, lock->l_req_extent.end,
				 lock->l_req_extent.gid,
				 lock->l_flags, nid,
				 lock->l_remote_handle.cookie,
				 exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
				 lock->l_pid, lock->l_callback_timestamp,
				 lock->l_lvb_type);
		break;

	case LDLM_FLOCK:
		libcfs_debug_msg(msgdata,
				 "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld\n",
				 &vaf,
				 ldlm_lock_to_ns_name(lock), lock,
				 lock->l_handle.h_cookie,
				 refcount_read(&lock->l_handle.h_ref),
				 lock->l_readers, lock->l_writers,
				 ldlm_lockname[lock->l_granted_mode],
				 ldlm_lockname[lock->l_req_mode],
				 PLDLMRES(resource),
				 refcount_read(&resource->lr_refcount),
				 ldlm_typename[resource->lr_type],
				 lock->l_policy_data.l_flock.pid,
				 lock->l_policy_data.l_flock.start,
				 lock->l_policy_data.l_flock.end,
				 lock->l_flags, nid,
				 lock->l_remote_handle.cookie,
				 exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
				 lock->l_pid, lock->l_callback_timestamp);
		break;

	case LDLM_IBITS:
		if (!lock->l_remote_handle.cookie)
			libcfs_debug_msg(msgdata,
				 "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx/%#llx rrc: %d type: %s flags: %#llx pid: %u initiator: MDT%d\n",
				 &vaf,
				 ldlm_lock_to_ns_name(lock),
				 lock, lock->l_handle.h_cookie,
				 refcount_read(&lock->l_handle.h_ref),
				 lock->l_readers, lock->l_writers,
				 ldlm_lockname[lock->l_granted_mode],
				 ldlm_lockname[lock->l_req_mode],
				 PLDLMRES(resource),
				 lock->l_policy_data.l_inodebits.bits,
				 lock->l_policy_data.l_inodebits.try_bits,
				 refcount_read(&resource->lr_refcount),
				 ldlm_typename[resource->lr_type],
				 lock->l_flags, lock->l_pid,
				 lock->l_policy_data.l_inodebits.li_initiator_id);
		else
			libcfs_debug_msg(msgdata,
				 "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx/%#llx rrc: %d type: %s gid %llu flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
				 &vaf,
				 ldlm_lock_to_ns_name(lock),
				 lock, lock->l_handle.h_cookie,
				 refcount_read(&lock->l_handle.h_ref),
				 lock->l_readers, lock->l_writers,
				 ldlm_lockname[lock->l_granted_mode],
				 ldlm_lockname[lock->l_req_mode],
				 PLDLMRES(resource),
				 lock->l_policy_data.l_inodebits.bits,
				 lock->l_policy_data.l_inodebits.try_bits,
				 refcount_read(&resource->lr_refcount),
				 ldlm_typename[resource->lr_type],
				 lock->l_policy_data.l_inodebits.li_gid,
				 lock->l_flags, nid,
				 lock->l_remote_handle.cookie,
				 exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
				 lock->l_pid, lock->l_callback_timestamp,
				 lock->l_lvb_type);
		break;

	default:
		libcfs_debug_msg(msgdata,
				 "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
				 &vaf,
				 ldlm_lock_to_ns_name(lock),
				 lock, lock->l_handle.h_cookie,
				 refcount_read(&lock->l_handle.h_ref),
				 lock->l_readers, lock->l_writers,
				 ldlm_lockname[lock->l_granted_mode],
				 ldlm_lockname[lock->l_req_mode],
				 PLDLMRES(resource),
				 refcount_read(&resource->lr_refcount),
				 ldlm_typename[resource->lr_type],
				 lock->l_flags, nid,
				 lock->l_remote_handle.cookie,
				 exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
				 lock->l_pid, lock->l_callback_timestamp,
				 lock->l_lvb_type);
		break;
	}
	va_end(args);
	ldlm_resource_putref(resource);
}
EXPORT_SYMBOL(_ldlm_lock_debug);