diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 348549b..39abaef 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2010, 2013, Intel Corporation.
+ * Copyright (c) 2010, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -41,89 +41,69 @@
 
 #define DEBUG_SUBSYSTEM S_LDLM
 
-#ifdef __KERNEL__
-# include 
-# include 
-#else
-# include 
-#endif
-
+#include 
 #include 
 #include "ldlm_internal.h"
 
 /* lock types */
 char *ldlm_lockname[] = {
-	[0]		"--",
-	[LCK_EX]	"EX",
-	[LCK_PW]	"PW",
-	[LCK_PR]	"PR",
-	[LCK_CW]	"CW",
-	[LCK_CR]	"CR",
-	[LCK_NL]	"NL",
-	[LCK_GROUP]	"GROUP",
-	[LCK_COS]	"COS"
+	[0]		= "--",
+	[LCK_EX]	= "EX",
+	[LCK_PW]	= "PW",
+	[LCK_PR]	= "PR",
+	[LCK_CW]	= "CW",
+	[LCK_CR]	= "CR",
+	[LCK_NL]	= "NL",
+	[LCK_GROUP]	= "GROUP",
+	[LCK_COS]	= "COS"
 };
 EXPORT_SYMBOL(ldlm_lockname);
 
 char *ldlm_typename[] = {
-	[LDLM_PLAIN]	"PLN",
-	[LDLM_EXTENT]	"EXT",
-	[LDLM_FLOCK]	"FLK",
-	[LDLM_IBITS]	"IBT",
-};
-EXPORT_SYMBOL(ldlm_typename);
-
-static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = {
-	[LDLM_PLAIN - LDLM_MIN_TYPE]	ldlm_plain_policy_wire_to_local,
-	[LDLM_EXTENT - LDLM_MIN_TYPE]	ldlm_extent_policy_wire_to_local,
-	[LDLM_FLOCK - LDLM_MIN_TYPE]	ldlm_flock_policy_wire18_to_local,
-	[LDLM_IBITS - LDLM_MIN_TYPE]	ldlm_ibits_policy_wire_to_local,
+	[LDLM_PLAIN]	= "PLN",
+	[LDLM_EXTENT]	= "EXT",
+	[LDLM_FLOCK]	= "FLK",
+	[LDLM_IBITS]	= "IBT",
 };
 
-static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = {
-	[LDLM_PLAIN - LDLM_MIN_TYPE]	ldlm_plain_policy_wire_to_local,
-	[LDLM_EXTENT - LDLM_MIN_TYPE]	ldlm_extent_policy_wire_to_local,
-	[LDLM_FLOCK - LDLM_MIN_TYPE]	ldlm_flock_policy_wire21_to_local,
-	[LDLM_IBITS - LDLM_MIN_TYPE]	ldlm_ibits_policy_wire_to_local,
+static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
+	[LDLM_PLAIN - LDLM_MIN_TYPE]	= ldlm_plain_policy_wire_to_local,
+	[LDLM_EXTENT - LDLM_MIN_TYPE]	= ldlm_extent_policy_wire_to_local,
+	[LDLM_FLOCK - LDLM_MIN_TYPE]	= ldlm_flock_policy_wire_to_local,
+	[LDLM_IBITS - LDLM_MIN_TYPE]	= ldlm_ibits_policy_wire_to_local,
 };
 
 static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
-	[LDLM_PLAIN - LDLM_MIN_TYPE]	ldlm_plain_policy_local_to_wire,
-	[LDLM_EXTENT - LDLM_MIN_TYPE]	ldlm_extent_policy_local_to_wire,
-	[LDLM_FLOCK - LDLM_MIN_TYPE]	ldlm_flock_policy_local_to_wire,
-	[LDLM_IBITS - LDLM_MIN_TYPE]	ldlm_ibits_policy_local_to_wire,
+	[LDLM_PLAIN - LDLM_MIN_TYPE]	= ldlm_plain_policy_local_to_wire,
+	[LDLM_EXTENT - LDLM_MIN_TYPE]	= ldlm_extent_policy_local_to_wire,
+	[LDLM_FLOCK - LDLM_MIN_TYPE]	= ldlm_flock_policy_local_to_wire,
+	[LDLM_IBITS - LDLM_MIN_TYPE]	= ldlm_ibits_policy_local_to_wire,
 };
 
 /**
  * Converts lock policy from local format to on the wire lock_desc format
  */
-void ldlm_convert_policy_to_wire(ldlm_type_t type,
-				 const ldlm_policy_data_t *lpolicy,
-				 ldlm_wire_policy_data_t *wpolicy)
+void ldlm_convert_policy_to_wire(enum ldlm_type type,
+				 const union ldlm_policy_data *lpolicy,
+				 union ldlm_wire_policy_data *wpolicy)
 {
-
ldlm_policy_local_to_wire_t convert; - convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE]; + convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE]; - convert(lpolicy, wpolicy); + convert(lpolicy, wpolicy); } /** * Converts lock policy from on the wire lock_desc format to local format */ -void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type, - const ldlm_wire_policy_data_t *wpolicy, - ldlm_policy_data_t *lpolicy) +void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, + const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy) { ldlm_policy_wire_to_local_t convert; - int new_client; - /** some badness for 2.0.0 clients, but 2.0.0 isn't supported */ - new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0; - if (new_client) - convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE]; - else - convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE]; + convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE]; convert(wpolicy, lpolicy); } @@ -149,8 +129,6 @@ char *ldlm_it2str(int it) return "getxattr"; case IT_LAYOUT: return "layout"; - case IT_SETXATTR: - return "setxattr"; default: CERROR("Unknown intent %d\n", it); return "UNKNOWN"; @@ -162,12 +140,10 @@ extern struct kmem_cache *ldlm_lock_slab; #ifdef HAVE_SERVER_SUPPORT static ldlm_processing_policy ldlm_processing_policy_table[] = { - [LDLM_PLAIN] ldlm_process_plain_lock, - [LDLM_EXTENT] ldlm_process_extent_lock, -# ifdef __KERNEL__ - [LDLM_FLOCK] ldlm_process_flock_lock, -# endif - [LDLM_IBITS] ldlm_process_inodebits_lock, + [LDLM_PLAIN] = ldlm_process_plain_lock, + [LDLM_EXTENT] = ldlm_process_extent_lock, + [LDLM_FLOCK] = ldlm_process_flock_lock, + [LDLM_IBITS] = ldlm_process_inodebits_lock, }; ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res) @@ -198,7 +174,7 @@ EXPORT_SYMBOL(ldlm_register_intent); */ struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock) { - cfs_atomic_inc(&lock->l_refc); + atomic_inc(&lock->l_refc); return lock; } EXPORT_SYMBOL(ldlm_lock_get); @@ -213,17 +189,18 @@ void ldlm_lock_put(struct ldlm_lock *lock) ENTRY; LASSERT(lock->l_resource != LP_POISON); - LASSERT(cfs_atomic_read(&lock->l_refc) > 0); - if (cfs_atomic_dec_and_test(&lock->l_refc)) { + LASSERT(atomic_read(&lock->l_refc) > 0); + if (atomic_dec_and_test(&lock->l_refc)) { struct ldlm_resource *res; LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing it."); res = lock->l_resource; - LASSERT(lock->l_flags & LDLM_FL_DESTROYED); - LASSERT(cfs_list_empty(&lock->l_res_link)); - LASSERT(cfs_list_empty(&lock->l_pending_chain)); + LASSERT(ldlm_is_destroyed(lock)); + LASSERT(list_empty(&lock->l_exp_list)); + LASSERT(list_empty(&lock->l_res_link)); + LASSERT(list_empty(&lock->l_pending_chain)); lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats, LDLM_NSS_LOCKS); @@ -236,7 +213,7 @@ void ldlm_lock_put(struct ldlm_lock *lock) } if (lock->l_lvb_data != NULL) - OBD_FREE(lock->l_lvb_data, lock->l_lvb_len); + OBD_FREE_LARGE(lock->l_lvb_data, lock->l_lvb_len); ldlm_interval_free(ldlm_interval_detach(lock)); lu_ref_fini(&lock->l_reference); @@ -252,40 +229,47 @@ EXPORT_SYMBOL(ldlm_lock_put); */ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock) { - int rc = 0; - if (!cfs_list_empty(&lock->l_lru)) { - struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - - LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); - cfs_list_del_init(&lock->l_lru); - if (lock->l_flags & LDLM_FL_SKIPPED) - lock->l_flags &= ~LDLM_FL_SKIPPED; - 
LASSERT(ns->ns_nr_unused > 0); - ns->ns_nr_unused--; - rc = 1; - } - return rc; + int rc = 0; + if (!list_empty(&lock->l_lru)) { + struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); + + LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); + list_del_init(&lock->l_lru); + LASSERT(ns->ns_nr_unused > 0); + ns->ns_nr_unused--; + rc = 1; + } + return rc; } /** * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first. + * + * If \a last_use is non-zero, it will remove the lock from LRU only if + * it matches lock's l_last_used. + * + * \retval 0 if \a last_use is set, the lock is not in LRU list or \a last_use + * doesn't match lock's l_last_used; + * otherwise, the lock hasn't been in the LRU list. + * \retval 1 the lock was in LRU list and removed. */ -int ldlm_lock_remove_from_lru(struct ldlm_lock *lock) +int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, cfs_time_t last_use) { struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - int rc; + int rc = 0; ENTRY; - if (lock->l_flags & LDLM_FL_NS_SRV) { - LASSERT(cfs_list_empty(&lock->l_lru)); + if (ldlm_is_ns_srv(lock)) { + LASSERT(list_empty(&lock->l_lru)); RETURN(0); } spin_lock(&ns->ns_lock); - rc = ldlm_lock_remove_from_lru_nolock(lock); + if (last_use == 0 || last_use == lock->l_last_used) + rc = ldlm_lock_remove_from_lru_nolock(lock); spin_unlock(&ns->ns_lock); - EXIT; - return rc; + + RETURN(rc); } /** @@ -293,14 +277,15 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock) */ void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock) { - struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); + struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - lock->l_last_used = cfs_time_current(); - LASSERT(cfs_list_empty(&lock->l_lru)); - LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); - cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list); - LASSERT(ns->ns_nr_unused >= 0); - ns->ns_nr_unused++; + lock->l_last_used = cfs_time_current(); + LASSERT(list_empty(&lock->l_lru)); + LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); + list_add_tail(&lock->l_lru, &ns->ns_unused_list); + ldlm_clear_skipped(lock); + LASSERT(ns->ns_nr_unused >= 0); + ns->ns_nr_unused++; } /** @@ -327,14 +312,14 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock) struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); ENTRY; - if (lock->l_flags & LDLM_FL_NS_SRV) { - LASSERT(cfs_list_empty(&lock->l_lru)); + if (ldlm_is_ns_srv(lock)) { + LASSERT(list_empty(&lock->l_lru)); EXIT; return; } spin_lock(&ns->ns_lock); - if (!cfs_list_empty(&lock->l_lru)) { + if (!list_empty(&lock->l_lru)) { ldlm_lock_remove_from_lru_nolock(lock); ldlm_lock_add_to_lru_nolock(lock); } @@ -361,7 +346,7 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock) * ldlm_lock_destroy, you can never drop your final references on this lock. * Because it's not in the hash table anymore. 
-phil */ -int ldlm_lock_destroy_internal(struct ldlm_lock *lock) +static int ldlm_lock_destroy_internal(struct ldlm_lock *lock) { ENTRY; @@ -370,17 +355,17 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock) LBUG(); } - if (!cfs_list_empty(&lock->l_res_link)) { + if (!list_empty(&lock->l_res_link)) { LDLM_ERROR(lock, "lock still on resource"); LBUG(); } - if (lock->l_flags & LDLM_FL_DESTROYED) { - LASSERT(cfs_list_empty(&lock->l_lru)); + if (ldlm_is_destroyed(lock)) { + LASSERT(list_empty(&lock->l_lru)); EXIT; return 0; } - lock->l_flags |= LDLM_FL_DESTROYED; + ldlm_set_destroyed(lock); if (lock->l_export && lock->l_export->exp_lock_hash) { /* NB: it's safe to call cfs_hash_del() even lock isn't @@ -456,7 +441,7 @@ static void lock_handle_free(void *lock, int size) OBD_SLAB_FREE(lock, ldlm_lock_slab, size); } -struct portals_handle_ops lock_handle_ops = { +static struct portals_handle_ops lock_handle_ops = { .hop_addref = lock_handle_addref, .hop_free = lock_handle_free, }; @@ -471,37 +456,37 @@ struct portals_handle_ops lock_handle_ops = { */ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) { - struct ldlm_lock *lock; - ENTRY; + struct ldlm_lock *lock; + ENTRY; - if (resource == NULL) - LBUG(); + if (resource == NULL) + LBUG(); - OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, __GFP_IO); - if (lock == NULL) - RETURN(NULL); + OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, GFP_NOFS); + if (lock == NULL) + RETURN(NULL); spin_lock_init(&lock->l_lock); - lock->l_resource = resource; - lu_ref_add(&resource->lr_reference, "lock", lock); - - cfs_atomic_set(&lock->l_refc, 2); - CFS_INIT_LIST_HEAD(&lock->l_res_link); - CFS_INIT_LIST_HEAD(&lock->l_lru); - CFS_INIT_LIST_HEAD(&lock->l_pending_chain); - CFS_INIT_LIST_HEAD(&lock->l_bl_ast); - CFS_INIT_LIST_HEAD(&lock->l_cp_ast); - CFS_INIT_LIST_HEAD(&lock->l_rk_ast); - cfs_waitq_init(&lock->l_waitq); - lock->l_blocking_lock = NULL; - CFS_INIT_LIST_HEAD(&lock->l_sl_mode); - CFS_INIT_LIST_HEAD(&lock->l_sl_policy); - CFS_INIT_HLIST_NODE(&lock->l_exp_hash); - CFS_INIT_HLIST_NODE(&lock->l_exp_flock_hash); + lock->l_resource = resource; + lu_ref_add(&resource->lr_reference, "lock", lock); + + atomic_set(&lock->l_refc, 2); + INIT_LIST_HEAD(&lock->l_res_link); + INIT_LIST_HEAD(&lock->l_lru); + INIT_LIST_HEAD(&lock->l_pending_chain); + INIT_LIST_HEAD(&lock->l_bl_ast); + INIT_LIST_HEAD(&lock->l_cp_ast); + INIT_LIST_HEAD(&lock->l_rk_ast); + init_waitqueue_head(&lock->l_waitq); + lock->l_blocking_lock = NULL; + INIT_LIST_HEAD(&lock->l_sl_mode); + INIT_LIST_HEAD(&lock->l_sl_policy); + INIT_HLIST_NODE(&lock->l_exp_hash); + INIT_HLIST_NODE(&lock->l_exp_flock_hash); lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats, LDLM_NSS_LOCKS); - CFS_INIT_LIST_HEAD(&lock->l_handle.h_link); + INIT_LIST_HEAD(&lock->l_handle.h_link); class_handle_hash(&lock->l_handle, &lock_handle_ops); lu_ref_init(&lock->l_reference); @@ -509,11 +494,11 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) lock->l_callback_timeout = 0; #if LUSTRE_TRACKS_LOCK_EXP_REFS - CFS_INIT_LIST_HEAD(&lock->l_exp_refs_link); + INIT_LIST_HEAD(&lock->l_exp_refs_link); lock->l_exp_refs_nr = 0; lock->l_exp_refs_target = NULL; #endif - CFS_INIT_LIST_HEAD(&lock->l_exp_list); + INIT_LIST_HEAD(&lock->l_exp_list); RETURN(lock); } @@ -544,14 +529,14 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, LASSERT(new_resid->name[0] != 0); /* This function assumes that the lock isn't on any lists */ - LASSERT(cfs_list_empty(&lock->l_res_link)); + 
LASSERT(list_empty(&lock->l_res_link)); type = oldres->lr_type; unlock_res_and_lock(lock); - newres = ldlm_resource_get(ns, NULL, new_resid, type, 1); - if (newres == NULL) - RETURN(-ENOMEM); + newres = ldlm_resource_get(ns, NULL, new_resid, type, 1); + if (IS_ERR(newres)) + RETURN(PTR_ERR(newres)); lu_ref_add(&newres->lr_reference, "lock", lock); /* @@ -581,7 +566,6 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, RETURN(0); } -EXPORT_SYMBOL(ldlm_lock_change_resource); /** \defgroup ldlm_handles LDLM HANDLES * Ways to get hold of locks without any addresses. @@ -607,45 +591,47 @@ EXPORT_SYMBOL(ldlm_lock2handle); struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, __u64 flags) { - struct ldlm_lock *lock; - ENTRY; + struct ldlm_lock *lock; + ENTRY; - LASSERT(handle); + LASSERT(handle); lock = class_handle2object(handle->cookie, NULL); - if (lock == NULL) - RETURN(NULL); - - /* It's unlikely but possible that someone marked the lock as - * destroyed after we did handle2object on it */ - if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED)== 0)) { - lu_ref_add(&lock->l_reference, "handle", cfs_current()); - RETURN(lock); - } + if (lock == NULL) + RETURN(NULL); + + /* It's unlikely but possible that someone marked the lock as + * destroyed after we did handle2object on it */ + if ((flags == 0) && !ldlm_is_destroyed(lock)) { + lu_ref_add(&lock->l_reference, "handle", current); + RETURN(lock); + } - lock_res_and_lock(lock); + lock_res_and_lock(lock); - LASSERT(lock->l_resource != NULL); + LASSERT(lock->l_resource != NULL); - lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current()); - if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) { - unlock_res_and_lock(lock); - CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock); - LDLM_LOCK_PUT(lock); - RETURN(NULL); - } + lu_ref_add_atomic(&lock->l_reference, "handle", current); + if (unlikely(ldlm_is_destroyed(lock))) { + unlock_res_and_lock(lock); + CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock); + LDLM_LOCK_PUT(lock); + RETURN(NULL); + } - if (flags && (lock->l_flags & flags)) { - unlock_res_and_lock(lock); - LDLM_LOCK_PUT(lock); - RETURN(NULL); - } + /* If we're setting flags, make sure none of them are already set. */ + if (flags != 0) { + if ((lock->l_flags & flags) != 0) { + unlock_res_and_lock(lock); + LDLM_LOCK_PUT(lock); + RETURN(NULL); + } - if (flags) - lock->l_flags |= flags; + lock->l_flags |= flags; + } - unlock_res_and_lock(lock); - RETURN(lock); + unlock_res_and_lock(lock); + RETURN(lock); } EXPORT_SYMBOL(__ldlm_handle2lock); /** @} ldlm_handles */ @@ -656,71 +642,31 @@ EXPORT_SYMBOL(__ldlm_handle2lock); */ void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc) { - struct obd_export *exp = lock->l_export ?: lock->l_conn_export; - - /* INODEBITS_INTEROP: If the other side does not support - * inodebits, reply with a plain lock descriptor. 
*/ - if ((lock->l_resource->lr_type == LDLM_IBITS) && - (exp && !(exp_connect_flags(exp) & OBD_CONNECT_IBITS))) { - /* Make sure all the right bits are set in this lock we - are going to pass to client */ - LASSERTF(lock->l_policy_data.l_inodebits.bits == - (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE | - MDS_INODELOCK_LAYOUT), - "Inappropriate inode lock bits during " - "conversion " LPU64 "\n", - lock->l_policy_data.l_inodebits.bits); - - ldlm_res2desc(lock->l_resource, &desc->l_resource); - desc->l_resource.lr_type = LDLM_PLAIN; - - /* Convert "new" lock mode to something old client can - understand */ - if ((lock->l_req_mode == LCK_CR) || - (lock->l_req_mode == LCK_CW)) - desc->l_req_mode = LCK_PR; - else - desc->l_req_mode = lock->l_req_mode; - if ((lock->l_granted_mode == LCK_CR) || - (lock->l_granted_mode == LCK_CW)) { - desc->l_granted_mode = LCK_PR; - } else { - /* We never grant PW/EX locks to clients */ - LASSERT((lock->l_granted_mode != LCK_PW) && - (lock->l_granted_mode != LCK_EX)); - desc->l_granted_mode = lock->l_granted_mode; - } - - /* We do not copy policy here, because there is no - policy for plain locks */ - } else { - ldlm_res2desc(lock->l_resource, &desc->l_resource); - desc->l_req_mode = lock->l_req_mode; - desc->l_granted_mode = lock->l_granted_mode; - ldlm_convert_policy_to_wire(lock->l_resource->lr_type, - &lock->l_policy_data, - &desc->l_policy_data); - } + ldlm_res2desc(lock->l_resource, &desc->l_resource); + desc->l_req_mode = lock->l_req_mode; + desc->l_granted_mode = lock->l_granted_mode; + ldlm_convert_policy_to_wire(lock->l_resource->lr_type, + &lock->l_policy_data, + &desc->l_policy_data); } -EXPORT_SYMBOL(ldlm_lock2desc); /** * Add a lock to list of conflicting locks to send AST to. * * Only add if we have not sent a blocking AST to the lock yet. */ -void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, - cfs_list_t *work_list) -{ - if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) { - LDLM_DEBUG(lock, "lock incompatible; sending blocking AST."); - lock->l_flags |= LDLM_FL_AST_SENT; - /* If the enqueuing client said so, tell the AST recipient to - * discard dirty data, rather than writing back. */ - if (new->l_flags & LDLM_FL_AST_DISCARD_DATA) - lock->l_flags |= LDLM_FL_DISCARD_DATA; - LASSERT(cfs_list_empty(&lock->l_bl_ast)); - cfs_list_add(&lock->l_bl_ast, work_list); +static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, + struct list_head *work_list) +{ + if (!ldlm_is_ast_sent(lock)) { + LDLM_DEBUG(lock, "lock incompatible; sending blocking AST."); + ldlm_set_ast_sent(lock); + /* If the enqueuing client said so, tell the AST recipient to + * discard dirty data, rather than writing back. */ + if (ldlm_is_ast_discard_data(new)) + ldlm_set_discard_data(lock); + LASSERT(list_empty(&lock->l_bl_ast)); + list_add(&lock->l_bl_ast, work_list); LDLM_LOCK_GET(lock); LASSERT(lock->l_blocking_lock == NULL); lock->l_blocking_lock = LDLM_LOCK_GET(new); @@ -730,13 +676,14 @@ void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, /** * Add a lock to list of just granted locks to send completion AST to. 
*/ -void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list) +static void ldlm_add_cp_work_item(struct ldlm_lock *lock, + struct list_head *work_list) { - if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) { - lock->l_flags |= LDLM_FL_CP_REQD; + if (!ldlm_is_cp_reqd(lock)) { + ldlm_set_cp_reqd(lock); LDLM_DEBUG(lock, "lock granted; sending completion AST."); - LASSERT(cfs_list_empty(&lock->l_cp_ast)); - cfs_list_add(&lock->l_cp_ast, work_list); + LASSERT(list_empty(&lock->l_cp_ast)); + list_add(&lock->l_cp_ast, work_list); LDLM_LOCK_GET(lock); } } @@ -748,7 +695,7 @@ void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list) * Must be called with lr_lock held. */ void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, - cfs_list_t *work_list) + struct list_head *work_list) { ENTRY; check_res_locked(lock->l_resource); @@ -764,14 +711,14 @@ void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, * r/w reference type is determined by \a mode * Calls ldlm_lock_addref_internal. */ -void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode) +void ldlm_lock_addref(struct lustre_handle *lockh, enum ldlm_mode mode) { - struct ldlm_lock *lock; + struct ldlm_lock *lock; - lock = ldlm_handle2lock(lockh); - LASSERT(lock != NULL); - ldlm_lock_addref_internal(lock, mode); - LDLM_LOCK_PUT(lock); + lock = ldlm_handle2lock(lockh); + LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie); + ldlm_lock_addref_internal(lock, mode); + LDLM_LOCK_PUT(lock); } EXPORT_SYMBOL(ldlm_lock_addref); @@ -782,7 +729,8 @@ EXPORT_SYMBOL(ldlm_lock_addref); * Removes lock from LRU if it is there. * Assumes the LDLM lock is already locked. */ -void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode) +void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, + enum ldlm_mode mode) { ldlm_lock_remove_from_lru(lock); if (mode & (LCK_NL | LCK_CR | LCK_PR)) { @@ -806,7 +754,7 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode) * * \retval -EAGAIN lock is being canceled. */ -int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode) +int ldlm_lock_addref_try(struct lustre_handle *lockh, enum ldlm_mode mode) { struct ldlm_lock *lock; int result; @@ -816,7 +764,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode) if (lock != NULL) { lock_res_and_lock(lock); if (lock->l_readers != 0 || lock->l_writers != 0 || - !(lock->l_flags & LDLM_FL_CBPENDING)) { + !ldlm_is_cbpending(lock)) { ldlm_lock_addref_internal_nolock(lock, mode); result = 0; } @@ -832,11 +780,11 @@ EXPORT_SYMBOL(ldlm_lock_addref_try); * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work. * Only called for local locks. */ -void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode) +void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode) { - lock_res_and_lock(lock); - ldlm_lock_addref_internal_nolock(lock, mode); - unlock_res_and_lock(lock); + lock_res_and_lock(lock); + ldlm_lock_addref_internal_nolock(lock, mode); + unlock_res_and_lock(lock); } /** @@ -846,7 +794,8 @@ void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode) * Does NOT add lock to LRU if no r/w references left to accomodate flock locks * that cannot be placed in LRU. 
*/ -void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode) +void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, + enum ldlm_mode mode) { LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); if (mode & (LCK_NL | LCK_CR | LCK_PR)) { @@ -872,7 +821,7 @@ void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode) * on the namespace. * For blocked LDLM locks if r/w count drops to zero, blocking_ast is called. */ -void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) +void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode) { struct ldlm_namespace *ns; ENTRY; @@ -883,19 +832,25 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) ldlm_lock_decref_internal_nolock(lock, mode); - if (lock->l_flags & LDLM_FL_LOCAL && + if (ldlm_is_local(lock) && !lock->l_readers && !lock->l_writers) { /* If this is a local lock on a server namespace and this was * the last reference, cancel the lock. */ CDEBUG(D_INFO, "forcing cancel of local lock\n"); - lock->l_flags |= LDLM_FL_CBPENDING; + ldlm_set_cbpending(lock); } - if (!lock->l_readers && !lock->l_writers && - (lock->l_flags & LDLM_FL_CBPENDING)) { - /* If we received a blocked AST and this was the last reference, - * run the callback. */ - if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export) + if (!lock->l_readers && !lock->l_writers && + (ldlm_is_cbpending(lock) || lock->l_req_mode == LCK_GROUP)) { + /* If we received a blocked AST and this was the last reference, + * run the callback. + * Group locks are special: + * They must not go in LRU, but they are not called back + * like non-group locks, instead they are manually released. + * They have an l_writers reference which they keep until + * they are manually released, so we remove them when they have + * no more reader or writer references. 
- LU-6368 */ + if (ldlm_is_ns_srv(lock) && lock->l_export) CERROR("FL_CBPENDING set on non-local lock--just a " "warning\n"); @@ -905,16 +860,16 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) ldlm_lock_remove_from_lru(lock); unlock_res_and_lock(lock); - if (lock->l_flags & LDLM_FL_FAIL_LOC) + if (ldlm_is_fail_loc(lock)) OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); - if ((lock->l_flags & LDLM_FL_ATOMIC_CB) || + if (ldlm_is_atomic_cb(lock) || ldlm_bl_to_thread_lock(ns, NULL, lock) != 0) ldlm_handle_bl_callback(ns, NULL, lock); } else if (ns_is_client(ns) && !lock->l_readers && !lock->l_writers && - !(lock->l_flags & LDLM_FL_NO_LRU) && - !(lock->l_flags & LDLM_FL_BL_AST)) { + !ldlm_is_no_lru(lock) && + !ldlm_is_bl_ast(lock)) { LDLM_DEBUG(lock, "add lock into lru list"); @@ -923,7 +878,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) ldlm_lock_add_to_lru(lock); unlock_res_and_lock(lock); - if (lock->l_flags & LDLM_FL_FAIL_LOC) + if (ldlm_is_fail_loc(lock)) OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE @@ -943,7 +898,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) /** * Decrease reader/writer refcount for LDLM lock with handle \a lockh */ -void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode) +void ldlm_lock_decref(struct lustre_handle *lockh, enum ldlm_mode mode) { struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie); @@ -957,9 +912,9 @@ EXPORT_SYMBOL(ldlm_lock_decref); * \a lockh and mark it for subsequent cancellation once r/w refcount * drops to zero instead of putting into LRU. * - * Typical usage is for GROUP locks which we cannot allow to be cached. */ -void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode) +void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, + enum ldlm_mode mode) { struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); ENTRY; @@ -968,7 +923,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode) LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); lock_res_and_lock(lock); - lock->l_flags |= LDLM_FL_CBPENDING; + ldlm_set_cbpending(lock); unlock_res_and_lock(lock); ldlm_lock_decref_internal(lock, mode); LDLM_LOCK_PUT(lock); @@ -976,9 +931,9 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode) EXPORT_SYMBOL(ldlm_lock_decref_and_cancel); struct sl_insert_point { - cfs_list_t *res_link; - cfs_list_t *mode_link; - cfs_list_t *policy_link; + struct list_head *res_link; + struct list_head *mode_link; + struct list_head *policy_link; }; /** @@ -995,18 +950,18 @@ struct sl_insert_point { * NOTE: called by * - ldlm_grant_lock_with_skiplist */ -static void search_granted_lock(cfs_list_t *queue, +static void search_granted_lock(struct list_head *queue, struct ldlm_lock *req, struct sl_insert_point *prev) { - cfs_list_t *tmp; + struct list_head *tmp; struct ldlm_lock *lock, *mode_end, *policy_end; ENTRY; - cfs_list_for_each(tmp, queue) { - lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link); + list_for_each(tmp, queue) { + lock = list_entry(tmp, struct ldlm_lock, l_res_link); - mode_end = cfs_list_entry(lock->l_sl_mode.prev, + mode_end = list_entry(lock->l_sl_mode.prev, struct ldlm_lock, l_sl_mode); if (lock->l_req_mode != req->l_req_mode) { @@ -1026,7 +981,7 @@ static void search_granted_lock(cfs_list_t *queue, } else if (lock->l_resource->lr_type == LDLM_IBITS) { for (;;) { policy_end = - 
cfs_list_entry(lock->l_sl_policy.prev, + list_entry(lock->l_sl_policy.prev, struct ldlm_lock, l_sl_policy); @@ -1050,7 +1005,7 @@ static void search_granted_lock(cfs_list_t *queue, /* go to next policy group within mode group */ tmp = policy_end->l_res_link.next; - lock = cfs_list_entry(tmp, struct ldlm_lock, + lock = list_entry(tmp, struct ldlm_lock, l_res_link); } /* loop over policy groups within the mode group */ @@ -1091,25 +1046,25 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock, ldlm_resource_dump(D_INFO, res); LDLM_DEBUG(lock, "About to add lock:"); - if (lock->l_flags & LDLM_FL_DESTROYED) { + if (ldlm_is_destroyed(lock)) { CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n"); return; } - LASSERT(cfs_list_empty(&lock->l_res_link)); - LASSERT(cfs_list_empty(&lock->l_sl_mode)); - LASSERT(cfs_list_empty(&lock->l_sl_policy)); + LASSERT(list_empty(&lock->l_res_link)); + LASSERT(list_empty(&lock->l_sl_mode)); + LASSERT(list_empty(&lock->l_sl_policy)); /* * lock->link == prev->link means lock is first starting the group. * Don't re-add to itself to suppress kernel warnings. */ if (&lock->l_res_link != prev->res_link) - cfs_list_add(&lock->l_res_link, prev->res_link); + list_add(&lock->l_res_link, prev->res_link); if (&lock->l_sl_mode != prev->mode_link) - cfs_list_add(&lock->l_sl_mode, prev->mode_link); + list_add(&lock->l_sl_mode, prev->mode_link); if (&lock->l_sl_policy != prev->policy_link) - cfs_list_add(&lock->l_sl_policy, prev->policy_link); + list_add(&lock->l_sl_policy, prev->policy_link); EXIT; } @@ -1141,7 +1096,7 @@ static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock) * * must be called with lr_lock held */ -void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list) +void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) { struct ldlm_resource *res = lock->l_resource; ENTRY; @@ -1149,6 +1104,21 @@ void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list) check_res_locked(res); lock->l_granted_mode = lock->l_req_mode; + + if (work_list && lock->l_completion_ast != NULL) + ldlm_add_ast_work_item(lock, NULL, work_list); + + /* We should not add locks to granted list in the following cases: + * - this is an UNLOCK but not a real lock; + * - this is a TEST lock; + * - this is a F_CANCELLK lock (async flock has req_mode == 0) + * - this is a deadlock (flock cannot be granted) */ + if (lock->l_req_mode == 0 || + lock->l_req_mode == LCK_NL || + ldlm_is_test_lock(lock) || + ldlm_is_flock_deadlock(lock)) + RETURN_EXIT; + if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) ldlm_grant_lock_with_skiplist(lock); else if (res->lr_type == LDLM_EXTENT) @@ -1156,105 +1126,178 @@ void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list) else ldlm_resource_add_lock(res, &res->lr_granted, lock); - if (lock->l_granted_mode < res->lr_most_restr) - res->lr_most_restr = lock->l_granted_mode; - - if (work_list && lock->l_completion_ast != NULL) - ldlm_add_ast_work_item(lock, NULL, work_list); - ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock); EXIT; } /** - * Search for a lock with given properties in a queue. + * Describe the overlap between two locks. itree_overlap_cb data. + */ +struct lock_match_data { + struct ldlm_lock *lmd_old; + struct ldlm_lock *lmd_lock; + enum ldlm_mode *lmd_mode; + union ldlm_policy_data *lmd_policy; + __u64 lmd_flags; + int lmd_unref; +}; + +/** + * Check if the given @lock meets the criteria for a match. + * A reference on the lock is taken if matched. 
* - * \retval a referenced lock or NULL. See the flag descriptions below, in the - * comment above ldlm_lock_match + * \param lock test-against this lock + * \param data parameters */ -static struct ldlm_lock *search_queue(cfs_list_t *queue, - ldlm_mode_t *mode, - ldlm_policy_data_t *policy, - struct ldlm_lock *old_lock, - __u64 flags, int unref) -{ - struct ldlm_lock *lock; - cfs_list_t *tmp; +static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data) +{ + union ldlm_policy_data *lpol = &lock->l_policy_data; + enum ldlm_mode match; + + if (lock == data->lmd_old) + return INTERVAL_ITER_STOP; + + /* Check if this lock can be matched. + * Used by LU-2919(exclusive open) for open lease lock */ + if (ldlm_is_excl(lock)) + return INTERVAL_ITER_CONT; + + /* llite sometimes wants to match locks that will be + * canceled when their users drop, but we allow it to match + * if it passes in CBPENDING and the lock still has users. + * this is generally only going to be used by children + * whose parents already hold a lock so forward progress + * can still happen. */ + if (ldlm_is_cbpending(lock) && + !(data->lmd_flags & LDLM_FL_CBPENDING)) + return INTERVAL_ITER_CONT; + if (!data->lmd_unref && ldlm_is_cbpending(lock) && + lock->l_readers == 0 && lock->l_writers == 0) + return INTERVAL_ITER_CONT; + + if (!(lock->l_req_mode & *data->lmd_mode)) + return INTERVAL_ITER_CONT; + match = lock->l_req_mode; + + switch (lock->l_resource->lr_type) { + case LDLM_EXTENT: + if (lpol->l_extent.start > data->lmd_policy->l_extent.start || + lpol->l_extent.end < data->lmd_policy->l_extent.end) + return INTERVAL_ITER_CONT; + + if (unlikely(match == LCK_GROUP) && + data->lmd_policy->l_extent.gid != LDLM_GID_ANY && + lpol->l_extent.gid != data->lmd_policy->l_extent.gid) + return INTERVAL_ITER_CONT; + break; + case LDLM_IBITS: + /* We match if we have existing lock with same or wider set + of bits. */ + if ((lpol->l_inodebits.bits & + data->lmd_policy->l_inodebits.bits) != + data->lmd_policy->l_inodebits.bits) + return INTERVAL_ITER_CONT; + break; + default: + ; + } - cfs_list_for_each(tmp, queue) { - ldlm_mode_t match; + /* We match if we have existing lock with same or wider set + of bits. */ + if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE)) + return INTERVAL_ITER_CONT; - lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link); + if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock))) + return INTERVAL_ITER_CONT; - if (lock == old_lock) - break; + if (data->lmd_flags & LDLM_FL_TEST_LOCK) { + LDLM_LOCK_GET(lock); + ldlm_lock_touch_in_lru(lock); + } else { + ldlm_lock_addref_internal_nolock(lock, match); + } - /* Check if this lock can be matched. - * Used by LU-2919(exclusive open) for open lease lock */ - if (ldlm_is_excl(lock)) - continue; + *data->lmd_mode = match; + data->lmd_lock = lock; - /* llite sometimes wants to match locks that will be - * canceled when their users drop, but we allow it to match - * if it passes in CBPENDING and the lock still has users. - * this is generally only going to be used by children - * whose parents already hold a lock so forward progress - * can still happen. 
*/ - if (lock->l_flags & LDLM_FL_CBPENDING && - !(flags & LDLM_FL_CBPENDING)) - continue; - if (!unref && lock->l_flags & LDLM_FL_CBPENDING && - lock->l_readers == 0 && lock->l_writers == 0) - continue; + return INTERVAL_ITER_STOP; +} - if (!(lock->l_req_mode & *mode)) - continue; - match = lock->l_req_mode; +static unsigned int itree_overlap_cb(struct interval_node *in, void *args) +{ + struct ldlm_interval *node = to_ldlm_interval(in); + struct lock_match_data *data = args; + struct ldlm_lock *lock; + int rc; - if (lock->l_resource->lr_type == LDLM_EXTENT && - (lock->l_policy_data.l_extent.start > - policy->l_extent.start || - lock->l_policy_data.l_extent.end < policy->l_extent.end)) - continue; + list_for_each_entry(lock, &node->li_group, l_sl_policy) { + rc = lock_matches(lock, data); + if (rc == INTERVAL_ITER_STOP) + return INTERVAL_ITER_STOP; + } + return INTERVAL_ITER_CONT; +} - if (unlikely(match == LCK_GROUP) && - lock->l_resource->lr_type == LDLM_EXTENT && - lock->l_policy_data.l_extent.gid != policy->l_extent.gid) - continue; +/** + * Search for a lock with given parameters in interval trees. + * + * \param res search for a lock in this resource + * \param data parameters + * + * \retval a referenced lock or NULL. + */ +static struct ldlm_lock *search_itree(struct ldlm_resource *res, + struct lock_match_data *data) +{ + struct interval_node_extent ext = { + .start = data->lmd_policy->l_extent.start, + .end = data->lmd_policy->l_extent.end + }; + int idx; - /* We match if we have existing lock with same or wider set - of bits. */ - if (lock->l_resource->lr_type == LDLM_IBITS && - ((lock->l_policy_data.l_inodebits.bits & - policy->l_inodebits.bits) != - policy->l_inodebits.bits)) - continue; + for (idx = 0; idx < LCK_MODE_NUM; idx++) { + struct ldlm_interval_tree *tree = &res->lr_itree[idx]; - if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK)) - continue; + if (tree->lit_root == NULL) + continue; - if ((flags & LDLM_FL_LOCAL_ONLY) && - !(lock->l_flags & LDLM_FL_LOCAL)) - continue; + if (!(tree->lit_mode & *data->lmd_mode)) + continue; + + interval_search(tree->lit_root, &ext, + itree_overlap_cb, data); + } + return data->lmd_lock; +} - if (flags & LDLM_FL_TEST_LOCK) { - LDLM_LOCK_GET(lock); - ldlm_lock_touch_in_lru(lock); - } else { - ldlm_lock_addref_internal_nolock(lock, match); - } - *mode = match; - return lock; - } - return NULL; +/** + * Search for a lock with given properties in a queue. + * + * \param queue search for a lock in this queue + * \param data parameters + * + * \retval a referenced lock or NULL. + */ +static struct ldlm_lock *search_queue(struct list_head *queue, + struct lock_match_data *data) +{ + struct ldlm_lock *lock; + int rc; + + list_for_each_entry(lock, queue, l_res_link) { + rc = lock_matches(lock, data); + if (rc == INTERVAL_ITER_STOP) + return data->lmd_lock; + } + return NULL; } void ldlm_lock_fail_match_locked(struct ldlm_lock *lock) { if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) { lock->l_flags |= LDLM_FL_FAIL_NOTIFIED; - cfs_waitq_broadcast(&lock->l_waitq); + wake_up_all(&lock->l_waitq); } } EXPORT_SYMBOL(ldlm_lock_fail_match_locked); @@ -1265,7 +1308,6 @@ void ldlm_lock_fail_match(struct ldlm_lock *lock) ldlm_lock_fail_match_locked(lock); unlock_res_and_lock(lock); } -EXPORT_SYMBOL(ldlm_lock_fail_match); /** * Mark lock as "matchable" by OST. 
@@ -1276,8 +1318,8 @@ EXPORT_SYMBOL(ldlm_lock_fail_match); */ void ldlm_lock_allow_match_locked(struct ldlm_lock *lock) { - lock->l_flags |= LDLM_FL_LVB_READY; - cfs_waitq_broadcast(&lock->l_waitq); + ldlm_set_lvb_ready(lock); + wake_up_all(&lock->l_waitq); } EXPORT_SYMBOL(ldlm_lock_allow_match_locked); @@ -1323,49 +1365,59 @@ EXPORT_SYMBOL(ldlm_lock_allow_match); * keep caller code unchanged), the context failure will be discovered by * caller sometime later. */ -ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, - const struct ldlm_res_id *res_id, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh, int unref) -{ - struct ldlm_resource *res; - struct ldlm_lock *lock, *old_lock = NULL; - int rc = 0; - ENTRY; +enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, + const struct ldlm_res_id *res_id, + enum ldlm_type type, + union ldlm_policy_data *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh, int unref) +{ + struct lock_match_data data = { + .lmd_old = NULL, + .lmd_lock = NULL, + .lmd_mode = &mode, + .lmd_policy = policy, + .lmd_flags = flags, + .lmd_unref = unref, + }; + struct ldlm_resource *res; + struct ldlm_lock *lock; + int rc = 0; + ENTRY; - if (ns == NULL) { - old_lock = ldlm_handle2lock(lockh); - LASSERT(old_lock); + if (ns == NULL) { + data.lmd_old = ldlm_handle2lock(lockh); + LASSERT(data.lmd_old != NULL); - ns = ldlm_lock_to_ns(old_lock); - res_id = &old_lock->l_resource->lr_name; - type = old_lock->l_resource->lr_type; - mode = old_lock->l_req_mode; - } + ns = ldlm_lock_to_ns(data.lmd_old); + res_id = &data.lmd_old->l_resource->lr_name; + type = data.lmd_old->l_resource->lr_type; + *data.lmd_mode = data.lmd_old->l_req_mode; + } - res = ldlm_resource_get(ns, NULL, res_id, type, 0); - if (res == NULL) { - LASSERT(old_lock == NULL); - RETURN(0); - } + res = ldlm_resource_get(ns, NULL, res_id, type, 0); + if (IS_ERR(res)) { + LASSERT(data.lmd_old == NULL); + RETURN(0); + } - LDLM_RESOURCE_ADDREF(res); - lock_res(res); + LDLM_RESOURCE_ADDREF(res); + lock_res(res); - lock = search_queue(&res->lr_granted, &mode, policy, old_lock, - flags, unref); - if (lock != NULL) - GOTO(out, rc = 1); - if (flags & LDLM_FL_BLOCK_GRANTED) - GOTO(out, rc = 0); - lock = search_queue(&res->lr_converting, &mode, policy, old_lock, - flags, unref); - if (lock != NULL) - GOTO(out, rc = 1); - lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, - flags, unref); - if (lock != NULL) - GOTO(out, rc = 1); + if (res->lr_type == LDLM_EXTENT) + lock = search_itree(res, &data); + else + lock = search_queue(&res->lr_granted, &data); + if (lock != NULL) + GOTO(out, rc = 1); + if (flags & LDLM_FL_BLOCK_GRANTED) + GOTO(out, rc = 0); + lock = search_queue(&res->lr_converting, &data); + if (lock != NULL) + GOTO(out, rc = 1); + lock = search_queue(&res->lr_waiting, &data); + if (lock != NULL) + GOTO(out, rc = 1); EXIT; out: @@ -1376,7 +1428,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, if (lock) { ldlm_lock2handle(lock, lockh); if ((flags & LDLM_FL_LVB_READY) && - (!(lock->l_flags & LDLM_FL_LVB_READY))) { + (!ldlm_is_lvb_ready(lock))) { __u64 wait_flags = LDLM_FL_LVB_READY | LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED; struct l_wait_info lwi; @@ -1402,7 +1454,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, l_wait_event(lock->l_waitq, lock->l_flags & wait_flags, &lwi); - if (!(lock->l_flags & LDLM_FL_LVB_READY)) { + if (!ldlm_is_lvb_ready(lock)) { if (flags & 
LDLM_FL_TEST_LOCK) LDLM_LOCK_RELEASE(lock); else @@ -1440,27 +1492,27 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, (type == LDLM_PLAIN || type == LDLM_IBITS) ? res_id->name[3] : policy->l_extent.end); } - if (old_lock) - LDLM_LOCK_PUT(old_lock); + if (data.lmd_old != NULL) + LDLM_LOCK_PUT(data.lmd_old); - return rc ? mode : 0; + return rc ? mode : 0; } EXPORT_SYMBOL(ldlm_lock_match); -ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, - __u64 *bits) +enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh, + __u64 *bits) { - struct ldlm_lock *lock; - ldlm_mode_t mode = 0; - ENTRY; + struct ldlm_lock *lock; + enum ldlm_mode mode = 0; + ENTRY; - lock = ldlm_handle2lock(lockh); - if (lock != NULL) { - lock_res_and_lock(lock); - if (lock->l_flags & LDLM_FL_GONE_MASK) - GOTO(out, mode); + lock = ldlm_handle2lock(lockh); + if (lock != NULL) { + lock_res_and_lock(lock); + if (LDLM_HAVE_MASK(lock, GONE)) + GOTO(out, mode); - if (lock->l_flags & LDLM_FL_CBPENDING && + if (ldlm_is_cbpending(lock) && lock->l_readers == 0 && lock->l_writers == 0) GOTO(out, mode); @@ -1585,61 +1637,60 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, * Returns a referenced lock */ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, - const struct ldlm_res_id *res_id, - ldlm_type_t type, - ldlm_mode_t mode, - const struct ldlm_callback_suite *cbs, + const struct ldlm_res_id *res_id, + enum ldlm_type type, + enum ldlm_mode mode, + const struct ldlm_callback_suite *cbs, void *data, __u32 lvb_len, enum lvb_type lvb_type) { - struct ldlm_lock *lock; - struct ldlm_resource *res; - ENTRY; - - res = ldlm_resource_get(ns, NULL, res_id, type, 1); - if (res == NULL) - RETURN(NULL); + struct ldlm_lock *lock; + struct ldlm_resource *res; + int rc; + ENTRY; - lock = ldlm_lock_new(res); + res = ldlm_resource_get(ns, NULL, res_id, type, 1); + if (IS_ERR(res)) + RETURN(ERR_CAST(res)); - if (lock == NULL) - RETURN(NULL); + lock = ldlm_lock_new(res); + if (lock == NULL) + RETURN(ERR_PTR(-ENOMEM)); lock->l_req_mode = mode; lock->l_ast_data = data; lock->l_pid = current_pid(); if (ns_is_server(ns)) - lock->l_flags |= LDLM_FL_NS_SRV; - if (cbs) { - lock->l_blocking_ast = cbs->lcs_blocking; - lock->l_completion_ast = cbs->lcs_completion; - lock->l_glimpse_ast = cbs->lcs_glimpse; - } - - lock->l_tree_node = NULL; - /* if this is the extent lock, allocate the interval tree node */ - if (type == LDLM_EXTENT) { - if (ldlm_interval_alloc(lock) == NULL) - GOTO(out, 0); - } + ldlm_set_ns_srv(lock); + if (cbs) { + lock->l_blocking_ast = cbs->lcs_blocking; + lock->l_completion_ast = cbs->lcs_completion; + lock->l_glimpse_ast = cbs->lcs_glimpse; + } - if (lvb_len) { - lock->l_lvb_len = lvb_len; - OBD_ALLOC(lock->l_lvb_data, lvb_len); - if (lock->l_lvb_data == NULL) - GOTO(out, 0); - } + lock->l_tree_node = NULL; + /* if this is the extent lock, allocate the interval tree node */ + if (type == LDLM_EXTENT) + if (ldlm_interval_alloc(lock) == NULL) + GOTO(out, rc = -ENOMEM); + + if (lvb_len) { + lock->l_lvb_len = lvb_len; + OBD_ALLOC_LARGE(lock->l_lvb_data, lvb_len); + if (lock->l_lvb_data == NULL) + GOTO(out, rc = -ENOMEM); + } lock->l_lvb_type = lvb_type; - if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK)) - GOTO(out, 0); + if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK)) + GOTO(out, rc = -ENOENT); - RETURN(lock); + RETURN(lock); out: - ldlm_lock_destroy(lock); - LDLM_LOCK_RELEASE(lock); - return NULL; + ldlm_lock_destroy(lock); + LDLM_LOCK_RELEASE(lock); + 
RETURN(ERR_PTR(rc)); } /** @@ -1652,21 +1703,20 @@ out: * set, skip all the enqueueing and delegate lock processing to intent policy * function. */ -ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, - struct ldlm_lock **lockp, - void *cookie, __u64 *flags) +enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns, + struct ldlm_lock **lockp, + void *cookie, __u64 *flags) { - struct ldlm_lock *lock = *lockp; - struct ldlm_resource *res = lock->l_resource; - int local = ns_is_client(ldlm_res_to_ns(res)); + struct ldlm_lock *lock = *lockp; + struct ldlm_resource *res = lock->l_resource; + int local = ns_is_client(ldlm_res_to_ns(res)); #ifdef HAVE_SERVER_SUPPORT - ldlm_processing_policy policy; + ldlm_processing_policy policy; #endif - ldlm_error_t rc = ELDLM_OK; - struct ldlm_interval *node = NULL; - ENTRY; + enum ldlm_error rc = ELDLM_OK; + struct ldlm_interval *node = NULL; + ENTRY; - lock->l_last_activity = cfs_time_current_sec(); /* policies are not executed on the client or during replay */ if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT && !local && ns->ns_policy) { @@ -1690,21 +1740,33 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, } } - /* For a replaying lock, it might be already in granted list. So - * unlinking the lock will cause the interval node to be freed, we - * have to allocate the interval node early otherwise we can't regrant - * this lock in the future. - jay */ - if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT) - OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO); + if (*flags & LDLM_FL_RESENT) { + /* Reconstruct LDLM_FL_SRV_ENQ_MASK @flags for reply. + * Set LOCK_CHANGED always. + * Check if the lock is granted for BLOCK_GRANTED. + * Take NO_TIMEOUT from the lock as it is inherited through + * LDLM_FL_INHERIT_MASK */ + *flags |= LDLM_FL_LOCK_CHANGED; + if (lock->l_req_mode != lock->l_granted_mode) + *flags |= LDLM_FL_BLOCK_GRANTED; + *flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT; + RETURN(ELDLM_OK); + } + + /* For a replaying lock, it might be already in granted list. So + * unlinking the lock will cause the interval node to be freed, we + * have to allocate the interval node early otherwise we can't regrant + * this lock in the future. - jay */ + if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT) + OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS); lock_res_and_lock(lock); if (local && lock->l_req_mode == lock->l_granted_mode) { /* The server returned a blocked lock, but it was granted * before we got a chance to actually enqueue it. We don't * need to do anything else. */ - *flags &= ~(LDLM_FL_BLOCK_GRANTED | - LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT); - GOTO(out, ELDLM_OK); + *flags &= ~LDLM_FL_BLOCKED_MASK; + GOTO(out, rc = ELDLM_OK); } ldlm_resource_unlink_lock(lock); @@ -1714,14 +1776,17 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, GOTO(out, rc = -ENOMEM); } - CFS_INIT_LIST_HEAD(&node->li_group); + INIT_LIST_HEAD(&node->li_group); ldlm_interval_attach(node, lock); node = NULL; } /* Some flags from the enqueue want to make it into the AST, via the * lock's l_flags. 
*/ - lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA; + if (*flags & LDLM_FL_AST_DISCARD_DATA) + ldlm_set_ast_discard_data(lock); + if (*flags & LDLM_FL_TEST_LOCK) + ldlm_set_test_lock(lock); /* This distinction between local lock trees is very important; a client * namespace only has information about locks taken by that client, and @@ -1741,18 +1806,18 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, ldlm_resource_add_lock(res, &res->lr_waiting, lock); else ldlm_grant_lock(lock, NULL); - GOTO(out, ELDLM_OK); + GOTO(out, rc = ELDLM_OK); #ifdef HAVE_SERVER_SUPPORT } else if (*flags & LDLM_FL_REPLAY) { if (*flags & LDLM_FL_BLOCK_CONV) { ldlm_resource_add_lock(res, &res->lr_converting, lock); - GOTO(out, ELDLM_OK); + GOTO(out, rc = ELDLM_OK); } else if (*flags & LDLM_FL_BLOCK_WAIT) { ldlm_resource_add_lock(res, &res->lr_waiting, lock); - GOTO(out, ELDLM_OK); + GOTO(out, rc = ELDLM_OK); } else if (*flags & LDLM_FL_BLOCK_GRANTED) { ldlm_grant_lock(lock, NULL); - GOTO(out, ELDLM_OK); + GOTO(out, rc = ELDLM_OK); } /* If no flags, fall through to normal enqueue path. */ } @@ -1782,24 +1847,25 @@ out: * * Must be called with resource lock held. */ -int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue, - cfs_list_t *work_list) +int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue, + struct list_head *work_list) { - cfs_list_t *tmp, *pos; - ldlm_processing_policy policy; + struct list_head *tmp, *pos; + ldlm_processing_policy policy; __u64 flags; - int rc = LDLM_ITER_CONTINUE; - ldlm_error_t err; - ENTRY; + int rc = LDLM_ITER_CONTINUE; + enum ldlm_error err; + ENTRY; - check_res_locked(res); + check_res_locked(res); - policy = ldlm_processing_policy_table[res->lr_type]; - LASSERT(policy); + policy = ldlm_processing_policy_table[res->lr_type]; + LASSERT(policy); - cfs_list_for_each_safe(tmp, pos, queue) { - struct ldlm_lock *pending; - pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link); + list_for_each_safe(tmp, pos, queue) { + struct ldlm_lock *pending; + + pending = list_entry(tmp, struct ldlm_lock, l_res_link); CDEBUG(D_INFO, "Reprocessing lock %p\n", pending); @@ -1825,16 +1891,16 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) struct ldlm_lock *lock; ENTRY; - if (cfs_list_empty(arg->list)) + if (list_empty(arg->list)) RETURN(-ENOENT); - lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_bl_ast); + lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast); /* nobody should touch l_bl_ast */ lock_res_and_lock(lock); - cfs_list_del_init(&lock->l_bl_ast); + list_del_init(&lock->l_bl_ast); - LASSERT(lock->l_flags & LDLM_FL_AST_SENT); + LASSERT(ldlm_is_ast_sent(lock)); LASSERT(lock->l_bl_ast_run == 0); LASSERT(lock->l_blocking_lock); lock->l_bl_ast_run++; @@ -1862,10 +1928,10 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) ldlm_completion_callback completion_callback; ENTRY; - if (cfs_list_empty(arg->list)) + if (list_empty(arg->list)) RETURN(-ENOENT); - lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_cp_ast); + lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast); /* It's possible to receive a completion AST before we've set * the l_completion_ast pointer: either because the AST arrived @@ -1880,12 +1946,12 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) /* nobody should touch l_cp_ast */ lock_res_and_lock(lock); - cfs_list_del_init(&lock->l_cp_ast); - LASSERT(lock->l_flags & LDLM_FL_CP_REQD); + list_del_init(&lock->l_cp_ast); + 
LASSERT(ldlm_is_cp_reqd(lock)); /* save l_completion_ast since it can be changed by * mds_intent_policy(), see bug 14225 */ completion_callback = lock->l_completion_ast; - lock->l_flags &= ~LDLM_FL_CP_REQD; + ldlm_clear_cp_reqd(lock); unlock_res_and_lock(lock); if (completion_callback != NULL) @@ -1907,11 +1973,11 @@ ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) struct ldlm_lock *lock; ENTRY; - if (cfs_list_empty(arg->list)) + if (list_empty(arg->list)) RETURN(-ENOENT); - lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_rk_ast); - cfs_list_del_init(&lock->l_rk_ast); + lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast); + list_del_init(&lock->l_rk_ast); /* the desc just pretend to exclusive */ ldlm_lock2desc(lock, &desc); @@ -1935,12 +2001,12 @@ int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) int rc = 0; ENTRY; - if (cfs_list_empty(arg->list)) + if (list_empty(arg->list)) RETURN(-ENOENT); - gl_work = cfs_list_entry(arg->list->next, struct ldlm_glimpse_work, + gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work, gl_list); - cfs_list_del_init(&gl_work->gl_list); + list_del_init(&gl_work->gl_list); lock = gl_work->gl_lock; @@ -1965,21 +2031,21 @@ int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) * Used on server to send multiple ASTs together instead of sending one by * one. */ -int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list, +int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, ldlm_desc_ast_t ast_type) { struct ldlm_cb_set_arg *arg; set_producer_func work_ast_lock; int rc; - if (cfs_list_empty(rpc_list)) + if (list_empty(rpc_list)) RETURN(0); OBD_ALLOC_PTR(arg); if (arg == NULL) RETURN(-ENOMEM); - cfs_atomic_set(&arg->restart, 0); + atomic_set(&arg->restart, 0); arg->list = rpc_list; switch (ast_type) { @@ -2015,7 +2081,7 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list, ptlrpc_set_wait(arg->set); ptlrpc_set_destroy(arg->set); - rc = cfs_atomic_read(&arg->restart) ? -ERESTART : 0; + rc = atomic_read(&arg->restart) ? -ERESTART : 0; GOTO(out, rc); out: OBD_FREE_PTR(arg); @@ -2028,8 +2094,8 @@ static int reprocess_one_queue(struct ldlm_resource *res, void *closure) return LDLM_ITER_CONTINUE; } -static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode, void *arg) +static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode, void *arg) { struct ldlm_resource *res = cfs_hash_object(hs, hnode); int rc; @@ -2045,15 +2111,14 @@ static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd, */ void ldlm_reprocess_all_ns(struct ldlm_namespace *ns) { - ENTRY; + ENTRY; - if (ns != NULL) { - cfs_hash_for_each_nolock(ns->ns_rs_hash, - ldlm_reprocess_res, NULL); - } - EXIT; + if (ns != NULL) { + cfs_hash_for_each_nolock(ns->ns_rs_hash, + ldlm_reprocess_res, NULL, 0); + } + EXIT; } -EXPORT_SYMBOL(ldlm_reprocess_all_ns); /** * Try to grant all waiting locks on a resource. @@ -2065,11 +2130,12 @@ EXPORT_SYMBOL(ldlm_reprocess_all_ns); */ void ldlm_reprocess_all(struct ldlm_resource *res) { - CFS_LIST_HEAD(rpc_list); - + struct list_head rpc_list; #ifdef HAVE_SERVER_SUPPORT int rc; ENTRY; + + INIT_LIST_HEAD(&rpc_list); /* Local lock trees don't get reprocessed. 
*/ if (ns_is_client(ldlm_res_to_ns(res))) { EXIT; @@ -2086,11 +2152,13 @@ restart: rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list, LDLM_WORK_CP_AST); if (rc == -ERESTART) { - LASSERT(cfs_list_empty(&rpc_list)); + LASSERT(list_empty(&rpc_list)); goto restart; } #else ENTRY; + + INIT_LIST_HEAD(&rpc_list); if (!ns_is_client(ldlm_res_to_ns(res))) { CERROR("This is client-side-only module, cannot handle " "LDLM_NAMESPACE_SERVER resource type lock.\n"); @@ -2099,6 +2167,7 @@ restart: #endif EXIT; } +EXPORT_SYMBOL(ldlm_reprocess_all); /** * Helper function to call blocking AST for LDLM lock \a lock in a @@ -2107,8 +2176,8 @@ restart: void ldlm_cancel_callback(struct ldlm_lock *lock) { check_res_locked(lock->l_resource); - if (!(lock->l_flags & LDLM_FL_CANCEL)) { - lock->l_flags |= LDLM_FL_CANCEL; + if (!ldlm_is_cancel(lock)) { + ldlm_set_cancel(lock); if (lock->l_blocking_ast) { unlock_res_and_lock(lock); lock->l_blocking_ast(lock, NULL, lock->l_ast_data, @@ -2118,7 +2187,7 @@ void ldlm_cancel_callback(struct ldlm_lock *lock) LDLM_DEBUG(lock, "no blocking ast"); } } - lock->l_flags |= LDLM_FL_BL_DONE; + ldlm_set_bl_done(lock); } /** @@ -2130,8 +2199,8 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req) req->l_resource->lr_type != LDLM_IBITS) return; - cfs_list_del_init(&req->l_sl_policy); - cfs_list_del_init(&req->l_sl_mode); + list_del_init(&req->l_sl_policy); + list_del_init(&req->l_sl_mode); } /** @@ -2155,16 +2224,13 @@ void ldlm_lock_cancel(struct ldlm_lock *lock) LBUG(); } - if (lock->l_flags & LDLM_FL_WAITED) + if (ldlm_is_waited(lock)) ldlm_del_waiting_lock(lock); /* Releases cancel callback. */ ldlm_cancel_callback(lock); - /* Yes, second time, just in case it was added again while we were - * running with no res lock in ldlm_cancel_callback */ - if (lock->l_flags & LDLM_FL_WAITED) - ldlm_del_waiting_lock(lock); + LASSERT(!ldlm_is_waited(lock)); ldlm_resource_unlink_lock(lock); ldlm_lock_destroy_nolock(lock); @@ -2206,47 +2272,93 @@ struct export_cl_data { int ecl_loop; }; +static void ldlm_cancel_lock_for_export(struct obd_export *exp, + struct ldlm_lock *lock, + struct export_cl_data *ecl) +{ + struct ldlm_resource *res; + + res = ldlm_resource_getref(lock->l_resource); + + ldlm_res_lvbo_update(res, NULL, 1); + ldlm_lock_cancel(lock); + if (!exp->exp_obd->obd_stopping) + ldlm_reprocess_all(res); + ldlm_resource_putref(res); + + ecl->ecl_loop++; + if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) { + CDEBUG(D_INFO, "Export %p, %d locks cancelled.\n", + exp, ecl->ecl_loop); + } +} + /** - * Iterator function for ldlm_cancel_locks_for_export. + * Iterator function for ldlm_export_cancel_locks. * Cancels passed locks. 
@@ -2206,47 +2272,93 @@ struct export_cl_data {
 	int ecl_loop;
 };
 
+static void ldlm_cancel_lock_for_export(struct obd_export *exp,
+					struct ldlm_lock *lock,
+					struct export_cl_data *ecl)
+{
+	struct ldlm_resource *res;
+
+	res = ldlm_resource_getref(lock->l_resource);
+
+	ldlm_res_lvbo_update(res, NULL, 1);
+	ldlm_lock_cancel(lock);
+	if (!exp->exp_obd->obd_stopping)
+		ldlm_reprocess_all(res);
+	ldlm_resource_putref(res);
+
+	ecl->ecl_loop++;
+	if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
+		CDEBUG(D_INFO, "Export %p, %d locks cancelled.\n",
+		       exp, ecl->ecl_loop);
+	}
+}
+
 /**
- * Iterator function for ldlm_cancel_locks_for_export.
+ * Iterator function for ldlm_export_cancel_locks.
  * Cancels passed locks.
  */
-int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                    cfs_hlist_node_t *hnode, void *data)
+static int
+ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+				struct hlist_node *hnode, void *data)
 {
 	struct export_cl_data *ecl = (struct export_cl_data *)data;
 	struct obd_export *exp = ecl->ecl_exp;
-	struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
-	struct ldlm_resource *res;
+	struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
 
-	res = ldlm_resource_getref(lock->l_resource);
-	LDLM_LOCK_GET(lock);
+	LDLM_LOCK_GET(lock);
+	ldlm_cancel_lock_for_export(exp, lock, ecl);
+	LDLM_LOCK_RELEASE(lock);
 
-	LDLM_DEBUG(lock, "export %p", exp);
-	ldlm_res_lvbo_update(res, NULL, 1);
-	ldlm_lock_cancel(lock);
-	ldlm_reprocess_all(res);
-	ldlm_resource_putref(res);
-	LDLM_LOCK_RELEASE(lock);
+	return 0;
+}
 
-	ecl->ecl_loop++;
-	if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
-		CDEBUG(D_INFO,
-		       "Cancel lock %p for export %p (loop %d), still have "
-		       "%d locks left on hash table.\n",
-		       lock, exp, ecl->ecl_loop,
-		       cfs_atomic_read(&hs->hs_count));
+/**
+ * Cancel all blocked locks for given export.
+ *
+ * Typically called on client disconnection/eviction
+ */
+int ldlm_export_cancel_blocked_locks(struct obd_export *exp)
+{
+	struct export_cl_data ecl = {
+		.ecl_exp	= exp,
+		.ecl_loop	= 0,
+	};
+
+	while (!list_empty(&exp->exp_bl_list)) {
+		struct ldlm_lock *lock;
+
+		spin_lock_bh(&exp->exp_bl_list_lock);
+		if (!list_empty(&exp->exp_bl_list)) {
+			lock = list_entry(exp->exp_bl_list.next,
+					  struct ldlm_lock, l_exp_list);
+			LDLM_LOCK_GET(lock);
+			list_del_init(&lock->l_exp_list);
+		} else {
+			lock = NULL;
+		}
+		spin_unlock_bh(&exp->exp_bl_list_lock);
+
+		if (lock == NULL)
+			break;
+
+		ldlm_cancel_lock_for_export(exp, lock, &ecl);
+		LDLM_LOCK_RELEASE(lock);
 	}
 
-	return 0;
+	CDEBUG(D_DLMTRACE, "Export %p, canceled %d locks, "
+	       "left on hash table %d.\n", exp, ecl.ecl_loop,
	       atomic_read(&exp->exp_lock_hash->hs_count));
+
+	return ecl.ecl_loop;
 }
 
 /**
  * Cancel all locks for given export.
  *
- * Typically called on client disconnection/eviction
+ * Typically called after client disconnection/eviction
  */
-void ldlm_cancel_locks_for_export(struct obd_export *exp)
+int ldlm_export_cancel_locks(struct obd_export *exp)
 {
 	struct export_cl_data ecl = {
 		.ecl_exp	= exp,
@@ -2255,6 +2367,12 @@ void ldlm_cancel_locks_for_export(struct obd_export *exp)
 
 	cfs_hash_for_each_empty(exp->exp_lock_hash,
 				ldlm_cancel_locks_for_export_cb, &ecl);
+
+	CDEBUG(D_DLMTRACE, "Export %p, canceled %d locks, "
+	       "left on hash table %d.\n", exp, ecl.ecl_loop,
+	       atomic_read(&exp->exp_lock_hash->hs_count));
+
+	return ecl.ecl_loop;
 }
 
 /**
@@ -2267,7 +2385,7 @@ void ldlm_cancel_locks_for_export(struct obd_export *exp)
  * \param lock A lock to convert
  * \param new_mode new lock mode
  */
-void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
+void ldlm_lock_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode)
 {
 	ENTRY;
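/*
 * Two idioms from the hunks above, restated as a self-contained sketch
 * (struct work, blocked, blocked_lock and drain_blocked() are
 * hypothetical names, not Lustre API):
 *
 * 1. ldlm_export_cancel_blocked_locks() pops entries with the classic
 *    "recheck under lock" shape: the unlocked list_empty() is only a
 *    hint, the entry is detached and pinned while exp_bl_list_lock is
 *    held, and the expensive cancel runs after the spinlock is dropped.
 *
 * 2. ldlm_cancel_lock_for_export() throttles its CDEBUG with
 *    "(loop & -loop) == loop": x & -x isolates the lowest set bit, so
 *    the test holds only when at most one bit is set, i.e. logging fires
 *    at iterations 1, 2, 4, 8, ...
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct work {
	struct list_head w_link;
};

static LIST_HEAD(blocked);
static DEFINE_SPINLOCK(blocked_lock);

static void drain_blocked(void)
{
	int loop = 0;

	while (!list_empty(&blocked)) {		/* unlocked hint only */
		struct work *w = NULL;

		spin_lock_bh(&blocked_lock);
		if (!list_empty(&blocked)) {	/* recheck under the lock */
			w = list_entry(blocked.next, struct work, w_link);
			list_del_init(&w->w_link);
		}
		spin_unlock_bh(&blocked_lock);

		if (w == NULL)
			break;			/* lost the race, all done */

		/* heavy per-entry processing belongs outside the lock */

		loop++;
		if ((loop & -loop) == loop)
			;	/* log here: fires at 1, 2, 4, 8, ... */
	}
}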
@@ -2298,39 +2416,40 @@ EXPORT_SYMBOL(ldlm_lock_downgrade);
  * optimizations could take advantage of it to avoid discarding cached
  * pages on a file.
  */
-struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
-                                        __u32 *flags)
+struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock,
+					enum ldlm_mode new_mode, __u32 *flags)
 {
-	CFS_LIST_HEAD(rpc_list);
-	struct ldlm_resource *res;
-	struct ldlm_namespace *ns;
-	int granted = 0;
+	struct list_head rpc_list;
+	struct ldlm_resource *res;
+	struct ldlm_namespace *ns;
+	int granted = 0;
 #ifdef HAVE_SERVER_SUPPORT
 	int old_mode;
 	struct sl_insert_point prev;
 #endif
-	struct ldlm_interval *node;
-	ENTRY;
+	struct ldlm_interval *node;
+	ENTRY;
 
+	INIT_LIST_HEAD(&rpc_list);
 	/* Just return if mode is unchanged. */
 	if (new_mode == lock->l_granted_mode) {
-		*flags |= LDLM_FL_BLOCK_GRANTED;
-		RETURN(lock->l_resource);
-	}
+		*flags |= LDLM_FL_BLOCK_GRANTED;
+		RETURN(lock->l_resource);
+	}
 
-	/* I can't check the type of lock here because the bitlock of lock
-	 * is not held here, so do the allocation blindly. -jay */
-	OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
+	/* I can't check the type of lock here because the bitlock of lock
+	 * is not held here, so do the allocation blindly. -jay */
+	OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
 	if (node == NULL)
 		/* Actually, this causes EDEADLOCK to be returned */
-		RETURN(NULL);
+		RETURN(NULL);
 
-	LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
-		 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
+	LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
+		 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
 
-	lock_res_and_lock(lock);
+	lock_res_and_lock(lock);
 
-	res = lock->l_resource;
-	ns = ldlm_res_to_ns(res);
+	res = lock->l_resource;
+	ns = ldlm_res_to_ns(res);
 
 #ifdef HAVE_SERVER_SUPPORT
 	old_mode = lock->l_req_mode;
@@ -2352,7 +2471,7 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
 			/* FIXME: ugly code, I have to attach the lock to a
 			 * interval node again since perhaps it will be granted
 			 * soon */
-			CFS_INIT_LIST_HEAD(&node->li_group);
+			INIT_LIST_HEAD(&node->li_group);
 			ldlm_interval_attach(node, lock);
 			node = NULL;
 		}
@@ -2382,11 +2501,12 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
 			lock->l_completion_ast(lock, 0, NULL);
 	}
 #ifdef HAVE_SERVER_SUPPORT
-	} else {
-		int rc;
-		ldlm_error_t err;
+	} else {
+		int rc;
+		enum ldlm_error err;
 		__u64 pflags = 0;
-		ldlm_processing_policy policy;
+		ldlm_processing_policy policy;
+
 		policy = ldlm_processing_policy_table[res->lr_type];
 		rc = policy(lock, &pflags, 0, &err, &rpc_list);
 		if (rc == LDLM_ITER_STOP) {
@@ -2417,7 +2537,6 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
 		OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
 	RETURN(res);
 }
-EXPORT_SYMBOL(ldlm_lock_convert);
 
 /**
  * Print lock with lock handle \a lockh description into debug log.
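/*
 * Note on the allocation-flag change above: the old code passed bare
 * __GFP_IO (no __GFP_WAIT), an atomic-style allocation that fails easily
 * under memory pressure.  GFP_NOFS (historically __GFP_WAIT | __GFP_IO)
 * may sleep and retry, but still forbids re-entering filesystem code
 * from reclaim, which could otherwise recurse into Lustre and deadlock
 * on the very DLM state being manipulated here.  A hedged sketch of the
 * rule of thumb (foo_cache/foo_node are hypothetical):
 */
#include <linux/slab.h>

struct foo_node {
	int pad;
};

static struct kmem_cache *foo_cache;

static struct foo_node *foo_alloc_locked_path(void)
{
	/* in a path that holds (or is about to take) fs/DLM locks,
	 * GFP_NOFS avoids recursing into the filesystem via reclaim */
	return kmem_cache_alloc(foo_cache, GFP_NOFS);
}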
@@ -2470,12 +2589,12 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
 			   "remote: "LPX64" expref: %d pid: %u timeout: %lu "
 			   "lvb_type: %d\n",
 			   lock,
-			   lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+			   lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
 			   lock->l_readers, lock->l_writers,
 			   ldlm_lockname[lock->l_granted_mode],
 			   ldlm_lockname[lock->l_req_mode],
 			   lock->l_flags, nid, lock->l_remote_handle.cookie,
-			   exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+			   exp ? atomic_read(&exp->exp_refcount) : -99,
 			   lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
 		va_end(args);
 		return;
 	}
@@ -2489,18 +2608,18 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
 			"(req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote: "
 			LPX64" expref: %d pid: %u timeout: %lu lvb_type: %d\n",
 			ldlm_lock_to_ns_name(lock), lock,
-			lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+			lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
 			lock->l_readers, lock->l_writers,
 			ldlm_lockname[lock->l_granted_mode],
 			ldlm_lockname[lock->l_req_mode],
 			PLDLMRES(resource),
-			cfs_atomic_read(&resource->lr_refcount),
+			atomic_read(&resource->lr_refcount),
 			ldlm_typename[resource->lr_type],
 			lock->l_policy_data.l_extent.start,
 			lock->l_policy_data.l_extent.end,
 			lock->l_req_extent.start, lock->l_req_extent.end,
 			lock->l_flags, nid, lock->l_remote_handle.cookie,
-			exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+			exp ? atomic_read(&exp->exp_refcount) : -99,
 			lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
 		break;
@@ -2512,18 +2631,18 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
 			"["LPU64"->"LPU64"] flags: "LPX64" nid: %s "
 			"remote: "LPX64" expref: %d pid: %u timeout: %lu\n",
 			ldlm_lock_to_ns_name(lock), lock,
-			lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+			lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
 			lock->l_readers, lock->l_writers,
 			ldlm_lockname[lock->l_granted_mode],
 			ldlm_lockname[lock->l_req_mode],
 			PLDLMRES(resource),
-			cfs_atomic_read(&resource->lr_refcount),
+			atomic_read(&resource->lr_refcount),
 			ldlm_typename[resource->lr_type],
 			lock->l_policy_data.l_flock.pid,
 			lock->l_policy_data.l_flock.start,
 			lock->l_policy_data.l_flock.end,
 			lock->l_flags, nid, lock->l_remote_handle.cookie,
-			exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+			exp ? atomic_read(&exp->exp_refcount) : -99,
 			lock->l_pid, lock->l_callback_timeout);
 		break;
@@ -2535,16 +2654,16 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
 			"pid: %u timeout: %lu lvb_type: %d\n",
 			ldlm_lock_to_ns_name(lock),
 			lock, lock->l_handle.h_cookie,
-			cfs_atomic_read(&lock->l_refc),
+			atomic_read(&lock->l_refc),
 			lock->l_readers, lock->l_writers,
 			ldlm_lockname[lock->l_granted_mode],
 			ldlm_lockname[lock->l_req_mode],
 			PLDLMRES(resource),
 			lock->l_policy_data.l_inodebits.bits,
-			cfs_atomic_read(&resource->lr_refcount),
+			atomic_read(&resource->lr_refcount),
 			ldlm_typename[resource->lr_type],
 			lock->l_flags, nid, lock->l_remote_handle.cookie,
-			exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+			exp ? atomic_read(&exp->exp_refcount) : -99,
 			lock->l_pid, lock->l_callback_timeout,
 			lock->l_lvb_type);
 		break;
@@ -2557,15 +2676,15 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
 			"timeout: %lu lvb_type: %d\n",
 			ldlm_lock_to_ns_name(lock),
 			lock, lock->l_handle.h_cookie,
-			cfs_atomic_read(&lock->l_refc),
+			atomic_read(&lock->l_refc),
 			lock->l_readers, lock->l_writers,
 			ldlm_lockname[lock->l_granted_mode],
 			ldlm_lockname[lock->l_req_mode],
 			PLDLMRES(resource),
-			cfs_atomic_read(&resource->lr_refcount),
+			atomic_read(&resource->lr_refcount),
 			ldlm_typename[resource->lr_type],
 			lock->l_flags, nid, lock->l_remote_handle.cookie,
-			exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+			exp ? atomic_read(&exp->exp_refcount) : -99,
 			lock->l_pid, lock->l_callback_timeout,
 			lock->l_lvb_type);
 		break;
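/*
 * _ldlm_lock_debug() above picks a format string per resource type
 * (extent, flock, ibits, plain) and prints -99 as a sentinel refcount
 * when no export is attached, so log readers can tell "no export" from
 * a real count.  A minimal illustration of the sentinel idiom (plain C,
 * hypothetical types):
 */
#include <stdio.h>

struct export {
	int refcount;
};

static void debug_line(const struct export *exp)
{
	/* -99 can never be a real refcount, so it reads as "no export" */
	printf("expref: %d\n", exp ? exp->refcount : -99);
}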