X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fldlm%2Fldlm_lock.c;h=93ccbee37c27b0f4d26b6533e888ae364229345d;hp=6aa3953a2b289c8cd6ef7cad893744a8b350f6d1;hb=ae0d69437e35961c257f076da6dcc1842a55456d;hpb=1ad64953c611a4f8bcb436bc269b7a85820e23c2 diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c index 6aa3953..93ccbee 100644 --- a/lustre/ldlm/ldlm_lock.c +++ b/lustre/ldlm/ldlm_lock.c @@ -1,36 +1,49 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. * - * Copyright (c) 2002, 2003 Cluster File Systems, Inc. - * Author: Peter Braam - * Author: Phil Schwan + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). * - * This file is part of the Lustre file system, http://www.lustre.org - * Lustre is a trademark of Cluster File Systems, Inc. + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * - * You may have signed or agreed to another license before downloading - * this software. If so, you are bound by the terms and conditions - * of that agreement, and the following does not apply to you. See the - * LICENSE file included with this distribution for more information. + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. * - * If you did not agree to a different license, then this copy of Lustre - * is open source software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. + * GPL HEADER END + */ +/* + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. * - * In either case, Lustre is distributed in the hope that it will be - * useful, but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * license text for more details. + * Copyright (c) 2011, 2012, Whamcloud, Inc. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. 
+ * + * lustre/ldlm/ldlm_lock.c + * + * Author: Peter Braam + * Author: Phil Schwan */ #define DEBUG_SUBSYSTEM S_LDLM #ifdef __KERNEL__ # include -# ifndef HAVE_VFS_INTENT_PATCHES # include -# endif #else # include #endif @@ -47,7 +60,8 @@ char *ldlm_lockname[] = { [LCK_CW] "CW", [LCK_CR] "CR", [LCK_NL] "NL", - [LCK_GROUP] "GROUP" + [LCK_GROUP] "GROUP", + [LCK_COS] "COS" }; char *ldlm_typename[] = { @@ -57,6 +71,61 @@ char *ldlm_typename[] = { [LDLM_IBITS] "IBT", }; +static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = { + [LDLM_PLAIN - LDLM_MIN_TYPE] ldlm_plain_policy_wire_to_local, + [LDLM_EXTENT - LDLM_MIN_TYPE] ldlm_extent_policy_wire_to_local, + [LDLM_FLOCK - LDLM_MIN_TYPE] ldlm_flock_policy_wire18_to_local, + [LDLM_IBITS - LDLM_MIN_TYPE] ldlm_ibits_policy_wire_to_local, +}; + +static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = { + [LDLM_PLAIN - LDLM_MIN_TYPE] ldlm_plain_policy_wire_to_local, + [LDLM_EXTENT - LDLM_MIN_TYPE] ldlm_extent_policy_wire_to_local, + [LDLM_FLOCK - LDLM_MIN_TYPE] ldlm_flock_policy_wire21_to_local, + [LDLM_IBITS - LDLM_MIN_TYPE] ldlm_ibits_policy_wire_to_local, +}; + +static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = { + [LDLM_PLAIN - LDLM_MIN_TYPE] ldlm_plain_policy_local_to_wire, + [LDLM_EXTENT - LDLM_MIN_TYPE] ldlm_extent_policy_local_to_wire, + [LDLM_FLOCK - LDLM_MIN_TYPE] ldlm_flock_policy_local_to_wire, + [LDLM_IBITS - LDLM_MIN_TYPE] ldlm_ibits_policy_local_to_wire, +}; + +/** + * Converts lock policy from local format to on the wire lock_desc format + */ +void ldlm_convert_policy_to_wire(ldlm_type_t type, + const ldlm_policy_data_t *lpolicy, + ldlm_wire_policy_data_t *wpolicy) +{ + ldlm_policy_local_to_wire_t convert; + + convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE]; + + convert(lpolicy, wpolicy); +} + +/** + * Converts lock policy from on the wire lock_desc format to local format + */ +void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type, + const ldlm_wire_policy_data_t *wpolicy, + ldlm_policy_data_t *lpolicy) +{ + ldlm_policy_wire_to_local_t convert; + int new_client; + + /** some badnes for 2.0.0 clients, but 2.0.0 isn't supported */ + new_client = (exp->exp_connect_flags & OBD_CONNECT_FULL20) != 0; + if (new_client) + convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE]; + else + convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE]; + + convert(wpolicy, lpolicy); +} + char *ldlm_it2str(int it) { switch (it) { @@ -76,6 +145,8 @@ char *ldlm_it2str(int it) return "unlink"; case IT_GETXATTR: return "getxattr"; + case IT_LAYOUT: + return "layout"; default: CERROR("Unknown intent %d\n", it); return "UNKNOWN"; @@ -84,12 +155,13 @@ char *ldlm_it2str(int it) extern cfs_mem_cache_t *ldlm_lock_slab; +#ifdef HAVE_SERVER_SUPPORT static ldlm_processing_policy ldlm_processing_policy_table[] = { [LDLM_PLAIN] ldlm_process_plain_lock, [LDLM_EXTENT] ldlm_process_extent_lock, -#ifdef __KERNEL__ +# ifdef __KERNEL__ [LDLM_FLOCK] ldlm_process_flock_lock, -#endif +# endif [LDLM_IBITS] ldlm_process_inodebits_lock, }; @@ -97,6 +169,7 @@ ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res) { return ldlm_processing_policy_table[res->lr_type]; } +#endif /* HAVE_SERVER_SUPPORT */ void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg) { @@ -116,23 +189,17 @@ void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg) */ struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock) { - atomic_inc(&lock->l_refc); + 
cfs_atomic_inc(&lock->l_refc); return lock; } -static void ldlm_lock_free(struct ldlm_lock *lock, size_t size) -{ - LASSERT(size == sizeof(*lock)); - OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock)); -} - void ldlm_lock_put(struct ldlm_lock *lock) { ENTRY; LASSERT(lock->l_resource != LP_POISON); - LASSERT(atomic_read(&lock->l_refc) > 0); - if (atomic_dec_and_test(&lock->l_refc)) { + LASSERT(cfs_atomic_read(&lock->l_refc) > 0); + if (cfs_atomic_dec_and_test(&lock->l_refc)) { struct ldlm_resource *res; LDLM_DEBUG(lock, @@ -140,14 +207,16 @@ void ldlm_lock_put(struct ldlm_lock *lock) res = lock->l_resource; LASSERT(lock->l_destroyed); - LASSERT(list_empty(&lock->l_res_link)); - LASSERT(list_empty(&lock->l_pending_chain)); + LASSERT(cfs_list_empty(&lock->l_res_link)); + LASSERT(cfs_list_empty(&lock->l_pending_chain)); - atomic_dec(&res->lr_namespace->ns_locks); + lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats, + LDLM_NSS_LOCKS); + lu_ref_del(&res->lr_reference, "lock", lock); ldlm_resource_putref(res); lock->l_resource = NULL; if (lock->l_export) { - class_export_put(lock->l_export); + class_export_lock_put(lock->l_export, lock); lock->l_export = NULL; } @@ -155,8 +224,8 @@ void ldlm_lock_put(struct ldlm_lock *lock) OBD_FREE(lock->l_lvb_data, lock->l_lvb_len); ldlm_interval_free(ldlm_interval_detach(lock)); - OBD_FREE_RCU_CB(lock, sizeof(*lock), &lock->l_handle, - ldlm_lock_free); + lu_ref_fini(&lock->l_reference); + OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle); } EXIT; @@ -165,12 +234,15 @@ void ldlm_lock_put(struct ldlm_lock *lock) int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock) { int rc = 0; - if (!list_empty(&lock->l_lru)) { - struct ldlm_namespace *ns = lock->l_resource->lr_namespace; + if (!cfs_list_empty(&lock->l_lru)) { + struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); + LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); - list_del_init(&lock->l_lru); + cfs_list_del_init(&lock->l_lru); + if (lock->l_flags & LDLM_FL_SKIPPED) + lock->l_flags &= ~LDLM_FL_SKIPPED; + LASSERT(ns->ns_nr_unused > 0); ns->ns_nr_unused--; - LASSERT(ns->ns_nr_unused >= 0); rc = 1; } return rc; @@ -178,47 +250,62 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock) int ldlm_lock_remove_from_lru(struct ldlm_lock *lock) { - struct ldlm_namespace *ns = lock->l_resource->lr_namespace; + struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); int rc; + ENTRY; - spin_lock(&ns->ns_unused_lock); + if (lock->l_ns_srv) { + LASSERT(cfs_list_empty(&lock->l_lru)); + RETURN(0); + } + + cfs_spin_lock(&ns->ns_lock); rc = ldlm_lock_remove_from_lru_nolock(lock); - spin_unlock(&ns->ns_unused_lock); + cfs_spin_unlock(&ns->ns_lock); EXIT; return rc; } void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock) { - struct ldlm_namespace *ns = lock->l_resource->lr_namespace; + struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); + lock->l_last_used = cfs_time_current(); - LASSERT(list_empty(&lock->l_lru)); + LASSERT(cfs_list_empty(&lock->l_lru)); LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); - list_add_tail(&lock->l_lru, &ns->ns_unused_list); + cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list); LASSERT(ns->ns_nr_unused >= 0); ns->ns_nr_unused++; } void ldlm_lock_add_to_lru(struct ldlm_lock *lock) { - struct ldlm_namespace *ns = lock->l_resource->lr_namespace; + struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); + ENTRY; - spin_lock(&ns->ns_unused_lock); + cfs_spin_lock(&ns->ns_lock); ldlm_lock_add_to_lru_nolock(lock); - spin_unlock(&ns->ns_unused_lock); + cfs_spin_unlock(&ns->ns_lock); EXIT; } void 
ldlm_lock_touch_in_lru(struct ldlm_lock *lock) { - struct ldlm_namespace *ns = lock->l_resource->lr_namespace; + struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); + ENTRY; - spin_lock(&ns->ns_unused_lock); - if (!list_empty(&lock->l_lru)) { + if (lock->l_ns_srv) { + LASSERT(cfs_list_empty(&lock->l_lru)); + EXIT; + return; + } + + cfs_spin_lock(&ns->ns_lock); + if (!cfs_list_empty(&lock->l_lru)) { ldlm_lock_remove_from_lru_nolock(lock); ldlm_lock_add_to_lru_nolock(lock); } - spin_unlock(&ns->ns_unused_lock); + cfs_spin_unlock(&ns->ns_lock); EXIT; } @@ -233,28 +320,27 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock) if (lock->l_readers || lock->l_writers) { LDLM_ERROR(lock, "lock still has references"); - ldlm_lock_dump(D_ERROR, lock, 0); LBUG(); } - if (!list_empty(&lock->l_res_link)) { + if (!cfs_list_empty(&lock->l_res_link)) { LDLM_ERROR(lock, "lock still on resource"); - ldlm_lock_dump(D_ERROR, lock, 0); LBUG(); } if (lock->l_destroyed) { - LASSERT(list_empty(&lock->l_lru)); + LASSERT(cfs_list_empty(&lock->l_lru)); EXIT; return 0; } lock->l_destroyed = 1; - if (lock->l_export) - spin_lock(&lock->l_export->exp_ldlm_data.led_lock); - list_del_init(&lock->l_export_chain); - if (lock->l_export) - spin_unlock(&lock->l_export->exp_ldlm_data.led_lock); + if (lock->l_export && lock->l_export->exp_lock_hash) { + /* NB: it's safe to call cfs_hash_del() even lock isn't + * in exp_lock_hash. */ + cfs_hash_del(lock->l_export->exp_lock_hash, + &lock->l_remote_handle, &lock->l_exp_hash); + } ldlm_lock_remove_from_lru(lock); class_handle_unhash(&lock->l_handle); @@ -282,8 +368,10 @@ void ldlm_lock_destroy(struct ldlm_lock *lock) unlock_res_and_lock(lock); /* drop reference from hashtable only for first destroy */ - if (first) - LDLM_LOCK_PUT(lock); + if (first) { + lu_ref_del(&lock->l_reference, "hash", lock); + LDLM_LOCK_RELEASE(lock); + } EXIT; } @@ -293,8 +381,10 @@ void ldlm_lock_destroy_nolock(struct ldlm_lock *lock) ENTRY; first = ldlm_lock_destroy_internal(lock); /* drop reference from hashtable only for first destroy */ - if (first) - LDLM_LOCK_PUT(lock); + if (first) { + lu_ref_del(&lock->l_reference, "hash", lock); + LDLM_LOCK_RELEASE(lock); + } EXIT; } @@ -304,10 +394,20 @@ static void lock_handle_addref(void *lock) LDLM_LOCK_GET((struct ldlm_lock *)lock); } +static void lock_handle_free(void *lock, int size) +{ + LASSERT(size == sizeof(struct ldlm_lock)); + OBD_SLAB_FREE(lock, ldlm_lock_slab, size); +} + +struct portals_handle_ops lock_handle_ops = { + .hop_addref = lock_handle_addref, + .hop_free = lock_handle_free, +}; + /* * usage: pass in a resource on which you have done ldlm_resource_get - * pass in a parent lock on which you have done a ldlm_lock_get - * after return, ldlm_*_put the resource and parent + * new lock will take over the refcount. 
* returns: lock with refcount 2 - one for current caller and one for remote */ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) @@ -318,32 +418,43 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) if (resource == NULL) LBUG(); - OBD_SLAB_ALLOC(lock, ldlm_lock_slab, CFS_ALLOC_IO, sizeof(*lock)); + OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, CFS_ALLOC_IO); if (lock == NULL) RETURN(NULL); - spin_lock_init(&lock->l_lock); - lock->l_resource = ldlm_resource_getref(resource); + cfs_spin_lock_init(&lock->l_lock); + lock->l_resource = resource; + lu_ref_add(&resource->lr_reference, "lock", lock); - atomic_set(&lock->l_refc, 2); + cfs_atomic_set(&lock->l_refc, 2); CFS_INIT_LIST_HEAD(&lock->l_res_link); CFS_INIT_LIST_HEAD(&lock->l_lru); - CFS_INIT_LIST_HEAD(&lock->l_export_chain); CFS_INIT_LIST_HEAD(&lock->l_pending_chain); CFS_INIT_LIST_HEAD(&lock->l_bl_ast); CFS_INIT_LIST_HEAD(&lock->l_cp_ast); + CFS_INIT_LIST_HEAD(&lock->l_rk_ast); cfs_waitq_init(&lock->l_waitq); lock->l_blocking_lock = NULL; CFS_INIT_LIST_HEAD(&lock->l_sl_mode); CFS_INIT_LIST_HEAD(&lock->l_sl_policy); + CFS_INIT_HLIST_NODE(&lock->l_exp_hash); + CFS_INIT_HLIST_NODE(&lock->l_exp_flock_hash); - atomic_inc(&resource->lr_namespace->ns_locks); + lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats, + LDLM_NSS_LOCKS); CFS_INIT_LIST_HEAD(&lock->l_handle.h_link); - class_handle_hash(&lock->l_handle, lock_handle_addref); + class_handle_hash(&lock->l_handle, &lock_handle_ops); - CFS_INIT_LIST_HEAD(&lock->l_extents_list); - spin_lock_init(&lock->l_extents_list_lock); - CFS_INIT_LIST_HEAD(&lock->l_cache_locks_list); + lu_ref_init(&lock->l_reference); + lu_ref_add(&lock->l_reference, "hash", lock); + lock->l_callback_timeout = 0; + +#if LUSTRE_TRACKS_LOCK_EXP_REFS + CFS_INIT_LIST_HEAD(&lock->l_exp_refs_link); + lock->l_exp_refs_nr = 0; + lock->l_exp_refs_target = NULL; +#endif + CFS_INIT_LIST_HEAD(&lock->l_exp_list); RETURN(lock); } @@ -369,7 +480,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, LASSERT(new_resid->name[0] != 0); /* This function assumes that the lock isn't on any lists */ - LASSERT(list_empty(&lock->l_res_link)); + LASSERT(cfs_list_empty(&lock->l_res_link)); type = oldres->lr_type; unlock_res_and_lock(lock); @@ -378,15 +489,30 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, if (newres == NULL) RETURN(-ENOMEM); - lock_res_and_lock(lock); - LASSERT(memcmp(new_resid, &lock->l_resource->lr_name, - sizeof(lock->l_resource->lr_name)) != 0); - lock_res(newres); + lu_ref_add(&newres->lr_reference, "lock", lock); + /* + * To flip the lock from the old to the new resource, lock, oldres and + * newres have to be locked. Resource spin-locks are nested within + * lock->l_lock, and are taken in the memory address order to avoid + * dead-locks. + */ + cfs_spin_lock(&lock->l_lock); + oldres = lock->l_resource; + if (oldres < newres) { + lock_res(oldres); + lock_res_nested(newres, LRT_NEW); + } else { + lock_res(newres); + lock_res_nested(oldres, LRT_NEW); + } + LASSERT(memcmp(new_resid, &oldres->lr_name, + sizeof oldres->lr_name) != 0); lock->l_resource = newres; unlock_res(oldres); unlock_res_and_lock(lock); /* ...and the flowers are still standing! 
*/ + lu_ref_del(&oldres->lr_reference, "lock", lock); ldlm_resource_putref(oldres); RETURN(0); @@ -408,8 +534,7 @@ void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh) struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, int flags) { - struct ldlm_namespace *ns; - struct ldlm_lock *lock, *retval = NULL; + struct ldlm_lock *lock; ENTRY; LASSERT(handle); @@ -418,43 +543,36 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, if (lock == NULL) RETURN(NULL); - LASSERT(lock->l_resource != NULL); - ns = lock->l_resource->lr_namespace; - LASSERT(ns != NULL); + /* It's unlikely but possible that someone marked the lock as + * destroyed after we did handle2object on it */ + if (flags == 0 && !lock->l_destroyed) { + lu_ref_add(&lock->l_reference, "handle", cfs_current()); + RETURN(lock); + } lock_res_and_lock(lock); - /* It's unlikely but possible that someone marked the lock as - * destroyed after we did handle2object on it */ - if (lock->l_destroyed) { + LASSERT(lock->l_resource != NULL); + + lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current()); + if (unlikely(lock->l_destroyed)) { unlock_res_and_lock(lock); CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock); LDLM_LOCK_PUT(lock); - GOTO(out, retval); + RETURN(NULL); } if (flags && (lock->l_flags & flags)) { unlock_res_and_lock(lock); LDLM_LOCK_PUT(lock); - GOTO(out, retval); + RETURN(NULL); } if (flags) lock->l_flags |= flags; unlock_res_and_lock(lock); - retval = lock; - EXIT; - out: - return retval; -} - -struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns, - const struct lustre_handle *handle) -{ - struct ldlm_lock *retval = NULL; - retval = __ldlm_handle2lock(handle, 0); - return retval; + RETURN(lock); } void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc) @@ -465,17 +583,18 @@ void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc) */ if ((lock->l_resource->lr_type == LDLM_IBITS) && (exp && !(exp->exp_connect_flags & OBD_CONNECT_IBITS))) { - struct ldlm_resource res = *lock->l_resource; - /* Make sure all the right bits are set in this lock we are going to pass to client */ LASSERTF(lock->l_policy_data.l_inodebits.bits == - (MDS_INODELOCK_LOOKUP|MDS_INODELOCK_UPDATE), + (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE | + MDS_INODELOCK_LAYOUT), "Inappropriate inode lock bits during " "conversion " LPU64 "\n", lock->l_policy_data.l_inodebits.bits); - res.lr_type = LDLM_PLAIN; - ldlm_res2desc(&res, &desc->l_resource); + + ldlm_res2desc(lock->l_resource, &desc->l_resource); + desc->l_resource.lr_type = LDLM_PLAIN; + /* Convert "new" lock mode to something old client can understand */ if ((lock->l_req_mode == LCK_CR) || @@ -499,12 +618,14 @@ void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc) ldlm_res2desc(lock->l_resource, &desc->l_resource); desc->l_req_mode = lock->l_req_mode; desc->l_granted_mode = lock->l_granted_mode; - desc->l_policy_data = lock->l_policy_data; + ldlm_convert_policy_to_wire(lock->l_resource->lr_type, + &lock->l_policy_data, + &desc->l_policy_data); } } void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, - struct list_head *work_list) + cfs_list_t *work_list) { if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) { LDLM_DEBUG(lock, "lock incompatible; sending blocking AST."); @@ -513,28 +634,28 @@ void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, * discard dirty data, rather than writing back. 
*/ if (new->l_flags & LDLM_AST_DISCARD_DATA) lock->l_flags |= LDLM_FL_DISCARD_DATA; - LASSERT(list_empty(&lock->l_bl_ast)); - list_add(&lock->l_bl_ast, work_list); + LASSERT(cfs_list_empty(&lock->l_bl_ast)); + cfs_list_add(&lock->l_bl_ast, work_list); LDLM_LOCK_GET(lock); LASSERT(lock->l_blocking_lock == NULL); lock->l_blocking_lock = LDLM_LOCK_GET(new); } } -void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list) +void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list) { if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) { lock->l_flags |= LDLM_FL_CP_REQD; LDLM_DEBUG(lock, "lock granted; sending completion AST."); - LASSERT(list_empty(&lock->l_cp_ast)); - list_add(&lock->l_cp_ast, work_list); + LASSERT(cfs_list_empty(&lock->l_cp_ast)); + cfs_list_add(&lock->l_cp_ast, work_list); LDLM_LOCK_GET(lock); } } /* must be called with lr_lock held */ void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, - struct list_head *work_list) + cfs_list_t *work_list) { ENTRY; check_res_locked(lock->l_resource); @@ -558,14 +679,47 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode) void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode) { ldlm_lock_remove_from_lru(lock); - if (mode & (LCK_NL | LCK_CR | LCK_PR)) + if (mode & (LCK_NL | LCK_CR | LCK_PR)) { lock->l_readers++; - if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) + lu_ref_add_atomic(&lock->l_reference, "reader", lock); + } + if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) { lock->l_writers++; + lu_ref_add_atomic(&lock->l_reference, "writer", lock); + } LDLM_LOCK_GET(lock); + lu_ref_add_atomic(&lock->l_reference, "user", lock); LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]); } +/** + * Attempts to addref a lock, and fails if lock is already LDLM_FL_CBPENDING + * or destroyed. + * + * \retval 0 success, lock was addref-ed + * + * \retval -EAGAIN lock is being canceled. + */ +int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode) +{ + struct ldlm_lock *lock; + int result; + + result = -EAGAIN; + lock = ldlm_handle2lock(lockh); + if (lock != NULL) { + lock_res_and_lock(lock); + if (lock->l_readers != 0 || lock->l_writers != 0 || + !(lock->l_flags & LDLM_FL_CBPENDING)) { + ldlm_lock_addref_internal_nolock(lock, mode); + result = 0; + } + unlock_res_and_lock(lock); + LDLM_LOCK_PUT(lock); + } + return result; +} + /* only called for local locks */ void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode) { @@ -574,25 +728,39 @@ void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode) unlock_res_and_lock(lock); } -void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) +/* only called in ldlm_flock_destroy and for local locks. 
+ * * for LDLM_FLOCK type locks, l_blocking_ast is null, and + * * ldlm_lock_remove_from_lru() does nothing, it is safe + * * for ldlm_flock_destroy usage by dropping some code */ +void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode) { - struct ldlm_namespace *ns; - ENTRY; - - lock_res_and_lock(lock); - - ns = lock->l_resource->lr_namespace; - LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); if (mode & (LCK_NL | LCK_CR | LCK_PR)) { LASSERT(lock->l_readers > 0); + lu_ref_del(&lock->l_reference, "reader", lock); lock->l_readers--; } - if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) { + if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) { LASSERT(lock->l_writers > 0); + lu_ref_del(&lock->l_reference, "writer", lock); lock->l_writers--; } + lu_ref_del(&lock->l_reference, "user", lock); + LDLM_LOCK_RELEASE(lock); /* matches the LDLM_LOCK_GET() in addref */ +} + +void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) +{ + struct ldlm_namespace *ns; + ENTRY; + + lock_res_and_lock(lock); + + ns = ldlm_lock_to_ns(lock); + + ldlm_lock_decref_internal_nolock(lock, mode); + if (lock->l_flags & LDLM_FL_LOCAL && !lock->l_readers && !lock->l_writers) { /* If this is a local lock on a server namespace and this was @@ -605,7 +773,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) (lock->l_flags & LDLM_FL_CBPENDING)) { /* If we received a blocked AST and this was the last reference, * run the callback. */ - if (ns_is_server(ns) && lock->l_export) + if (lock->l_ns_srv && lock->l_export) CERROR("FL_CBPENDING set on non-local lock--just a " "warning\n"); @@ -614,6 +782,10 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) LDLM_LOCK_GET(lock); /* dropped by bl thread */ ldlm_lock_remove_from_lru(lock); unlock_res_and_lock(lock); + + if (lock->l_flags & LDLM_FL_FAIL_LOC) + OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); + if ((lock->l_flags & LDLM_FL_ATOMIC_CB) || ldlm_bl_to_thread_lock(ns, NULL, lock) != 0) ldlm_handle_bl_callback(ns, NULL, lock); @@ -621,22 +793,28 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) !lock->l_readers && !lock->l_writers && !(lock->l_flags & LDLM_FL_NO_LRU) && !(lock->l_flags & LDLM_FL_BL_AST)) { + + LDLM_DEBUG(lock, "add lock into lru list"); + /* If this is a client-side namespace and this was the last * reference, put it on the LRU. */ ldlm_lock_add_to_lru(lock); unlock_res_and_lock(lock); - /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE - * are not supported by the server, otherwise, it is done on + + if (lock->l_flags & LDLM_FL_FAIL_LOC) + OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); + + /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE + * are not supported by the server, otherwise, it is done on * enqueue. 
*/ - if (!exp_connect_cancelset(lock->l_conn_export) && + if (!exp_connect_cancelset(lock->l_conn_export) && !ns_connect_lru_resize(ns)) ldlm_cancel_lru(ns, 0, LDLM_ASYNC, 0); } else { + LDLM_DEBUG(lock, "do not add lock into lru list"); unlock_res_and_lock(lock); } - LDLM_LOCK_PUT(lock); /* matches the ldlm_lock_get in addref */ - EXIT; } @@ -666,9 +844,9 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode) } struct sl_insert_point { - struct list_head *res_link; - struct list_head *mode_link; - struct list_head *policy_link; + cfs_list_t *res_link; + cfs_list_t *mode_link; + cfs_list_t *policy_link; }; /* @@ -685,19 +863,19 @@ struct sl_insert_point { * NOTE: called by * - ldlm_grant_lock_with_skiplist */ -static void search_granted_lock(struct list_head *queue, +static void search_granted_lock(cfs_list_t *queue, struct ldlm_lock *req, struct sl_insert_point *prev) { - struct list_head *tmp; + cfs_list_t *tmp; struct ldlm_lock *lock, *mode_end, *policy_end; ENTRY; - list_for_each(tmp, queue) { - lock = list_entry(tmp, struct ldlm_lock, l_res_link); + cfs_list_for_each(tmp, queue) { + lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link); - mode_end = list_entry(lock->l_sl_mode.prev, struct ldlm_lock, - l_sl_mode); + mode_end = cfs_list_entry(lock->l_sl_mode.prev, + struct ldlm_lock, l_sl_mode); if (lock->l_req_mode != req->l_req_mode) { /* jump to last lock of mode group */ @@ -715,9 +893,10 @@ static void search_granted_lock(struct list_head *queue, return; } else if (lock->l_resource->lr_type == LDLM_IBITS) { for (;;) { - policy_end = list_entry(lock->l_sl_policy.prev, - struct ldlm_lock, - l_sl_policy); + policy_end = + cfs_list_entry(lock->l_sl_policy.prev, + struct ldlm_lock, + l_sl_policy); if (lock->l_policy_data.l_inodebits.bits == req->l_policy_data.l_inodebits.bits) { @@ -737,10 +916,10 @@ static void search_granted_lock(struct list_head *queue, /* done with mode group */ break; - /* jump to next policy group within the mode group */ + /* go to next policy group within mode group */ tmp = policy_end->l_res_link.next; - lock = list_entry(tmp, struct ldlm_lock, - l_res_link); + lock = cfs_list_entry(tmp, struct ldlm_lock, + l_res_link); } /* loop over policy groups within the mode group */ /* insert point is last lock of the mode group, @@ -751,7 +930,7 @@ static void search_granted_lock(struct list_head *queue, EXIT; return; } else { - LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock"); + LDLM_ERROR(lock,"is not LDLM_PLAIN or LDLM_IBITS lock"); LBUG(); } } @@ -765,7 +944,7 @@ static void search_granted_lock(struct list_head *queue, return; } -static void ldlm_granted_list_add_lock(struct ldlm_lock *lock, +static void ldlm_granted_list_add_lock(struct ldlm_lock *lock, struct sl_insert_point *prev) { struct ldlm_resource *res = lock->l_resource; @@ -773,22 +952,21 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock, check_res_locked(res); - ldlm_resource_dump(D_OTHER, res); - CDEBUG(D_OTHER, "About to add this lock:\n"); - ldlm_lock_dump(D_OTHER, lock, 0); + ldlm_resource_dump(D_INFO, res); + LDLM_DEBUG(lock, "About to add lock:"); if (lock->l_destroyed) { CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n"); return; } - LASSERT(list_empty(&lock->l_res_link)); - LASSERT(list_empty(&lock->l_sl_mode)); - LASSERT(list_empty(&lock->l_sl_policy)); + LASSERT(cfs_list_empty(&lock->l_res_link)); + LASSERT(cfs_list_empty(&lock->l_sl_mode)); + LASSERT(cfs_list_empty(&lock->l_sl_policy)); - list_add(&lock->l_res_link, prev->res_link); - 
list_add(&lock->l_sl_mode, prev->mode_link); - list_add(&lock->l_sl_policy, prev->policy_link); + cfs_list_add(&lock->l_res_link, prev->res_link); + cfs_list_add(&lock->l_sl_mode, prev->mode_link); + cfs_list_add(&lock->l_sl_policy, prev->policy_link); EXIT; } @@ -812,7 +990,7 @@ static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock) * * must be called with lr_lock held */ -void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) +void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list) { struct ldlm_resource *res = lock->l_resource; ENTRY; @@ -833,24 +1011,25 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) if (work_list && lock->l_completion_ast != NULL) ldlm_add_ast_work_item(lock, NULL, work_list); - ldlm_pool_add(&res->lr_namespace->ns_pool, lock); + ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock); EXIT; } /* returns a referenced lock or NULL. See the flag descriptions below, in the * comment above ldlm_lock_match */ -static struct ldlm_lock *search_queue(struct list_head *queue, +static struct ldlm_lock *search_queue(cfs_list_t *queue, ldlm_mode_t *mode, ldlm_policy_data_t *policy, - struct ldlm_lock *old_lock, int flags) + struct ldlm_lock *old_lock, + int flags, int unref) { struct ldlm_lock *lock; - struct list_head *tmp; + cfs_list_t *tmp; - list_for_each(tmp, queue) { + cfs_list_for_each(tmp, queue) { ldlm_mode_t match; - lock = list_entry(tmp, struct ldlm_lock, l_res_link); + lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link); if (lock == old_lock) break; @@ -864,7 +1043,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue, if (lock->l_flags & LDLM_FL_CBPENDING && !(flags & LDLM_FL_CBPENDING)) continue; - if (lock->l_flags & LDLM_FL_CBPENDING && + if (!unref && lock->l_flags & LDLM_FL_CBPENDING && lock->l_readers == 0 && lock->l_writers == 0) continue; @@ -891,7 +1070,9 @@ static struct ldlm_lock *search_queue(struct list_head *queue, policy->l_inodebits.bits)) continue; - if (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED)) + if (!unref && + (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED || + lock->l_failed)) continue; if ((flags & LDLM_FL_LOCAL_ONLY) && @@ -911,74 +1092,34 @@ static struct ldlm_lock *search_queue(struct list_head *queue, return NULL; } -void ldlm_lock_allow_match(struct ldlm_lock *lock) +void ldlm_lock_fail_match_locked(struct ldlm_lock *lock) +{ + if (!lock->l_failed) { + lock->l_failed = 1; + cfs_waitq_broadcast(&lock->l_waitq); + } +} +EXPORT_SYMBOL(ldlm_lock_fail_match_locked); + +void ldlm_lock_fail_match(struct ldlm_lock *lock) { lock_res_and_lock(lock); - lock->l_flags |= LDLM_FL_LVB_READY; - cfs_waitq_signal(&lock->l_waitq); + ldlm_lock_fail_match_locked(lock); unlock_res_and_lock(lock); } +EXPORT_SYMBOL(ldlm_lock_fail_match); -/** - * Checks if requested extent lock is compatible with another owned lock. - * - * Checks if \a lock is compatible with a read or write lock - * (specified by \a rw) for an extent [\a start , \a end]. 
- * - * \param lock the already owned lock - * \param rw OBD_BRW_READ if requested for reading, - * OBD_BRW_WRITE if requested for writing - * \param start start of the requested extent - * \param end end of the requested extent - * \param cookie transparent parameter for passing locking context - * - * \post result == 1, *cookie == context, appropriate lock is referenced - * - * \retval 1 owned lock is reused for the request - * \retval 0 no lock reused for the request - * - * \see ldlm_lock_fast_release - */ -int ldlm_lock_fast_match(struct ldlm_lock *lock, int rw, - obd_off start, obd_off end, - void **cookie) -{ - LASSERT(rw == OBD_BRW_READ || rw == OBD_BRW_WRITE); - /* should LCK_GROUP be handled in a special way? */ - if (lock && (rw == OBD_BRW_READ || - (lock->l_granted_mode & (LCK_PW | LCK_GROUP))) && - (lock->l_policy_data.l_extent.start <= start) && - (lock->l_policy_data.l_extent.end >= end)) { - ldlm_lock_addref_internal(lock, rw == OBD_BRW_WRITE ? LCK_PW : LCK_PR); - *cookie = (void *)lock; - return 1; /* avoid using rc for stack relief */ - } - return 0; +void ldlm_lock_allow_match_locked(struct ldlm_lock *lock) +{ + lock->l_flags |= LDLM_FL_LVB_READY; + cfs_waitq_broadcast(&lock->l_waitq); } -/** - * Releases a reference to a lock taken in a "fast" way. - * - * Releases a read or write (specified by \a rw) lock - * referenced by \a cookie. - * - * \param rw OBD_BRW_READ if requested for reading, - * OBD_BRW_WRITE if requested for writing - * \param cookie transparent parameter for passing locking context - * - * \post appropriate lock is dereferenced - * - * \see ldlm_lock_fast_lock - */ -void ldlm_lock_fast_release(void *cookie, int rw) +void ldlm_lock_allow_match(struct ldlm_lock *lock) { - struct ldlm_lock *lock = (struct ldlm_lock *)cookie; - - LASSERT(lock != NULL); - LASSERT(rw == OBD_BRW_READ || rw == OBD_BRW_WRITE); - LASSERT(rw == OBD_BRW_READ || - (lock->l_granted_mode & (LCK_PW | LCK_GROUP))); - ldlm_lock_decref_internal(lock, rw == OBD_BRW_WRITE ? 
LCK_PW : LCK_PR); + lock_res_and_lock(lock); + ldlm_lock_allow_match_locked(lock); + unlock_res_and_lock(lock); } /* Can be called in two ways: @@ -1008,7 +1149,7 @@ void ldlm_lock_fast_release(void *cookie, int rw) ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags, const struct ldlm_res_id *res_id, ldlm_type_t type, ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh) + struct lustre_handle *lockh, int unref) { struct ldlm_resource *res; struct ldlm_lock *lock, *old_lock = NULL; @@ -1019,7 +1160,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags, old_lock = ldlm_handle2lock(lockh); LASSERT(old_lock); - ns = old_lock->l_resource->lr_namespace; + ns = ldlm_lock_to_ns(old_lock); res_id = &old_lock->l_resource->lr_name; type = old_lock->l_resource->lr_type; mode = old_lock->l_req_mode; @@ -1031,23 +1172,28 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags, RETURN(0); } + LDLM_RESOURCE_ADDREF(res); lock_res(res); - lock = search_queue(&res->lr_granted, &mode, policy, old_lock, flags); + lock = search_queue(&res->lr_granted, &mode, policy, old_lock, + flags, unref); if (lock != NULL) GOTO(out, rc = 1); if (flags & LDLM_FL_BLOCK_GRANTED) GOTO(out, rc = 0); - lock = search_queue(&res->lr_converting, &mode, policy, old_lock, flags); + lock = search_queue(&res->lr_converting, &mode, policy, old_lock, + flags, unref); if (lock != NULL) GOTO(out, rc = 1); - lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, flags); + lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, + flags, unref); if (lock != NULL) GOTO(out, rc = 1); EXIT; out: unlock_res(res); + LDLM_RESOURCE_DELREF(res); ldlm_resource_putref(res); if (lock) { @@ -1061,20 +1207,30 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags, NULL); if (err) { if (flags & LDLM_FL_TEST_LOCK) - LDLM_LOCK_PUT(lock); + LDLM_LOCK_RELEASE(lock); else - ldlm_lock_decref_internal(lock, mode); + ldlm_lock_decref_internal(lock, + mode); rc = 0; goto out2; } } - lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout), NULL, - LWI_ON_SIGNAL_NOOP, NULL); + lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout), + NULL, LWI_ON_SIGNAL_NOOP, NULL); /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */ l_wait_event(lock->l_waitq, - (lock->l_flags & LDLM_FL_LVB_READY), &lwi); + lock->l_flags & LDLM_FL_LVB_READY || + lock->l_failed, + &lwi); + if (!(lock->l_flags & LDLM_FL_LVB_READY)) { + if (flags & LDLM_FL_TEST_LOCK) + LDLM_LOCK_RELEASE(lock); + else + ldlm_lock_decref_internal(lock, mode); + rc = 0; + } } } out2: @@ -1095,7 +1251,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags, } if (flags & LDLM_FL_TEST_LOCK) - LDLM_LOCK_PUT(lock); + LDLM_LOCK_RELEASE(lock); } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/ LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res " @@ -1103,7 +1259,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags, type, mode, res_id->name[0], res_id->name[1], (type == LDLM_PLAIN || type == LDLM_IBITS) ? res_id->name[2] :policy->l_extent.start, - (type == LDLM_PLAIN || type == LDLM_IBITS) ? + (type == LDLM_PLAIN || type == LDLM_IBITS) ? res_id->name[3] : policy->l_extent.end); } if (old_lock) @@ -1112,14 +1268,47 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags, return rc ? 
mode : 0; } +ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, + __u64 *bits) +{ + struct ldlm_lock *lock; + ldlm_mode_t mode = 0; + ENTRY; + + lock = ldlm_handle2lock(lockh); + if (lock != NULL) { + lock_res_and_lock(lock); + if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED || + lock->l_failed) + GOTO(out, mode); + + if (lock->l_flags & LDLM_FL_CBPENDING && + lock->l_readers == 0 && lock->l_writers == 0) + GOTO(out, mode); + + if (bits) + *bits = lock->l_policy_data.l_inodebits.bits; + mode = lock->l_granted_mode; + ldlm_lock_addref_internal_nolock(lock, mode); + } + + EXIT; + +out: + if (lock != NULL) { + unlock_res_and_lock(lock); + LDLM_LOCK_PUT(lock); + } + return mode; +} +EXPORT_SYMBOL(ldlm_revalidate_lock_handle); + /* Returns a referenced lock */ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, ldlm_type_t type, ldlm_mode_t mode, - ldlm_blocking_callback blocking, - ldlm_completion_callback completion, - ldlm_glimpse_callback glimpse, + const struct ldlm_callback_suite *cbs, void *data, __u32 lvb_len) { struct ldlm_lock *lock; @@ -1131,17 +1320,20 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, RETURN(NULL); lock = ldlm_lock_new(res); - ldlm_resource_putref(res); if (lock == NULL) RETURN(NULL); lock->l_req_mode = mode; lock->l_ast_data = data; - lock->l_blocking_ast = blocking; - lock->l_completion_ast = completion; - lock->l_glimpse_ast = glimpse; lock->l_pid = cfs_curproc_pid(); + lock->l_ns_srv = !!ns_is_server(ns); + if (cbs) { + lock->l_blocking_ast = cbs->lcs_blocking; + lock->l_completion_ast = cbs->lcs_completion; + lock->l_glimpse_ast = cbs->lcs_glimpse; + lock->l_weigh_ast = cbs->lcs_weigh; + } lock->l_tree_node = NULL; /* if this is the extent lock, allocate the interval tree node */ @@ -1157,13 +1349,14 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, GOTO(out, 0); } + if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK)) + GOTO(out, 0); + RETURN(lock); out: - if (lock->l_lvb_data) - OBD_FREE(lock->l_lvb_data, lvb_len); - ldlm_interval_free(ldlm_interval_detach(lock)); - OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock)); + ldlm_lock_destroy(lock); + LDLM_LOCK_RELEASE(lock); return NULL; } @@ -1173,13 +1366,15 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, { struct ldlm_lock *lock = *lockp; struct ldlm_resource *res = lock->l_resource; - int local = ns_is_client(res->lr_namespace); + int local = ns_is_client(ldlm_res_to_ns(res)); +#ifdef HAVE_SERVER_SUPPORT ldlm_processing_policy policy; +#endif ldlm_error_t rc = ELDLM_OK; struct ldlm_interval *node = NULL; ENTRY; - do_gettimeofday(&lock->l_enqueued_time); + lock->l_last_activity = cfs_time_current_sec(); /* policies are not executed on the client or during replay */ if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT && !local && ns->ns_policy) { @@ -1192,7 +1387,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, * work here is done. */ if (lock != *lockp) { ldlm_lock_destroy(lock); - LDLM_LOCK_PUT(lock); + LDLM_LOCK_RELEASE(lock); } *flags |= LDLM_FL_LOCK_CHANGED; RETURN(0); @@ -1208,8 +1403,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, * have to allocate the interval node early otherwise we can't regrant * this lock in the future. 
- jay */ if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT) - OBD_SLAB_ALLOC(node, ldlm_interval_slab, CFS_ALLOC_IO, - sizeof(*node)); + OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO); lock_res_and_lock(lock); if (local && lock->l_req_mode == lock->l_granted_mode) { @@ -1256,6 +1450,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, else ldlm_grant_lock(lock, NULL); GOTO(out, ELDLM_OK); +#ifdef HAVE_SERVER_SUPPORT } else if (*flags & LDLM_FL_REPLAY) { if (*flags & LDLM_FL_BLOCK_CONV) { ldlm_resource_add_lock(res, &res->lr_converting, lock); @@ -1273,6 +1468,14 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, policy = ldlm_processing_policy_table[res->lr_type]; policy(lock, flags, 1, &rc, NULL); GOTO(out, rc); +#else + } else { + CERROR("This is client-side-only module, cannot handle " + "LDLM_NAMESPACE_SERVER resource type lock.\n"); + LBUG(); + } +#endif + out: unlock_res_and_lock(lock); if (node) @@ -1280,11 +1483,12 @@ out: return rc; } +#ifdef HAVE_SERVER_SUPPORT /* Must be called with namespace taken: queue is waiting or converting. */ -int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue, - struct list_head *work_list) +int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue, + cfs_list_t *work_list) { - struct list_head *tmp, *pos; + cfs_list_t *tmp, *pos; ldlm_processing_policy policy; int flags; int rc = LDLM_ITER_CONTINUE; @@ -1296,9 +1500,9 @@ int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue, policy = ldlm_processing_policy_table[res->lr_type]; LASSERT(policy); - list_for_each_safe(tmp, pos, queue) { + cfs_list_for_each_safe(tmp, pos, queue) { struct ldlm_lock *pending; - pending = list_entry(tmp, struct ldlm_lock, l_res_link); + pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link); CDEBUG(D_INFO, "Reprocessing lock %p\n", pending); @@ -1310,163 +1514,192 @@ int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue, RETURN(rc); } +#endif -/* Helper function for ldlm_run_ast_work(). - * - * Send an existing rpc set specified by @arg->set and then - * destroy it. Create new one if @do_create flag is set. 
*/ -static void -ldlm_send_and_maybe_create_set(struct ldlm_cb_set_arg *arg, int do_create) +static int +ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) { - ENTRY; - - ptlrpc_set_wait(arg->set); - if (arg->type == LDLM_BL_CALLBACK) - OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2); - ptlrpc_set_destroy(arg->set); - - if (do_create) - arg->set = ptlrpc_prep_set(); + struct ldlm_cb_set_arg *arg = opaq; + struct ldlm_lock_desc d; + int rc; + struct ldlm_lock *lock; + ENTRY; - EXIT; -} + if (cfs_list_empty(arg->list)) + RETURN(-ENOENT); -static int -ldlm_work_bl_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg) -{ - struct ldlm_lock_desc d; - struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_bl_ast); - ENTRY; + lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_bl_ast); - /* nobody should touch l_bl_ast */ - lock_res_and_lock(lock); - list_del_init(&lock->l_bl_ast); + /* nobody should touch l_bl_ast */ + lock_res_and_lock(lock); + cfs_list_del_init(&lock->l_bl_ast); - LASSERT(lock->l_flags & LDLM_FL_AST_SENT); - LASSERT(lock->l_bl_ast_run == 0); - LASSERT(lock->l_blocking_lock); - lock->l_bl_ast_run++; - unlock_res_and_lock(lock); + LASSERT(lock->l_flags & LDLM_FL_AST_SENT); + LASSERT(lock->l_bl_ast_run == 0); + LASSERT(lock->l_blocking_lock); + lock->l_bl_ast_run++; + unlock_res_and_lock(lock); - ldlm_lock2desc(lock->l_blocking_lock, &d); + ldlm_lock2desc(lock->l_blocking_lock, &d); - LDLM_LOCK_PUT(lock->l_blocking_lock); - lock->l_blocking_lock = NULL; - lock->l_blocking_ast(lock, &d, (void *)arg, - LDLM_CB_BLOCKING); - LDLM_LOCK_PUT(lock); + rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING); + LDLM_LOCK_RELEASE(lock->l_blocking_lock); + lock->l_blocking_lock = NULL; + LDLM_LOCK_RELEASE(lock); - RETURN(1); + RETURN(rc); } static int -ldlm_work_cp_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg) +ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) { - struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_cp_ast); - ldlm_completion_callback completion_callback; - int rc = 0; - ENTRY; - - /* It's possible to receive a completion AST before we've set - * the l_completion_ast pointer: either because the AST arrived - * before the reply, or simply because there's a small race - * window between receiving the reply and finishing the local - * enqueue. (bug 842) - * - * This can't happen with the blocking_ast, however, because we - * will never call the local blocking_ast until we drop our - * reader/writer reference, which we won't do until we get the - * reply and finish enqueueing. 
*/ - - /* nobody should touch l_cp_ast */ - lock_res_and_lock(lock); - list_del_init(&lock->l_cp_ast); - LASSERT(lock->l_flags & LDLM_FL_CP_REQD); - /* save l_completion_ast since it can be changed by - * mds_intent_policy(), see bug 14225 */ - completion_callback = lock->l_completion_ast; - lock->l_flags &= ~LDLM_FL_CP_REQD; - unlock_res_and_lock(lock); - - if (completion_callback != NULL) { - completion_callback(lock, 0, (void *)arg); - rc = 1; - } - LDLM_LOCK_PUT(lock); - - RETURN(rc); + struct ldlm_cb_set_arg *arg = opaq; + int rc = 0; + struct ldlm_lock *lock; + ldlm_completion_callback completion_callback; + ENTRY; + + if (cfs_list_empty(arg->list)) + RETURN(-ENOENT); + + lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_cp_ast); + + /* It's possible to receive a completion AST before we've set + * the l_completion_ast pointer: either because the AST arrived + * before the reply, or simply because there's a small race + * window between receiving the reply and finishing the local + * enqueue. (bug 842) + * + * This can't happen with the blocking_ast, however, because we + * will never call the local blocking_ast until we drop our + * reader/writer reference, which we won't do until we get the + * reply and finish enqueueing. */ + + /* nobody should touch l_cp_ast */ + lock_res_and_lock(lock); + cfs_list_del_init(&lock->l_cp_ast); + LASSERT(lock->l_flags & LDLM_FL_CP_REQD); + /* save l_completion_ast since it can be changed by + * mds_intent_policy(), see bug 14225 */ + completion_callback = lock->l_completion_ast; + lock->l_flags &= ~LDLM_FL_CP_REQD; + unlock_res_and_lock(lock); + + if (completion_callback != NULL) + rc = completion_callback(lock, 0, (void *)arg); + LDLM_LOCK_RELEASE(lock); + + RETURN(rc); } static int -ldlm_work_revoke_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg) +ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) { - struct ldlm_lock_desc desc; - struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_export_chain); - ENTRY; + struct ldlm_cb_set_arg *arg = opaq; + struct ldlm_lock_desc desc; + int rc; + struct ldlm_lock *lock; + ENTRY; - list_del_init(&lock->l_export_chain); + if (cfs_list_empty(arg->list)) + RETURN(-ENOENT); - /* the desc just pretend to exclusive */ - ldlm_lock2desc(lock, &desc); - desc.l_req_mode = LCK_EX; - desc.l_granted_mode = 0; + lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_rk_ast); + cfs_list_del_init(&lock->l_rk_ast); - lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING); - LDLM_LOCK_PUT(lock); + /* the desc just pretend to exclusive */ + ldlm_lock2desc(lock, &desc); + desc.l_req_mode = LCK_EX; + desc.l_granted_mode = 0; - RETURN(1); + rc = lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING); + LDLM_LOCK_RELEASE(lock); + + RETURN(rc); } -int ldlm_run_ast_work(struct list_head *rpc_list, ldlm_desc_ast_t ast_type) +int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) { - struct ldlm_cb_set_arg arg; - struct list_head *tmp, *pos; - int (*work_ast_lock)(struct list_head *tmp, struct ldlm_cb_set_arg *arg); - int ast_count; - ENTRY; + struct ldlm_cb_set_arg *arg = opaq; + struct ldlm_glimpse_work *gl_work; + struct ldlm_lock *lock; + int rc = 0; + ENTRY; - arg.set = ptlrpc_prep_set(); - atomic_set(&arg.restart, 0); - switch (ast_type) { - case LDLM_WORK_BL_AST: - arg.type = LDLM_BL_CALLBACK; - work_ast_lock = ldlm_work_bl_ast_lock; - break; - case LDLM_WORK_CP_AST: - arg.type = LDLM_CP_CALLBACK; - work_ast_lock = ldlm_work_cp_ast_lock; - 
break; - case LDLM_WORK_REVOKE_AST: - arg.type = LDLM_BL_CALLBACK; - work_ast_lock = ldlm_work_revoke_ast_lock; - break; - default: - LBUG(); - } + if (cfs_list_empty(arg->list)) + RETURN(-ENOENT); - ast_count = 0; - list_for_each_safe(tmp, pos, rpc_list) { - ast_count += work_ast_lock(tmp, &arg); + gl_work = cfs_list_entry(arg->list->next, struct ldlm_glimpse_work, + gl_list); + cfs_list_del_init(&gl_work->gl_list); - /* Send the request set if it exceeds the PARALLEL_AST_LIMIT, - * and create a new set for requests that remained in - * @rpc_list */ - if (unlikely(ast_count == PARALLEL_AST_LIMIT)) { - ldlm_send_and_maybe_create_set(&arg, 1); - ast_count = 0; - } - } + lock = gl_work->gl_lock; + if (lock->l_glimpse_ast(lock, (void*)arg) == 0) + rc = 1; - if (ast_count > 0) - ldlm_send_and_maybe_create_set(&arg, 0); - else - /* In case when number of ASTs is multiply of - * PARALLEL_AST_LIMIT or @rpc_list was initially empty, - * @arg.set must be destroyed here, otherwise we get - * write memory leaking. */ - ptlrpc_set_destroy(arg.set); + LDLM_LOCK_RELEASE(lock); + + if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0) + OBD_FREE_PTR(gl_work); - RETURN(atomic_read(&arg.restart) ? -ERESTART : 0); + RETURN(rc); +} + +int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list, + ldlm_desc_ast_t ast_type) +{ + struct ldlm_cb_set_arg *arg; + set_producer_func work_ast_lock; + int rc; + + if (cfs_list_empty(rpc_list)) + RETURN(0); + + OBD_ALLOC_PTR(arg); + if (arg == NULL) + RETURN(-ENOMEM); + + cfs_atomic_set(&arg->restart, 0); + arg->list = rpc_list; + + switch (ast_type) { + case LDLM_WORK_BL_AST: + arg->type = LDLM_BL_CALLBACK; + work_ast_lock = ldlm_work_bl_ast_lock; + break; + case LDLM_WORK_CP_AST: + arg->type = LDLM_CP_CALLBACK; + work_ast_lock = ldlm_work_cp_ast_lock; + break; + case LDLM_WORK_REVOKE_AST: + arg->type = LDLM_BL_CALLBACK; + work_ast_lock = ldlm_work_revoke_ast_lock; + break; + case LDLM_WORK_GL_AST: + arg->type = LDLM_GL_CALLBACK; + work_ast_lock = ldlm_work_gl_ast_lock; + break; + default: + LBUG(); + } + + /* We create a ptlrpc request set with flow control extension. + * This request set will use the work_ast_lock function to produce new + * requests and will send a new request each time one completes in order + * to keep the number of requests in flight to ns_max_parallel_ast */ + arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX, + work_ast_lock, arg); + if (arg->set == NULL) + GOTO(out, rc = -ENOMEM); + + ptlrpc_set_wait(arg->set); + ptlrpc_set_destroy(arg->set); + + rc = cfs_atomic_read(&arg->restart) ? 
-ERESTART : 0;
+        GOTO(out, rc);
+out:
+        OBD_FREE_PTR(arg);
+        return rc;
 }
 
 static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
@@ -1475,64 +1708,62 @@ static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
         return LDLM_ITER_CONTINUE;
 }
 
-void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
+static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+                              cfs_hlist_node_t *hnode, void *arg)
 {
-        struct list_head *tmp;
-        int i, rc;
+        struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+        int rc;
 
-        if (ns == NULL)
-                return;
-
-        ENTRY;
-        spin_lock(&ns->ns_hash_lock);
-        for (i = 0; i < RES_HASH_SIZE; i++) {
-                tmp = ns->ns_hash[i].next;
-                while (tmp != &(ns->ns_hash[i])) {
-                        struct ldlm_resource *res =
-                                list_entry(tmp, struct ldlm_resource, lr_hash);
+        rc = reprocess_one_queue(res, arg);
 
-                        ldlm_resource_getref(res);
-                        spin_unlock(&ns->ns_hash_lock);
-
-                        rc = reprocess_one_queue(res, NULL);
+        return rc == LDLM_ITER_STOP;
+}
 
-                        spin_lock(&ns->ns_hash_lock);
-                        tmp = tmp->next;
-                        ldlm_resource_putref_locked(res);
+void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
+{
+        ENTRY;
 
-                        if (rc == LDLM_ITER_STOP)
-                                GOTO(out, rc);
-                }
+        if (ns != NULL) {
+                cfs_hash_for_each_nolock(ns->ns_rs_hash,
+                                         ldlm_reprocess_res, NULL);
         }
- out:
-        spin_unlock(&ns->ns_hash_lock);
         EXIT;
 }
 
 void ldlm_reprocess_all(struct ldlm_resource *res)
 {
         CFS_LIST_HEAD(rpc_list);
+
+#ifdef HAVE_SERVER_SUPPORT
         int rc;
         ENTRY;
 
-        /* Local lock trees don't get reprocessed. */
-        if (ns_is_client(res->lr_namespace)) {
+        if (ns_is_client(ldlm_res_to_ns(res))) {
                 EXIT;
                 return;
         }
 
- restart:
+restart:
         lock_res(res);
         rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
         if (rc == LDLM_ITER_CONTINUE)
                 ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
         unlock_res(res);
 
-        rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
+        rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
+                               LDLM_WORK_CP_AST);
         if (rc == -ERESTART) {
-                LASSERT(list_empty(&rpc_list));
+                LASSERT(cfs_list_empty(&rpc_list));
                 goto restart;
         }
+#else
+        ENTRY;
+        if (!ns_is_client(ldlm_res_to_ns(res))) {
+                CERROR("This is client-side-only module, cannot handle "
+                       "LDLM_NAMESPACE_SERVER resource type lock.\n");
+                LBUG();
+        }
+#endif
         EXIT;
 }
 
@@ -1560,8 +1791,8 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
             req->l_resource->lr_type != LDLM_IBITS)
                 return;
 
-        list_del_init(&req->l_sl_policy);
-        list_del_init(&req->l_sl_mode);
+        cfs_list_del_init(&req->l_sl_policy);
+        cfs_list_del_init(&req->l_sl_mode);
 }
 
 void ldlm_lock_cancel(struct ldlm_lock *lock)
@@ -1573,7 +1804,7 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
         lock_res_and_lock(lock);
 
         res = lock->l_resource;
-        ns = res->lr_namespace;
+        ns = ldlm_res_to_ns(res);
 
         /* Please do not, no matter how tempting, remove this LBUG without
          * talking to me first. -phik */
@@ -1582,14 +1813,17 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
                 LBUG();
         }
 
-        ldlm_del_waiting_lock(lock);
+        if (lock->l_waited)
+                ldlm_del_waiting_lock(lock);
 
         /* Releases cancel callback. */
         ldlm_cancel_callback(lock);
 
         /* Yes, second time, just in case it was added again while we were
            running with no res lock in ldlm_cancel_callback */
-        ldlm_del_waiting_lock(lock);
+        if (lock->l_waited)
+                ldlm_del_waiting_lock(lock);
+
         ldlm_resource_unlink_lock(lock);
         ldlm_lock_destroy_nolock(lock);
 
@@ -1598,7 +1832,7 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
 
         /* Make sure we will not be called again for same lock what is possible
          * if not to zero out lock->l_granted_mode */
-        lock->l_granted_mode = 0;
+        lock->l_granted_mode = LCK_MINMODE;
         unlock_res_and_lock(lock);
 
         EXIT;
@@ -1607,40 +1841,97 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
 int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
 {
         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
+        int rc = -EINVAL;
         ENTRY;
 
-        if (lock == NULL)
-                RETURN(-EINVAL);
+        if (lock) {
+                if (lock->l_ast_data == NULL)
+                        lock->l_ast_data = data;
+                if (lock->l_ast_data == data)
+                        rc = 0;
+                LDLM_LOCK_PUT(lock);
+        }
+        RETURN(rc);
+}
+EXPORT_SYMBOL(ldlm_lock_set_data);
 
-        lock->l_ast_data = data;
-        LDLM_LOCK_PUT(lock);
-        RETURN(0);
+struct export_cl_data {
+        struct obd_export       *ecl_exp;
+        int                     ecl_loop;
+};
+
+int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+                                    cfs_hlist_node_t *hnode, void *data)
+
+{
+        struct export_cl_data  *ecl = (struct export_cl_data *)data;
+        struct obd_export      *exp = ecl->ecl_exp;
+        struct ldlm_lock       *lock = cfs_hash_object(hs, hnode);
+        struct ldlm_resource   *res;
+
+        res = ldlm_resource_getref(lock->l_resource);
+        LDLM_LOCK_GET(lock);
+
+        LDLM_DEBUG(lock, "export %p", exp);
+        ldlm_res_lvbo_update(res, NULL, 1);
+        ldlm_lock_cancel(lock);
+        ldlm_reprocess_all(res);
+        ldlm_resource_putref(res);
+        LDLM_LOCK_RELEASE(lock);
+
+        ecl->ecl_loop++;
+        if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
+                CDEBUG(D_INFO,
+                       "Cancel lock %p for export %p (loop %d), still have "
+                       "%d locks left on hash table.\n",
+                       lock, exp, ecl->ecl_loop,
+                       cfs_atomic_read(&hs->hs_count));
+        }
+
+        return 0;
 }
 
 void ldlm_cancel_locks_for_export(struct obd_export *exp)
 {
-        struct ldlm_lock *lock;
-        struct ldlm_resource *res;
+        struct export_cl_data   ecl = {
+                .ecl_exp        = exp,
+                .ecl_loop       = 0,
+        };
 
-        spin_lock(&exp->exp_ldlm_data.led_lock);
-        while(!list_empty(&exp->exp_ldlm_data.led_held_locks)) {
-                lock = list_entry(exp->exp_ldlm_data.led_held_locks.next,
-                                  struct ldlm_lock, l_export_chain);
-                res = ldlm_resource_getref(lock->l_resource);
-                LDLM_LOCK_GET(lock);
-                spin_unlock(&exp->exp_ldlm_data.led_lock);
+        cfs_hash_for_each_empty(exp->exp_lock_hash,
+                                ldlm_cancel_locks_for_export_cb, &ecl);
 }
 
-                LDLM_DEBUG(lock, "export %p", exp);
-                ldlm_res_lvbo_update(res, NULL, 0, 1);
+/**
+ * Downgrade an exclusive lock.
+ *
+ * A fast variant of ldlm_lock_convert for convertion of exclusive
+ * locks. The convertion is always successful.
+ *
+ * \param lock A lock to convert
+ * \param new_mode new lock mode
+ */
+void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
+{
+        ENTRY;
 
-                ldlm_lock_cancel(lock);
-                ldlm_reprocess_all(res);
+        LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
+        LASSERT(new_mode == LCK_COS);
 
-                ldlm_resource_putref(res);
-                LDLM_LOCK_PUT(lock);
-                spin_lock(&exp->exp_ldlm_data.led_lock);
-        }
-        spin_unlock(&exp->exp_ldlm_data.led_lock);
+        lock_res_and_lock(lock);
+        ldlm_resource_unlink_lock(lock);
+        /*
+         * Remove the lock from pool as it will be added again in
+         * ldlm_grant_lock() called below.
+         */
+        ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);
+
+        lock->l_req_mode = new_mode;
+        ldlm_grant_lock(lock, NULL);
+        unlock_res_and_lock(lock);
+        ldlm_reprocess_all(lock->l_resource);
+
+        EXIT;
 }
 
 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
@@ -1650,9 +1941,10 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
         struct ldlm_resource *res;
         struct ldlm_namespace *ns;
         int granted = 0;
-        int old_mode, rc;
-        struct sl_insert_point prev;
-        ldlm_error_t err;
+#ifdef HAVE_SERVER_SUPPORT
+        int old_mode;
+        struct sl_insert_point prev;
+#endif
         struct ldlm_interval *node;
         ENTRY;
 
@@ -1663,32 +1955,36 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
         /* I can't check the type of lock here because the bitlock of lock
          * is not held here, so do the allocation blindly. -jay */
-        OBD_SLAB_ALLOC(node, ldlm_interval_slab, CFS_ALLOC_IO, sizeof(*node));
+        OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
         if (node == NULL)  /* Actually, this causes EDEADLOCK to be returned */
                 RETURN(NULL);
 
-        LASSERTF(new_mode == LCK_PW && lock->l_granted_mode == LCK_PR,
+        LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
                  "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
 
         lock_res_and_lock(lock);
 
         res = lock->l_resource;
-        ns = res->lr_namespace;
+        ns = ldlm_res_to_ns(res);
 
-        old_mode = lock->l_req_mode;
-        lock->l_req_mode = new_mode;
-        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
-                /* remember the lock position where the lock might be
-                 * added back to the granted list later and also
-                 * remember the join mode for skiplist fixing. */
-                prev.res_link = lock->l_res_link.prev;
-                prev.mode_link = lock->l_sl_mode.prev;
-                prev.policy_link = lock->l_sl_policy.prev;
+#ifdef HAVE_SERVER_SUPPORT
+        old_mode = lock->l_req_mode;
+#endif
+        lock->l_req_mode = new_mode;
+        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
+#ifdef HAVE_SERVER_SUPPORT
+                /* remember the lock position where the lock might be
+                 * added back to the granted list later and also
+                 * remember the join mode for skiplist fixing. */
+                prev.res_link = lock->l_res_link.prev;
+                prev.mode_link = lock->l_sl_mode.prev;
+                prev.policy_link = lock->l_sl_policy.prev;
+#endif
                 ldlm_resource_unlink_lock(lock);
         } else {
                 ldlm_resource_unlink_lock(lock);
                 if (res->lr_type == LDLM_EXTENT) {
-                        /* FIXME: ugly code, I have to attach the lock to a
+                        /* FIXME: ugly code, I have to attach the lock to a
                          * interval node again since perhaps it will be granted
                          * soon */
                         CFS_INIT_LIST_HEAD(&node->li_group);
@@ -1697,8 +1993,14 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                 }
         }
 
+        /*
+         * Remove old lock from the pool before adding the lock with new
+         * mode below in ->policy()
+         */
+        ldlm_pool_del(&ns->ns_pool, lock);
+
         /* If this is a local resource, put it on the appropriate list. */
-        if (ns_is_client(res->lr_namespace)) {
+        if (ns_is_client(ldlm_res_to_ns(res))) {
                 if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
                 } else {
@@ -1710,11 +2012,14 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                         ldlm_grant_lock(lock, &rpc_list);
                         granted = 1;
 
-                        /* FIXME: completion handling not with ns_lock held ! */
+                        /* FIXME: completion handling not with lr_lock held ! */
                         if (lock->l_completion_ast)
                                 lock->l_completion_ast(lock, 0, NULL);
                 }
+#ifdef HAVE_SERVER_SUPPORT
         } else {
+                int rc;
+                ldlm_error_t err;
                 int pflags = 0;
                 ldlm_processing_policy policy;
                 policy = ldlm_processing_policy_table[res->lr_type];
@@ -1732,70 +2037,22 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                         granted = 1;
                 }
         }
+#else
+        } else {
+                CERROR("This is client-side-only module, cannot handle "
+                       "LDLM_NAMESPACE_SERVER resource type lock.\n");
+                LBUG();
+        }
+#endif
 
         unlock_res_and_lock(lock);
 
         if (granted)
-                ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
+                ldlm_run_ast_work(ns, &rpc_list, LDLM_WORK_CP_AST);
         if (node)
                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
         RETURN(res);
 }
 
-void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
-{
-        struct obd_device *obd = NULL;
-
-        if (!((libcfs_debug | D_ERROR) & level))
-                return;
-
-        if (!lock) {
-                CDEBUG(level, " NULL LDLM lock\n");
-                return;
-        }
-
-        CDEBUG(level," -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
-               lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
-               pos, lock->l_pid);
-        if (lock->l_conn_export != NULL)
-                obd = lock->l_conn_export->exp_obd;
-        if (lock->l_export && lock->l_export->exp_connection) {
-                CDEBUG(level, " Node: NID %s (rhandle: "LPX64")\n",
-                       libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid),
-                       lock->l_remote_handle.cookie);
-        } else if (obd == NULL) {
-                CDEBUG(level, " Node: local\n");
-        } else {
-                struct obd_import *imp = obd->u.cli.cl_import;
-                CDEBUG(level, " Node: NID %s (rhandle: "LPX64")\n",
-                       libcfs_nid2str(imp->imp_connection->c_peer.nid),
-                       lock->l_remote_handle.cookie);
-        }
-        CDEBUG(level, " Resource: %p ("LPU64"/"LPU64"/"LPU64")\n",
-               lock->l_resource,
-               lock->l_resource->lr_name.name[0],
-               lock->l_resource->lr_name.name[1],
-               lock->l_resource->lr_name.name[2]);
-        CDEBUG(level, " Req mode: %s, grant mode: %s, rc: %u, read: %d, "
-               "write: %d flags: %#x\n", ldlm_lockname[lock->l_req_mode],
-               ldlm_lockname[lock->l_granted_mode],
-               atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
-               lock->l_flags);
-        if (lock->l_resource->lr_type == LDLM_EXTENT)
-                CDEBUG(level, " Extent: "LPU64" -> "LPU64
-                       " (req "LPU64"-"LPU64")\n",
-                       lock->l_policy_data.l_extent.start,
-                       lock->l_policy_data.l_extent.end,
-                       lock->l_req_extent.start, lock->l_req_extent.end);
-        else if (lock->l_resource->lr_type == LDLM_FLOCK)
-                CDEBUG(level, " Pid: %d Extent: "LPU64" -> "LPU64"\n",
-                       lock->l_policy_data.l_flock.pid,
-                       lock->l_policy_data.l_flock.start,
-                       lock->l_policy_data.l_flock.end);
-        else if (lock->l_resource->lr_type == LDLM_IBITS)
-                CDEBUG(level, " Bits: "LPX64"\n",
-                       lock->l_policy_data.l_inodebits.bits);
-}
-
 void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
 {
         struct ldlm_lock *lock;
@@ -1807,133 +2064,134 @@ void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
         if (lock == NULL)
                 return;
 
-        ldlm_lock_dump(D_OTHER, lock, 0);
+        LDLM_DEBUG_LIMIT(level, lock, "###");
 
         LDLM_LOCK_PUT(lock);
 }
 
-void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
-                      struct libcfs_debug_msg_data *data, const char *fmt,
-                      ...)
+void _ldlm_lock_debug(struct ldlm_lock *lock,
+                      struct libcfs_debug_msg_data *msgdata,
+                      const char *fmt, ...)
 {
         va_list args;
-        cfs_debug_limit_state_t *cdls = data->msg_cdls;
-
+        struct obd_export *exp = lock->l_export;
+        struct ldlm_resource *resource = lock->l_resource;
+        char *nid = "local";
+
         va_start(args, fmt);
-        if (lock->l_resource == NULL) {
-                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
-                                   data->msg_fn, data->msg_line, fmt, args,
+        if (exp && exp->exp_connection) {
+                nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
+        } else if (exp && exp->exp_obd != NULL) {
+                struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
+                nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
+        }
+
+        if (resource == NULL) {
+                libcfs_debug_vmsg2(msgdata, fmt, args,
                        " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
-                       "res: \?\? rrc=\?\? type: \?\?\? flags: %x remote: "
-                       LPX64" expref: %d pid: %u\n", lock,
-                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+                       "res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" nid: %s "
+                       "remote: "LPX64" expref: %d pid: %u timeout: %lu\n",
+                       lock,
+                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
-                       lock->l_flags, lock->l_remote_handle.cookie,
-                       lock->l_export ?
-                       atomic_read(&lock->l_export->exp_refcount) : -99,
-                       lock->l_pid);
+                       lock->l_flags, nid, lock->l_remote_handle.cookie,
+                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       lock->l_pid, lock->l_callback_timeout);
                 va_end(args);
                 return;
         }
 
-        switch (lock->l_resource->lr_type) {
+        switch (resource->lr_type) {
         case LDLM_EXTENT:
-                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
-                                   data->msg_fn, data->msg_line, fmt, args,
+                libcfs_debug_vmsg2(msgdata, fmt, args,
                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                        "res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
-                       "] (req "LPU64"->"LPU64") flags: %x remote: "LPX64
-                       " expref: %d pid: %u\n",
-                       lock->l_resource->lr_namespace->ns_name, lock,
-                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+                       "] (req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote:"
+                       " "LPX64" expref: %d pid: %u timeout %lu\n",
+                       ldlm_lock_to_ns_name(lock), lock,
+                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
-                       lock->l_resource->lr_name.name[0],
-                       lock->l_resource->lr_name.name[1],
-                       atomic_read(&lock->l_resource->lr_refcount),
-                       ldlm_typename[lock->l_resource->lr_type],
+                       resource->lr_name.name[0],
+                       resource->lr_name.name[1],
+                       cfs_atomic_read(&resource->lr_refcount),
+                       ldlm_typename[resource->lr_type],
                        lock->l_policy_data.l_extent.start,
                        lock->l_policy_data.l_extent.end,
                        lock->l_req_extent.start, lock->l_req_extent.end,
-                       lock->l_flags, lock->l_remote_handle.cookie,
-                       lock->l_export ?
-                       atomic_read(&lock->l_export->exp_refcount) : -99,
-                       lock->l_pid);
+                       lock->l_flags, nid, lock->l_remote_handle.cookie,
+                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       lock->l_pid, lock->l_callback_timeout);
                 break;
 
         case LDLM_FLOCK:
-                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
-                                   data->msg_fn, data->msg_line, fmt, args,
+                libcfs_debug_vmsg2(msgdata, fmt, args,
                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                        "res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
-                       "["LPU64"->"LPU64"] flags: %x remote: "LPX64
-                       " expref: %d pid: %u\n",
-                       lock->l_resource->lr_namespace->ns_name, lock,
-                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+                       "["LPU64"->"LPU64"] flags: "LPX64" nid: %s remote: "LPX64
+                       " expref: %d pid: %u timeout: %lu\n",
+                       ldlm_lock_to_ns_name(lock), lock,
+                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
-                       lock->l_resource->lr_name.name[0],
-                       lock->l_resource->lr_name.name[1],
-                       atomic_read(&lock->l_resource->lr_refcount),
-                       ldlm_typename[lock->l_resource->lr_type],
+                       resource->lr_name.name[0],
+                       resource->lr_name.name[1],
+                       cfs_atomic_read(&resource->lr_refcount),
+                       ldlm_typename[resource->lr_type],
                        lock->l_policy_data.l_flock.pid,
                        lock->l_policy_data.l_flock.start,
                        lock->l_policy_data.l_flock.end,
-                       lock->l_flags, lock->l_remote_handle.cookie,
-                       lock->l_export ?
-                       atomic_read(&lock->l_export->exp_refcount) : -99,
-                       lock->l_pid);
+                       lock->l_flags, nid, lock->l_remote_handle.cookie,
+                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       lock->l_pid, lock->l_callback_timeout);
                 break;
 
         case LDLM_IBITS:
-                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
-                                   data->msg_fn, data->msg_line, fmt, args,
+                libcfs_debug_vmsg2(msgdata, fmt, args,
                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                        "res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
-                       "flags: %x remote: "LPX64" expref: %d "
-                       "pid %u\n",
-                       lock->l_resource->lr_namespace->ns_name,
+                       "flags: "LPX64" nid: %s remote: "LPX64" expref: %d "
+                       "pid: %u timeout: %lu\n",
+                       ldlm_lock_to_ns_name(lock),
                        lock, lock->l_handle.h_cookie,
-                       atomic_read (&lock->l_refc),
+                       cfs_atomic_read (&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
-                       lock->l_resource->lr_name.name[0],
-                       lock->l_resource->lr_name.name[1],
+                       resource->lr_name.name[0],
+                       resource->lr_name.name[1],
                        lock->l_policy_data.l_inodebits.bits,
-                       atomic_read(&lock->l_resource->lr_refcount),
-                       ldlm_typename[lock->l_resource->lr_type],
-                       lock->l_flags, lock->l_remote_handle.cookie,
-                       lock->l_export ?
-                       atomic_read(&lock->l_export->exp_refcount) : -99,
-                       lock->l_pid);
+                       cfs_atomic_read(&resource->lr_refcount),
+                       ldlm_typename[resource->lr_type],
+                       lock->l_flags, nid, lock->l_remote_handle.cookie,
+                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       lock->l_pid, lock->l_callback_timeout);
                 break;
 
         default:
-                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
-                                   data->msg_fn, data->msg_line, fmt, args,
+                libcfs_debug_vmsg2(msgdata, fmt, args,
                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
-                       "res: "LPU64"/"LPU64" rrc: %d type: %s flags: %x "
-                       "remote: "LPX64" expref: %d pid: %u\n",
-                       lock->l_resource->lr_namespace->ns_name,
+                       "res: "LPU64"/"LPU64" rrc: %d type: %s flags: "LPX64" "
+                       "nid: %s remote: "LPX64" expref: %d pid: %u timeout %lu"
+                       "\n",
+                       ldlm_lock_to_ns_name(lock),
                        lock, lock->l_handle.h_cookie,
-                       atomic_read (&lock->l_refc),
+                       cfs_atomic_read (&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
-                       lock->l_resource->lr_name.name[0],
-                       lock->l_resource->lr_name.name[1],
-                       atomic_read(&lock->l_resource->lr_refcount),
-                       ldlm_typename[lock->l_resource->lr_type],
-                       lock->l_flags, lock->l_remote_handle.cookie,
-                       lock->l_export ?
-                       atomic_read(&lock->l_export->exp_refcount) : -99,
-                       lock->l_pid);
+                       resource->lr_name.name[0],
+                       resource->lr_name.name[1],
+                       cfs_atomic_read(&resource->lr_refcount),
+                       ldlm_typename[resource->lr_type],
+                       lock->l_flags, nid, lock->l_remote_handle.cookie,
+                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       lock->l_pid, lock->l_callback_timeout);
                 break;
         }
         va_end(args);