X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fldlm%2Fl_lock.c;h=a4f7c85a42efb8352f353d67e0d5fc5b47ee1e98;hb=7525fd36a26631e4a5668e53f44e39ed600eea08;hp=262f196236750146e6f79eae20669d7bccf296b2;hpb=c5050e412572b00cbe93d8517d2d1f767bebfa92;p=fs%2Flustre-release.git

diff --git a/lustre/ldlm/l_lock.c b/lustre/ldlm/l_lock.c
index 262f196..a4f7c85 100644
--- a/lustre/ldlm/l_lock.c
+++ b/lustre/ldlm/l_lock.c
@@ -1,138 +1,72 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
  *
- * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * This file is part of Lustre, http://www.sf.net/projects/lustre/
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
  *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
  */
 
 #define DEBUG_SUBSYSTEM S_LDLM
-#ifdef __KERNEL__
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-
-#include 
-#include 
-
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#else
-#include 
-#endif
-
-#include 
-#include 
-
-/* invariants:
-  - only the owner of the lock changes l_owner/l_depth
-  - if a non-owner changes or checks the variables a spin lock is taken
-*/
+#include <libcfs/libcfs.h>
 
-void l_lock_init(struct lustre_lock *lock)
-{
-        sema_init(&lock->l_sem, 1);
-        spin_lock_init(&lock->l_spin);
-}
-
-void l_lock(struct lustre_lock *lock)
-{
-        int owner = 0;
-
-        spin_lock(&lock->l_spin);
-        if (lock->l_owner == current)
-                owner = 1;
-        spin_unlock(&lock->l_spin);
+#include <lustre_dlm.h>
+#include <lustre_lib.h>
 
-        /* This is safe to increment outside the spinlock because we
-         * can only have 1 CPU running on the current task
-         * (i.e. l_owner == current), regardless of the number of CPUs.
-         */
-        if (owner) {
-                ++lock->l_depth;
-        } else {
-                down(&lock->l_sem);
-                spin_lock(&lock->l_spin);
-                lock->l_owner = current;
-                lock->l_depth = 0;
-                spin_unlock(&lock->l_spin);
-        }
-}
-
-void l_unlock(struct lustre_lock *lock)
-{
-        LASSERT(lock->l_owner == current);
-        LASSERT(lock->l_depth >= 0);
-
-        spin_lock(&lock->l_spin);
-        if (--lock->l_depth < 0) {
-                lock->l_owner = NULL;
-                spin_unlock(&lock->l_spin);
-                up(&lock->l_sem);
-                return;
-        }
-        spin_unlock(&lock->l_spin);
-}
-
-int l_has_lock(struct lustre_lock *lock)
+/**
+ * Lock a lock and its resource.
+ *
+ * LDLM locking uses resource to serialize access to locks
+ * but there is a case when we change resource of lock upon
+ * enqueue reply. We rely on lock->l_resource = new_res
+ * being an atomic operation.
+ */
+struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
 {
-        int depth = -1, owner = 0;
+	/* on server-side resource of lock doesn't change */
+	if (!ldlm_is_ns_srv(lock))
+		spin_lock(&lock->l_lock);
 
-        spin_lock(&lock->l_spin);
-        if (lock->l_owner == current) {
-                depth = lock->l_depth;
-                owner = 1;
-        }
-        spin_unlock(&lock->l_spin);
+	lock_res(lock->l_resource);
 
-        if (depth >= 0)
-                CDEBUG(D_INFO, "lock_depth: %d\n", depth);
-        return owner;
+	ldlm_set_res_locked(lock);
+	return lock->l_resource;
 }
+EXPORT_SYMBOL(lock_res_and_lock);
 
-#ifdef __KERNEL__
-#include 
-void l_check_no_ns_lock(struct ldlm_namespace *ns)
+/**
+ * Unlock a lock and its resource previously locked with lock_res_and_lock
+ */
+void unlock_res_and_lock(struct ldlm_lock *lock)
 {
-        static long next_msg;
+	/* on server-side resource of lock doesn't change */
+	ldlm_clear_res_locked(lock);
 
-        if (l_has_lock(&ns->ns_lock) && time_after(jiffies, next_msg)) {
-                CERROR("namespace %s lock held during RPCs; tell phil\n",
-                       ns->ns_name);
-#if (LUSTRE_KERNEL_VERSION >= 30)
-                CERROR(portals_debug_dumpstack());
-#endif
-                next_msg = jiffies + 60 * HZ;
-        }
-}
-
-#else
-void l_check_no_ns_lock(struct ldlm_namespace *ns)
-{
-#warning "FIXME: check lock in user space??"
+	unlock_res(lock->l_resource);
+	if (!ldlm_is_ns_srv(lock))
+		spin_unlock(&lock->l_lock);
 }
-#endif /* __KERNEL__ */
+EXPORT_SYMBOL(unlock_res_and_lock);
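
The doc comment on lock_res_and_lock() above carries the key invariant of this change: a lock's l_resource pointer may be swapped when the enqueue reply moves the lock to a different resource, so on the client the per-lock spinlock lock->l_lock is taken first to pin that pointer before the resource itself is locked; on the server side (ldlm_is_ns_srv()) the resource never changes and the spinlock is skipped. Below is a minimal userspace sketch of that two-level scheme, not Lustre code: every model_* name is hypothetical, and pthread mutexes stand in for the kernel spinlocks.

/*
 * Userspace model of the two-level scheme above (NOT Lustre code).
 * All model_* names are hypothetical; pthread mutexes stand in for
 * the kernel spinlocks (lock->l_lock and the per-resource lock).
 * Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdio.h>

struct model_res {
	pthread_mutex_t lr_lock;      /* stands in for lock_res()/unlock_res() */
	int             lr_state;     /* state the resource lock protects */
};

struct model_lock {
	pthread_mutex_t   l_lock;     /* stands in for lock->l_lock */
	struct model_res *l_resource; /* may be re-pointed at enqueue reply */
};

/* Mirrors lock_res_and_lock(): pin the lock-to-resource link first,
 * then lock whichever resource the lock currently points at. */
static struct model_res *model_lock_res_and_lock(struct model_lock *lock)
{
	pthread_mutex_lock(&lock->l_lock);
	pthread_mutex_lock(&lock->l_resource->lr_lock);
	return lock->l_resource;
}

/* Mirrors unlock_res_and_lock(): release in the reverse order. */
static void model_unlock_res_and_lock(struct model_lock *lock)
{
	pthread_mutex_unlock(&lock->l_resource->lr_lock);
	pthread_mutex_unlock(&lock->l_lock);
}

/* Simplified stand-in for the enqueue-reply path that re-homes a lock:
 * the pointer swap happens only while l_lock is held, so a concurrent
 * model_lock_res_and_lock() can never lock a stale resource. */
static void model_change_resource(struct model_lock *lock,
				  struct model_res *new_res)
{
	pthread_mutex_lock(&lock->l_lock);
	lock->l_resource = new_res;
	pthread_mutex_unlock(&lock->l_lock);
}

int main(void)
{
	struct model_res r1 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct model_res r2 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct model_lock lk = { PTHREAD_MUTEX_INITIALIZER, &r1 };

	struct model_res *res = model_lock_res_and_lock(&lk);
	res->lr_state = 1;               /* touch state under the resource lock */
	model_unlock_res_and_lock(&lk);

	model_change_resource(&lk, &r2); /* lock migrates to a new resource */
	printf("lock now on resource %p\n", (void *)lk.l_resource);
	return 0;
}

The ordering is what matters: because the per-lock mutex is held across the resource-lock acquisition, and any pointer swap also requires that mutex, a caller can never end up holding the lock of a resource the lock has already migrated away from.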