X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fldlm%2Fl_lock.c;h=2c0865f82fa495478f67be7edd8e79efd483ce39;hb=98f973b5b2ce7dd5bcccdf126f39e59c376eaca2;hp=ebf49d6f246056507097c80f893f4b5eee024ddf;hpb=a4c80e2baf6f61118d9b4c965bf3eef928838176;p=fs%2Flustre-release.git

diff --git a/lustre/ldlm/l_lock.c b/lustre/ldlm/l_lock.c
index ebf49d6..2c0865f 100644
--- a/lustre/ldlm/l_lock.c
+++ b/lustre/ldlm/l_lock.c
@@ -1,115 +1,67 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
  *
- * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * This file is part of Lustre, http://www.sf.net/projects/lustre/
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
  *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
  */
 #define DEBUG_SUBSYSTEM S_LDLM
 #ifdef __KERNEL__
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
+#include <libcfs/libcfs.h>
 #else
 #include <liblustre.h>
 #endif
-#include
-#include
-
-/* invariants:
- - only the owner of the lock changes l_owner/l_depth
- - if a non-owner changes or checks the variables a spin lock is taken
-*/
-
-void l_lock_init(struct lustre_lock *lock)
-{
-        sema_init(&lock->l_sem, 1);
-        spin_lock_init(&lock->l_spin);
-}
-
-void l_lock(struct lustre_lock *lock)
-{
-        int owner = 0;
-
-        spin_lock(&lock->l_spin);
-        if (lock->l_owner == current)
-                owner = 1;
-        spin_unlock(&lock->l_spin);
-
-        /* This is safe to increment outside the spinlock because we
-         * can only have 1 CPU running on the current task
-         * (i.e. l_owner == current), regardless of the number of CPUs.
-         */
-        if (owner) {
-                ++lock->l_depth;
-        } else {
-                down(&lock->l_sem);
-                spin_lock(&lock->l_spin);
-                lock->l_owner = current;
-                lock->l_depth = 0;
-                spin_unlock(&lock->l_spin);
-        }
-}
+#include <lustre_dlm.h>
+#include <lustre_lib.h>
 
-void l_unlock(struct lustre_lock *lock)
+/*
+ * ldlm locking uses resource to serialize access to locks
+ * but there is a case when we change resource of lock upon
+ * enqueue reply. we rely on that lock->l_resource = new_res
+ * is atomic
+ */
+struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock)
 {
-        LASSERT(lock->l_owner == current);
-        LASSERT(lock->l_depth >= 0);
+        /* on server-side resource of lock doesn't change */
+        if (!lock->l_ns_srv)
+                cfs_spin_lock(&lock->l_lock);
 
-        spin_lock(&lock->l_spin);
-        if (--lock->l_depth < 0) {
-                lock->l_owner = NULL;
-                spin_unlock(&lock->l_spin);
-                up(&lock->l_sem);
-                return;
-        }
-        spin_unlock(&lock->l_spin);
+        lock_res(lock->l_resource);
+        return lock->l_resource;
 }
 
-int l_has_lock(struct lustre_lock *lock)
+void unlock_res_and_lock(struct ldlm_lock *lock)
 {
-        int depth = -1, owner = 0;
-
-        spin_lock(&lock->l_spin);
-        if (lock->l_owner == current) {
-                depth = lock->l_depth;
-                owner = 1;
-        }
-        spin_unlock(&lock->l_spin);
-
-        if (depth >= 0)
-                CDEBUG(D_INFO, "lock_depth: %d\n", depth);
-        return owner;
+        /* on server-side resource of lock doesn't change */
+        unlock_res(lock->l_resource);
+        if (!lock->l_ns_srv)
+                cfs_spin_unlock(&lock->l_lock);
 }
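
For context, the two helpers added by this change are intended to be used as a matched pair around any code that reads or updates a lock whose l_resource may still be swapped by an enqueue reply. The minimal caller sketch below is not part of the patch: the wrapper name example_set_cbpending() is invented for illustration, and the l_flags / LDLM_FL_CBPENDING update merely stands in for whatever per-lock state a real caller would touch under the resource lock.

#include <lustre_dlm.h>

/* Illustrative caller only: always pair lock_res_and_lock() with
 * unlock_res_and_lock() on the same ldlm_lock. */
static void example_set_cbpending(struct ldlm_lock *lock)
{
        /* On the client this takes lock->l_lock first and then the
         * resource lock, so l_resource cannot be replaced underneath us;
         * on the server (l_ns_srv set) only the resource lock is taken.
         * The return value is the resource now held, for callers that
         * need it. */
        lock_res_and_lock(lock);

        /* ... inspect or modify lock state under the resource lock ... */
        lock->l_flags |= LDLM_FL_CBPENDING;     /* illustrative update */

        /* Drops the resource lock, then lock->l_lock, in reverse order. */
        unlock_res_and_lock(lock);
}

Note that the unlock side goes back through the lock rather than through a cached resource pointer; presumably the intent is that client-side callers always bracket resource access with these two helpers, so the per-lock l_lock spinlock keeps them consistent with the enqueue-reply path that relies on the single atomic store of lock->l_resource, as the comment in the patch notes.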