-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
*
- * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_extent.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
*/
#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
# include <liblustre.h>
+#else
+# include <libcfs/libcfs.h>
#endif
#include <lustre_dlm.h>
#include <obd_support.h>
+#include <obd.h>
+#include <obd_class.h>
#include <lustre_lib.h>
#include "ldlm_internal.h"
-#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
+#ifdef HAVE_SERVER_SUPPORT
+# define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
/* fixup the ldlm_extent after expanding */
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
__u64 req_start = req->l_req_extent.start;
__u64 req_end = req->l_req_extent.end;
__u64 req_align, mask;
-
+
if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
}
/* we need to ensure that the lock extent is properly aligned to what
- * the client requested. We align it to the lowest-common denominator
- * of the clients requested lock start and end alignment. */
- mask = 0x1000ULL;
+ * the client requested. We also need to make sure it is server
+ * page size aligned, otherwise a server page can be covered by two
+ * write locks. */
+ mask = CFS_PAGE_SIZE;
req_align = (req_end + 1) | req_start;
- if (req_align != 0) {
+ if (req_align != 0 && (req_align & (mask - 1)) == 0) {
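+ /* raise mask to the largest power of two that
+ * divides both req_start and req_end + 1 */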
while ((req_align & mask) == 0)
mask <<= 1;
}
limiter.start = req_start;
if (interval_is_overlapped(tree->lit_root, &ext))
- printk("req_mode = %d, tree->lit_mode = %d, tree->lit_size = %d\n",
+ CDEBUG(D_INFO,
+ "req_mode = %d, tree->lit_mode = %d, "
+ "tree->lit_size = %d\n",
req_mode, tree->lit_mode, tree->lit_size);
interval_expand(tree->lit_root, &ext, &limiter);
limiter.start = max(limiter.start, ext.start);
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
struct ldlm_extent *new_ex)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_resource *res = req->l_resource;
ldlm_mode_t req_mode = req->l_req_mode;
__u64 req_start = req->l_req_extent.start;
lockmode_verify(req_mode);
/* for waiting locks */
- list_for_each(tmp, &res->lr_waiting) {
+ cfs_list_for_each(tmp, &res->lr_waiting) {
struct ldlm_lock *lock;
struct ldlm_extent *l_extent;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
l_extent = &lock->l_policy_data.l_extent;
/* We already hit the minimum requested size, search no more */
continue;
/* We grow extents downwards only as far as they don't overlap
- * with already-granted locks, on the assumtion that clients
+ * with already-granted locks, on the assumption that clients
* will be writing beyond the initial requested end and would
* then need to enqueue a new lock beyond previous request.
* l_req_extent->end strictly < req_start, checked above. */
}
}
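+/* A resource is considered contended for ns_contention_time seconds
+ * after the number of conflicting locks seen exceeds ns_contended_locks;
+ * within that window this check returns nonzero. */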
+static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
+{
+ struct ldlm_resource *res = lock->l_resource;
+ cfs_time_t now = cfs_time_current();
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
+ return 1;
+
+ CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
+ if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
+ res->lr_contention_time = now;
+ return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
+ cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
+}
+
struct ldlm_extent_compat_args {
- struct list_head *work_list;
+ cfs_list_t *work_list;
struct ldlm_lock *lock;
ldlm_mode_t mode;
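+ /* count of conflicting granted locks, used for
+ * lock contention detection */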
+ int *locks;
int *compat;
};
{
struct ldlm_extent_compat_args *priv = data;
struct ldlm_interval *node = to_ldlm_interval(n);
- struct list_head *work_list = priv->work_list;
+ struct ldlm_extent *extent;
+ cfs_list_t *work_list = priv->work_list;
struct ldlm_lock *lock, *enq = priv->lock;
ldlm_mode_t mode = priv->mode;
+ int count = 0;
ENTRY;
- LASSERT(!list_empty(&node->li_group));
+ LASSERT(!cfs_list_empty(&node->li_group));
- list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+ cfs_list_for_each_entry(lock, &node->li_group, l_sl_policy) {
/* interval tree is for granted lock */
LASSERTF(mode == lock->l_granted_mode,
"mode = %s, lock->l_granted_mode = %s\n",
ldlm_lockname[mode],
ldlm_lockname[lock->l_granted_mode]);
-
+ count++;
if (lock->l_blocking_ast)
ldlm_add_ast_work_item(lock, enq, work_list);
}
+ /* don't count conflicting glimpse locks (whole-file
+ * PR locks), they don't indicate real contention */
+ extent = ldlm_interval_extent(node);
+ if (!(mode == LCK_PR &&
+ extent->start == 0 && extent->end == OBD_OBJECT_EOF))
+ *priv->locks += count;
+
if (priv->compat)
*priv->compat = 0;
* negative error, such as EWOULDBLOCK for group locks
*/
static int
-ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
int *flags, ldlm_error_t *err,
- struct list_head *work_list)
+ cfs_list_t *work_list, int *contended_locks)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_lock *lock;
struct ldlm_resource *res = req->l_resource;
ldlm_mode_t req_mode = req->l_req_mode;
__u64 req_end = req->l_req_extent.end;
int compat = 1;
int scan = 0;
+ int check_contention;
ENTRY;
lockmode_verify(req_mode);
struct ldlm_interval_tree *tree;
struct ldlm_extent_compat_args data = {.work_list = work_list,
.lock = req,
+ .locks = contended_locks,
.compat = &compat };
struct interval_node_extent ex = { .start = req_start,
.end = req_end };
} else {
interval_search(tree->lit_root, &ex,
ldlm_extent_compat_cb, &data);
- if (!list_empty(work_list) && compat)
+ if (!cfs_list_empty(work_list) && compat)
compat = 0;
}
}
- RETURN(compat);
- }
+ } else { /* for waiting queue */
+ cfs_list_for_each(tmp, queue) {
+ check_contention = 1;
+
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
+
+ if (req == lock)
+ break;
+
+ if (unlikely(scan)) {
+ /* We only get here if we are queuing a GROUP lock
+ and met some incompatible one. The main idea of this
+ code is to insert a GROUP lock past any compatible
+ GROUP lock in the waiting queue or, if there is
+ none, in front of the first non-GROUP lock */
+ if (lock->l_req_mode != LCK_GROUP) {
+ /* Ok, we hit a non-GROUP lock; there should
+ * be no more GROUP locks later on, so queue in
+ * front of the first non-GROUP lock */
+
+ ldlm_resource_insert_lock_after(lock, req);
+ cfs_list_del_init(&lock->l_res_link);
+ ldlm_resource_insert_lock_after(req, lock);
+ compat = 0;
+ break;
+ }
+ if (req->l_policy_data.l_extent.gid ==
+ lock->l_policy_data.l_extent.gid) {
+ /* found it */
+ ldlm_resource_insert_lock_after(lock, req);
+ compat = 0;
+ break;
+ }
+ continue;
+ }
- /* for waiting queue */
- list_for_each(tmp, queue) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ /* locks are compatible, overlap doesn't matter */
+ if (lockmode_compat(lock->l_req_mode, req_mode)) {
+ if (req_mode == LCK_PR &&
+ ((lock->l_policy_data.l_extent.start <=
+ req->l_policy_data.l_extent.start) &&
+ (lock->l_policy_data.l_extent.end >=
+ req->l_policy_data.l_extent.end))) {
+ /* If we met a PR lock just like us or wider,
+ and nobody down the list conflicted with
+ it, that means we can skip processing of
+ the rest of the list and safely place
+ ourselves at the end of the list, or grant
+ (depending on whether we met conflicting
+ locks earlier in the list).
+ On the first enqueue only, we continue
+ traversing if there is something conflicting
+ down the list because we need to make sure
+ that something is marked as AST_SENT as
+ well; in case of an empty worklist we would
+ exit on the first conflict met. */
+ /* There IS a case where such a flag is
+ not set for a lock, yet it blocks
+ something. Luckily for us this is
+ only during destroy, so the lock is
+ exclusive. So here we are safe */
+ if (!(lock->l_flags & LDLM_FL_AST_SENT)) {
+ RETURN(compat);
+ }
+ }
- if (req == lock)
- RETURN(compat);
-
- if (unlikely(scan)) {
- /* We only get here if we are queuing GROUP lock
- and met some incompatible one. The main idea of this
- code is to insert GROUP lock past compatible GROUP
- lock in the waiting queue or if there is not any,
- then in front of first non-GROUP lock */
- if (lock->l_req_mode != LCK_GROUP) {
- /* Ok, we hit non-GROUP lock, there should
- * be no more GROUP locks later on, queue in
- * front of first non-GROUP lock */
-
- ldlm_resource_insert_lock_after(lock, req);
- list_del_init(&lock->l_res_link);
- ldlm_resource_insert_lock_after(req, lock);
- RETURN(0);
- }
- if (req->l_policy_data.l_extent.gid ==
- lock->l_policy_data.l_extent.gid) {
- /* found it */
- ldlm_resource_insert_lock_after(lock, req);
- RETURN(0);
- }
- continue;
- }
+ /* non-group locks are compatible, overlap doesn't
+ matter */
+ if (likely(req_mode != LCK_GROUP))
+ continue;
- /* locks are compatible, overlap doesn't matter */
- if (lockmode_compat(lock->l_req_mode, req_mode)) {
- if (req_mode == LCK_PR &&
- ((lock->l_policy_data.l_extent.start <=
- req->l_policy_data.l_extent.start) &&
- (lock->l_policy_data.l_extent.end >=
- req->l_policy_data.l_extent.end))) {
- /* If we met a PR lock just like us or wider,
- and nobody down the list conflicted with
- it, that means we can skip processing of
- the rest of the list and safely place
- ourselves at the end of the list, or grant
- (dependent if we met an conflicting locks
- before in the list).
- In case of 1st enqueue only we continue
- traversing if there is something conflicting
- down the list because we need to make sure
- that something is marked as AST_SENT as well,
- in cse of empy worklist we would exit on
- first conflict met. */
- /* There IS a case where such flag is
- not set for a lock, yet it blocks
- something. Luckily for us this is
- only during destroy, so lock is
- exclusive. So here we are safe */
- if (!(lock->l_flags & LDLM_FL_AST_SENT)) {
- RETURN(compat);
+ /* If we are trying to get a GROUP lock and there is
+ another one of this kind, we need to compare gid */
+ if (req->l_policy_data.l_extent.gid ==
+ lock->l_policy_data.l_extent.gid) {
+ /* If existing lock with matched gid is granted,
+ we grant new one too. */
+ if (lock->l_req_mode == lock->l_granted_mode)
+ RETURN(2);
+
+ /* Otherwise we are scanning the queue of waiting
+ * locks and it means the current request would
+ * block along with the existing lock (that is
+ * already blocked).
+ * If we are in nonblocking mode - return
+ * immediately */
+ if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+ compat = -EWOULDBLOCK;
+ goto destroylock;
+ }
+ /* If this group lock is compatible with another
+ * group lock on the waiting list, they must be
+ * together in the list, so they can be granted
+ * at the same time. Otherwise the later lock
+ * can get stuck behind another, incompatible,
+ * lock. */
+ ldlm_resource_insert_lock_after(lock, req);
+ /* Because 'lock' is not granted, we can stop
+ * processing this queue and return immediately.
+ * There is no need to check the rest of the
+ * list. */
+ RETURN(0);
}
}
- /* non-group locks are compatible, overlap doesn't
- matter */
- if (likely(req_mode != LCK_GROUP))
+ if (unlikely(req_mode == LCK_GROUP &&
+ (lock->l_req_mode != lock->l_granted_mode))) {
+ scan = 1;
+ compat = 0;
+ if (lock->l_req_mode != LCK_GROUP) {
+ /* Ok, we hit a non-GROUP lock; there should be no
+ more GROUP locks later on, so queue in front of
+ the first non-GROUP lock */
+
+ ldlm_resource_insert_lock_after(lock, req);
+ cfs_list_del_init(&lock->l_res_link);
+ ldlm_resource_insert_lock_after(req, lock);
+ break;
+ }
+ if (req->l_policy_data.l_extent.gid ==
+ lock->l_policy_data.l_extent.gid) {
+ /* found it */
+ ldlm_resource_insert_lock_after(lock, req);
+ break;
+ }
continue;
+ }
- /* If we are trying to get a GROUP lock and there is
- another one of this kind, we need to compare gid */
- if (req->l_policy_data.l_extent.gid ==
- lock->l_policy_data.l_extent.gid) {
- /* If existing lock with matched gid is granted,
- we grant new one too. */
- if (lock->l_req_mode == lock->l_granted_mode)
- RETURN(2);
-
- /* Otherwise we are scanning queue of waiting
- * locks and it means current request would
- * block along with existing lock (that is
- * already blocked.
- * If we are in nonblocking mode - return
- * immediately */
+ if (unlikely(lock->l_req_mode == LCK_GROUP)) {
+ /* If the compared lock is GROUP, then the requested
+ * lock is PR/PW, so this is not compatible; extent
+ * range does not matter */
if (*flags & LDLM_FL_BLOCK_NOWAIT) {
compat = -EWOULDBLOCK;
goto destroylock;
+ } else {
+ *flags |= LDLM_FL_NO_TIMEOUT;
}
- /* If this group lock is compatible with another
- * group lock on the waiting list, they must be
- * together in the list, so they can be granted
- * at the same time. Otherwise the later lock
- * can get stuck behind another, incompatible,
- * lock. */
- ldlm_resource_insert_lock_after(lock, req);
- /* Because 'lock' is not granted, we can stop
- * processing this queue and return immediately.
- * There is no need to check the rest of the
- * list. */
- RETURN(0);
+ } else if (lock->l_policy_data.l_extent.end < req_start ||
+ lock->l_policy_data.l_extent.start > req_end) {
+ /* if a non group lock doesn't overlap skip it */
+ continue;
+ } else if (lock->l_req_extent.end < req_start ||
+ lock->l_req_extent.start > req_end) {
+ /* false contention, the requests don't really overlap */
+ check_contention = 0;
}
- }
- if (unlikely(req_mode == LCK_GROUP &&
- (lock->l_req_mode != lock->l_granted_mode))) {
- scan = 1;
- compat = 0;
- if (lock->l_req_mode != LCK_GROUP) {
- /* Ok, we hit non-GROUP lock, there should be no
- more GROUP locks later on, queue in front of
- first non-GROUP lock */
-
- ldlm_resource_insert_lock_after(lock, req);
- list_del_init(&lock->l_res_link);
- ldlm_resource_insert_lock_after(req, lock);
+ if (!work_list)
RETURN(0);
- }
- if (req->l_policy_data.l_extent.gid ==
- lock->l_policy_data.l_extent.gid) {
- /* found it */
- ldlm_resource_insert_lock_after(lock, req);
- RETURN(0);
- }
- continue;
- }
- if (unlikely(lock->l_req_mode == LCK_GROUP)) {
- /* If compared lock is GROUP, then requested is PR/PW/
- * so this is not compatible; extent range does not
- * matter */
- if (*flags & LDLM_FL_BLOCK_NOWAIT) {
- compat = -EWOULDBLOCK;
- goto destroylock;
- } else {
- *flags |= LDLM_FL_NO_TIMEOUT;
- }
- } else if (lock->l_policy_data.l_extent.end < req_start ||
- lock->l_policy_data.l_extent.start > req_end) {
- /* if a non group lock doesn't overlap skip it */
- continue;
- }
+ /* don't count conflicting glimpse locks */
+ if (lock->l_req_mode == LCK_PR &&
+ lock->l_policy_data.l_extent.start == 0 &&
+ lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
+ check_contention = 0;
- if (!work_list)
- RETURN(0);
+ *contended_locks += check_contention;
- compat = 0;
- if (lock->l_blocking_ast)
- ldlm_add_ast_work_item(lock, req, work_list);
+ compat = 0;
+ if (lock->l_blocking_ast)
+ ldlm_add_ast_work_item(lock, req, work_list);
+ }
}
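+ /* if the resource is contended, deny a small non-GROUP request
+ * carrying LDLM_FL_DENY_ON_CONTENTION with -EUSERS so the client
+ * can fall back to lockless I/O */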
+ if (ldlm_check_contention(req, *contended_locks) &&
+ compat == 0 &&
+ (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
+ req->l_req_mode != LCK_GROUP &&
+ req_end - req_start <=
+ ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
+ GOTO(destroylock, compat = -EUSERS);
+
RETURN(compat);
destroylock:
- list_del_init(&req->l_res_link);
+ cfs_list_del_init(&req->l_res_link);
ldlm_lock_destroy_nolock(req);
*err = compat;
RETURN(compat);
}
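+/* Drop blocking-AST work items that were queued but will not be sent,
+ * clearing the AST_SENT flag and releasing the references taken when
+ * the items were added to the list. */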
+static void discard_bl_list(cfs_list_t *bl_list)
+{
+ cfs_list_t *tmp, *pos;
+ ENTRY;
+
+ cfs_list_for_each_safe(pos, tmp, bl_list) {
+ struct ldlm_lock *lock =
+ cfs_list_entry(pos, struct ldlm_lock, l_bl_ast);
+
+ cfs_list_del_init(&lock->l_bl_ast);
+ LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+ lock->l_flags &= ~LDLM_FL_AST_SENT;
+ LASSERT(lock->l_bl_ast_run == 0);
+ LASSERT(lock->l_blocking_lock);
+ LDLM_LOCK_RELEASE(lock->l_blocking_lock);
+ lock->l_blocking_lock = NULL;
+ LDLM_LOCK_RELEASE(lock);
+ }
+ EXIT;
+}
+
/* If first_enq is 0 (ie, called from ldlm_reprocess_queue):
* - blocking ASTs have already been sent
* - must call this function with the ns lock held
* - blocking ASTs have not been sent
* - must call this function with the ns lock held once */
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list)
+ ldlm_error_t *err, cfs_list_t *work_list)
{
struct ldlm_resource *res = lock->l_resource;
- struct list_head rpc_list = CFS_LIST_HEAD_INIT(rpc_list);
+ CFS_LIST_HEAD(rpc_list);
int rc, rc2;
+ int contended_locks = 0;
ENTRY;
- LASSERT(list_empty(&res->lr_converting));
+ LASSERT(cfs_list_empty(&res->lr_converting));
+ LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
+ !(lock->l_flags & LDLM_AST_DISCARD_DATA));
check_res_locked(res);
*err = ELDLM_OK;
* being true, we want to find out. */
LASSERT(*flags == 0);
rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
- err, NULL);
+ err, NULL, &contended_locks);
if (rc == 1) {
rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
- flags, err, NULL);
+ flags, err, NULL,
+ &contended_locks);
}
if (rc == 0)
RETURN(LDLM_ITER_STOP);
}
restart:
- rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err, &rpc_list);
+ contended_locks = 0;
+ rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
+ &rpc_list, &contended_locks);
if (rc < 0)
GOTO(out, rc); /* lock was destroyed */
if (rc == 2)
goto grant;
- rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err, &rpc_list);
+ rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
+ &rpc_list, &contended_locks);
if (rc2 < 0)
GOTO(out, rc = rc2); /* lock was destroyed */
* bug 2322: we used to unlink and re-add here, which was a
* terrible folly -- if we goto restart, we could get
* re-ordered! Causes deadlock, because ASTs aren't sent! */
- if (list_empty(&lock->l_res_link))
+ if (cfs_list_empty(&lock->l_res_link))
ldlm_resource_add_lock(res, &res->lr_waiting, lock);
unlock_res(res);
- rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
- lock_res(res);
+ rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
+ LDLM_WORK_BL_AST);
+
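+ /* fault injection: simulate an OST failure race by failing the
+ * lock's export on the server side */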
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
+ !ns_is_client(ldlm_res_to_ns(res)))
+ class_fail_export(lock->l_export);
+ lock_res(res);
if (rc == -ERESTART) {
+
+ /* 15715: The lock was granted and destroyed after
+ * resource lock was dropped. Interval node was freed
+ * in ldlm_lock_destroy. Anyway, this always happens
+ * when a client is being evicted. So it would be
+ * ok to return an error. -jay */
+ if (lock->l_destroyed) {
+ *err = -EAGAIN;
+ GOTO(out, rc = -EAGAIN);
+ }
+
/* lock was granted while resource was unlocked. */
if (lock->l_granted_mode == lock->l_req_mode) {
/* bug 11300: if the lock has been granted,
* break earlier because otherwise, we will go
* to restart and ldlm_resource_unlink will be
* called and it causes the interval node to be
- * freed. Then we will fail at
+ * freed. Then we will fail at
* ldlm_extent_add_lock() */
*flags &= ~(LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV |
LDLM_FL_BLOCK_WAIT);
*flags |= LDLM_FL_NO_TIMEOUT;
}
- rc = 0;
+ RETURN(0);
out:
+ if (!cfs_list_empty(&rpc_list)) {
+ LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
+ discard_bl_list(&rpc_list);
+ }
RETURN(rc);
}
+#endif /* HAVE_SERVER_SUPPORT */
/* When a lock is cancelled by a client, the KMS may undergo change if this
* is the "highest lock". This function returns the new KMS value.
- * Caller must hold ns_lock already.
+ * Caller must hold lr_lock already.
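+ * (KMS: the "known minimum size" of the file.)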
*
* NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
struct ldlm_resource *res = lock->l_resource;
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_lock *lck;
__u64 kms = 0;
ENTRY;
* calculation of the kms */
lock->l_flags |= LDLM_FL_KMS_IGNORE;
- list_for_each(tmp, &res->lr_granted) {
- lck = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, &res->lr_granted) {
+ lck = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (lck->l_flags & LDLM_FL_KMS_IGNORE)
continue;
RETURN(kms);
}
+EXPORT_SYMBOL(ldlm_extent_shift_kms);
cfs_mem_cache_t *ldlm_interval_slab;
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
ENTRY;
LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
- OBD_SLAB_ALLOC(node, ldlm_interval_slab, CFS_ALLOC_IO, sizeof(*node));
+ OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
if (node == NULL)
RETURN(NULL);
void ldlm_interval_free(struct ldlm_interval *node)
{
if (node) {
- LASSERT(list_empty(&node->li_group));
+ LASSERT(cfs_list_empty(&node->li_group));
+ LASSERT(!interval_is_intree(&node->li_node));
OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
}
}
LASSERT(l->l_tree_node == NULL);
LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
- list_add_tail(&l->l_sl_policy, &n->li_group);
+ cfs_list_add_tail(&l->l_sl_policy, &n->li_group);
l->l_tree_node = n;
}
if (n == NULL)
return NULL;
- LASSERT(!list_empty(&n->li_group));
+ LASSERT(!cfs_list_empty(&n->li_group));
l->l_tree_node = NULL;
- list_del_init(&l->l_sl_policy);
+ cfs_list_del_init(&l->l_sl_policy);
- return (list_empty(&n->li_group) ? n : NULL);
+ return (cfs_list_empty(&n->li_group) ? n : NULL);
}
static inline int lock_mode_to_index(ldlm_mode_t mode)
node = lock->l_tree_node;
LASSERT(node != NULL);
+ LASSERT(!interval_is_intree(&node->li_node));
idx = lock_mode_to_index(lock->l_granted_mode);
LASSERT(lock->l_granted_mode == 1 << idx);
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
struct ldlm_resource *res = lock->l_resource;
- struct ldlm_interval *node;
+ struct ldlm_interval *node = lock->l_tree_node;
struct ldlm_interval_tree *tree;
int idx;
- if (lock->l_granted_mode != lock->l_req_mode)
+ if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
return;
- LASSERT(lock->l_tree_node != NULL);
idx = lock_mode_to_index(lock->l_granted_mode);
LASSERT(lock->l_granted_mode == 1 << idx);
tree = &res->lr_itree[idx];
ldlm_interval_free(node);
}
}
+
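+/* Convert the extent policy between its wire and local in-memory
+ * representations; the fields are copied one-to-one and the rest of
+ * the target structure is zeroed. */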
+void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy)
+{
+ memset(lpolicy, 0, sizeof(*lpolicy));
+ lpolicy->l_extent.start = wpolicy->l_extent.start;
+ lpolicy->l_extent.end = wpolicy->l_extent.end;
+ lpolicy->l_extent.gid = wpolicy->l_extent.gid;
+}
+
+void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy)
+{
+ memset(wpolicy, 0, sizeof(*wpolicy));
+ wpolicy->l_extent.start = lpolicy->l_extent.start;
+ wpolicy->l_extent.end = lpolicy->l_extent.end;
+ wpolicy->l_extent.gid = lpolicy->l_extent.gid;
+}
+