-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include "ldlm_internal.h"
-#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
+#ifdef HAVE_SERVER_SUPPORT
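+/* 32MB - 1: apparently the cap on how far the internal extent policies
+ * below may speculatively grow a requested lock extent (the growth code
+ * itself is outside this hunk). */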
+# define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
/* fixup the ldlm_extent after expanding */
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
}
/* we need to ensure that the lock extent is properly aligned to what
- * the client requested. We align it to the lowest-common denominator
- * of the clients requested lock start and end alignment. */
- mask = 0x1000ULL;
+ * the client requested. We also need to make sure the extent is
+ * server page size aligned, otherwise a single server page could be
+ * covered by two write locks. */
+ mask = CFS_PAGE_SIZE;
req_align = (req_end + 1) | req_start;
- if (req_align != 0) {
+ if (req_align != 0 && (req_align & (mask - 1)) == 0) {
while ((req_align & mask) == 0)
mask <<= 1;
}
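+ /* Worked example (illustrative, assuming CFS_PAGE_SIZE is 4KB):
+  * req_start = 0x10000, req_end = 0x1ffff gives
+  * req_align = 0x20000 | 0x10000 = 0x30000.  The mask doubles from
+  * 0x1000 until it hits the lowest set bit of req_align, stopping at
+  * 0x10000, so the grown extent can be rounded to 64KB alignment,
+  * the largest common alignment of the requested start and end. */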
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
struct ldlm_extent *new_ex)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_resource *res = req->l_resource;
ldlm_mode_t req_mode = req->l_req_mode;
__u64 req_start = req->l_req_extent.start;
lockmode_verify(req_mode);
/* for waiting locks */
- list_for_each(tmp, &res->lr_waiting) {
+ cfs_list_for_each(tmp, &res->lr_waiting) {
struct ldlm_lock *lock;
struct ldlm_extent *l_extent;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
l_extent = &lock->l_policy_data.l_extent;
/* We already hit the minimum requested size, search no more */
return 1;
CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
- if (contended_locks > res->lr_namespace->ns_contended_locks)
+ if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
res->lr_contention_time = now;
return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
- cfs_time_seconds(res->lr_namespace->ns_contention_time)));
+ cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
}
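+/* A reading of the logic above: the resource is marked contended when a
+ * scan sees more than ns_contended_locks locks, and the check keeps
+ * reporting contention until ns_contention_time seconds have elapsed
+ * since lr_contention_time was last refreshed. */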
struct ldlm_extent_compat_args {
- struct list_head *work_list;
+ cfs_list_t *work_list;
struct ldlm_lock *lock;
ldlm_mode_t mode;
int *locks;
struct ldlm_extent_compat_args *priv = data;
struct ldlm_interval *node = to_ldlm_interval(n);
struct ldlm_extent *extent;
- struct list_head *work_list = priv->work_list;
+ cfs_list_t *work_list = priv->work_list;
struct ldlm_lock *lock, *enq = priv->lock;
ldlm_mode_t mode = priv->mode;
int count = 0;
ENTRY;
- LASSERT(!list_empty(&node->li_group));
+ LASSERT(!cfs_list_empty(&node->li_group));
- list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+ cfs_list_for_each_entry(lock, &node->li_group, l_sl_policy) {
/* interval tree is for granted lock */
LASSERTF(mode == lock->l_granted_mode,
"mode = %s, lock->l_granted_mode = %s\n",
* negative error, such as EWOULDBLOCK for group locks
*/
static int
-ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
int *flags, ldlm_error_t *err,
- struct list_head *work_list, int *contended_locks)
+ cfs_list_t *work_list, int *contended_locks)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_lock *lock;
struct ldlm_resource *res = req->l_resource;
ldlm_mode_t req_mode = req->l_req_mode;
} else {
interval_search(tree->lit_root, &ex,
ldlm_extent_compat_cb, &data);
- if (!list_empty(work_list) && compat)
+ if (!cfs_list_empty(work_list) && compat)
compat = 0;
}
}
} else { /* for waiting queue */
- list_for_each(tmp, queue) {
+ cfs_list_for_each(tmp, queue) {
check_contention = 1;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
if (req == lock)
break;
* front of first non-GROUP lock */
ldlm_resource_insert_lock_after(lock, req);
- list_del_init(&lock->l_res_link);
+ cfs_list_del_init(&lock->l_res_link);
ldlm_resource_insert_lock_after(req, lock);
compat = 0;
break;
first non-GROUP lock */
ldlm_resource_insert_lock_after(lock, req);
- list_del_init(&lock->l_res_link);
+ cfs_list_del_init(&lock->l_res_link);
ldlm_resource_insert_lock_after(req, lock);
break;
}
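+ /* The insert/unlink/insert sequence above is effectively a
+  * swap: req is first linked after lock, lock is unlinked, and
+  * lock is re-linked after req, leaving req queued directly in
+  * front of the first non-GROUP lock. */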
(*flags & LDLM_FL_DENY_ON_CONTENTION) &&
req->l_req_mode != LCK_GROUP &&
req_end - req_start <=
- req->l_resource->lr_namespace->ns_max_nolock_size)
+ ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
GOTO(destroylock, compat = -EUSERS);
RETURN(compat);
destroylock:
- list_del_init(&req->l_res_link);
+ cfs_list_del_init(&req->l_res_link);
ldlm_lock_destroy_nolock(req);
*err = compat;
RETURN(compat);
}
-static void discard_bl_list(struct list_head *bl_list)
+static void discard_bl_list(cfs_list_t *bl_list)
{
- struct list_head *tmp, *pos;
+ cfs_list_t *tmp, *pos;
ENTRY;
- list_for_each_safe(pos, tmp, bl_list) {
+ cfs_list_for_each_safe(pos, tmp, bl_list) {
struct ldlm_lock *lock =
- list_entry(pos, struct ldlm_lock, l_bl_ast);
+ cfs_list_entry(pos, struct ldlm_lock, l_bl_ast);
- list_del_init(&lock->l_bl_ast);
+ cfs_list_del_init(&lock->l_bl_ast);
LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
lock->l_flags &= ~LDLM_FL_AST_SENT;
LASSERT(lock->l_bl_ast_run == 0);
* - blocking ASTs have not been sent
* - must call this function with the ns lock held once */
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list)
+ ldlm_error_t *err, cfs_list_t *work_list)
{
struct ldlm_resource *res = lock->l_resource;
CFS_LIST_HEAD(rpc_list);
int contended_locks = 0;
ENTRY;
- LASSERT(list_empty(&res->lr_converting));
+ LASSERT(cfs_list_empty(&res->lr_converting));
LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
!(lock->l_flags & LDLM_AST_DISCARD_DATA));
check_res_locked(res);
* bug 2322: we used to unlink and re-add here, which was a
* terrible folly -- if we goto restart, we could get
* re-ordered! Causes deadlock, because ASTs aren't sent! */
- if (list_empty(&lock->l_res_link))
+ if (cfs_list_empty(&lock->l_res_link))
ldlm_resource_add_lock(res, &res->lr_waiting, lock);
unlock_res(res);
- rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
+ rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
+ LDLM_WORK_BL_AST);
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
- !ns_is_client(res->lr_namespace))
+ !ns_is_client(ldlm_res_to_ns(res)))
class_fail_export(lock->l_export);
lock_res(res);
if (rc == -ERESTART) {
}
RETURN(0);
out:
- if (!list_empty(&rpc_list)) {
+ if (!cfs_list_empty(&rpc_list)) {
LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
discard_bl_list(&rpc_list);
}
RETURN(rc);
}
+#endif /* HAVE_SERVER_SUPPORT */
/* When a lock is cancelled by a client, the KMS may undergo change if this
* is the "highest lock". This function returns the new KMS value.
- * Caller must hold ns_lock already.
+ * Caller must hold lr_lock already.
*
* NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
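+/* Illustrative example: with granted extents [0, 4095] and [0, 8191],
+ * the KMS is 8192; cancelling the [0, 8191] lock shifts it down to
+ * 4096, the next highest l_extent.end + 1. */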
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
struct ldlm_resource *res = lock->l_resource;
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_lock *lck;
__u64 kms = 0;
ENTRY;
* calculation of the kms */
lock->l_flags |= LDLM_FL_KMS_IGNORE;
- list_for_each(tmp, &res->lr_granted) {
- lck = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, &res->lr_granted) {
+ lck = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (lck->l_flags & LDLM_FL_KMS_IGNORE)
continue;
RETURN(kms);
}
+EXPORT_SYMBOL(ldlm_extent_shift_kms);
cfs_mem_cache_t *ldlm_interval_slab;
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
void ldlm_interval_free(struct ldlm_interval *node)
{
if (node) {
- LASSERT(list_empty(&node->li_group));
+ LASSERT(cfs_list_empty(&node->li_group));
LASSERT(!interval_is_intree(&node->li_node));
OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
}
LASSERT(l->l_tree_node == NULL);
LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
- list_add_tail(&l->l_sl_policy, &n->li_group);
+ cfs_list_add_tail(&l->l_sl_policy, &n->li_group);
l->l_tree_node = n;
}
if (n == NULL)
return NULL;
- LASSERT(!list_empty(&n->li_group));
+ LASSERT(!cfs_list_empty(&n->li_group));
l->l_tree_node = NULL;
- list_del_init(&l->l_sl_policy);
+ cfs_list_del_init(&l->l_sl_policy);
- return (list_empty(&n->li_group) ? n : NULL);
+ return (cfs_list_empty(&n->li_group) ? n : NULL);
}
static inline int lock_mode_to_index(ldlm_mode_t mode)
ldlm_interval_free(node);
}
}
+
+void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy)
+{
+ memset(lpolicy, 0, sizeof(*lpolicy));
+ lpolicy->l_extent.start = wpolicy->l_extent.start;
+ lpolicy->l_extent.end = wpolicy->l_extent.end;
+ lpolicy->l_extent.gid = wpolicy->l_extent.gid;
+}
+
+void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy)
+{
+ memset(wpolicy, 0, sizeof(*wpolicy));
+ wpolicy->l_extent.start = lpolicy->l_extent.start;
+ wpolicy->l_extent.end = lpolicy->l_extent.end;
+ wpolicy->l_extent.gid = lpolicy->l_extent.gid;
+}
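+
+/* Both converters zero the destination first; presumably the wire and
+ * local policy structures are unions covering every lock type, so the
+ * memset() keeps the bytes outside l_extent from carrying stale data
+ * onto the wire or into the local union. */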
+