* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2010, 2013, Intel Corporation.
+ * Copyright (c) 2010, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LDLM
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-# include <linux/lustre_intent.h>
-#else
-# include <liblustre.h>
-#endif
-
+#include <libcfs/libcfs.h>
#include <obd_class.h>
#include "ldlm_internal.h"
[LDLM_FLOCK] = "FLK",
[LDLM_IBITS] = "IBT",
};
-EXPORT_SYMBOL(ldlm_typename);
-static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = {
+static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
[LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
[LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire18_to_local,
- [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
-};
-
-static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
- [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire21_to_local,
+ [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire_to_local,
[LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
};
/**
* Converts lock policy from local format to the on-wire lock_desc format
*/
-void ldlm_convert_policy_to_wire(ldlm_type_t type,
- const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_convert_policy_to_wire(enum ldlm_type type,
+ const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
- ldlm_policy_local_to_wire_t convert;
+ ldlm_policy_local_to_wire_t convert;
- convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];
+ convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];
- convert(lpolicy, wpolicy);
+ convert(lpolicy, wpolicy);
}
/**
* Converts lock policy from the on-wire lock_desc format to local format
*/
-void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
- const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
+ const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
ldlm_policy_wire_to_local_t convert;
- int new_client;
- /** some badness for 2.0.0 clients, but 2.0.0 isn't supported */
- new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0;
- if (new_client)
- convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE];
- else
- convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE];
+ convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];
convert(wpolicy, lpolicy);
}
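+
+/*
+ * Dispatch sketch (illustrative, not part of this change): both converters
+ * index a function-pointer table by lock type, so a new lock type only
+ * needs a new table entry. Converting a hypothetical extent policy to its
+ * wire form:
+ *
+ *   union ldlm_policy_data lpolicy = {
+ *       .l_extent = { .start = 0, .end = 4095 },
+ *   };
+ *   union ldlm_wire_policy_data wpolicy;
+ *
+ *   ldlm_convert_policy_to_wire(LDLM_EXTENT, &lpolicy, &wpolicy);
+ */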
static ldlm_processing_policy ldlm_processing_policy_table[] = {
[LDLM_PLAIN] = ldlm_process_plain_lock,
[LDLM_EXTENT] = ldlm_process_extent_lock,
-# ifdef __KERNEL__
[LDLM_FLOCK] = ldlm_process_flock_lock,
-# endif
[LDLM_IBITS] = ldlm_process_inodebits_lock,
};
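+
+/*
+ * Dispatch sketch (illustrative): server-side reprocessing picks the
+ * policy function by resource type and runs it; the variables below are
+ * assumed to be in scope:
+ *
+ *   ldlm_processing_policy policy;
+ *
+ *   policy = ldlm_processing_policy_table[res->lr_type];
+ *   rc = policy(lock, &flags, 0, &err, &work_list);
+ */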
res = lock->l_resource;
LASSERT(ldlm_is_destroyed(lock));
- LASSERT(cfs_list_empty(&lock->l_res_link));
- LASSERT(cfs_list_empty(&lock->l_pending_chain));
+ LASSERT(list_empty(&lock->l_exp_list));
+ LASSERT(list_empty(&lock->l_res_link));
+ LASSERT(list_empty(&lock->l_pending_chain));
lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
LDLM_NSS_LOCKS);
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
int rc = 0;
- if (!cfs_list_empty(&lock->l_lru)) {
+ if (!list_empty(&lock->l_lru)) {
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- cfs_list_del_init(&lock->l_lru);
+ list_del_init(&lock->l_lru);
LASSERT(ns->ns_nr_unused > 0);
ns->ns_nr_unused--;
rc = 1;
/**
* Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
+ *
+ * If \a last_use is non-zero, it will remove the lock from LRU only if
+ * it matches lock's l_last_used.
+ *
+ * \retval 0 the lock was not removed: either it was not in the LRU list,
+ *           or \a last_use is non-zero and does not match the lock's
+ *           l_last_used.
+ * \retval 1 the lock was in LRU list and removed.
*/
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, cfs_time_t last_use)
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- int rc;
+ int rc = 0;
ENTRY;
if (ldlm_is_ns_srv(lock)) {
- LASSERT(cfs_list_empty(&lock->l_lru));
+ LASSERT(list_empty(&lock->l_lru));
RETURN(0);
}
spin_lock(&ns->ns_lock);
- rc = ldlm_lock_remove_from_lru_nolock(lock);
+ if (last_use == 0 || last_use == lock->l_last_used)
+ rc = ldlm_lock_remove_from_lru_nolock(lock);
spin_unlock(&ns->ns_lock);
- EXIT;
- return rc;
+
+ RETURN(rc);
}
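+
+/*
+ * Usage sketch (illustrative): a caller that samples l_last_used before
+ * dropping its locks can make the removal conditional, so a lock touched
+ * in the meantime stays in the LRU:
+ *
+ *   cfs_time_t last_use = lock->l_last_used;
+ *
+ *   ... drop locks, do other work ...
+ *
+ *   if (ldlm_lock_remove_from_lru_check(lock, last_use))
+ *       ... the lock was still unused and is now off the LRU ...
+ */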
/**
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
lock->l_last_used = cfs_time_current();
- LASSERT(cfs_list_empty(&lock->l_lru));
+ LASSERT(list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
+ list_add_tail(&lock->l_lru, &ns->ns_unused_list);
ldlm_clear_skipped(lock);
LASSERT(ns->ns_nr_unused >= 0);
ns->ns_nr_unused++;
ENTRY;
if (ldlm_is_ns_srv(lock)) {
- LASSERT(cfs_list_empty(&lock->l_lru));
+ LASSERT(list_empty(&lock->l_lru));
EXIT;
return;
}
spin_lock(&ns->ns_lock);
- if (!cfs_list_empty(&lock->l_lru)) {
+ if (!list_empty(&lock->l_lru)) {
ldlm_lock_remove_from_lru_nolock(lock);
ldlm_lock_add_to_lru_nolock(lock);
}
* ldlm_lock_destroy, you can never drop your final references on this lock.
* Because it's not in the hash table anymore. -phil
*/
-int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
+static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
ENTRY;
LBUG();
}
- if (!cfs_list_empty(&lock->l_res_link)) {
+ if (!list_empty(&lock->l_res_link)) {
LDLM_ERROR(lock, "lock still on resource");
LBUG();
}
if (ldlm_is_destroyed(lock)) {
- LASSERT(cfs_list_empty(&lock->l_lru));
+ LASSERT(list_empty(&lock->l_lru));
EXIT;
return 0;
}
OBD_SLAB_FREE(lock, ldlm_lock_slab, size);
}
-struct portals_handle_ops lock_handle_ops = {
+static struct portals_handle_ops lock_handle_ops = {
.hop_addref = lock_handle_addref,
.hop_free = lock_handle_free,
};
lu_ref_add(&resource->lr_reference, "lock", lock);
atomic_set(&lock->l_refc, 2);
- CFS_INIT_LIST_HEAD(&lock->l_res_link);
- CFS_INIT_LIST_HEAD(&lock->l_lru);
- CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
- CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
- CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
- CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
+ INIT_LIST_HEAD(&lock->l_res_link);
+ INIT_LIST_HEAD(&lock->l_lru);
+ INIT_LIST_HEAD(&lock->l_pending_chain);
+ INIT_LIST_HEAD(&lock->l_bl_ast);
+ INIT_LIST_HEAD(&lock->l_cp_ast);
+ INIT_LIST_HEAD(&lock->l_rk_ast);
init_waitqueue_head(&lock->l_waitq);
lock->l_blocking_lock = NULL;
- CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
- CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
- CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
- CFS_INIT_HLIST_NODE(&lock->l_exp_flock_hash);
+ INIT_LIST_HEAD(&lock->l_sl_mode);
+ INIT_LIST_HEAD(&lock->l_sl_policy);
+ INIT_HLIST_NODE(&lock->l_exp_hash);
+ INIT_HLIST_NODE(&lock->l_exp_flock_hash);
lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
LDLM_NSS_LOCKS);
- CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
+ INIT_LIST_HEAD(&lock->l_handle.h_link);
class_handle_hash(&lock->l_handle, &lock_handle_ops);
lu_ref_init(&lock->l_reference);
lock->l_callback_timeout = 0;
#if LUSTRE_TRACKS_LOCK_EXP_REFS
- CFS_INIT_LIST_HEAD(&lock->l_exp_refs_link);
+ INIT_LIST_HEAD(&lock->l_exp_refs_link);
lock->l_exp_refs_nr = 0;
lock->l_exp_refs_target = NULL;
#endif
- CFS_INIT_LIST_HEAD(&lock->l_exp_list);
+ INIT_LIST_HEAD(&lock->l_exp_list);
RETURN(lock);
}
LASSERT(new_resid->name[0] != 0);
/* This function assumes that the lock isn't on any lists */
- LASSERT(cfs_list_empty(&lock->l_res_link));
+ LASSERT(list_empty(&lock->l_res_link));
type = oldres->lr_type;
unlock_res_and_lock(lock);
- newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
- if (newres == NULL)
- RETURN(-ENOMEM);
+ newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
+ if (IS_ERR(newres))
+ RETURN(PTR_ERR(newres));
lu_ref_add(&newres->lr_reference, "lock", lock);
/*
RETURN(0);
}
-EXPORT_SYMBOL(ldlm_lock_change_resource);
/** \defgroup ldlm_handles LDLM HANDLES
* Ways to get hold of locks without any addresses.
*/
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
- struct obd_export *exp = lock->l_export ?: lock->l_conn_export;
-
- /* INODEBITS_INTEROP: If the other side does not support
- * inodebits, reply with a plain lock descriptor. */
- if ((lock->l_resource->lr_type == LDLM_IBITS) &&
- (exp && !(exp_connect_flags(exp) & OBD_CONNECT_IBITS))) {
- /* Make sure all the right bits are set in this lock we
- are going to pass to client */
- LASSERTF(lock->l_policy_data.l_inodebits.bits ==
- (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LAYOUT),
- "Inappropriate inode lock bits during "
- "conversion " LPU64 "\n",
- lock->l_policy_data.l_inodebits.bits);
-
- ldlm_res2desc(lock->l_resource, &desc->l_resource);
- desc->l_resource.lr_type = LDLM_PLAIN;
-
- /* Convert "new" lock mode to something old client can
- understand */
- if ((lock->l_req_mode == LCK_CR) ||
- (lock->l_req_mode == LCK_CW))
- desc->l_req_mode = LCK_PR;
- else
- desc->l_req_mode = lock->l_req_mode;
- if ((lock->l_granted_mode == LCK_CR) ||
- (lock->l_granted_mode == LCK_CW)) {
- desc->l_granted_mode = LCK_PR;
- } else {
- /* We never grant PW/EX locks to clients */
- LASSERT((lock->l_granted_mode != LCK_PW) &&
- (lock->l_granted_mode != LCK_EX));
- desc->l_granted_mode = lock->l_granted_mode;
- }
-
- /* We do not copy policy here, because there is no
- policy for plain locks */
- } else {
- ldlm_res2desc(lock->l_resource, &desc->l_resource);
- desc->l_req_mode = lock->l_req_mode;
- desc->l_granted_mode = lock->l_granted_mode;
- ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
- &lock->l_policy_data,
- &desc->l_policy_data);
- }
+ ldlm_res2desc(lock->l_resource, &desc->l_resource);
+ desc->l_req_mode = lock->l_req_mode;
+ desc->l_granted_mode = lock->l_granted_mode;
+ ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
+ &lock->l_policy_data,
+ &desc->l_policy_data);
}
-EXPORT_SYMBOL(ldlm_lock2desc);
/**
* Add a lock to list of conflicting locks to send AST to.
*
* Only add if we have not sent a blocking AST to the lock yet.
*/
-void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- cfs_list_t *work_list)
+static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
+ struct list_head *work_list)
{
if (!ldlm_is_ast_sent(lock)) {
LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
* discard dirty data, rather than writing back. */
if (ldlm_is_ast_discard_data(new))
ldlm_set_discard_data(lock);
- LASSERT(cfs_list_empty(&lock->l_bl_ast));
- cfs_list_add(&lock->l_bl_ast, work_list);
+ LASSERT(list_empty(&lock->l_bl_ast));
+ list_add(&lock->l_bl_ast, work_list);
LDLM_LOCK_GET(lock);
LASSERT(lock->l_blocking_lock == NULL);
lock->l_blocking_lock = LDLM_LOCK_GET(new);
/**
* Add a lock to list of just granted locks to send completion AST to.
*/
-void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
+static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
+ struct list_head *work_list)
{
if (!ldlm_is_cp_reqd(lock)) {
ldlm_set_cp_reqd(lock);
LDLM_DEBUG(lock, "lock granted; sending completion AST.");
- LASSERT(cfs_list_empty(&lock->l_cp_ast));
- cfs_list_add(&lock->l_cp_ast, work_list);
+ LASSERT(list_empty(&lock->l_cp_ast));
+ list_add(&lock->l_cp_ast, work_list);
LDLM_LOCK_GET(lock);
}
}
* Must be called with lr_lock held.
*/
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- cfs_list_t *work_list)
+ struct list_head *work_list)
{
ENTRY;
check_res_locked(lock->l_resource);
* r/w reference type is determined by \a mode
* Calls ldlm_lock_addref_internal.
*/
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_addref(struct lustre_handle *lockh, enum ldlm_mode mode)
{
- struct ldlm_lock *lock;
+ struct ldlm_lock *lock;
- lock = ldlm_handle2lock(lockh);
- LASSERT(lock != NULL);
- ldlm_lock_addref_internal(lock, mode);
- LDLM_LOCK_PUT(lock);
+ lock = ldlm_handle2lock(lockh);
+ LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
+ ldlm_lock_addref_internal(lock, mode);
+ LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_addref);
* Removes lock from LRU if it is there.
* Assumes the LDLM lock is already locked.
*/
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
+ enum ldlm_mode mode)
{
ldlm_lock_remove_from_lru(lock);
if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
*
* \retval -EAGAIN lock is being canceled.
*/
-int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
+int ldlm_lock_addref_try(struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock;
int result;
* Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
* Only called for local locks.
*/
-void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
- lock_res_and_lock(lock);
- ldlm_lock_addref_internal_nolock(lock, mode);
- unlock_res_and_lock(lock);
+ lock_res_and_lock(lock);
+ ldlm_lock_addref_internal_nolock(lock, mode);
+ unlock_res_and_lock(lock);
}
/**
* Does NOT add lock to LRU if no r/w references left to accommodate flock locks
* that cannot be placed in LRU.
*/
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
+ enum ldlm_mode mode)
{
LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
* on the namespace.
* For blocked LDLM locks if r/w count drops to zero, blocking_ast is called.
*/
-void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
struct ldlm_namespace *ns;
ENTRY;
ldlm_set_cbpending(lock);
}
- if (!lock->l_readers && !lock->l_writers &&
- ldlm_is_cbpending(lock)) {
- /* If we received a blocked AST and this was the last reference,
- * run the callback. */
+ if (!lock->l_readers && !lock->l_writers &&
+ (ldlm_is_cbpending(lock) || lock->l_req_mode == LCK_GROUP)) {
+		/* If we received a blocking AST and this was the last
+		 * reference, run the callback.
+		 * Group locks are special: they must not go on the LRU, and
+		 * they are never called back like non-group locks; instead
+		 * they are released manually. They keep an l_writers
+		 * reference until that manual release, so we destroy them
+		 * once they have no more reader or writer references.
+		 * - LU-6368 */
if (ldlm_is_ns_srv(lock) && lock->l_export)
CERROR("FL_CBPENDING set on non-local lock--just a "
"warning\n");
/**
* Decrease reader/writer refcount for LDLM lock with handle \a lockh
*/
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref(struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
* \a lockh and mark it for subsequent cancellation once r/w refcount
* drops to zero instead of putting into LRU.
*
- * Typical usage is for GROUP locks which we cannot allow to be cached.
*/
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh,
+ enum ldlm_mode mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
ENTRY;
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
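+/*
+ * Usage sketch (illustrative; "lockh" is a hypothetical handle): a caller
+ * finished with a lock that must not be cached drops its reference and
+ * requests cancellation in one call:
+ *
+ *   ldlm_lock_decref_and_cancel(&lockh, LCK_PW);
+ */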
struct sl_insert_point {
- cfs_list_t *res_link;
- cfs_list_t *mode_link;
- cfs_list_t *policy_link;
+ struct list_head *res_link;
+ struct list_head *mode_link;
+ struct list_head *policy_link;
};
/**
* NOTE: called by
* - ldlm_grant_lock_with_skiplist
*/
-static void search_granted_lock(cfs_list_t *queue,
+static void search_granted_lock(struct list_head *queue,
struct ldlm_lock *req,
struct sl_insert_point *prev)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
struct ldlm_lock *lock, *mode_end, *policy_end;
ENTRY;
- cfs_list_for_each(tmp, queue) {
- lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ list_for_each(tmp, queue) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
- mode_end = cfs_list_entry(lock->l_sl_mode.prev,
+ mode_end = list_entry(lock->l_sl_mode.prev,
struct ldlm_lock, l_sl_mode);
if (lock->l_req_mode != req->l_req_mode) {
} else if (lock->l_resource->lr_type == LDLM_IBITS) {
for (;;) {
policy_end =
- cfs_list_entry(lock->l_sl_policy.prev,
+ list_entry(lock->l_sl_policy.prev,
struct ldlm_lock,
l_sl_policy);
/* go to next policy group within mode group */
tmp = policy_end->l_res_link.next;
- lock = cfs_list_entry(tmp, struct ldlm_lock,
+ lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
} /* loop over policy groups within the mode group */
return;
}
- LASSERT(cfs_list_empty(&lock->l_res_link));
- LASSERT(cfs_list_empty(&lock->l_sl_mode));
- LASSERT(cfs_list_empty(&lock->l_sl_policy));
+ LASSERT(list_empty(&lock->l_res_link));
+ LASSERT(list_empty(&lock->l_sl_mode));
+ LASSERT(list_empty(&lock->l_sl_policy));
/*
* lock->link == prev->link means lock is first starting the group.
* Don't re-add to itself to suppress kernel warnings.
*/
if (&lock->l_res_link != prev->res_link)
- cfs_list_add(&lock->l_res_link, prev->res_link);
+ list_add(&lock->l_res_link, prev->res_link);
if (&lock->l_sl_mode != prev->mode_link)
- cfs_list_add(&lock->l_sl_mode, prev->mode_link);
+ list_add(&lock->l_sl_mode, prev->mode_link);
if (&lock->l_sl_policy != prev->policy_link)
- cfs_list_add(&lock->l_sl_policy, prev->policy_link);
+ list_add(&lock->l_sl_policy, prev->policy_link);
EXIT;
}
*
* must be called with lr_lock held
*/
-void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
+void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
struct ldlm_resource *res = lock->l_resource;
ENTRY;
else
ldlm_resource_add_lock(res, &res->lr_granted, lock);
- if (lock->l_granted_mode < res->lr_most_restr)
- res->lr_most_restr = lock->l_granted_mode;
-
ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
EXIT;
}
/**
- * Search for a lock with given properties in a queue.
+ * Matching criteria and result for a lock match; also passed to
+ * itree_overlap_cb as its data argument.
+ */
+struct lock_match_data {
+ struct ldlm_lock *lmd_old;
+ struct ldlm_lock *lmd_lock;
+ enum ldlm_mode *lmd_mode;
+ union ldlm_policy_data *lmd_policy;
+ __u64 lmd_flags;
+ int lmd_unref;
+};
+
+/**
+ * Check if the given \a lock meets the criteria for a match.
+ * A reference on the lock is taken if matched.
*
- * \retval a referenced lock or NULL. See the flag descriptions below, in the
- * comment above ldlm_lock_match
+ * \param lock the candidate lock to test
+ * \param data match parameters and result
*/
-static struct ldlm_lock *search_queue(cfs_list_t *queue,
- ldlm_mode_t *mode,
- ldlm_policy_data_t *policy,
- struct ldlm_lock *old_lock,
- __u64 flags, int unref)
-{
- struct ldlm_lock *lock;
- cfs_list_t *tmp;
+static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
+{
+ union ldlm_policy_data *lpol = &lock->l_policy_data;
+ enum ldlm_mode match;
+
+ if (lock == data->lmd_old)
+ return INTERVAL_ITER_STOP;
+
+	/* Check if this lock can be matched.
+	 * Used by LU-2919 (exclusive open) for open lease lock */
+ if (ldlm_is_excl(lock))
+ return INTERVAL_ITER_CONT;
+
+ /* llite sometimes wants to match locks that will be
+ * canceled when their users drop, but we allow it to match
+ * if it passes in CBPENDING and the lock still has users.
+	 * This is generally only going to be used by children
+ * whose parents already hold a lock so forward progress
+ * can still happen. */
+ if (ldlm_is_cbpending(lock) &&
+ !(data->lmd_flags & LDLM_FL_CBPENDING))
+ return INTERVAL_ITER_CONT;
+ if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
+ lock->l_readers == 0 && lock->l_writers == 0)
+ return INTERVAL_ITER_CONT;
+
+ if (!(lock->l_req_mode & *data->lmd_mode))
+ return INTERVAL_ITER_CONT;
+ match = lock->l_req_mode;
+
+ switch (lock->l_resource->lr_type) {
+ case LDLM_EXTENT:
+ if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
+ lpol->l_extent.end < data->lmd_policy->l_extent.end)
+ return INTERVAL_ITER_CONT;
+
+ if (unlikely(match == LCK_GROUP) &&
+ data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
+ lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
+ return INTERVAL_ITER_CONT;
+ break;
+ case LDLM_IBITS:
+		/* We match if we have an existing lock with the same or a
+		 * wider set of bits. */
+ if ((lpol->l_inodebits.bits &
+ data->lmd_policy->l_inodebits.bits) !=
+ data->lmd_policy->l_inodebits.bits)
+ return INTERVAL_ITER_CONT;
+ break;
+	default:
+		break;
+ }
- cfs_list_for_each(tmp, queue) {
- ldlm_mode_t match;
+	/* Skip locks that are already going away (destroyed or failed),
+	 * unless matching unreferenced locks was explicitly requested. */
+	if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
+		return INTERVAL_ITER_CONT;
- lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock)))
+ return INTERVAL_ITER_CONT;
- if (lock == old_lock)
- break;
+ if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
+ LDLM_LOCK_GET(lock);
+ ldlm_lock_touch_in_lru(lock);
+ } else {
+ ldlm_lock_addref_internal_nolock(lock, match);
+ }
- /* Check if this lock can be matched.
- * Used by LU-2919(exclusive open) for open lease lock */
- if (ldlm_is_excl(lock))
- continue;
+ *data->lmd_mode = match;
+ data->lmd_lock = lock;
- /* llite sometimes wants to match locks that will be
- * canceled when their users drop, but we allow it to match
- * if it passes in CBPENDING and the lock still has users.
- * this is generally only going to be used by children
- * whose parents already hold a lock so forward progress
- * can still happen. */
- if (ldlm_is_cbpending(lock) &&
- !(flags & LDLM_FL_CBPENDING))
- continue;
- if (!unref && ldlm_is_cbpending(lock) &&
- lock->l_readers == 0 && lock->l_writers == 0)
- continue;
+ return INTERVAL_ITER_STOP;
+}
- if (!(lock->l_req_mode & *mode))
- continue;
- match = lock->l_req_mode;
+static unsigned int itree_overlap_cb(struct interval_node *in, void *args)
+{
+ struct ldlm_interval *node = to_ldlm_interval(in);
+ struct lock_match_data *data = args;
+ struct ldlm_lock *lock;
+ int rc;
- if (lock->l_resource->lr_type == LDLM_EXTENT &&
- (lock->l_policy_data.l_extent.start >
- policy->l_extent.start ||
- lock->l_policy_data.l_extent.end < policy->l_extent.end))
- continue;
+ list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+ rc = lock_matches(lock, data);
+ if (rc == INTERVAL_ITER_STOP)
+ return INTERVAL_ITER_STOP;
+ }
+ return INTERVAL_ITER_CONT;
+}
- if (unlikely(match == LCK_GROUP) &&
- lock->l_resource->lr_type == LDLM_EXTENT &&
- lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
- continue;
+/**
+ * Search for a lock with given parameters in interval trees.
+ *
+ * \param res search for a lock in this resource
+ * \param data parameters
+ *
+ * \retval a referenced lock or NULL.
+ */
+static struct ldlm_lock *search_itree(struct ldlm_resource *res,
+ struct lock_match_data *data)
+{
+ struct interval_node_extent ext = {
+ .start = data->lmd_policy->l_extent.start,
+ .end = data->lmd_policy->l_extent.end
+ };
+ int idx;
- /* We match if we have existing lock with same or wider set
- of bits. */
- if (lock->l_resource->lr_type == LDLM_IBITS &&
- ((lock->l_policy_data.l_inodebits.bits &
- policy->l_inodebits.bits) !=
- policy->l_inodebits.bits))
- continue;
+ for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+ struct ldlm_interval_tree *tree = &res->lr_itree[idx];
- if (!unref && LDLM_HAVE_MASK(lock, GONE))
- continue;
+ if (tree->lit_root == NULL)
+ continue;
- if ((flags & LDLM_FL_LOCAL_ONLY) &&
- !ldlm_is_local(lock))
- continue;
+ if (!(tree->lit_mode & *data->lmd_mode))
+ continue;
- if (flags & LDLM_FL_TEST_LOCK) {
- LDLM_LOCK_GET(lock);
- ldlm_lock_touch_in_lru(lock);
- } else {
- ldlm_lock_addref_internal_nolock(lock, match);
- }
- *mode = match;
- return lock;
- }
+ interval_search(tree->lit_root, &ext,
+ itree_overlap_cb, data);
+ }
+ return data->lmd_lock;
+}
+
+/**
+ * Search for a lock with given properties in a queue.
+ *
+ * \param queue search for a lock in this queue
+ * \param data parameters
+ *
+ * \retval a referenced lock or NULL.
+ */
+static struct ldlm_lock *search_queue(struct list_head *queue,
+ struct lock_match_data *data)
+{
+ struct ldlm_lock *lock;
+ int rc;
- return NULL;
+ list_for_each_entry(lock, queue, l_res_link) {
+ rc = lock_matches(lock, data);
+ if (rc == INTERVAL_ITER_STOP)
+ return data->lmd_lock;
+ }
+ return NULL;
}
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
ldlm_lock_fail_match_locked(lock);
unlock_res_and_lock(lock);
}
-EXPORT_SYMBOL(ldlm_lock_fail_match);
/**
* Mark lock as "matchable" by OST.
* keep caller code unchanged), the context failure will be discovered by
* caller sometime later.
*/
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
- const struct ldlm_res_id *res_id, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh, int unref)
-{
- struct ldlm_resource *res;
- struct ldlm_lock *lock, *old_lock = NULL;
- int rc = 0;
- ENTRY;
+enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
+ const struct ldlm_res_id *res_id,
+ enum ldlm_type type,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode,
+ struct lustre_handle *lockh, int unref)
+{
+ struct lock_match_data data = {
+ .lmd_old = NULL,
+ .lmd_lock = NULL,
+ .lmd_mode = &mode,
+ .lmd_policy = policy,
+ .lmd_flags = flags,
+ .lmd_unref = unref,
+ };
+ struct ldlm_resource *res;
+ struct ldlm_lock *lock;
+ int rc = 0;
+ ENTRY;
- if (ns == NULL) {
- old_lock = ldlm_handle2lock(lockh);
- LASSERT(old_lock);
+ if (ns == NULL) {
+ data.lmd_old = ldlm_handle2lock(lockh);
+ LASSERT(data.lmd_old != NULL);
- ns = ldlm_lock_to_ns(old_lock);
- res_id = &old_lock->l_resource->lr_name;
- type = old_lock->l_resource->lr_type;
- mode = old_lock->l_req_mode;
- }
+ ns = ldlm_lock_to_ns(data.lmd_old);
+ res_id = &data.lmd_old->l_resource->lr_name;
+ type = data.lmd_old->l_resource->lr_type;
+ *data.lmd_mode = data.lmd_old->l_req_mode;
+ }
- res = ldlm_resource_get(ns, NULL, res_id, type, 0);
- if (res == NULL) {
- LASSERT(old_lock == NULL);
- RETURN(0);
- }
+ res = ldlm_resource_get(ns, NULL, res_id, type, 0);
+ if (IS_ERR(res)) {
+ LASSERT(data.lmd_old == NULL);
+ RETURN(0);
+ }
- LDLM_RESOURCE_ADDREF(res);
- lock_res(res);
+ LDLM_RESOURCE_ADDREF(res);
+ lock_res(res);
- lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
- flags, unref);
- if (lock != NULL)
- GOTO(out, rc = 1);
- if (flags & LDLM_FL_BLOCK_GRANTED)
- GOTO(out, rc = 0);
- lock = search_queue(&res->lr_converting, &mode, policy, old_lock,
- flags, unref);
- if (lock != NULL)
- GOTO(out, rc = 1);
- lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
- flags, unref);
- if (lock != NULL)
- GOTO(out, rc = 1);
+ if (res->lr_type == LDLM_EXTENT)
+ lock = search_itree(res, &data);
+ else
+ lock = search_queue(&res->lr_granted, &data);
+ if (lock != NULL)
+ GOTO(out, rc = 1);
+ if (flags & LDLM_FL_BLOCK_GRANTED)
+ GOTO(out, rc = 0);
+ lock = search_queue(&res->lr_converting, &data);
+ if (lock != NULL)
+ GOTO(out, rc = 1);
+ lock = search_queue(&res->lr_waiting, &data);
+ if (lock != NULL)
+ GOTO(out, rc = 1);
EXIT;
out:
(type == LDLM_PLAIN || type == LDLM_IBITS) ?
res_id->name[3] : policy->l_extent.end);
}
- if (old_lock)
- LDLM_LOCK_PUT(old_lock);
+ if (data.lmd_old != NULL)
+ LDLM_LOCK_PUT(data.lmd_old);
- return rc ? mode : 0;
+ return rc ? mode : 0;
}
EXPORT_SYMBOL(ldlm_lock_match);
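+/*
+ * Usage sketch (illustrative; "res_id" and "lockh" are hypothetical):
+ * look for a cached PR or PW extent lock covering a whole object and
+ * take a reference on success:
+ *
+ *   union ldlm_policy_data policy = {
+ *       .l_extent = { .start = 0, .end = OBD_OBJECT_EOF },
+ *   };
+ *   enum ldlm_mode mode;
+ *
+ *   mode = ldlm_lock_match(ns, 0, &res_id, LDLM_EXTENT, &policy,
+ *                          LCK_PR | LCK_PW, &lockh, 0);
+ *   if (mode != 0)
+ *       ... matched; release with ldlm_lock_decref(&lockh, mode) ...
+ */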
-ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
- __u64 *bits)
+enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+ __u64 *bits)
{
- struct ldlm_lock *lock;
- ldlm_mode_t mode = 0;
- ENTRY;
+ struct ldlm_lock *lock;
+ enum ldlm_mode mode = 0;
+ ENTRY;
- lock = ldlm_handle2lock(lockh);
- if (lock != NULL) {
- lock_res_and_lock(lock);
+ lock = ldlm_handle2lock(lockh);
+ if (lock != NULL) {
+ lock_res_and_lock(lock);
if (LDLM_HAVE_MASK(lock, GONE))
- GOTO(out, mode);
+ GOTO(out, mode);
if (ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
* Returns a referenced lock
*/
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_type_t type,
- ldlm_mode_t mode,
- const struct ldlm_callback_suite *cbs,
+ const struct ldlm_res_id *res_id,
+ enum ldlm_type type,
+ enum ldlm_mode mode,
+ const struct ldlm_callback_suite *cbs,
void *data, __u32 lvb_len,
enum lvb_type lvb_type)
{
- struct ldlm_lock *lock;
- struct ldlm_resource *res;
- ENTRY;
-
- res = ldlm_resource_get(ns, NULL, res_id, type, 1);
- if (res == NULL)
- RETURN(NULL);
+ struct ldlm_lock *lock;
+ struct ldlm_resource *res;
+ int rc;
+ ENTRY;
- lock = ldlm_lock_new(res);
+ res = ldlm_resource_get(ns, NULL, res_id, type, 1);
+ if (IS_ERR(res))
+ RETURN(ERR_CAST(res));
- if (lock == NULL)
- RETURN(NULL);
+ lock = ldlm_lock_new(res);
+ if (lock == NULL)
+ RETURN(ERR_PTR(-ENOMEM));
lock->l_req_mode = mode;
lock->l_ast_data = data;
lock->l_pid = current_pid();
if (ns_is_server(ns))
ldlm_set_ns_srv(lock);
- if (cbs) {
- lock->l_blocking_ast = cbs->lcs_blocking;
- lock->l_completion_ast = cbs->lcs_completion;
- lock->l_glimpse_ast = cbs->lcs_glimpse;
- }
-
- lock->l_tree_node = NULL;
- /* if this is the extent lock, allocate the interval tree node */
- if (type == LDLM_EXTENT) {
- if (ldlm_interval_alloc(lock) == NULL)
- GOTO(out, 0);
- }
+ if (cbs) {
+ lock->l_blocking_ast = cbs->lcs_blocking;
+ lock->l_completion_ast = cbs->lcs_completion;
+ lock->l_glimpse_ast = cbs->lcs_glimpse;
+ }
- if (lvb_len) {
- lock->l_lvb_len = lvb_len;
- OBD_ALLOC_LARGE(lock->l_lvb_data, lvb_len);
- if (lock->l_lvb_data == NULL)
- GOTO(out, 0);
- }
+ lock->l_tree_node = NULL;
+ /* if this is the extent lock, allocate the interval tree node */
+ if (type == LDLM_EXTENT)
+ if (ldlm_interval_alloc(lock) == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ if (lvb_len) {
+ lock->l_lvb_len = lvb_len;
+ OBD_ALLOC_LARGE(lock->l_lvb_data, lvb_len);
+ if (lock->l_lvb_data == NULL)
+ GOTO(out, rc = -ENOMEM);
+ }
lock->l_lvb_type = lvb_type;
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
- GOTO(out, 0);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
+ GOTO(out, rc = -ENOENT);
- RETURN(lock);
+ RETURN(lock);
out:
- ldlm_lock_destroy(lock);
- LDLM_LOCK_RELEASE(lock);
- return NULL;
+ ldlm_lock_destroy(lock);
+ LDLM_LOCK_RELEASE(lock);
+ RETURN(ERR_PTR(rc));
}
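+/*
+ * Caller-side sketch (illustrative): ldlm_lock_create() now returns
+ * ERR_PTR() codes instead of NULL, so callers switch from NULL checks
+ * to IS_ERR()/PTR_ERR():
+ *
+ *   lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data,
+ *                           lvb_len, lvb_type);
+ *   if (IS_ERR(lock))
+ *       RETURN(PTR_ERR(lock));
+ */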
/**
* set, skip all the enqueueing and delegate lock processing to intent policy
* function.
*/
-ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
- struct ldlm_lock **lockp,
- void *cookie, __u64 *flags)
+enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
+ struct ldlm_lock **lockp,
+ void *cookie, __u64 *flags)
{
- struct ldlm_lock *lock = *lockp;
- struct ldlm_resource *res = lock->l_resource;
- int local = ns_is_client(ldlm_res_to_ns(res));
+ struct ldlm_lock *lock = *lockp;
+ struct ldlm_resource *res = lock->l_resource;
+ int local = ns_is_client(ldlm_res_to_ns(res));
#ifdef HAVE_SERVER_SUPPORT
- ldlm_processing_policy policy;
+ ldlm_processing_policy policy;
#endif
- ldlm_error_t rc = ELDLM_OK;
- struct ldlm_interval *node = NULL;
- ENTRY;
+ enum ldlm_error rc = ELDLM_OK;
+ struct ldlm_interval *node = NULL;
+ ENTRY;
- lock->l_last_activity = cfs_time_current_sec();
/* policies are not executed on the client or during replay */
if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
&& !local && ns->ns_policy) {
}
}
+ if (*flags & LDLM_FL_RESENT) {
+		/* Reconstruct the LDLM_FL_SRV_ENQ_MASK bits of @flags for
+		 * the reply: always set LOCK_CHANGED; set BLOCK_GRANTED if
+		 * the lock is not granted yet; and take NO_TIMEOUT from the
+		 * lock, as it is inherited through LDLM_FL_INHERIT_MASK. */
+ *flags |= LDLM_FL_LOCK_CHANGED;
+ if (lock->l_req_mode != lock->l_granted_mode)
+ *flags |= LDLM_FL_BLOCK_GRANTED;
+ *flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
+ RETURN(ELDLM_OK);
+ }
+
/* For a replaying lock, it might be already in granted list. So
* unlinking the lock will cause the interval node to be freed, we
* have to allocate the interval node early otherwise we can't regrant
/* The server returned a blocked lock, but it was granted
* before we got a chance to actually enqueue it. We don't
* need to do anything else. */
- *flags &= ~(LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
+ *flags &= ~LDLM_FL_BLOCKED_MASK;
GOTO(out, rc = ELDLM_OK);
}
GOTO(out, rc = -ENOMEM);
}
- CFS_INIT_LIST_HEAD(&node->li_group);
+ INIT_LIST_HEAD(&node->li_group);
ldlm_interval_attach(node, lock);
node = NULL;
}
*
* Must be called with resource lock held.
*/
-int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
- cfs_list_t *work_list)
+int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
+ struct list_head *work_list)
{
- cfs_list_t *tmp, *pos;
- ldlm_processing_policy policy;
+ struct list_head *tmp, *pos;
+ ldlm_processing_policy policy;
__u64 flags;
- int rc = LDLM_ITER_CONTINUE;
- ldlm_error_t err;
- ENTRY;
+ int rc = LDLM_ITER_CONTINUE;
+ enum ldlm_error err;
+ ENTRY;
- check_res_locked(res);
+ check_res_locked(res);
- policy = ldlm_processing_policy_table[res->lr_type];
- LASSERT(policy);
+ policy = ldlm_processing_policy_table[res->lr_type];
+ LASSERT(policy);
+
+ list_for_each_safe(tmp, pos, queue) {
+ struct ldlm_lock *pending;
- cfs_list_for_each_safe(tmp, pos, queue) {
- struct ldlm_lock *pending;
- pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
+ pending = list_entry(tmp, struct ldlm_lock, l_res_link);
CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
struct ldlm_lock *lock;
ENTRY;
- if (cfs_list_empty(arg->list))
+ if (list_empty(arg->list))
RETURN(-ENOENT);
- lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
+ lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
/* nobody should touch l_bl_ast */
lock_res_and_lock(lock);
- cfs_list_del_init(&lock->l_bl_ast);
+ list_del_init(&lock->l_bl_ast);
LASSERT(ldlm_is_ast_sent(lock));
LASSERT(lock->l_bl_ast_run == 0);
ldlm_completion_callback completion_callback;
ENTRY;
- if (cfs_list_empty(arg->list))
+ if (list_empty(arg->list))
RETURN(-ENOENT);
- lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
+ lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
/* It's possible to receive a completion AST before we've set
* the l_completion_ast pointer: either because the AST arrived
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
- cfs_list_del_init(&lock->l_cp_ast);
+ list_del_init(&lock->l_cp_ast);
LASSERT(ldlm_is_cp_reqd(lock));
/* save l_completion_ast since it can be changed by
* mds_intent_policy(), see bug 14225 */
struct ldlm_lock *lock;
ENTRY;
- if (cfs_list_empty(arg->list))
+ if (list_empty(arg->list))
RETURN(-ENOENT);
- lock = cfs_list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
- cfs_list_del_init(&lock->l_rk_ast);
+ lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
+ list_del_init(&lock->l_rk_ast);
/* the desc just pretends to be exclusive */
ldlm_lock2desc(lock, &desc);
int rc = 0;
ENTRY;
- if (cfs_list_empty(arg->list))
+ if (list_empty(arg->list))
RETURN(-ENOENT);
- gl_work = cfs_list_entry(arg->list->next, struct ldlm_glimpse_work,
+ gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
gl_list);
- cfs_list_del_init(&gl_work->gl_list);
+ list_del_init(&gl_work->gl_list);
lock = gl_work->gl_lock;
* Used on server to send multiple ASTs together instead of sending one by
* one.
*/
-int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list,
+int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
ldlm_desc_ast_t ast_type)
{
struct ldlm_cb_set_arg *arg;
set_producer_func work_ast_lock;
int rc;
- if (cfs_list_empty(rpc_list))
+ if (list_empty(rpc_list))
RETURN(0);
OBD_ALLOC_PTR(arg);
return LDLM_ITER_CONTINUE;
}
-static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *arg)
+static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+ struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
int rc;
*/
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
- ENTRY;
+ ENTRY;
- if (ns != NULL) {
- cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_reprocess_res, NULL);
- }
- EXIT;
+ if (ns != NULL) {
+ cfs_hash_for_each_nolock(ns->ns_rs_hash,
+ ldlm_reprocess_res, NULL, 0);
+ }
+ EXIT;
}
-EXPORT_SYMBOL(ldlm_reprocess_all_ns);
/**
* Try to grant all waiting locks on a resource.
*/
void ldlm_reprocess_all(struct ldlm_resource *res)
{
- CFS_LIST_HEAD(rpc_list);
-
+ struct list_head rpc_list;
#ifdef HAVE_SERVER_SUPPORT
int rc;
ENTRY;
+
+ INIT_LIST_HEAD(&rpc_list);
/* Local lock trees don't get reprocessed. */
if (ns_is_client(ldlm_res_to_ns(res))) {
EXIT;
rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
LDLM_WORK_CP_AST);
if (rc == -ERESTART) {
- LASSERT(cfs_list_empty(&rpc_list));
+ LASSERT(list_empty(&rpc_list));
goto restart;
}
#else
ENTRY;
+
+ INIT_LIST_HEAD(&rpc_list);
if (!ns_is_client(ldlm_res_to_ns(res))) {
CERROR("This is client-side-only module, cannot handle "
"LDLM_NAMESPACE_SERVER resource type lock.\n");
req->l_resource->lr_type != LDLM_IBITS)
return;
- cfs_list_del_init(&req->l_sl_policy);
- cfs_list_del_init(&req->l_sl_mode);
+ list_del_init(&req->l_sl_policy);
+ list_del_init(&req->l_sl_mode);
}
/**
/* Releases cancel callback. */
ldlm_cancel_callback(lock);
- /* Yes, second time, just in case it was added again while we were
- * running with no res lock in ldlm_cancel_callback */
- if (ldlm_is_waited(lock))
- ldlm_del_waiting_lock(lock);
+ LASSERT(!ldlm_is_waited(lock));
ldlm_resource_unlink_lock(lock);
ldlm_lock_destroy_nolock(lock);
int ecl_loop;
};
+static void ldlm_cancel_lock_for_export(struct obd_export *exp,
+ struct ldlm_lock *lock,
+ struct export_cl_data *ecl)
+{
+ struct ldlm_resource *res;
+
+ res = ldlm_resource_getref(lock->l_resource);
+
+ ldlm_res_lvbo_update(res, NULL, 1);
+ ldlm_lock_cancel(lock);
+ if (!exp->exp_obd->obd_stopping)
+ ldlm_reprocess_all(res);
+ ldlm_resource_putref(res);
+
+ ecl->ecl_loop++;
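+	/* ecl_loop & -ecl_loop isolates the lowest set bit, so this fires
+	 * only at power-of-two counts, throttling the debug output. */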
+ if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
+ CDEBUG(D_INFO, "Export %p, %d locks cancelled.\n",
+ exp, ecl->ecl_loop);
+ }
+}
+
/**
- * Iterator function for ldlm_cancel_locks_for_export.
+ * Iterator function for ldlm_export_cancel_locks.
* Cancels passed locks.
*/
-int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+static int
+ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+ struct hlist_node *hnode, void *data)
{
struct export_cl_data *ecl = (struct export_cl_data *)data;
struct obd_export *exp = ecl->ecl_exp;
- struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
- struct ldlm_resource *res;
+ struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
- res = ldlm_resource_getref(lock->l_resource);
- LDLM_LOCK_GET(lock);
+ LDLM_LOCK_GET(lock);
+ ldlm_cancel_lock_for_export(exp, lock, ecl);
+ LDLM_LOCK_RELEASE(lock);
- LDLM_DEBUG(lock, "export %p", exp);
- ldlm_res_lvbo_update(res, NULL, 1);
- ldlm_lock_cancel(lock);
- ldlm_reprocess_all(res);
- ldlm_resource_putref(res);
- LDLM_LOCK_RELEASE(lock);
+ return 0;
+}
- ecl->ecl_loop++;
- if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
- CDEBUG(D_INFO,
- "Cancel lock %p for export %p (loop %d), still have "
- "%d locks left on hash table.\n",
- lock, exp, ecl->ecl_loop,
- atomic_read(&hs->hs_count));
+/**
+ * Cancel all blocked locks for given export.
+ *
+ * Typically called on client disconnection/eviction
+ */
+int ldlm_export_cancel_blocked_locks(struct obd_export *exp)
+{
+ struct export_cl_data ecl = {
+ .ecl_exp = exp,
+ .ecl_loop = 0,
+ };
+
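+	/* Pop one lock at a time under the bh-safe spinlock and cancel it
+	 * outside the lock, since cancellation takes other locks and may
+	 * block. */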
+ while (!list_empty(&exp->exp_bl_list)) {
+ struct ldlm_lock *lock;
+
+ spin_lock_bh(&exp->exp_bl_list_lock);
+ if (!list_empty(&exp->exp_bl_list)) {
+ lock = list_entry(exp->exp_bl_list.next,
+ struct ldlm_lock, l_exp_list);
+ LDLM_LOCK_GET(lock);
+ list_del_init(&lock->l_exp_list);
+ } else {
+ lock = NULL;
+ }
+ spin_unlock_bh(&exp->exp_bl_list_lock);
+
+ if (lock == NULL)
+ break;
+
+ ldlm_cancel_lock_for_export(exp, lock, &ecl);
+ LDLM_LOCK_RELEASE(lock);
}
- return 0;
+ CDEBUG(D_DLMTRACE, "Export %p, canceled %d locks, "
+ "left on hash table %d.\n", exp, ecl.ecl_loop,
+ atomic_read(&exp->exp_lock_hash->hs_count));
+
+ return ecl.ecl_loop;
}
/**
* Cancel all locks for given export.
*
- * Typically called on client disconnection/eviction
+ * Typically called after client disconnection/eviction
*/
-void ldlm_cancel_locks_for_export(struct obd_export *exp)
+int ldlm_export_cancel_locks(struct obd_export *exp)
{
struct export_cl_data ecl = {
.ecl_exp = exp,
cfs_hash_for_each_empty(exp->exp_lock_hash,
ldlm_cancel_locks_for_export_cb, &ecl);
+
+ CDEBUG(D_DLMTRACE, "Export %p, canceled %d locks, "
+ "left on hash table %d.\n", exp, ecl.ecl_loop,
+ atomic_read(&exp->exp_lock_hash->hs_count));
+
+ return ecl.ecl_loop;
}
/**
* \param lock A lock to convert
* \param new_mode new lock mode
*/
-void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
+void ldlm_lock_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode)
{
ENTRY;
* optimizations could take advantage of it to avoid discarding cached
* pages on a file.
*/
-struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
- __u32 *flags)
+struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock,
+ enum ldlm_mode new_mode, __u32 *flags)
{
- CFS_LIST_HEAD(rpc_list);
+ struct list_head rpc_list;
struct ldlm_resource *res;
struct ldlm_namespace *ns;
int granted = 0;
struct ldlm_interval *node;
ENTRY;
+ INIT_LIST_HEAD(&rpc_list);
/* Just return if mode is unchanged. */
if (new_mode == lock->l_granted_mode) {
*flags |= LDLM_FL_BLOCK_GRANTED;
/* FIXME: ugly code, I have to attach the lock to an
* interval node again since perhaps it will be granted
* soon */
- CFS_INIT_LIST_HEAD(&node->li_group);
+ INIT_LIST_HEAD(&node->li_group);
ldlm_interval_attach(node, lock);
node = NULL;
}
lock->l_completion_ast(lock, 0, NULL);
}
#ifdef HAVE_SERVER_SUPPORT
- } else {
- int rc;
- ldlm_error_t err;
+ } else {
+ int rc;
+ enum ldlm_error err;
__u64 pflags = 0;
- ldlm_processing_policy policy;
+ ldlm_processing_policy policy;
+
policy = ldlm_processing_policy_table[res->lr_type];
rc = policy(lock, &pflags, 0, &err, &rpc_list);
if (rc == LDLM_ITER_STOP) {
OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
RETURN(res);
}
-EXPORT_SYMBOL(ldlm_lock_convert);
/**
* Print lock with lock handle \a lockh description into debug log.