*/
/**
 * Take an extra reference on a lock.
 *
 * The lock's lifetime is governed by the refcount embedded in its
 * portals handle (l_handle.h_ref); bumping it keeps the lock pinned
 * until a matching ldlm_lock_put().
 *
 * \param[in] lock	lock to pin
 *
 * \retval		the same lock, returned for call chaining
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
	refcount_inc(&lock->l_handle.h_ref);

	return lock;
}
EXPORT_SYMBOL(ldlm_lock_get);
+static void lock_handle_free(struct rcu_head *rcu)
+{
+ struct ldlm_lock *lock = container_of(rcu, struct ldlm_lock,
+ l_handle.h_rcu);
+
+ OBD_FREE_PRE(lock, sizeof(*lock), "slab-freed");
+ kmem_cache_free(ldlm_lock_slab, lock);
+}
+
/**
* Release lock reference.
*
ENTRY;
LASSERT(lock->l_resource != LP_POISON);
- LASSERT(atomic_read(&lock->l_refc) > 0);
- if (atomic_dec_and_test(&lock->l_refc)) {
+ LASSERT(refcount_read(&lock->l_handle.h_ref) > 0);
+ if (refcount_dec_and_test(&lock->l_handle.h_ref)) {
struct ldlm_resource *res;
LDLM_DEBUG(lock,
ldlm_resource_putref(res);
lock->l_resource = NULL;
lu_ref_fini(&lock->l_reference);
- OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle);
+ call_rcu(&lock->l_handle.h_rcu, lock_handle_free);
}
EXIT;
EXIT;
}
/*
 * Owner tag recorded with each lock's portals handle.
 * class_handle2object() matches on this tag, so a handle cookie can
 * only ever resolve to an ldlm lock, never to some other handle type
 * sharing the hash.
 */
static const char lock_handle_owner[] = "ldlm";
/**
*
lock->l_resource = resource;
lu_ref_add(&resource->lr_reference, "lock", lock);
- atomic_set(&lock->l_refc, 2);
+ refcount_set(&lock->l_handle.h_ref, 2);
INIT_LIST_HEAD(&lock->l_res_link);
INIT_LIST_HEAD(&lock->l_lru);
INIT_LIST_HEAD(&lock->l_pending_chain);
lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
LDLM_NSS_LOCKS);
- INIT_LIST_HEAD_RCU(&lock->l_handle.h_link);
- class_handle_hash(&lock->l_handle, &lock_handle_ops);
+ INIT_HLIST_NODE(&lock->l_handle.h_link);
+ class_handle_hash(&lock->l_handle, lock_handle_owner);
lu_ref_init(&lock->l_reference);
lu_ref_add(&lock->l_reference, "hash", lock);
LASSERT(handle);
- lock = class_handle2object(handle->cookie, &lock_handle_ops);
+ if (!lustre_handle_is_used(handle))
+ RETURN(NULL);
+
+ lock = class_handle2object(handle->cookie, lock_handle_owner);
if (lock == NULL)
RETURN(NULL);
prev->mode_link = &req->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
EXIT;
- return;
}
/**
(!ldlm_is_lvb_ready(lock))) {
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
- struct l_wait_info lwi;
if (lock->l_completion_ast) {
int err = lock->l_completion_ast(lock,
GOTO(out_fail_match, matched = 0);
}
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
- NULL, LWI_ON_SIGNAL_NOOP, NULL);
+ wait_event_idle_timeout(
+ lock->l_waitq,
+ lock->l_flags & wait_flags,
+ cfs_time_seconds(obd_timeout));
- /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
- l_wait_event(lock->l_waitq, lock->l_flags & wait_flags,
- &lwi);
if (!ldlm_is_lvb_ready(lock))
GOTO(out_fail_match, matched = 0);
}
{
struct ldlm_resource *res = lock->l_resource;
enum ldlm_error rc = ELDLM_OK;
- struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ LIST_HEAD(rpc_list);
ldlm_processing_policy policy;
ENTRY;
int local = ns_is_client(ldlm_res_to_ns(res));
enum ldlm_error rc = ELDLM_OK;
struct ldlm_interval *node = NULL;
+#ifdef HAVE_SERVER_SUPPORT
+ bool reconstruct = false;
+#endif
ENTRY;
/* policies are not executed on the client or during replay */
if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
+#ifdef HAVE_SERVER_SUPPORT
+ reconstruct = !local && res->lr_type == LDLM_FLOCK &&
+ !(*flags & LDLM_FL_TEST_LOCK);
+ if (reconstruct) {
+ rc = req_can_reconstruct(cookie, NULL);
+ if (rc != 0) {
+ if (rc == 1)
+ rc = 0;
+ RETURN(rc);
+ }
+ }
+#endif
+
lock_res_and_lock(lock);
if (local && ldlm_is_granted(lock)) {
/* The server returned a blocked lock, but it was granted
out:
unlock_res_and_lock(lock);
+
+#ifdef HAVE_SERVER_SUPPORT
+ if (reconstruct) {
+ struct ptlrpc_request *req = cookie;
+
+ tgt_mk_reply_data(NULL, NULL,
+ &req->rq_export->exp_target_data,
+ req, 0, NULL, false, 0);
+ }
+#endif
if (node)
OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
return rc;
__u64 flags;
int rc = LDLM_ITER_CONTINUE;
enum ldlm_error err;
- struct list_head bl_ast_list = LIST_HEAD_INIT(bl_ast_list);
+ LIST_HEAD(bl_ast_list);
ENTRY;
restart:
list_for_each_safe(tmp, pos, queue) {
struct ldlm_lock *pending;
- struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ LIST_HEAD(rpc_list);
pending = list_entry(tmp, struct ldlm_lock, l_res_link);
enum ldlm_process_intention intention,
struct ldlm_lock *hint)
{
- struct list_head rpc_list;
+ LIST_HEAD(rpc_list);
#ifdef HAVE_SERVER_SUPPORT
ldlm_reprocessing_policy reprocess;
struct obd_device *obd;
ENTRY;
- INIT_LIST_HEAD(&rpc_list);
/* Local lock trees don't get reprocessed. */
if (ns_is_client(ldlm_res_to_ns(res))) {
EXIT;
#else
ENTRY;
- INIT_LIST_HEAD(&rpc_list);
if (!ns_is_client(ldlm_res_to_ns(res))) {
CERROR("This is client-side-only module, cannot handle "
"LDLM_NAMESPACE_SERVER resource type lock.\n");
ldlm_set_bl_done(lock);
wake_up_all(&lock->l_waitq);
} else if (!ldlm_is_bl_done(lock)) {
- struct l_wait_info lwi = { 0 };
-
/* The lock is guaranteed to have been canceled once
* returning from this function. */
unlock_res_and_lock(lock);
- l_wait_event(lock->l_waitq, is_bl_done(lock), &lwi);
+ wait_event_idle(lock->l_waitq, is_bl_done(lock));
lock_res_and_lock(lock);
}
}
&vaf,
lock,
lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
+ refcount_read(&lock->l_handle.h_ref),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_flags, nid,
lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
+ exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
va_end(args);
&vaf,
ldlm_lock_to_ns_name(lock), lock,
lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
+ refcount_read(&lock->l_handle.h_ref),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_req_extent.start, lock->l_req_extent.end,
lock->l_flags, nid,
lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
+ exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
break;
&vaf,
ldlm_lock_to_ns_name(lock), lock,
lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
+ refcount_read(&lock->l_handle.h_ref),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_policy_data.l_flock.end,
lock->l_flags, nid,
lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
+ exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
&vaf,
ldlm_lock_to_ns_name(lock),
lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
+ refcount_read(&lock->l_handle.h_ref),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
ldlm_typename[resource->lr_type],
lock->l_flags, nid,
lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
+ exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
break;
&vaf,
ldlm_lock_to_ns_name(lock),
lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
+ refcount_read(&lock->l_handle.h_ref),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
ldlm_typename[resource->lr_type],
lock->l_flags, nid,
lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
+ exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
break;