*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
/** \defgroup LDLM Lustre Distributed Lock Manager
*/
struct portals_handle l_handle;
/**
- * Internal spinlock protects l_resource. We should hold this lock
- * first before taking res_lock.
- */
- spinlock_t l_lock;
- /**
* Pointer to actual resource this lock is in.
- * ldlm_lock_change_resource() can change this.
+ * ldlm_lock_change_resource() can change this on the client.
+ * When this is possible, rcu must be used to stabilise
+ * the resource while we lock and check it hasn't been changed.
*/
struct ldlm_resource *l_resource;
/**
/**
* List item for list in namespace hash.
- * protected by ns_lock
+ * protected by ns_lock.
+ * Shared with linkage for RCU-delayed free.
*/
- struct hlist_node lr_hash;
+ union {
+ struct hlist_node lr_hash;
+ struct rcu_head lr_rcu;
+ };
/** Reference count for this resource */
atomic_t lr_refcount;
/* init lvb now if not already */
rc = ldlm_lvbo_init(lock->l_resource);
if (rc < 0) {
- CERROR("lock %p: delayed lvb init failed (rc %d)",
+ CERROR("lock %p: delayed lvb init failed (rc %d)\n",
lock, rc);
return rc;
}
struct list_head *queue,
struct list_head *work_list,
enum ldlm_process_intention intention,
- struct ldlm_lock *hint);
+ __u64 hint);
/**
* Return values for lock iterators.
__u64 *bits);
void ldlm_lock_mode_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode);
void ldlm_lock_cancel(struct ldlm_lock *lock);
-void ldlm_reprocess_all(struct ldlm_resource *res, struct ldlm_lock *hint);
+void ldlm_reprocess_all(struct ldlm_resource *res, __u64 hint);
void ldlm_reprocess_recovery_done(struct ldlm_namespace *ns);
void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);