struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
- atomic_inc(&lock->l_refc);
+ cfs_atomic_inc(&lock->l_refc);
return lock;
}
ENTRY;
LASSERT(lock->l_resource != LP_POISON);
- LASSERT(atomic_read(&lock->l_refc) > 0);
- if (atomic_dec_and_test(&lock->l_refc)) {
+ LASSERT(cfs_atomic_read(&lock->l_refc) > 0);
+ if (cfs_atomic_dec_and_test(&lock->l_refc)) {
struct ldlm_resource *res;
LDLM_DEBUG(lock,
res = lock->l_resource;
LASSERT(lock->l_destroyed);
- LASSERT(list_empty(&lock->l_res_link));
- LASSERT(list_empty(&lock->l_pending_chain));
+ LASSERT(cfs_list_empty(&lock->l_res_link));
+ LASSERT(cfs_list_empty(&lock->l_pending_chain));
- atomic_dec(&res->lr_namespace->ns_locks);
+ cfs_atomic_dec(&res->lr_namespace->ns_locks);
lu_ref_del(&res->lr_reference, "lock", lock);
ldlm_resource_putref(res);
lock->l_resource = NULL;
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
int rc = 0;
- if (!list_empty(&lock->l_lru)) {
+ if (!cfs_list_empty(&lock->l_lru)) {
struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- list_del_init(&lock->l_lru);
+ cfs_list_del_init(&lock->l_lru);
LASSERT(ns->ns_nr_unused > 0);
ns->ns_nr_unused--;
rc = 1;
struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
int rc;
ENTRY;
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_unused_lock);
rc = ldlm_lock_remove_from_lru_nolock(lock);
- spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_unused_lock);
EXIT;
return rc;
}
{
struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
lock->l_last_used = cfs_time_current();
- LASSERT(list_empty(&lock->l_lru));
+ LASSERT(cfs_list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- list_add_tail(&lock->l_lru, &ns->ns_unused_list);
+ cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
LASSERT(ns->ns_nr_unused >= 0);
ns->ns_nr_unused++;
}
{
struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
ENTRY;
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_unused_lock);
ldlm_lock_add_to_lru_nolock(lock);
- spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_unused_lock);
EXIT;
}
{
struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
ENTRY;
- spin_lock(&ns->ns_unused_lock);
- if (!list_empty(&lock->l_lru)) {
+ cfs_spin_lock(&ns->ns_unused_lock);
+ if (!cfs_list_empty(&lock->l_lru)) {
ldlm_lock_remove_from_lru_nolock(lock);
ldlm_lock_add_to_lru_nolock(lock);
}
- spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_unused_lock);
EXIT;
}
LBUG();
}
- if (!list_empty(&lock->l_res_link)) {
+ if (!cfs_list_empty(&lock->l_res_link)) {
LDLM_ERROR(lock, "lock still on resource");
ldlm_lock_dump(D_ERROR, lock, 0);
LBUG();
}
if (lock->l_destroyed) {
- LASSERT(list_empty(&lock->l_lru));
+ LASSERT(cfs_list_empty(&lock->l_lru));
EXIT;
return 0;
}
lock->l_destroyed = 1;
if (lock->l_export && lock->l_export->exp_lock_hash &&
- !hlist_unhashed(&lock->l_exp_hash))
+ !cfs_hlist_unhashed(&lock->l_exp_hash))
cfs_hash_del(lock->l_export->exp_lock_hash,
&lock->l_remote_handle, &lock->l_exp_hash);
if (lock == NULL)
RETURN(NULL);
- spin_lock_init(&lock->l_lock);
+ cfs_spin_lock_init(&lock->l_lock);
lock->l_resource = ldlm_resource_getref(resource);
lu_ref_add(&resource->lr_reference, "lock", lock);
- atomic_set(&lock->l_refc, 2);
+ cfs_atomic_set(&lock->l_refc, 2);
CFS_INIT_LIST_HEAD(&lock->l_res_link);
CFS_INIT_LIST_HEAD(&lock->l_lru);
CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
- atomic_inc(&resource->lr_namespace->ns_locks);
+ cfs_atomic_inc(&resource->lr_namespace->ns_locks);
CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
class_handle_hash(&lock->l_handle, lock_handle_addref);
CFS_INIT_LIST_HEAD(&lock->l_extents_list);
- spin_lock_init(&lock->l_extents_list_lock);
+ cfs_spin_lock_init(&lock->l_extents_list_lock);
CFS_INIT_LIST_HEAD(&lock->l_cache_locks_list);
lu_ref_init(&lock->l_reference);
lu_ref_add(&lock->l_reference, "hash", lock);
LASSERT(new_resid->name[0] != 0);
/* This function assumes that the lock isn't on any lists */
- LASSERT(list_empty(&lock->l_res_link));
+ LASSERT(cfs_list_empty(&lock->l_res_link));
type = oldres->lr_type;
unlock_res_and_lock(lock);
* lock->l_lock, and are taken in the memory address order to avoid
* dead-locks.
*/
- spin_lock(&lock->l_lock);
+ cfs_spin_lock(&lock->l_lock);
oldres = lock->l_resource;
if (oldres < newres) {
lock_res(oldres);
}
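/* A minimal sketch of the ordering rule described above, assuming the
 * elided branch mirrors the one shown (the real code may use a
 * lockdep-nested variant for the second acquisition):
 *
 *     if (oldres < newres) {
 *             lock_res(oldres);
 *             lock_res(newres);
 *     } else {
 *             lock_res(newres);
 *             lock_res(oldres);
 *     }
 *
 * Taking the lower-addressed resource lock first on every path means two
 * threads moving locks between the same pair of resources cannot each
 * hold one lock while waiting for the other. */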
void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- struct list_head *work_list)
+ cfs_list_t *work_list)
{
if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
* discard dirty data, rather than writing back. */
if (new->l_flags & LDLM_AST_DISCARD_DATA)
lock->l_flags |= LDLM_FL_DISCARD_DATA;
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, work_list);
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, work_list);
LDLM_LOCK_GET(lock);
LASSERT(lock->l_blocking_lock == NULL);
lock->l_blocking_lock = LDLM_LOCK_GET(new);
}
}
-void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
+void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
{
if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
lock->l_flags |= LDLM_FL_CP_REQD;
LDLM_DEBUG(lock, "lock granted; sending completion AST.");
- LASSERT(list_empty(&lock->l_cp_ast));
- list_add(&lock->l_cp_ast, work_list);
+ LASSERT(cfs_list_empty(&lock->l_cp_ast));
+ cfs_list_add(&lock->l_cp_ast, work_list);
LDLM_LOCK_GET(lock);
}
}
/* must be called with lr_lock held */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- struct list_head *work_list)
+ cfs_list_t *work_list)
{
ENTRY;
check_res_locked(lock->l_resource);
}
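/* Rough picture of the helpers above: they only queue the lock on the
 * caller-supplied work_list (taking an extra reference via LDLM_LOCK_GET()
 * and setting the LDLM_FL_AST_SENT / LDLM_FL_CP_REQD flags); no callback
 * is sent from here.  The queued items are drained later by
 * ldlm_run_ast_work() below, which walks the list and issues the actual
 * blocking/completion ASTs. */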
struct sl_insert_point {
- struct list_head *res_link;
- struct list_head *mode_link;
- struct list_head *policy_link;
+ cfs_list_t *res_link;
+ cfs_list_t *mode_link;
+ cfs_list_t *policy_link;
};
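/* A brief reading of sl_insert_point, as filled in by search_granted_lock()
 * below: the three cursors mark where a newly granted lock is spliced in.
 * res_link points into the resource's granted list, mode_link into the
 * group of already-granted locks of the same mode, and policy_link into
 * the same-policy (e.g. inodebits) subgroup within that mode group. */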
/*
* NOTE: called by
* - ldlm_grant_lock_with_skiplist
*/
-static void search_granted_lock(struct list_head *queue,
+static void search_granted_lock(cfs_list_t *queue,
struct ldlm_lock *req,
struct sl_insert_point *prev)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
struct ldlm_lock *lock, *mode_end, *policy_end;
ENTRY;
- list_for_each(tmp, queue) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, queue) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
- mode_end = list_entry(lock->l_sl_mode.prev, struct ldlm_lock,
- l_sl_mode);
+ mode_end = cfs_list_entry(lock->l_sl_mode.prev,
+ struct ldlm_lock, l_sl_mode);
if (lock->l_req_mode != req->l_req_mode) {
/* jump to last lock of mode group */
return;
} else if (lock->l_resource->lr_type == LDLM_IBITS) {
for (;;) {
- policy_end = list_entry(lock->l_sl_policy.prev,
- struct ldlm_lock,
- l_sl_policy);
+ policy_end =
+ cfs_list_entry(lock->l_sl_policy.prev,
+ struct ldlm_lock,
+ l_sl_policy);
if (lock->l_policy_data.l_inodebits.bits ==
req->l_policy_data.l_inodebits.bits) {
/* go to next policy group within mode group */
tmp = policy_end->l_res_link.next;
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
} /* loop over policy groups within the mode group */
/* insert point is last lock of the mode group,
return;
}
- LASSERT(list_empty(&lock->l_res_link));
- LASSERT(list_empty(&lock->l_sl_mode));
- LASSERT(list_empty(&lock->l_sl_policy));
+ LASSERT(cfs_list_empty(&lock->l_res_link));
+ LASSERT(cfs_list_empty(&lock->l_sl_mode));
+ LASSERT(cfs_list_empty(&lock->l_sl_policy));
- list_add(&lock->l_res_link, prev->res_link);
- list_add(&lock->l_sl_mode, prev->mode_link);
- list_add(&lock->l_sl_policy, prev->policy_link);
+ cfs_list_add(&lock->l_res_link, prev->res_link);
+ cfs_list_add(&lock->l_sl_mode, prev->mode_link);
+ cfs_list_add(&lock->l_sl_policy, prev->policy_link);
EXIT;
}
*
* must be called with lr_lock held
*/
-void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
+void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
{
struct ldlm_resource *res = lock->l_resource;
ENTRY;
/* returns a referenced lock or NULL. See the flag descriptions below, in the
* comment above ldlm_lock_match */
-static struct ldlm_lock *search_queue(struct list_head *queue,
+static struct ldlm_lock *search_queue(cfs_list_t *queue,
ldlm_mode_t *mode,
ldlm_policy_data_t *policy,
struct ldlm_lock *old_lock,
int flags, int unref)
{
struct ldlm_lock *lock;
- struct list_head *tmp;
+ cfs_list_t *tmp;
- list_for_each(tmp, queue) {
+ cfs_list_for_each(tmp, queue) {
ldlm_mode_t match;
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (lock == old_lock)
break;
}
/* Must be called with namespace taken: queue is waiting or converting. */
-int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
- struct list_head *work_list)
+int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
+ cfs_list_t *work_list)
{
- struct list_head *tmp, *pos;
+ cfs_list_t *tmp, *pos;
ldlm_processing_policy policy;
int flags;
int rc = LDLM_ITER_CONTINUE;
policy = ldlm_processing_policy_table[res->lr_type];
LASSERT(policy);
- list_for_each_safe(tmp, pos, queue) {
+ cfs_list_for_each_safe(tmp, pos, queue) {
struct ldlm_lock *pending;
- pending = list_entry(tmp, struct ldlm_lock, l_res_link);
+ pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
}
static int
-ldlm_work_bl_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
+ldlm_work_bl_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
struct ldlm_lock_desc d;
- struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_bl_ast);
+ struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_bl_ast);
ENTRY;
/* nobody should touch l_bl_ast */
lock_res_and_lock(lock);
- list_del_init(&lock->l_bl_ast);
+ cfs_list_del_init(&lock->l_bl_ast);
LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
LASSERT(lock->l_bl_ast_run == 0);
}
static int
-ldlm_work_cp_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
+ldlm_work_cp_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
- struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_cp_ast);
+ struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock, l_cp_ast);
ldlm_completion_callback completion_callback;
int rc = 0;
ENTRY;
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
- list_del_init(&lock->l_cp_ast);
+ cfs_list_del_init(&lock->l_cp_ast);
LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
/* save l_completion_ast since it can be changed by
* mds_intent_policy(), see bug 14225 */
}
static int
-ldlm_work_revoke_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
+ldlm_work_revoke_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
struct ldlm_lock_desc desc;
- struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_rk_ast);
+ struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_rk_ast);
ENTRY;
- list_del_init(&lock->l_rk_ast);
+ cfs_list_del_init(&lock->l_rk_ast);
/* the desc just pretends to be exclusive */
ldlm_lock2desc(lock, &desc);
RETURN(1);
}
-int ldlm_run_ast_work(struct list_head *rpc_list, ldlm_desc_ast_t ast_type)
+int ldlm_run_ast_work(cfs_list_t *rpc_list, ldlm_desc_ast_t ast_type)
{
struct ldlm_cb_set_arg arg;
- struct list_head *tmp, *pos;
- int (*work_ast_lock)(struct list_head *tmp,struct ldlm_cb_set_arg *arg);
+ cfs_list_t *tmp, *pos;
+ int (*work_ast_lock)(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg);
int ast_count;
ENTRY;
- if (list_empty(rpc_list))
+ if (cfs_list_empty(rpc_list))
RETURN(0);
arg.set = ptlrpc_prep_set();
if (NULL == arg.set)
RETURN(-ERESTART);
- atomic_set(&arg.restart, 0);
+ cfs_atomic_set(&arg.restart, 0);
switch (ast_type) {
case LDLM_WORK_BL_AST:
arg.type = LDLM_BL_CALLBACK;
}
ast_count = 0;
- list_for_each_safe(tmp, pos, rpc_list) {
+ cfs_list_for_each_safe(tmp, pos, rpc_list) {
ast_count += work_ast_lock(tmp, &arg);
/* Send the request set if it exceeds the PARALLEL_AST_LIMIT,
* write memory leaking. */
ptlrpc_set_destroy(arg.set);
- RETURN(atomic_read(&arg.restart) ? -ERESTART : 0);
+ RETURN(cfs_atomic_read(&arg.restart) ? -ERESTART : 0);
}
static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
- struct list_head *tmp;
+ cfs_list_t *tmp;
int i, rc;
if (ns == NULL)
return;
ENTRY;
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
for (i = 0; i < RES_HASH_SIZE; i++) {
tmp = ns->ns_hash[i].next;
while (tmp != &(ns->ns_hash[i])) {
struct ldlm_resource *res =
- list_entry(tmp, struct ldlm_resource, lr_hash);
+ cfs_list_entry(tmp, struct ldlm_resource,
+ lr_hash);
ldlm_resource_getref(res);
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
LDLM_RESOURCE_ADDREF(res);
rc = reprocess_one_queue(res, NULL);
LDLM_RESOURCE_DELREF(res);
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
tmp = tmp->next;
ldlm_resource_putref_locked(res);
}
}
out:
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
EXIT;
}
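/* Note on the hash walk above: the ns_hash_lock spinlock is dropped around
 * reprocess_one_queue(), which can do real work, and the reference taken
 * with ldlm_resource_getref() beforehand is what keeps the resource (and
 * its lr_hash linkage) valid until the spinlock is re-acquired and the
 * walk advances to tmp->next. */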
rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
if (rc == -ERESTART) {
- LASSERT(list_empty(&rpc_list));
+ LASSERT(cfs_list_empty(&rpc_list));
goto restart;
}
EXIT;
req->l_resource->lr_type != LDLM_IBITS)
return;
- list_del_init(&req->l_sl_policy);
- list_del_init(&req->l_sl_mode);
+ cfs_list_del_init(&req->l_sl_policy);
+ cfs_list_del_init(&req->l_sl_mode);
}
void ldlm_lock_cancel(struct ldlm_lock *lock)
}
CDEBUG(level," -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
- lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock, lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
pos, lock->l_pid);
if (lock->l_conn_export != NULL)
obd = lock->l_conn_export->exp_obd;
CDEBUG(level, " Req mode: %s, grant mode: %s, rc: %u, read: %d, "
"write: %d flags: "LPX64"\n", ldlm_lockname[lock->l_req_mode],
ldlm_lockname[lock->l_granted_mode],
- atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
+ cfs_atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
lock->l_flags);
if (lock->l_resource->lr_type == LDLM_EXTENT)
CDEBUG(level, " Extent: "LPU64" -> "LPU64
" ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
"res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" remote: "
LPX64" expref: %d pid: %u timeout: %lu\n", lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_flags, lock->l_remote_handle.cookie,
lock->l_export ?
- atomic_read(&lock->l_export->exp_refcount) : -99,
+ cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
va_end(args);
return;
"] (req "LPU64"->"LPU64") flags: "LPX64" remote: "LPX64
" expref: %d pid: %u timeout %lu\n",
lock->l_resource->lr_namespace->ns_name, lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_resource->lr_name.name[0],
lock->l_resource->lr_name.name[1],
- atomic_read(&lock->l_resource->lr_refcount),
+ cfs_atomic_read(&lock->l_resource->lr_refcount),
ldlm_typename[lock->l_resource->lr_type],
lock->l_policy_data.l_extent.start,
lock->l_policy_data.l_extent.end,
lock->l_req_extent.start, lock->l_req_extent.end,
lock->l_flags, lock->l_remote_handle.cookie,
lock->l_export ?
- atomic_read(&lock->l_export->exp_refcount) : -99,
+ cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
"["LPU64"->"LPU64"] flags: "LPX64" remote: "LPX64
" expref: %d pid: %u timeout: %lu\n",
lock->l_resource->lr_namespace->ns_name, lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_resource->lr_name.name[0],
lock->l_resource->lr_name.name[1],
- atomic_read(&lock->l_resource->lr_refcount),
+ cfs_atomic_read(&lock->l_resource->lr_refcount),
ldlm_typename[lock->l_resource->lr_type],
lock->l_policy_data.l_flock.pid,
lock->l_policy_data.l_flock.start,
lock->l_policy_data.l_flock.end,
lock->l_flags, lock->l_remote_handle.cookie,
lock->l_export ?
- atomic_read(&lock->l_export->exp_refcount) : -99,
+ cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
"pid: %u timeout: %lu\n",
lock->l_resource->lr_namespace->ns_name,
lock, lock->l_handle.h_cookie,
- atomic_read (&lock->l_refc),
+ cfs_atomic_read (&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_resource->lr_name.name[0],
lock->l_resource->lr_name.name[1],
lock->l_policy_data.l_inodebits.bits,
- atomic_read(&lock->l_resource->lr_refcount),
+ cfs_atomic_read(&lock->l_resource->lr_refcount),
ldlm_typename[lock->l_resource->lr_type],
lock->l_flags, lock->l_remote_handle.cookie,
lock->l_export ?
- atomic_read(&lock->l_export->exp_refcount) : -99,
+ cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
"remote: "LPX64" expref: %d pid: %u timeout %lu\n",
lock->l_resource->lr_namespace->ns_name,
lock, lock->l_handle.h_cookie,
- atomic_read (&lock->l_refc),
+ cfs_atomic_read (&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
lock->l_resource->lr_name.name[0],
lock->l_resource->lr_name.name[1],
- atomic_read(&lock->l_resource->lr_refcount),
+ cfs_atomic_read(&lock->l_resource->lr_refcount),
ldlm_typename[lock->l_resource->lr_type],
lock->l_flags, lock->l_remote_handle.cookie,
lock->l_export ?
- atomic_read(&lock->l_export->exp_refcount) : -99,
+ cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
}