X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fldlm%2Fldlm_lock.c;h=e7a507292e466c81110b79776aaca2c3559a8c97;hb=98c2e6b446166ba8f89e60a0d4f38683b920f506;hp=bde3bd4c7475db5bfc737bf0d088ffdacfa8941c;hpb=06e652ce88cf485135df249b29ca8d0afdc9af64;p=fs%2Flustre-release.git

diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index bde3bd4..e7a5072 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -196,7 +196,7 @@ EXPORT_SYMBOL(ldlm_register_intent);
  */
 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
 {
-	cfs_atomic_inc(&lock->l_refc);
+	atomic_inc(&lock->l_refc);
 	return lock;
 }
 EXPORT_SYMBOL(ldlm_lock_get);
@@ -211,15 +211,15 @@ void ldlm_lock_put(struct ldlm_lock *lock)
 	ENTRY;
 
 	LASSERT(lock->l_resource != LP_POISON);
-	LASSERT(cfs_atomic_read(&lock->l_refc) > 0);
-	if (cfs_atomic_dec_and_test(&lock->l_refc)) {
+	LASSERT(atomic_read(&lock->l_refc) > 0);
+	if (atomic_dec_and_test(&lock->l_refc)) {
 		struct ldlm_resource *res;
 
 		LDLM_DEBUG(lock,
 			   "final lock_put on destroyed lock, freeing it.");
 
 		res = lock->l_resource;
-		LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
+		LASSERT(ldlm_is_destroyed(lock));
 		LASSERT(cfs_list_empty(&lock->l_res_link));
 		LASSERT(cfs_list_empty(&lock->l_pending_chain));
@@ -234,7 +234,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
 		}
 
 		if (lock->l_lvb_data != NULL)
-			OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);
+			OBD_FREE_LARGE(lock->l_lvb_data, lock->l_lvb_len);
 
 		ldlm_interval_free(ldlm_interval_detach(lock));
 		lu_ref_fini(&lock->l_reference);
@@ -250,19 +250,17 @@ EXPORT_SYMBOL(ldlm_lock_put);
  */
 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
 {
-        int rc = 0;
-        if (!cfs_list_empty(&lock->l_lru)) {
-                struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
-                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
-                cfs_list_del_init(&lock->l_lru);
-                if (lock->l_flags & LDLM_FL_SKIPPED)
-                        lock->l_flags &= ~LDLM_FL_SKIPPED;
-                LASSERT(ns->ns_nr_unused > 0);
-                ns->ns_nr_unused--;
-                rc = 1;
-        }
-        return rc;
+	int rc = 0;
+	if (!cfs_list_empty(&lock->l_lru)) {
+		struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
+		LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
+		cfs_list_del_init(&lock->l_lru);
+		LASSERT(ns->ns_nr_unused > 0);
+		ns->ns_nr_unused--;
+		rc = 1;
+	}
+	return rc;
 }
 
 /**
@@ -274,7 +272,7 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
 	int rc;
 	ENTRY;
 
-	if (lock->l_flags & LDLM_FL_NS_SRV) {
+	if (ldlm_is_ns_srv(lock)) {
 		LASSERT(cfs_list_empty(&lock->l_lru));
 		RETURN(0);
 	}
@@ -291,14 +289,15 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
  */
 void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
 {
-        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 
-        lock->l_last_used = cfs_time_current();
-        LASSERT(cfs_list_empty(&lock->l_lru));
-        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
-        cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
-        LASSERT(ns->ns_nr_unused >= 0);
-        ns->ns_nr_unused++;
+	lock->l_last_used = cfs_time_current();
+	LASSERT(cfs_list_empty(&lock->l_lru));
+	LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
+	cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
+	ldlm_clear_skipped(lock);
+	LASSERT(ns->ns_nr_unused >= 0);
+	ns->ns_nr_unused++;
 }
 
 /**
@@ -325,7 +324,7 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
 	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 	ENTRY;
 
-	if (lock->l_flags & LDLM_FL_NS_SRV) {
+	if (ldlm_is_ns_srv(lock)) {
 		LASSERT(cfs_list_empty(&lock->l_lru));
 		EXIT;
 		return;
 	}
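For context: the hunks above convert ldlm_lock's reference counting from the cfs_atomic_* wrappers to the kernel's native atomic API. A minimal sketch of the get/put idiom these functions follow; struct my_obj, my_obj_get and my_obj_put are hypothetical stand-ins, not Lustre code:

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct my_obj {
		atomic_t refc;
	};

	static struct my_obj *my_obj_get(struct my_obj *o)
	{
		atomic_inc(&o->refc);	/* caller must already hold a reference */
		return o;
	}

	static void my_obj_put(struct my_obj *o)
	{
		/* atomic_dec_and_test() returns true only for the caller that
		 * drops the last reference, so exactly one thread frees. */
		if (atomic_dec_and_test(&o->refc))
			kfree(o);
	}
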
@@ -373,12 +372,12 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
 		LBUG();
 	}
 
-	if (lock->l_flags & LDLM_FL_DESTROYED) {
+	if (ldlm_is_destroyed(lock)) {
 		LASSERT(cfs_list_empty(&lock->l_lru));
 		EXIT;
 		return 0;
 	}
-	lock->l_flags |= LDLM_FL_DESTROYED;
+	ldlm_set_destroyed(lock);
 
 	if (lock->l_export && lock->l_export->exp_lock_hash) {
 		/* NB: it's safe to call cfs_hash_del() even lock isn't
@@ -483,18 +482,18 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
 	lock->l_resource = resource;
 	lu_ref_add(&resource->lr_reference, "lock", lock);
 
-        cfs_atomic_set(&lock->l_refc, 2);
-        CFS_INIT_LIST_HEAD(&lock->l_res_link);
-        CFS_INIT_LIST_HEAD(&lock->l_lru);
-        CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
-        CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
-        CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
-        CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
-        cfs_waitq_init(&lock->l_waitq);
-        lock->l_blocking_lock = NULL;
-        CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
-        CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
-        CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
+	atomic_set(&lock->l_refc, 2);
+	CFS_INIT_LIST_HEAD(&lock->l_res_link);
+	CFS_INIT_LIST_HEAD(&lock->l_lru);
+	CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
+	CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
+	CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
+	CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
+	init_waitqueue_head(&lock->l_waitq);
+	lock->l_blocking_lock = NULL;
+	CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
+	CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
+	CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
 	CFS_INIT_HLIST_NODE(&lock->l_exp_flock_hash);
 
 	lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
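Throughout this patch, open-coded tests and updates of lock->l_flags are replaced by ldlm_is_*/ldlm_set_*/ldlm_clear_* helpers. Their definitions are not part of this diff; assuming they are the usual generated single-bit wrappers, they presumably reduce to something like the following sketch (illustrative only, not copied from lustre_dlm_flags.h):

	#define LDLM_TEST_FLAG(lock, b)		(((lock)->l_flags & (b)) != 0)
	#define LDLM_SET_FLAG(lock, b)		((lock)->l_flags |= (b))
	#define LDLM_CLEAR_FLAG(lock, b)	((lock)->l_flags &= ~(b))

	#define ldlm_is_destroyed(lock)		LDLM_TEST_FLAG(lock, LDLM_FL_DESTROYED)
	#define ldlm_set_destroyed(lock)	LDLM_SET_FLAG(lock, LDLM_FL_DESTROYED)
	#define ldlm_clear_skipped(lock)	LDLM_CLEAR_FLAG(lock, LDLM_FL_SKIPPED)

One practical benefit of the helpers: a single-bit accessor cannot accidentally mix up & and |, and grep finds every reader and writer of a given flag by name.
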
@@ -605,45 +604,47 @@ EXPORT_SYMBOL(ldlm_lock2handle);
 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
 				     __u64 flags)
 {
-        struct ldlm_lock *lock;
-        ENTRY;
+	struct ldlm_lock *lock;
+	ENTRY;
 
-        LASSERT(handle);
+	LASSERT(handle);
 
 	lock = class_handle2object(handle->cookie, NULL);
-        if (lock == NULL)
-                RETURN(NULL);
-
-        /* It's unlikely but possible that someone marked the lock as
-         * destroyed after we did handle2object on it */
-        if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED)== 0)) {
-                lu_ref_add(&lock->l_reference, "handle", cfs_current());
-                RETURN(lock);
-        }
+	if (lock == NULL)
+		RETURN(NULL);
+
+	/* It's unlikely but possible that someone marked the lock as
+	 * destroyed after we did handle2object on it */
+	if ((flags == 0) && !ldlm_is_destroyed(lock)) {
+		lu_ref_add(&lock->l_reference, "handle", current);
+		RETURN(lock);
+	}
 
-        lock_res_and_lock(lock);
+	lock_res_and_lock(lock);
 
-        LASSERT(lock->l_resource != NULL);
+	LASSERT(lock->l_resource != NULL);
 
-        lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
-        if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
-                unlock_res_and_lock(lock);
-                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
-                LDLM_LOCK_PUT(lock);
-                RETURN(NULL);
-        }
+	lu_ref_add_atomic(&lock->l_reference, "handle", current);
+	if (unlikely(ldlm_is_destroyed(lock))) {
+		unlock_res_and_lock(lock);
+		CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
+		LDLM_LOCK_PUT(lock);
+		RETURN(NULL);
+	}
 
-        if (flags && (lock->l_flags & flags)) {
-                unlock_res_and_lock(lock);
-                LDLM_LOCK_PUT(lock);
-                RETURN(NULL);
-        }
+	/* If we're setting flags, make sure none of them are already set.
+	 */
+	if (flags != 0) {
+		if ((lock->l_flags & flags) != 0) {
+			unlock_res_and_lock(lock);
+			LDLM_LOCK_PUT(lock);
+			RETURN(NULL);
+		}
 
-        if (flags)
-                lock->l_flags |= flags;
+		lock->l_flags |= flags;
+	}
 
-        unlock_res_and_lock(lock);
-        RETURN(lock);
+	unlock_res_and_lock(lock);
+	RETURN(lock);
 }
 EXPORT_SYMBOL(__ldlm_handle2lock);
 /** @} ldlm_handles */
 
@@ -710,13 +711,13 @@ EXPORT_SYMBOL(ldlm_lock2desc);
 void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
 			   cfs_list_t *work_list)
 {
-        if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
-                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
-                lock->l_flags |= LDLM_FL_AST_SENT;
-                /* If the enqueuing client said so, tell the AST recipient to
-                 * discard dirty data, rather than writing back. */
-                if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
-                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
+	if (!ldlm_is_ast_sent(lock)) {
+		LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
+		ldlm_set_ast_sent(lock);
+		/* If the enqueuing client said so, tell the AST recipient to
+		 * discard dirty data, rather than writing back. */
+		if (ldlm_is_ast_discard_data(new))
+			ldlm_set_discard_data(lock);
 		LASSERT(cfs_list_empty(&lock->l_bl_ast));
 		cfs_list_add(&lock->l_bl_ast, work_list);
 		LDLM_LOCK_GET(lock);
@@ -730,8 +731,8 @@ void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
  */
 void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
 {
-	if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
-		lock->l_flags |= LDLM_FL_CP_REQD;
+	if (!ldlm_is_cp_reqd(lock)) {
+		ldlm_set_cp_reqd(lock);
 		LDLM_DEBUG(lock, "lock granted; sending completion AST.");
 		LASSERT(cfs_list_empty(&lock->l_cp_ast));
 		cfs_list_add(&lock->l_cp_ast, work_list);
@@ -814,7 +815,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
 	if (lock != NULL) {
 		lock_res_and_lock(lock);
 		if (lock->l_readers != 0 || lock->l_writers != 0 ||
-		    !(lock->l_flags & LDLM_FL_CBPENDING)) {
+		    !ldlm_is_cbpending(lock)) {
 			ldlm_lock_addref_internal_nolock(lock, mode);
 			result = 0;
 		}
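The reworked __ldlm_handle2lock() above keeps its contract: with flags == 0 it simply takes a reference; with non-zero flags it fails if any requested flag is already set, and otherwise sets them atomically under the resource lock. A hedged usage sketch (LDLM_FL_CANCELING here is only an example of a guard flag; the surrounding code is hypothetical):

	struct ldlm_lock *lock;

	/* Take a ref and atomically mark the lock as being canceled;
	 * NULL means a stale handle, a destroyed lock, or that another
	 * thread already set the flag and owns the cancel path. */
	lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
	if (lock == NULL)
		return 0;
	/* ... cancellation work ... */
	LDLM_LOCK_PUT(lock);
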
@@ -881,19 +882,19 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
 
 	ldlm_lock_decref_internal_nolock(lock, mode);
 
-	if (lock->l_flags & LDLM_FL_LOCAL &&
+	if (ldlm_is_local(lock) &&
 	    !lock->l_readers && !lock->l_writers) {
 		/* If this is a local lock on a server namespace and this was
 		 * the last reference, cancel the lock. */
 		CDEBUG(D_INFO, "forcing cancel of local lock\n");
-		lock->l_flags |= LDLM_FL_CBPENDING;
+		ldlm_set_cbpending(lock);
 	}
 
 	if (!lock->l_readers && !lock->l_writers &&
-	    (lock->l_flags & LDLM_FL_CBPENDING)) {
+	    ldlm_is_cbpending(lock)) {
 		/* If we received a blocked AST and this was the last reference,
 		 * run the callback. */
-		if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
+		if (ldlm_is_ns_srv(lock) && lock->l_export)
 			CERROR("FL_CBPENDING set on non-local lock--just a "
 			       "warning\n");
@@ -903,16 +904,16 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
 		ldlm_lock_remove_from_lru(lock);
 		unlock_res_and_lock(lock);
 
-		if (lock->l_flags & LDLM_FL_FAIL_LOC)
+		if (ldlm_is_fail_loc(lock))
 			OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
 
-		if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
+		if (ldlm_is_atomic_cb(lock) ||
 		    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
 			ldlm_handle_bl_callback(ns, NULL, lock);
 	} else if (ns_is_client(ns) &&
 		   !lock->l_readers && !lock->l_writers &&
-		   !(lock->l_flags & LDLM_FL_NO_LRU) &&
-		   !(lock->l_flags & LDLM_FL_BL_AST)) {
+		   !ldlm_is_no_lru(lock) &&
+		   !ldlm_is_bl_ast(lock)) {
 
 		LDLM_DEBUG(lock, "add lock into lru list");
@@ -921,7 +922,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
 		ldlm_lock_add_to_lru(lock);
 		unlock_res_and_lock(lock);
 
-		if (lock->l_flags & LDLM_FL_FAIL_LOC)
+		if (ldlm_is_fail_loc(lock))
 			OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
 
 		/* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
@@ -966,7 +967,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
 	LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
 
 	lock_res_and_lock(lock);
-	lock->l_flags |= LDLM_FL_CBPENDING;
+	ldlm_set_cbpending(lock);
 	unlock_res_and_lock(lock);
 	ldlm_lock_decref_internal(lock, mode);
 	LDLM_LOCK_PUT(lock);
@@ -1089,7 +1090,7 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
 	ldlm_resource_dump(D_INFO, res);
 	LDLM_DEBUG(lock, "About to add lock:");
 
-	if (lock->l_flags & LDLM_FL_DESTROYED) {
+	if (ldlm_is_destroyed(lock)) {
 		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
 		return;
 	}
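The decref path above encodes the client-side lock lifecycle: if the last user reference drops while LDLM_FL_CBPENDING is set (a blocking AST arrived, or a local server lock forced it), the lock is canceled; otherwise an unused client lock is parked in the namespace LRU. From a caller's point of view the pattern is simply the following (a sketch; the mode and the handle variable are illustrative):

	ldlm_lock_addref(&lockh, LCK_PR);	/* pin the lock while doing I/O */
	/* ... read under the PR lock ... */
	ldlm_lock_decref(&lockh, LCK_PR);	/* may cancel or move to LRU */
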
@@ -1198,10 +1199,10 @@ static struct ldlm_lock *search_queue(cfs_list_t *queue,
 		 * this is generally only going to be used by children
 		 * whose parents already hold a lock so forward progress
 		 * can still happen. */
-		if (lock->l_flags & LDLM_FL_CBPENDING &&
+		if (ldlm_is_cbpending(lock) &&
 		    !(flags & LDLM_FL_CBPENDING))
 			continue;
-		if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
+		if (!unref && ldlm_is_cbpending(lock) &&
 		    lock->l_readers == 0 && lock->l_writers == 0)
 			continue;
@@ -1228,11 +1229,11 @@ static struct ldlm_lock *search_queue(cfs_list_t *queue,
 		      policy->l_inodebits.bits))
 			continue;
 
-		if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
+		if (!unref && LDLM_HAVE_MASK(lock, GONE))
 			continue;
 
 		if ((flags & LDLM_FL_LOCAL_ONLY) &&
-		    !(lock->l_flags & LDLM_FL_LOCAL))
+		    !ldlm_is_local(lock))
 			continue;
 
 		if (flags & LDLM_FL_TEST_LOCK) {
@@ -1252,7 +1253,7 @@ void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
 {
 	if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
 		lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
-		cfs_waitq_broadcast(&lock->l_waitq);
+		wake_up_all(&lock->l_waitq);
 	}
 }
 EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
@@ -1274,8 +1275,8 @@ EXPORT_SYMBOL(ldlm_lock_fail_match);
  */
 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
 {
-	lock->l_flags |= LDLM_FL_LVB_READY;
-	cfs_waitq_broadcast(&lock->l_waitq);
+	ldlm_set_lvb_ready(lock);
+	wake_up_all(&lock->l_waitq);
 }
 EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
@@ -1374,7 +1375,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
 	if (lock) {
 		ldlm_lock2handle(lock, lockh);
 		if ((flags & LDLM_FL_LVB_READY) &&
-		    (!(lock->l_flags & LDLM_FL_LVB_READY))) {
+		    (!ldlm_is_lvb_ready(lock))) {
 			__u64 wait_flags = LDLM_FL_LVB_READY |
 				LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
 			struct l_wait_info lwi;
@@ -1400,7 +1401,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
 			l_wait_event(lock->l_waitq,
 				     lock->l_flags & wait_flags,
 				     &lwi);
-			if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
+			if (!ldlm_is_lvb_ready(lock)) {
 				if (flags & LDLM_FL_TEST_LOCK)
 					LDLM_LOCK_RELEASE(lock);
 				else
@@ -1455,10 +1456,10 @@ ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
 	lock = ldlm_handle2lock(lockh);
 	if (lock != NULL) {
 		lock_res_and_lock(lock);
-		if (lock->l_flags & LDLM_FL_GONE_MASK)
+		if (LDLM_HAVE_MASK(lock, GONE))
 			GOTO(out, mode);
 
-		if (lock->l_flags & LDLM_FL_CBPENDING &&
+		if (ldlm_is_cbpending(lock) &&
 		    lock->l_readers == 0 && lock->l_writers == 0)
 			GOTO(out, mode);
@@ -1603,11 +1604,11 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
 	if (lock == NULL)
 		RETURN(NULL);
 
-        lock->l_req_mode = mode;
-        lock->l_ast_data = data;
-        lock->l_pid = cfs_curproc_pid();
+	lock->l_req_mode = mode;
+	lock->l_ast_data = data;
+	lock->l_pid = current_pid();
 	if (ns_is_server(ns))
-		lock->l_flags |= LDLM_FL_NS_SRV;
+		ldlm_set_ns_srv(lock);
 	if (cbs) {
 		lock->l_blocking_ast = cbs->lcs_blocking;
 		lock->l_completion_ast = cbs->lcs_completion;
@@ -1623,7 +1624,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
 
 	if (lvb_len) {
 		lock->l_lvb_len = lvb_len;
-		OBD_ALLOC(lock->l_lvb_data, lvb_len);
+		OBD_ALLOC_LARGE(lock->l_lvb_data, lvb_len);
 		if (lock->l_lvb_data == NULL)
 			GOTO(out, 0);
 	}
@@ -1702,7 +1703,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
 		 * need to do anything else. */
 		*flags &= ~(LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV |
 			    LDLM_FL_BLOCK_WAIT);
-		GOTO(out, ELDLM_OK);
+		GOTO(out, rc = ELDLM_OK);
 	}
 
 	ldlm_resource_unlink_lock(lock);
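Two hunks above replace tests of lock->l_flags & LDLM_FL_GONE_MASK with LDLM_HAVE_MASK(lock, GONE). Assuming the mask macros pair a *_MASK constant with a token-pasting test, the shape is presumably the following (illustrative, not copied from the header):

	/* A lock is "gone" when it has been destroyed or has failed, so
	 * match/revalidate must skip it.  Assumed shape: */
	#define LDLM_FL_GONE_MASK	(LDLM_FL_DESTROYED | LDLM_FL_FAILED)
	#define LDLM_HAVE_MASK(lock, w)	\
		(((lock)->l_flags & LDLM_FL_##w##_MASK) != 0)
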
@@ -1719,7 +1720,8 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
 
 	/* Some flags from the enqueue want to make it into the AST, via the
 	 * lock's l_flags. */
-	lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
+	if (*flags & LDLM_FL_AST_DISCARD_DATA)
+		ldlm_set_ast_discard_data(lock);
 
 	/* This distinction between local lock trees is very important; a client
 	 * namespace only has information about locks taken by that client, and
@@ -1739,18 +1741,18 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
 			ldlm_resource_add_lock(res, &res->lr_waiting, lock);
 		else
 			ldlm_grant_lock(lock, NULL);
-		GOTO(out, ELDLM_OK);
+		GOTO(out, rc = ELDLM_OK);
 #ifdef HAVE_SERVER_SUPPORT
 	} else if (*flags & LDLM_FL_REPLAY) {
 		if (*flags & LDLM_FL_BLOCK_CONV) {
 			ldlm_resource_add_lock(res, &res->lr_converting, lock);
-			GOTO(out, ELDLM_OK);
+			GOTO(out, rc = ELDLM_OK);
 		} else if (*flags & LDLM_FL_BLOCK_WAIT) {
 			ldlm_resource_add_lock(res, &res->lr_waiting, lock);
-			GOTO(out, ELDLM_OK);
+			GOTO(out, rc = ELDLM_OK);
 		} else if (*flags & LDLM_FL_BLOCK_GRANTED) {
 			ldlm_grant_lock(lock, NULL);
-			GOTO(out, ELDLM_OK);
+			GOTO(out, rc = ELDLM_OK);
 		}
 		/* If no flags, fall through to normal enqueue path. */
 	}
@@ -1832,7 +1834,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
 	lock_res_and_lock(lock);
 	cfs_list_del_init(&lock->l_bl_ast);
 
-	LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+	LASSERT(ldlm_is_ast_sent(lock));
 	LASSERT(lock->l_bl_ast_run == 0);
 	LASSERT(lock->l_blocking_lock);
 	lock->l_bl_ast_run++;
@@ -1879,11 +1881,11 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
 	/* nobody should touch l_cp_ast */
 	lock_res_and_lock(lock);
 	cfs_list_del_init(&lock->l_cp_ast);
-	LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
+	LASSERT(ldlm_is_cp_reqd(lock));
 	/* save l_completion_ast since it can be changed by
 	 * mds_intent_policy(), see bug 14225 */
 	completion_callback = lock->l_completion_ast;
-	lock->l_flags &= ~LDLM_FL_CP_REQD;
+	ldlm_clear_cp_reqd(lock);
 	unlock_res_and_lock(lock);
 
 	if (completion_callback != NULL)
@@ -1977,7 +1979,7 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list,
 	if (arg == NULL)
 		RETURN(-ENOMEM);
 
-	cfs_atomic_set(&arg->restart, 0);
+	atomic_set(&arg->restart, 0);
 	arg->list = rpc_list;
 
 	switch (ast_type) {
@@ -2013,7 +2015,7 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list,
 	ptlrpc_set_wait(arg->set);
 	ptlrpc_set_destroy(arg->set);
 
-	rc = cfs_atomic_read(&arg->restart) ? -ERESTART : 0;
+	rc = atomic_read(&arg->restart) ? -ERESTART : 0;
 	GOTO(out, rc);
 out:
 	OBD_FREE_PTR(arg);
@@ -2097,6 +2099,7 @@ restart:
 #endif
 	EXIT;
 }
+EXPORT_SYMBOL(ldlm_reprocess_all);
 
 /**
  * Helper function to call blocking AST for LDLM lock \a lock in a
@@ -2105,8 +2108,8 @@ restart:
 void ldlm_cancel_callback(struct ldlm_lock *lock)
 {
 	check_res_locked(lock->l_resource);
-	if (!(lock->l_flags & LDLM_FL_CANCEL)) {
-		lock->l_flags |= LDLM_FL_CANCEL;
+	if (!ldlm_is_cancel(lock)) {
+		ldlm_set_cancel(lock);
 		if (lock->l_blocking_ast) {
 			unlock_res_and_lock(lock);
 			lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
@@ -2116,7 +2119,7 @@ void ldlm_cancel_callback(struct ldlm_lock *lock)
 			LDLM_DEBUG(lock, "no blocking ast");
 		}
 	}
-	lock->l_flags |= LDLM_FL_BL_DONE;
+	ldlm_set_bl_done(lock);
 }
 
 /**
@@ -2153,7 +2156,7 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
 		LBUG();
 	}
 
-	if (lock->l_flags & LDLM_FL_WAITED)
+	if (ldlm_is_waited(lock))
 		ldlm_del_waiting_lock(lock);
 
 	/* Releases cancel callback. */
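Several hunks above change GOTO(out, ELDLM_OK) to GOTO(out, rc = ELDLM_OK). The distinction matters because GOTO() only evaluates its second argument for debug logging and then jumps; it does not assign the function's return variable. A sketch of the macro's assumed shape (illustrative, not the exact libcfs definition) shows why the explicit assignment is needed:

	#define GOTO(label, rc)						\
	do {								\
		long GOTO__ret = (long)(rc); /* logged, not returned */	\
		CDEBUG(D_TRACE, "goto %s: rc = %ld\n", #label, GOTO__ret); \
		goto label;						\
	} while (0)

Without the "rc =", rc would keep whatever value it held before the jump, so the enqueue paths above could return stale status instead of ELDLM_OK.
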
@@ -2161,7 +2164,7 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
 	ldlm_cancel_callback(lock);
 
 	/* Yes, second time, just in case it was added again while we were
 	 * running with no res lock in ldlm_cancel_callback */
-	if (lock->l_flags & LDLM_FL_WAITED)
+	if (ldlm_is_waited(lock))
 		ldlm_del_waiting_lock(lock);
 
 	ldlm_resource_unlink_lock(lock);
@@ -2233,7 +2236,7 @@ int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 		       "Cancel lock %p for export %p (loop %d), still have "
 		       "%d locks left on hash table.\n",
 		       lock, exp, ecl->ecl_loop,
-		       cfs_atomic_read(&hs->hs_count));
+		       atomic_read(&hs->hs_count));
 	}
 
 	return 0;
@@ -2468,12 +2471,12 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
 		       "remote: "LPX64" expref: %d pid: %u timeout: %lu "
 		       "lvb_type: %d\n",
 		       lock,
-		       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+		       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
 		       lock->l_readers, lock->l_writers,
 		       ldlm_lockname[lock->l_granted_mode],
 		       ldlm_lockname[lock->l_req_mode],
 		       lock->l_flags, nid, lock->l_remote_handle.cookie,
-		       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+		       exp ? atomic_read(&exp->exp_refcount) : -99,
 		       lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
 		va_end(args);
 		return;
@@ -2487,18 +2490,18 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
 			  "(req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote: "
 			  LPX64" expref: %d pid: %u timeout: %lu lvb_type: %d\n",
 			  ldlm_lock_to_ns_name(lock), lock,
-			  lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+			  lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
 			  lock->l_readers, lock->l_writers,
 			  ldlm_lockname[lock->l_granted_mode],
 			  ldlm_lockname[lock->l_req_mode],
 			  PLDLMRES(resource),
-			  cfs_atomic_read(&resource->lr_refcount),
+			  atomic_read(&resource->lr_refcount),
 			  ldlm_typename[resource->lr_type],
 			  lock->l_policy_data.l_extent.start,
 			  lock->l_policy_data.l_extent.end,
 			  lock->l_req_extent.start, lock->l_req_extent.end,
 			  lock->l_flags, nid, lock->l_remote_handle.cookie,
-			  exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+			  exp ? atomic_read(&exp->exp_refcount) : -99,
 			  lock->l_pid, lock->l_callback_timeout,
 			  lock->l_lvb_type);
 		break;
@@ -2510,18 +2513,18 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
 			  "["LPU64"->"LPU64"] flags: "LPX64" nid: %s "
 			  "remote: "LPX64" expref: %d pid: %u timeout: %lu\n",
 			  ldlm_lock_to_ns_name(lock), lock,
-			  lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+			  lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
 			  lock->l_readers, lock->l_writers,
 			  ldlm_lockname[lock->l_granted_mode],
 			  ldlm_lockname[lock->l_req_mode],
 			  PLDLMRES(resource),
-			  cfs_atomic_read(&resource->lr_refcount),
+			  atomic_read(&resource->lr_refcount),
 			  ldlm_typename[resource->lr_type],
 			  lock->l_policy_data.l_flock.pid,
 			  lock->l_policy_data.l_flock.start,
 			  lock->l_policy_data.l_flock.end,
 			  lock->l_flags, nid, lock->l_remote_handle.cookie,
-			  exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+			  exp ? atomic_read(&exp->exp_refcount) : -99,
 			  lock->l_pid, lock->l_callback_timeout);
 		break;
@@ -2533,16 +2536,16 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
 			  "pid: %u timeout: %lu lvb_type: %d\n",
 			  ldlm_lock_to_ns_name(lock), lock,
 			  lock->l_handle.h_cookie,
-			  cfs_atomic_read(&lock->l_refc),
+			  atomic_read(&lock->l_refc),
 			  lock->l_readers, lock->l_writers,
 			  ldlm_lockname[lock->l_granted_mode],
 			  ldlm_lockname[lock->l_req_mode],
 			  PLDLMRES(resource),
 			  lock->l_policy_data.l_inodebits.bits,
-			  cfs_atomic_read(&resource->lr_refcount),
+			  atomic_read(&resource->lr_refcount),
 			  ldlm_typename[resource->lr_type],
 			  lock->l_flags, nid, lock->l_remote_handle.cookie,
-			  exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+			  exp ? atomic_read(&exp->exp_refcount) : -99,
 			  lock->l_pid, lock->l_callback_timeout,
 			  lock->l_lvb_type);
 		break;
@@ -2555,15 +2558,15 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
 			  "timeout: %lu lvb_type: %d\n",
 			  ldlm_lock_to_ns_name(lock), lock,
 			  lock->l_handle.h_cookie,
-			  cfs_atomic_read(&lock->l_refc),
+			  atomic_read(&lock->l_refc),
 			  lock->l_readers, lock->l_writers,
 			  ldlm_lockname[lock->l_granted_mode],
 			  ldlm_lockname[lock->l_req_mode],
 			  PLDLMRES(resource),
-			  cfs_atomic_read(&resource->lr_refcount),
+			  atomic_read(&resource->lr_refcount),
 			  ldlm_typename[resource->lr_type],
 			  lock->l_flags, nid, lock->l_remote_handle.cookie,
-			  exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+			  exp ? atomic_read(&exp->exp_refcount) : -99,
 			  lock->l_pid, lock->l_callback_timeout,
 			  lock->l_lvb_type);
 		break;