* - spin_unlock(x)
* - spin_unlock_bh(x)
* - spin_trylock(x)
- * - spin_is_locked(x)
+ * - assert_spin_locked(x)
*
* - spin_lock_irq(x)
* - spin_lock_irqsave(x, f)
* - spin_lock_bh(x)
* - spin_unlock_bh(x)
*
- * - spin_is_locked(x)
+ * - assert_spin_locked(x)
* - spin_lock_irqsave(x, f)
* - spin_unlock_irqrestore(x, f)
*
void spin_lock_bh(spinlock_t *lock);
void spin_unlock_bh(spinlock_t *lock);
-static inline int spin_is_locked(spinlock_t *l) { return 1; }
static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f) {}
static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f) {}
+#define assert_spin_locked(lock) do { (void)(lock); } while (0)
+
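For the userspace emulation layer this is more than a rename: the old stub returned 1 unconditionally, so LASSERT(spin_is_locked(...)) could never catch a missing lock, while on uniprocessor kernels built without CONFIG_DEBUG_SPINLOCK the in-kernel spin_is_locked() always returns 0, making the same LASSERT fire spuriously. The new stub simply evaluates its argument, matching the kernel primitive of the same name. A minimal sketch of the calling convention the stub supports (the struct and helper names are hypothetical, not part of this patch):

struct obd_counter {
	spinlock_t	oc_lock;
	int		oc_count;
};

/* The _locked variant requires the caller to hold oc_lock. With the
 * userspace stub above, assert_spin_locked() merely evaluates its
 * argument; in the kernel it checks the lock state where the
 * platform can track it. */
static void obd_counter_inc_locked(struct obd_counter *oc)
{
	assert_spin_locked(&oc->oc_lock);
	oc->oc_count++;
}

static void obd_counter_inc(struct obd_counter *oc)
{
	spin_lock(&oc->oc_lock);
	obd_counter_inc_locked(oc);
	spin_unlock(&oc->oc_lock);
}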
/*
* Semaphore
*
return rc;
}
-static int spin_is_locked(spinlock_t *lock)
+static int assert_spin_locked(spinlock_t *lock)
{
#if _WIN32_WINNT >= 0x502
/* KeTestSpinLock is only available on 2k3 server or later */
/** Check if resource is already locked, assert if not. */
static inline void check_res_locked(struct ldlm_resource *res)
{
- LASSERT(spin_is_locked(&res->lr_lock));
+ assert_spin_locked(&res->lr_lock);
}
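check_res_locked() is the guard used by ldlm helpers that may only run with the resource lock held. A hedged sketch of the intended pairing, assuming the usual lock_res()/unlock_res() wrappers around res->lr_lock; the counting helper itself is invented for illustration:

/* Illustration only: the _locked helper documents and enforces its
 * locking requirement via check_res_locked(). */
static int ldlm_count_granted_locked(struct ldlm_resource *res)
{
	struct ldlm_lock *lck;
	int n = 0;

	check_res_locked(res);		/* asserts res->lr_lock is held */
	cfs_list_for_each_entry(lck, &res->lr_granted, l_res_link)
		n++;
	return n;
}

static int ldlm_count_granted(struct ldlm_resource *res)
{
	int n;

	lock_res(res);
	n = ldlm_count_granted_locked(res);
	unlock_res(res);
	return n;
}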
struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock);
* \a nrq
* \param[in,out] nrq The request
*
- * \pre spin_is_locked(&svcpt->scp_req_lock)
+ * \pre assert_spin_locked(&svcpt->scp_req_lock)
*
* \see ptlrpc_nrs_req_stop_nolock()
*/
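Throughout the NRS code these Doxygen \pre/\post tags are mirrored by a matching assertion in the function body, so the documentation and the runtime check cannot silently drift apart. A condensed illustration of the convention (the function is hypothetical):

/**
 * Illustration only: stops bookkeeping for request \a nrq.
 *
 * \param[in,out] nrq The request
 *
 * \pre assert_spin_locked(&svcpt->scp_req_lock)
 */
static void nrs_example_req_stop(struct ptlrpc_service_part *svcpt,
				 struct ptlrpc_nrs_request *nrq)
{
	/* the body restates the documented precondition */
	assert_spin_locked(&svcpt->scp_req_lock);
	/* ... */
}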
int i;
int rc = 0;
- LASSERT(spin_is_locked(&lsm->lsm_lock));
+ assert_spin_locked(&lsm->lsm_lock);
#ifdef __KERNEL__
LASSERT(lsm->lsm_lock_owner == current_pid());
#endif
ENTRY;
head = cl_object_header(obj);
- LINVRNT(spin_is_locked(&head->coh_lock_guard));
+ assert_spin_locked(&head->coh_lock_guard);
CS_LOCK_INC(obj, lookup);
cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
int matched;
struct lu_object_header *top;
int result;
- LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+ assert_spin_locked(cl_object_attr_guard(obj));
ENTRY;
top = obj->co_lu.lo_header;
struct lu_object_header *top;
int result;
- LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+ assert_spin_locked(cl_object_attr_guard(obj));
ENTRY;
top = obj->co_lu.lo_header;
{
struct filter_export_data *fed = &exp->exp_filter_data;
- LASSERT(spin_is_locked(&fed->fed_lock));
+ assert_spin_locked(&fed->fed_lock);
if (--fmd->fmd_refcount == 0) {
/* XXX when we have persistent reservations and the handle
* is stored herein we need to drop it here. */
cfs_time_t now = cfs_time_current();
- LASSERT(spin_is_locked(&fed->fed_lock));
+ assert_spin_locked(&fed->fed_lock);
cfs_list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
if (lu_fid_eq(&fmd->fmd_fid, fid)) {
obd_size unstable;
ENTRY;
- LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
+ assert_spin_locked(&ofd->ofd_grant_lock);
spin_lock(&ofd->ofd_osfs_lock);
/* get available space from cached statfs data */
long dirty, dropped, grant_chunk;
ENTRY;
- LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
+ assert_spin_locked(&ofd->ofd_grant_lock);
if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
(OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
struct obd_device *obd = exp->exp_obd;
long grant_shrink;
- LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
+ assert_spin_locked(&ofd->ofd_grant_lock);
LASSERT(exp);
if (left_space >= ofd->ofd_tot_granted_clients *
OFD_GRANT_SHRINK_LIMIT(exp))
ENTRY;
- LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
+ assert_spin_locked(&ofd->ofd_grant_lock);
if ((oa->o_valid & OBD_MD_FLFLAGS) &&
(oa->o_flags & OBD_FL_RECOV_RESEND)) {
static void osc_consume_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
+ assert_spin_locked(&cli->cl_loi_list_lock.lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_inc(&obd_dirty_pages);
cli->cl_dirty += PAGE_CACHE_SIZE;
{
ENTRY;
- LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
+ assert_spin_locked(&cli->cl_loi_list_lock.lock);
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
EXIT;
return;
static inline int osc_object_is_locked(struct osc_object *obj)
{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
return spin_is_locked(&obj->oo_lock);
+#else
+ /*
+ * Returning true unconditionally is not ideal, but since this
+ * function is only used for assertions and sanity checks, it
+ * is acceptable.
+ */
+ return 1;
+#endif
}
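osc_object_is_locked() is the one place where spin_is_locked() deliberately survives: it must return a boolean for use inside invariant expressions, which assert_spin_locked(), a statement rather than a predicate, cannot provide. The #else branch accepts a weaker "always true" answer on uniprocessor non-debug builds, where spin_is_locked() would otherwise be constantly false and every invariant would trip. A sketch of the kind of caller this shape serves (illustrative only):

/* Illustration only: invariant checks need an expression, so the
 * helper returns int rather than asserting internally. */
static void osc_example_invariant(struct osc_object *obj)
{
	LINVRNT(osc_object_is_locked(obj));
}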
/*
*/
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
- LASSERT(spin_is_locked(&request->rq_import->imp_lock));
+ assert_spin_locked(&request->rq_import->imp_lock);
(void)__ptlrpc_req_finished(request, 1);
}
EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
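The _with_imp_lock suffix follows the usual convention in this code base: the caller acquires imp_lock, manipulates per-import state, and the callee merely asserts that the lock is held. A hedged sketch of the expected call pattern; the surrounding logic is invented for illustration:

/* Illustration only: the caller owns imp_lock across the call. */
static void example_finish_replay_req(struct obd_import *imp,
				      struct ptlrpc_request *req)
{
	spin_lock(&imp->imp_lock);
	/* ... unlink req from per-import lists while locked ... */
	ptlrpc_req_finished_with_imp_lock(req);
	spin_unlock(&imp->imp_lock);
}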
ENTRY;
LASSERT(imp != NULL);
- LASSERT(spin_is_locked(&imp->imp_lock));
-
+ assert_spin_locked(&imp->imp_lock);
if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
imp->imp_generation == imp->imp_last_generation_checked) {
{
cfs_list_t *tmp;
- LASSERT(spin_is_locked(&imp->imp_lock));
+ assert_spin_locked(&imp->imp_lock);
if (req->rq_transno == 0) {
DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
static
void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
{
- LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
+ assert_spin_locked(&ctx->cc_sec->ps_lock);
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
__u32 idx = gmsg->gum_mechidx;
LASSERT(idx < MECH_MAX);
- LASSERT(spin_is_locked(&upcall_locks[idx]));
+ assert_spin_locked(&upcall_locks[idx]);
if (cfs_list_empty(&gmsg->gum_list))
return;
static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
{
ENTRY;
- LASSERT(spin_is_locked(&imp->imp_lock));
+ assert_spin_locked(&imp->imp_lock);
CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
imp->imp_invalid = 1;
{
LASSERT(policy != NULL);
LASSERT(info != NULL);
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
* \param[in] opc the opcode
* \param[in,out] arg used for passing parameters and information
*
- * \pre spin_is_locked(&policy->pol_nrs->->nrs_lock)
- * \post spin_is_locked(&policy->pol_nrs->->nrs_lock)
+ * \pre assert_spin_locked(&policy->pol_nrs->nrs_lock)
+ * \post assert_spin_locked(&policy->pol_nrs->nrs_lock)
*
* \retval 0 operation carried out successfully
* \retval -ve error
int nrs_crrn_ctl(struct ptlrpc_nrs_policy *policy, enum ptlrpc_nrs_ctl opc,
void *arg)
{
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
switch((enum nrs_ctl_crr)opc) {
default:
* \param[in] opc the opcode
* \param[in,out] arg used for passing parameters and information
*
- * \pre spin_is_locked(&policy->pol_nrs->->nrs_lock)
- * \post spin_is_locked(&policy->pol_nrs->->nrs_lock)
+ * \pre assert_spin_locked(&policy->pol_nrs->nrs_lock)
+ * \post assert_spin_locked(&policy->pol_nrs->nrs_lock)
*
* \retval 0 operation carried out successfully
* \retval -ve error
int nrs_orr_ctl(struct ptlrpc_nrs_policy *policy, enum ptlrpc_nrs_ctl opc,
void *arg)
{
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
switch((enum nrs_ctl_orr)opc) {
default:
{
struct nrs_tbf_rule *rule;
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
rule = nrs_tbf_rule_find(head, change->tc_name);
if (rule == NULL)
{
struct nrs_tbf_rule *rule;
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
if (strcmp(stop->tc_name, NRS_TBF_DEFAULT_RULE) == 0)
return -EPERM;
{
int rc;
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
switch (cmd->tc_cmd) {
case NRS_CTL_TBF_START_RULE:
* \param[in] opc the opcode
* \param[in,out] arg used for passing parameters and information
*
- * \pre spin_is_locked(&policy->pol_nrs->->nrs_lock)
- * \post spin_is_locked(&policy->pol_nrs->->nrs_lock)
+ * \pre assert_spin_locked(&policy->pol_nrs->nrs_lock)
+ * \post assert_spin_locked(&policy->pol_nrs->nrs_lock)
*
* \retval 0 operation carried out successfully
* \retval -ve error
int rc = 0;
ENTRY;
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
switch ((enum nrs_ctl_tbf)opc) {
default:
struct nrs_tbf_client *cli;
cfs_binheap_node_t *node;
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_svcpt->scp_req_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_svcpt->scp_req_lock);
if (!peek && policy->pol_nrs->nrs_throttling)
return NULL;
struct nrs_tbf_client *cli;
int rc = 0;
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_svcpt->scp_req_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_svcpt->scp_req_lock);
cli = container_of(nrs_request_resource(nrq),
struct nrs_tbf_client, tc_res);
struct nrs_tbf_head *head;
struct nrs_tbf_client *cli;
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_svcpt->scp_req_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_svcpt->scp_req_lock);
cli = container_of(nrs_request_resource(nrq),
struct nrs_tbf_client, tc_res);
struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request,
rq_nrq);
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_svcpt->scp_req_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_svcpt->scp_req_lock);
CDEBUG(D_RPCTRACE, "NRS stop %s request from %s, seq: "LPU64"\n",
policy->pol_desc->pd_name, libcfs_id2str(req->rq_peer),
void ptlrpc_pinger_commit_expected(struct obd_import *imp)
{
ptlrpc_update_next_ping(imp, 1);
- LASSERT(spin_is_locked(&imp->imp_lock));
+ assert_spin_locked(&imp->imp_lock);
/*
* Avoid reading stale imp_connect_data. When not sure if pings are
* expected or not on next connection, we assume they are not and force
static inline void enc_pools_wakeup(void)
{
- LASSERT(spin_is_locked(&page_pools.epp_lock));
+ assert_spin_locked(&page_pools.epp_lock);
if (unlikely(page_pools.epp_waitqlen)) {
LASSERT(waitqueue_active(&page_pools.epp_waitq));
{
ENTRY;
- LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
- LASSERT(spin_is_locked(&rs->rs_lock));
+ assert_spin_locked(&rs->rs_svcpt->scp_rep_lock);
+ assert_spin_locked(&rs->rs_lock);
LASSERT (rs->rs_difficult);
rs->rs_scheduled_ever = 1; /* flag any notification attempt */