This patch removes all references to the LASSERT_ATOMIC_POS macro.
Once every such use is gone, it becomes straightforward to switch
the remaining atomic_* API calls over to the refcount_* API.
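
As an illustration only (not part of this patch), a minimal sketch of
the replacement pattern and the possible refcount_* follow-up, using
the exp_rpc_count counter from the diff below; the refcount_t variant
assumes that field were later converted from atomic_t to refcount_t:

    /* Before: the macro hides the sanity check */
    LASSERT_ATOMIC_POS(&exp->exp_rpc_count); /* LASSERT_ATOMIC_GT(a, 0) */
    atomic_dec(&exp->exp_rpc_count);

    /* After this patch: the check is open-coded on the atomic_t */
    LASSERT(atomic_read(&exp->exp_rpc_count) > 0);
    atomic_dec(&exp->exp_rpc_count);

    /* Hypothetical later step: with a refcount_t field the underflow
     * check is performed by the refcount API itself.
     */
    refcount_dec(&exp->exp_rpc_count);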
Signed-off-by: Arshad Hussain <arshad.hussain@aeoncomputing.com>
Change-Id: I2051de3707106532259e51ec3e4c890c65836b1a
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/50881
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Timothy Day <timday@amazon.com>
Reviewed-by: Neil Brown <neilb@suse.de>
#endif /* LASSERT_ATOMIC_ENABLED */
#define LASSERT_ATOMIC_ZERO(a) LASSERT_ATOMIC_EQ(a, 0)
-#define LASSERT_ATOMIC_POS(a) LASSERT_ATOMIC_GT(a, 0)
#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)));
#define CFS_ALLOC_PTR_ARRAY(ptr, count) \
CDEBUG(D_NET, "conn[%p] (%d)--\n",
(conn), atomic_read(&(conn)->ibc_refcount));
#endif
- LASSERT_ATOMIC_POS(&(conn)->ibc_refcount);
+ LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);
if (atomic_dec_and_test(&(conn)->ibc_refcount)) {
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
list_add_tail(&(conn)->ibc_list,
#define class_export_rpc_inc(exp) \
({ \
- atomic_inc(&(exp)->exp_rpc_count); \
+ atomic_inc(&(exp)->exp_rpc_count); \
CDEBUG(D_INFO, "RPC GETting export %p : new rpc_count %d\n", \
- (exp), atomic_read(&(exp)->exp_rpc_count)); \
+ (exp), atomic_read(&(exp)->exp_rpc_count)); \
})
#define class_export_rpc_dec(exp) \
({ \
- LASSERT_ATOMIC_POS(&exp->exp_rpc_count); \
- atomic_dec(&(exp)->exp_rpc_count); \
+ LASSERT(atomic_read(&(exp)->exp_rpc_count) > 0); \
+ atomic_dec(&(exp)->exp_rpc_count); \
CDEBUG(D_INFO, "RPC PUTting export %p : new rpc_count %d\n", \
- (exp), atomic_read(&(exp)->exp_rpc_count)); \
+ (exp), atomic_read(&(exp)->exp_rpc_count)); \
})
#define class_export_lock_get(exp, lock) \
({ \
- atomic_inc(&(exp)->exp_locks_count); \
+ atomic_inc(&(exp)->exp_locks_count); \
__class_export_add_lock_ref(exp, lock); \
CDEBUG(D_INFO, "lock GETting export %p : new locks_count %d\n", \
- (exp), atomic_read(&(exp)->exp_locks_count)); \
+ (exp), atomic_read(&(exp)->exp_locks_count)); \
class_export_get(exp); \
})
#define class_export_lock_put(exp, lock) \
({ \
- LASSERT_ATOMIC_POS(&exp->exp_locks_count); \
- atomic_dec(&(exp)->exp_locks_count); \
+ LASSERT(atomic_read(&(exp)->exp_locks_count) > 0); \
+ atomic_dec(&(exp)->exp_locks_count); \
__class_export_del_lock_ref(exp, lock); \
CDEBUG(D_INFO, "lock PUTting export %p : new locks_count %d\n", \
- (exp), atomic_read(&(exp)->exp_locks_count)); \
+ (exp), atomic_read(&(exp)->exp_locks_count)); \
class_export_put(exp); \
})
#define class_export_cb_get(exp) \
({ \
- atomic_inc(&(exp)->exp_cb_count); \
+ atomic_inc(&(exp)->exp_cb_count); \
CDEBUG(D_INFO, "callback GETting export %p : new cb_count %d\n",\
- (exp), atomic_read(&(exp)->exp_cb_count)); \
+ (exp), atomic_read(&(exp)->exp_cb_count)); \
class_export_get(exp); \
})
#define class_export_cb_put(exp) \
({ \
- LASSERT_ATOMIC_POS(&exp->exp_cb_count); \
- atomic_dec(&(exp)->exp_cb_count); \
+ LASSERT(atomic_read(&(exp)->exp_cb_count) > 0); \
+ atomic_dec(&(exp)->exp_cb_count); \
CDEBUG(D_INFO, "callback PUTting export %p : new cb_count %d\n",\
- (exp), atomic_read(&(exp)->exp_cb_count)); \
+ (exp), atomic_read(&(exp)->exp_cb_count)); \
class_export_put(exp); \
})
static void target_request_copy_put(struct ptlrpc_request *req)
{
LASSERT(list_empty(&req->rq_replay_list));
- LASSERT_ATOMIC_POS(&req->rq_export->exp_replay_count);
+ LASSERT(atomic_read(&(req)->rq_export->exp_replay_count) > 0);
atomic_dec(&req->rq_export->exp_replay_count);
class_export_rpc_dec(req->rq_export);
exp->exp_req_replay_needed = 0;
spin_unlock(&exp->exp_lock);
- LASSERT_ATOMIC_POS(&obd->obd_req_replay_clients);
+ LASSERT(atomic_read(&(obd)->obd_req_replay_clients) >
+ 0);
atomic_dec(&obd->obd_req_replay_clients);
} else {
spin_unlock(&exp->exp_lock);
exp->exp_lock_replay_needed = 0;
spin_unlock(&exp->exp_lock);
- LASSERT_ATOMIC_POS(&obd->obd_lock_replay_clients);
+ LASSERT(atomic_read(&(obd)->obd_lock_replay_clients) >
+ 0);
atomic_dec(&obd->obd_lock_replay_clients);
} else {
spin_unlock(&exp->exp_lock);
spin_lock(&exp->exp_lock);
exp->exp_in_recovery = 0;
spin_unlock(&exp->exp_lock);
- LASSERT_ATOMIC_POS(&obd->obd_connected_clients);
+ LASSERT(atomic_read(&(obd)->obd_connected_clients) > 0);
atomic_dec(&obd->obd_connected_clients);
}
struct ptlrpc_sec *sec = ctx->cc_sec;
LASSERT(sec);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
+ LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
if (!atomic_dec_and_test(&ctx->cc_refcount))
return;
static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
{
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
+ LASSERT(atomic_read(&(sec)->ps_refcount) > 0);
if (sec->ps_policy->sp_cops->kill_sec) {
sec->ps_policy->sp_cops->kill_sec(sec);
void sptlrpc_sec_put(struct ptlrpc_sec *sec)
{
if (sec) {
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
+ LASSERT(atomic_read(&(sec)->ps_refcount) > 0);
if (atomic_dec_and_test(&sec->ps_refcount)) {
sptlrpc_gc_del_sec(sec);
{
struct ptlrpc_sec *old_sec;
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
+ LASSERT(atomic_read(&(sec)->ps_refcount) > 0);
write_lock(&imp->imp_sec_lock);
old_sec = imp->imp_sec;
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
LASSERT(req->rq_reqmsg == NULL);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
+ LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
policy = ctx->cc_sec->ps_policy;
rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
LASSERT(ctx);
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
+ LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
return;
LASSERT(ctx);
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
+ LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
if (req->rq_repbuf == NULL)
return;
if (ctx == NULL)
return;
- LASSERT_ATOMIC_POS(&ctx->sc_refcount);
+ LASSERT(atomic_read(&(ctx)->sc_refcount) > 0);
if (atomic_dec_and_test(&ctx->sc_refcount)) {
if (ctx->sc_policy->sp_sops->free_ctx)
ctx->sc_policy->sp_sops->free_ctx(ctx);
if (ctx == NULL)
return;
- LASSERT_ATOMIC_POS(&ctx->sc_refcount);
+ LASSERT(atomic_read(&(ctx)->sc_refcount) > 0);
if (ctx->sc_policy->sp_sops->invalidate_ctx)
ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
}