/* !__KERNEL__ */
#endif
+#define LASSERT_ATOMIC_ENABLED (1)
+
+#if LASSERT_ATOMIC_ENABLED
+
+/*
+ * Assertion helpers on cfs_atomic_t values.  All arguments are fully
+ * parenthesized on expansion so callers may pass arbitrary expressions
+ * (e.g. "x & MASK", ternaries) without precedence surprises, and @a is
+ * evaluated via cfs_atomic_read() only.
+ */
+
+/** assert value of @a is equal to @v */
+#define LASSERT_ATOMIC_EQ(a, v) \
+do { \
+ LASSERTF(cfs_atomic_read((a)) == (v), \
+ "value: %d\n", cfs_atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is not equal to @v */
+#define LASSERT_ATOMIC_NE(a, v) \
+do { \
+ LASSERTF(cfs_atomic_read((a)) != (v), \
+ "value: %d\n", cfs_atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is less than @v */
+#define LASSERT_ATOMIC_LT(a, v) \
+do { \
+ LASSERTF(cfs_atomic_read((a)) < (v), \
+ "value: %d\n", cfs_atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is less than or equal to @v */
+#define LASSERT_ATOMIC_LE(a, v) \
+do { \
+ LASSERTF(cfs_atomic_read((a)) <= (v), \
+ "value: %d\n", cfs_atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is greater than @v */
+#define LASSERT_ATOMIC_GT(a, v) \
+do { \
+ LASSERTF(cfs_atomic_read((a)) > (v), \
+ "value: %d\n", cfs_atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is greater than or equal to @v */
+#define LASSERT_ATOMIC_GE(a, v) \
+do { \
+ LASSERTF(cfs_atomic_read((a)) >= (v), \
+ "value: %d\n", cfs_atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is greater than @v1 and less than @v2 */
+#define LASSERT_ATOMIC_GT_LT(a, v1, v2) \
+do { \
+ int __v = cfs_atomic_read((a)); \
+ LASSERTF(__v > (v1) && __v < (v2), "value: %d\n", __v); \
+} while (0)
+
+/** assert value of @a is greater than @v1 and less than or equal to @v2 */
+#define LASSERT_ATOMIC_GT_LE(a, v1, v2) \
+do { \
+ int __v = cfs_atomic_read((a)); \
+ LASSERTF(__v > (v1) && __v <= (v2), "value: %d\n", __v); \
+} while (0)
+
+/** assert value of @a is greater than or equal to @v1 and less than @v2 */
+#define LASSERT_ATOMIC_GE_LT(a, v1, v2) \
+do { \
+ int __v = cfs_atomic_read((a)); \
+ LASSERTF(__v >= (v1) && __v < (v2), "value: %d\n", __v); \
+} while (0)
+
+/** assert value of @a is greater than or equal to @v1 and less than or equal to @v2 */
+#define LASSERT_ATOMIC_GE_LE(a, v1, v2) \
+do { \
+ int __v = cfs_atomic_read((a)); \
+ LASSERTF(__v >= (v1) && __v <= (v2), "value: %d\n", __v); \
+} while (0)
+
+#else /* !LASSERT_ATOMIC_ENABLED */
+
+#define LASSERT_ATOMIC_EQ(a, v) do {} while (0)
+#define LASSERT_ATOMIC_NE(a, v) do {} while (0)
+#define LASSERT_ATOMIC_LT(a, v) do {} while (0)
+#define LASSERT_ATOMIC_LE(a, v) do {} while (0)
+#define LASSERT_ATOMIC_GT(a, v) do {} while (0)
+#define LASSERT_ATOMIC_GE(a, v) do {} while (0)
+#define LASSERT_ATOMIC_GT_LT(a, v1, v2) do {} while (0)
+#define LASSERT_ATOMIC_GT_LE(a, v1, v2) do {} while (0)
+#define LASSERT_ATOMIC_GE_LT(a, v1, v2) do {} while (0)
+#define LASSERT_ATOMIC_GE_LE(a, v1, v2) do {} while (0)
+
+#endif /* LASSERT_ATOMIC_ENABLED */
+
+/** assert value of @a is exactly zero */
+#define LASSERT_ATOMIC_ZERO(a) LASSERT_ATOMIC_EQ(a, 0)
+/** assert value of @a is strictly positive */
+#define LASSERT_ATOMIC_POS(a) LASSERT_ATOMIC_GT(a, 0)
+
#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof (*(ptr)));
#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof (*(ptr)));
do { \
CDEBUG(D_NET, "conn[%p] (%d)++\n", \
(conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
- LASSERT(cfs_atomic_read(&(conn)->ibc_refcount) > 0); \
cfs_atomic_inc(&(conn)->ibc_refcount); \
} while (0)
\
CDEBUG(D_NET, "conn[%p] (%d)--\n", \
(conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
- LASSERT(cfs_atomic_read(&(conn)->ibc_refcount) > 0); \
+ LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) { \
cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
cfs_list_add_tail(&(conn)->ibc_list, \
CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
(peer), libcfs_nid2str((peer)->ibp_nid), \
cfs_atomic_read (&(peer)->ibp_refcount)); \
- LASSERT(cfs_atomic_read(&(peer)->ibp_refcount) > 0); \
cfs_atomic_inc(&(peer)->ibp_refcount); \
} while (0)
CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
(peer), libcfs_nid2str((peer)->ibp_nid), \
cfs_atomic_read (&(peer)->ibp_refcount)); \
- LASSERT(cfs_atomic_read(&(peer)->ibp_refcount) > 0); \
+ LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
if (cfs_atomic_dec_and_test(&(peer)->ibp_refcount)) \
kiblnd_destroy_peer(peer); \
} while (0)
static inline struct llog_commit_master
*lcm_get(struct llog_commit_master *lcm)
{
- LASSERT(cfs_atomic_read(&lcm->lcm_refcount) > 0);
+ /* Take one reference on @lcm and return it for call chaining. */
 cfs_atomic_inc(&lcm->lcm_refcount);
 return lcm;
}
static inline void
lcm_put(struct llog_commit_master *lcm)
{
- if (!cfs_atomic_dec_and_test(&lcm->lcm_refcount)) {
- return ;
- }
- OBD_FREE_PTR(lcm);
+ /* Drop one reference; free @lcm when the count reaches zero. */
+ LASSERT_ATOMIC_POS(&lcm->lcm_refcount);
+ if (cfs_atomic_dec_and_test(&lcm->lcm_refcount))
+ OBD_FREE_PTR(lcm);
}
struct llog_canceld_ctxt {
static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt)
{
- LASSERT(cfs_atomic_read(&ctxt->loc_refcount) > 0);
cfs_atomic_inc(&ctxt->loc_refcount);
CDEBUG(D_INFO, "GETting ctxt %p : new refcount %d\n", ctxt,
cfs_atomic_read(&ctxt->loc_refcount));
{
if (ctxt == NULL)
return;
- LASSERT(cfs_atomic_read(&ctxt->loc_refcount) > 0);
- LASSERT(cfs_atomic_read(&ctxt->loc_refcount) < 0x5a5a5a);
+ LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, 0x5a5a5a);
CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt,
cfs_atomic_read(&ctxt->loc_refcount) - 1);
__llog_ctxt_put(ctxt);
#define class_export_rpc_put(exp) \
({ \
- LASSERT(cfs_atomic_read(&exp->exp_rpc_count) > 0); \
+ LASSERT_ATOMIC_POS(&exp->exp_rpc_count); \
cfs_atomic_dec(&(exp)->exp_rpc_count); \
CDEBUG(D_INFO, "RPC PUTting export %p : new rpc_count %d\n", \
(exp), cfs_atomic_read(&(exp)->exp_rpc_count)); \
#define class_export_lock_put(exp, lock) \
({ \
- LASSERT(cfs_atomic_read(&exp->exp_locks_count) > 0); \
+ LASSERT_ATOMIC_POS(&exp->exp_locks_count); \
cfs_atomic_dec(&(exp)->exp_locks_count); \
__class_export_del_lock_ref(exp, lock); \
CDEBUG(D_INFO, "lock PUTting export %p : new locks_count %d\n", \
#define class_export_cb_put(exp) \
({ \
- LASSERT(cfs_atomic_read(&exp->exp_cb_count) > 0); \
+ LASSERT_ATOMIC_POS(&exp->exp_cb_count); \
cfs_atomic_dec(&(exp)->exp_cb_count); \
CDEBUG(D_INFO, "callback PUTting export %p : new cb_count %d\n",\
(exp), cfs_atomic_read(&(exp)->exp_cb_count)); \
if (exp->exp_imp_reverse != NULL)
client_destroy_import(exp->exp_imp_reverse);
- LASSERT(cfs_atomic_read(&exp->exp_locks_count) == 0);
- LASSERT(cfs_atomic_read(&exp->exp_rpc_count) == 0);
- LASSERT(cfs_atomic_read(&exp->exp_cb_count) == 0);
- LASSERT(cfs_atomic_read(&exp->exp_replay_count) == 0);
+ LASSERT_ATOMIC_ZERO(&exp->exp_locks_count);
+ LASSERT_ATOMIC_ZERO(&exp->exp_rpc_count);
+ LASSERT_ATOMIC_ZERO(&exp->exp_cb_count);
+ LASSERT_ATOMIC_ZERO(&exp->exp_replay_count);
}
/*
class_export_rpc_get(req->rq_export);
LASSERT(cfs_list_empty(&req->rq_list));
CFS_INIT_LIST_HEAD(&req->rq_replay_list);
+
/* increase refcount to keep request in queue */
- LASSERT(cfs_atomic_read(&req->rq_refcount));
cfs_atomic_inc(&req->rq_refcount);
/** let export know it has replays to be handled */
cfs_atomic_inc(&req->rq_export->exp_replay_count);
static void target_request_copy_put(struct ptlrpc_request *req)
{
LASSERT(cfs_list_empty(&req->rq_replay_list));
- LASSERT(cfs_atomic_read(&req->rq_export->exp_replay_count) > 0);
+ LASSERT_ATOMIC_POS(&req->rq_export->exp_replay_count);
+
cfs_atomic_dec(&req->rq_export->exp_replay_count);
class_export_rpc_put(req->rq_export);
/* ptlrpc_server_drop_request() assumes the request is active */
if (exp->exp_req_replay_needed) {
exp->exp_req_replay_needed = 0;
cfs_spin_unlock(&exp->exp_lock);
- LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients));
+
+ LASSERT_ATOMIC_POS(&obd->obd_req_replay_clients);
cfs_atomic_dec(&obd->obd_req_replay_clients);
} else {
cfs_spin_unlock(&exp->exp_lock);
if (exp->exp_lock_replay_needed) {
exp->exp_lock_replay_needed = 0;
cfs_spin_unlock(&exp->exp_lock);
- LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients));
+
+ LASSERT_ATOMIC_POS(&obd->obd_lock_replay_clients);
cfs_atomic_dec(&obd->obd_lock_replay_clients);
} else {
cfs_spin_unlock(&exp->exp_lock);
int ldlm_resource_putref(struct ldlm_resource *res)
{
struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- int ref = cfs_atomic_read(&res->lr_refcount);
cfs_hash_bd_t bd;
- CDEBUG(D_INFO, "putref res: %p count: %d\n", res, ref - 1);
- LASSERTF(ref > 0 && ref < LI_POISON, "%d", ref);
+ LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
+ CDEBUG(D_INFO, "putref res: %p count: %d\n",
+ res, cfs_atomic_read(&res->lr_refcount) - 1);
+
cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
__ldlm_resource_putref_final(&bd, res);
int ldlm_resource_putref_locked(struct ldlm_resource *res)
{
struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- int ref = cfs_atomic_read(&res->lr_refcount);
- CDEBUG(D_INFO, "putref res: %p count: %d\n", res, ref - 1);
- LASSERTF(ref > 0 && ref < LI_POISON, "%d", ref);
+ LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
+ CDEBUG(D_INFO, "putref res: %p count: %d\n",
+ res, cfs_atomic_read(&res->lr_refcount) - 1);
+
if (cfs_atomic_dec_and_test(&res->lr_refcount)) {
cfs_hash_bd_t bd;
struct obd_device *obd = exp->exp_obd;
ENTRY;
- LASSERT (cfs_atomic_read(&exp->exp_refcount) == 0);
+ LASSERT_ATOMIC_ZERO(&exp->exp_refcount);
CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp,
exp->exp_client_uuid.uuid, obd->obd_name);
void class_export_put(struct obd_export *exp)
{
LASSERT(exp != NULL);
+ LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, 0x5a5a5a);
CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
cfs_atomic_read(&exp->exp_refcount) - 1);
- LASSERT(cfs_atomic_read(&exp->exp_refcount) > 0);
- LASSERT(cfs_atomic_read(&exp->exp_refcount) < 0x5a5a5a);
if (cfs_atomic_dec_and_test(&exp->exp_refcount)) {
LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp,
imp->imp_obd->obd_name);
- LASSERT(cfs_atomic_read(&imp->imp_refcount) == 0);
+ LASSERT_ATOMIC_ZERO(&imp->imp_refcount);
ptlrpc_put_connection_superhack(imp->imp_connection);
struct obd_import *class_import_get(struct obd_import *import)
{
- LASSERT(cfs_atomic_read(&import->imp_refcount) >= 0);
- LASSERT(cfs_atomic_read(&import->imp_refcount) < 0x5a5a5a);
cfs_atomic_inc(&import->imp_refcount);
CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", import,
cfs_atomic_read(&import->imp_refcount),
{
ENTRY;
- LASSERT(cfs_atomic_read(&imp->imp_refcount) > 0);
- LASSERT(cfs_atomic_read(&imp->imp_refcount) < 0x5a5a5a);
LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
+ LASSERT_ATOMIC_GE_LT(&imp->imp_refcount, 0, 0x5a5a5a);
CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", imp,
cfs_atomic_read(&imp->imp_refcount) - 1,
struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ /* Take one reference on @ctx and return it for call chaining. */
 cfs_atomic_inc(&ctx->cc_refcount);
 return ctx;
}
struct ptlrpc_sec *sec = ctx->cc_sec;
LASSERT(sec);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount));
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
return;
{
struct ptlrpc_sec_policy *policy = sec->ps_policy;
- LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
- LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
+ LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
+ LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
LASSERT(policy->sp_cops->destroy_sec);
CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
{
- LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
if (sec->ps_policy->sp_cops->kill_sec) {
sec->ps_policy->sp_cops->kill_sec(sec);
struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
{
- if (sec) {
- LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+ if (sec)
cfs_atomic_inc(&sec->ps_refcount);
- }
return sec;
}
void sptlrpc_sec_put(struct ptlrpc_sec *sec)
{
if (sec) {
- LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
if (cfs_atomic_dec_and_test(&sec->ps_refcount)) {
- LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
-
sptlrpc_gc_del_sec(sec);
sec_cop_destroy_sec(sec);
}
{
struct ptlrpc_sec *old_sec;
- LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
cfs_spin_lock(&imp->imp_lock);
old_sec = imp->imp_sec;
int rc;
LASSERT(ctx);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
LASSERT(req->rq_reqmsg == NULL);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
policy = ctx->cc_sec->ps_policy;
rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
struct ptlrpc_sec_policy *policy;
LASSERT(ctx);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
return;
ENTRY;
LASSERT(ctx);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
ENTRY;
LASSERT(ctx);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
if (req->rq_repbuf == NULL)
return;
{
struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
- if (ctx == NULL)
- return;
-
- LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
- cfs_atomic_inc(&ctx->sc_refcount);
+ if (ctx != NULL)
+ cfs_atomic_inc(&ctx->sc_refcount);
}
void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
if (ctx == NULL)
return;
- LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
+ LASSERT_ATOMIC_POS(&ctx->sc_refcount);
if (cfs_atomic_dec_and_test(&ctx->sc_refcount)) {
if (ctx->sc_policy->sp_sops->free_ctx)
ctx->sc_policy->sp_sops->free_ctx(ctx);
if (ctx == NULL)
return;
- LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
+ LASSERT_ATOMIC_POS(&ctx->sc_refcount);
if (ctx->sc_policy->sp_sops->invalidate_ctx)
ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
}
static
void null_free_rs(struct ptlrpc_reply_state *rs)
{
- LASSERT(cfs_atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
+ LASSERT_ATOMIC_GT(&rs->rs_svc_ctx->sc_refcount, 1);
cfs_atomic_dec(&rs->rs_svc_ctx->sc_refcount);
if (!rs->rs_prealloc)