while (count++ < LRU_CAPA_DELETE_COUNT) {
ocapa = cfs_list_entry(node, struct obd_capa, c_list);
node = node->next;
- if (cfs_atomic_read(&ocapa->c_refc))
+ if (atomic_read(&ocapa->c_refc))
continue;
DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
{
ENTRY;
init_waitqueue_head(&anchor->csi_waitq);
- cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
- cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
+ atomic_set(&anchor->csi_sync_nr, nrpages);
+ atomic_set(&anchor->csi_barrier, nrpages > 0);
anchor->csi_sync_rc = 0;
EXIT;
}
LASSERT(timeout >= 0);
rc = l_wait_event(anchor->csi_waitq,
- cfs_atomic_read(&anchor->csi_sync_nr) == 0,
+ atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
if (rc < 0) {
CERROR("SYNC IO failed with error: %d, try to cancel "
"%d remaining pages\n",
- rc, cfs_atomic_read(&anchor->csi_sync_nr));
+ rc, atomic_read(&anchor->csi_sync_nr));
(void)cl_io_cancel(env, io, queue);
lwi = (struct l_wait_info) { 0 };
(void)l_wait_event(anchor->csi_waitq,
- cfs_atomic_read(&anchor->csi_sync_nr) == 0,
+ atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
} else {
rc = anchor->csi_sync_rc;
}
- LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
+ LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
cl_page_list_assume(env, io, queue);
/* wait until cl_sync_io_note() has done wakeup */
- while (unlikely(cfs_atomic_read(&anchor->csi_barrier) != 0)) {
+ while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
#ifdef __KERNEL__
cpu_relax();
#endif
* ->{prepare,commit}_write(). Completion is used to signal the end of
* IO.
*/
- LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
- if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
+ LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
+ if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
wake_up_all(&anchor->csi_waitq);
/* it's safe to nuke or reuse anchor now */
- cfs_atomic_set(&anchor->csi_barrier, 0);
+ atomic_set(&anchor->csi_barrier, 0);
}
EXIT;
}
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CS_LOCK_INC(o, item) \
- cfs_atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+ atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
#define CS_LOCK_DEC(o, item) \
- cfs_atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+ atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
#define CS_LOCKSTATE_INC(o, state) \
- cfs_atomic_inc(&cl_object_site(o)->cs_locks_state[state])
+ atomic_inc(&cl_object_site(o)->cs_locks_state[state])
#define CS_LOCKSTATE_DEC(o, state) \
- cfs_atomic_dec(&cl_object_site(o)->cs_locks_state[state])
+ atomic_dec(&cl_object_site(o)->cs_locks_state[state])
#else
#define CS_LOCK_INC(o, item)
#define CS_LOCK_DEC(o, item)
const struct cl_lock *lock)
{
return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
- cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
+ atomic_read(&lock->cll_ref) >= lock->cll_holds &&
lock->cll_holds >= lock->cll_users &&
lock->cll_holds >= 0 &&
lock->cll_users >= 0 &&
{
int result;
- result = cfs_atomic_read(&lock->cll_ref) > 0 &&
+ result = atomic_read(&lock->cll_ref) > 0 &&
cl_lock_invariant_trusted(env, lock);
if (!result && env != NULL)
CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
"(%p/%d/%d) at %s():%d\n",
- prefix, lock, cfs_atomic_read(&lock->cll_ref),
+ prefix, lock, atomic_read(&lock->cll_ref),
lock->cll_guarder, lock->cll_depth,
lock->cll_state, lock->cll_error, lock->cll_holds,
lock->cll_users, lock->cll_flags,
LINVRNT(obj != NULL);
CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
- cfs_atomic_read(&lock->cll_ref), lock, RETIP);
+ atomic_read(&lock->cll_ref), lock, RETIP);
- if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
+ if (atomic_dec_and_test(&lock->cll_ref)) {
if (lock->cll_state == CLS_FREEING) {
LASSERT(cfs_list_empty(&lock->cll_linkage));
cl_lock_free(env, lock);
{
LINVRNT(cl_lock_invariant(NULL, lock));
CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
- cfs_atomic_read(&lock->cll_ref), lock, RETIP);
- cfs_atomic_inc(&lock->cll_ref);
+ atomic_read(&lock->cll_ref), lock, RETIP);
+ atomic_inc(&lock->cll_ref);
}
EXPORT_SYMBOL(cl_lock_get);
void cl_lock_get_trust(struct cl_lock *lock)
{
CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
- cfs_atomic_read(&lock->cll_ref), lock, RETIP);
- if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
+ atomic_read(&lock->cll_ref), lock, RETIP);
+ if (atomic_inc_return(&lock->cll_ref) == 1)
CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);
ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
if (lock != NULL) {
- cfs_atomic_set(&lock->cll_ref, 1);
+ atomic_set(&lock->cll_ref, 1);
lock->cll_descr = *descr;
lock->cll_state = CLS_NEW;
cl_object_get(obj);
{
const struct cl_lock_slice *slice;
(*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
- lock, cfs_atomic_read(&lock->cll_ref),
+ lock, atomic_read(&lock->cll_ref),
lock->cll_state, lock->cll_error, lock->cll_holds,
lock->cll_users, lock->cll_flags);
cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
cs->cs_name = name;
for (i = 0; i < CS_NR; i++)
- cfs_atomic_set(&cs->cs_stats[i], 0);
+ atomic_set(&cs->cs_stats[i], 0);
}
int cache_stats_print(const struct cache_stats *cs,
nob += snprintf(page + nob, count - nob, "%5.5s:", cs->cs_name);
for (i = 0; i < CS_NR; i++)
nob += snprintf(page + nob, count - nob, "%8u",
- cfs_atomic_read(&cs->cs_stats[i]));
+ atomic_read(&cs->cs_stats[i]));
return nob;
}
cache_stats_init(&s->cs_pages, "pages");
cache_stats_init(&s->cs_locks, "locks");
for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
- cfs_atomic_set(&s->cs_pages_state[0], 0);
+ atomic_set(&s->cs_pages_state[i], 0);
for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
- cfs_atomic_set(&s->cs_locks_state[i], 0);
+ atomic_set(&s->cs_locks_state[i], 0);
cl_env_percpu_refill();
}
return result;
static struct cache_stats cl_env_stats = {
.cs_name = "envs",
- .cs_stats = { CFS_ATOMIC_INIT(0), }
+ .cs_stats = { ATOMIC_INIT(0), }
};
/**
for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
nob += snprintf(page + nob, count - nob, "%s: %u ",
pstate[i],
- cfs_atomic_read(&site->cs_pages_state[i]));
+ atomic_read(&site->cs_pages_state[i]));
nob += snprintf(page + nob, count - nob, "]\n");
nob += cache_stats_print(&site->cs_locks, page + nob, count - nob, 0);
nob += snprintf(page + nob, count - nob, " [");
for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
nob += snprintf(page + nob, count - nob, "%s: %u ",
lstate[i],
- cfs_atomic_read(&site->cs_locks_state[i]));
+ atomic_read(&site->cs_locks_state[i]));
nob += snprintf(page + nob, count - nob, "]\n");
nob += cache_stats_print(&cl_env_stats, page + nob, count - nob, 0);
nob += snprintf(page + nob, count - nob, "\n");
};
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
-#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.cs_stats[CS_##counter])
+#define CL_ENV_INC(counter) atomic_inc(&cl_env_stats.cs_stats[CS_##counter])
#define CL_ENV_DEC(counter) do { \
- LASSERT(cfs_atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
- cfs_atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
+ LASSERT(atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
+ atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
} while (0)
#else
#define CL_ENV_INC(counter)
/* Disable page statistic by default due to huge performance penalty. */
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CS_PAGE_INC(o, item) \
- cfs_atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
+ atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGE_DEC(o, item) \
- cfs_atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
+ atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGESTATE_INC(o, state) \
- cfs_atomic_inc(&cl_object_site(o)->cs_pages_state[state])
+ atomic_inc(&cl_object_site(o)->cs_pages_state[state])
#define CS_PAGESTATE_DEC(o, state) \
- cfs_atomic_dec(&cl_object_site(o)->cs_pages_state[state])
+ atomic_dec(&cl_object_site(o)->cs_pages_state[state])
#else
#define CS_PAGE_INC(o, item)
#define CS_PAGE_DEC(o, item)
*/
static void cl_page_get_trust(struct cl_page *page)
{
- LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
- cfs_atomic_inc(&page->cp_ref);
+ LASSERT(atomic_read(&page->cp_ref) > 0);
+ atomic_inc(&page->cp_ref);
}
/**
__GFP_IO);
if (page != NULL) {
int result = 0;
- cfs_atomic_set(&page->cp_ref, 1);
+ atomic_set(&page->cp_ref, 1);
page->cp_obj = o;
cl_object_get(o);
lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
{
ENTRY;
CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
- cfs_atomic_read(&page->cp_ref));
+ atomic_read(&page->cp_ref));
- if (cfs_atomic_dec_and_test(&page->cp_ref)) {
+ if (atomic_dec_and_test(&page->cp_ref)) {
LASSERT(page->cp_state == CPS_FREEING);
- LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
+ LASSERT(atomic_read(&page->cp_ref) == 0);
PASSERT(env, page, page->cp_owner == NULL);
PASSERT(env, page, cfs_list_empty(&page->cp_batch));
/*
{
(*printer)(env, cookie,
"page@%p[%d %p %d %d %d %p %p %#x]\n",
- pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
+ pg, atomic_read(&pg->cp_ref), pg->cp_obj,
pg->cp_state, pg->cp_error, pg->cp_type,
pg->cp_owner, pg->cp_req, pg->cp_flags);
}
#ifndef __KERNEL__
/* liblustre workaround */
-cfs_atomic_t libcfs_kmemory = {0};
+atomic_t libcfs_kmemory = {0};
#endif
struct obd_device *obd_devs[MAX_OBD_DEVICES];
EXPORT_SYMBOL(obd_dump_on_eviction);
unsigned int obd_max_dirty_pages = 256;
EXPORT_SYMBOL(obd_max_dirty_pages);
-cfs_atomic_t obd_unstable_pages;
+atomic_t obd_unstable_pages;
EXPORT_SYMBOL(obd_unstable_pages);
-cfs_atomic_t obd_dirty_pages;
+atomic_t obd_dirty_pages;
EXPORT_SYMBOL(obd_dirty_pages);
unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
EXPORT_SYMBOL(obd_timeout);
int at_extra = 30;
EXPORT_SYMBOL(at_extra);
-cfs_atomic_t obd_dirty_transit_pages;
+atomic_t obd_dirty_transit_pages;
EXPORT_SYMBOL(obd_dirty_transit_pages);
char obd_jobid_var[JOBSTATS_JOBID_VAR_MAX_LEN + 1] = JOBSTATS_DISABLE;
obd_memory_sum(),
obd_pages_sum() << PAGE_CACHE_SHIFT,
obd_pages_sum(),
- cfs_atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
return 1;
}
return 0;
snprintf(str, len - sizeof(*data), "%3d %s %s %s %s %d",
(int)index, status, obd->obd_type->typ_name,
obd->obd_name, obd->obd_uuid.uuid,
- cfs_atomic_read(&obd->obd_refcount));
+ atomic_read(&obd->obd_refcount));
err = obd_ioctl_popdata((void *)arg, data, len);
GOTO(out, err = 0);
" layout_gen %u, pool ["LOV_POOLNAMEF"]\n", lsm,
POSTID(&lsm->lsm_oi), lsm->lsm_maxbytes, lsm->lsm_magic,
lsm->lsm_stripe_size, lsm->lsm_stripe_count,
- cfs_atomic_read(&lsm->lsm_refc), lsm->lsm_layout_gen,
+ atomic_read(&lsm->lsm_refc), lsm->lsm_layout_gen,
lsm->lsm_pool_name);
}
EXPORT_SYMBOL(dump_lsm);
LCONSOLE(D_CONFIG, "%3d %s %s %s %s %d\n",
i, status, obd->obd_type->typ_name,
obd->obd_name, obd->obd_uuid.uuid,
- cfs_atomic_read(&obd->obd_refcount));
+ atomic_read(&obd->obd_refcount));
}
read_unlock(&obd_dev_lock);
return;
struct obd_export *class_export_get(struct obd_export *exp)
{
- cfs_atomic_inc(&exp->exp_refcount);
+ atomic_inc(&exp->exp_refcount);
CDEBUG(D_INFO, "GETting export %p : new refcount %d\n", exp,
- cfs_atomic_read(&exp->exp_refcount));
+ atomic_read(&exp->exp_refcount));
return exp;
}
EXPORT_SYMBOL(class_export_get);
LASSERT(exp != NULL);
LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON);
CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
- cfs_atomic_read(&exp->exp_refcount) - 1);
+ atomic_read(&exp->exp_refcount) - 1);
- if (cfs_atomic_dec_and_test(&exp->exp_refcount)) {
+ if (atomic_dec_and_test(&exp->exp_refcount)) {
LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
CDEBUG(D_IOCTL, "final put %p/%s\n",
exp, exp->exp_client_uuid.uuid);
export->exp_conn_cnt = 0;
export->exp_lock_hash = NULL;
export->exp_flock_hash = NULL;
- cfs_atomic_set(&export->exp_refcount, 2);
- cfs_atomic_set(&export->exp_rpc_count, 0);
- cfs_atomic_set(&export->exp_cb_count, 0);
- cfs_atomic_set(&export->exp_locks_count, 0);
+ atomic_set(&export->exp_refcount, 2);
+ atomic_set(&export->exp_rpc_count, 0);
+ atomic_set(&export->exp_cb_count, 0);
+ atomic_set(&export->exp_locks_count, 0);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
CFS_INIT_LIST_HEAD(&export->exp_locks_list);
spin_lock_init(&export->exp_locks_list_guard);
#endif
- cfs_atomic_set(&export->exp_replay_count, 0);
+ atomic_set(&export->exp_replay_count, 0);
export->exp_obd = obd;
CFS_INIT_LIST_HEAD(&export->exp_outstanding_replies);
spin_lock_init(&export->exp_uncommitted_replies_lock);
struct obd_import *class_import_get(struct obd_import *import)
{
- cfs_atomic_inc(&import->imp_refcount);
+ atomic_inc(&import->imp_refcount);
CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", import,
- cfs_atomic_read(&import->imp_refcount),
+ atomic_read(&import->imp_refcount),
import->imp_obd->obd_name);
return import;
}
LASSERT_ATOMIC_GT_LT(&imp->imp_refcount, 0, LI_POISON);
CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", imp,
- cfs_atomic_read(&imp->imp_refcount) - 1,
+ atomic_read(&imp->imp_refcount) - 1,
imp->imp_obd->obd_name);
- if (cfs_atomic_dec_and_test(&imp->imp_refcount)) {
+ if (atomic_dec_and_test(&imp->imp_refcount)) {
CDEBUG(D_INFO, "final put import %p\n", imp);
obd_zombie_import_add(imp);
}
mutex_init(&imp->imp_sec_mutex);
init_waitqueue_head(&imp->imp_recovery_waitq);
- cfs_atomic_set(&imp->imp_refcount, 2);
- cfs_atomic_set(&imp->imp_unregistering, 0);
- cfs_atomic_set(&imp->imp_inflight, 0);
- cfs_atomic_set(&imp->imp_replay_inflight, 0);
- cfs_atomic_set(&imp->imp_inval_count, 0);
+ atomic_set(&imp->imp_refcount, 2);
+ atomic_set(&imp->imp_unregistering, 0);
+ atomic_set(&imp->imp_inflight, 0);
+ atomic_set(&imp->imp_replay_inflight, 0);
+ atomic_set(&imp->imp_inval_count, 0);
CFS_INIT_LIST_HEAD(&imp->imp_conn_list);
CFS_INIT_LIST_HEAD(&imp->imp_handle.h_link);
class_handle_hash(&imp->imp_handle, &import_handle_ops);
exp->exp_in_recovery = 0;
spin_unlock(&exp->exp_lock);
LASSERT_ATOMIC_POS(&obd->obd_connected_clients);
- cfs_atomic_dec(&obd->obd_connected_clients);
+ atomic_dec(&obd->obd_connected_clients);
}
/* if called during recovery then should update
spin_lock(&exp->exp_lock);
exp->exp_req_replay_needed = 0;
spin_unlock(&exp->exp_lock);
- LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients));
- cfs_atomic_dec(&obd->obd_req_replay_clients);
+ LASSERT(atomic_read(&obd->obd_req_replay_clients));
+ atomic_dec(&obd->obd_req_replay_clients);
}
/** Cleanup lock replay data */
if (exp->exp_lock_replay_needed) {
spin_lock(&exp->exp_lock);
exp->exp_lock_replay_needed = 0;
spin_unlock(&exp->exp_lock);
- LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients));
- cfs_atomic_dec(&obd->obd_lock_replay_clients);
+ LASSERT(atomic_read(&obd->obd_lock_replay_clients));
+ atomic_dec(&obd->obd_lock_replay_clients);
}
}
CDEBUG(D_HA, "%s: %s %p %s %s %d (%d %d %d) %d %d %d %d: %p %s "LPU64"\n",
exp->exp_obd->obd_name, status, exp, exp->exp_client_uuid.uuid,
- obd_export_nid2str(exp), cfs_atomic_read(&exp->exp_refcount),
- cfs_atomic_read(&exp->exp_rpc_count),
- cfs_atomic_read(&exp->exp_cb_count),
- cfs_atomic_read(&exp->exp_locks_count),
+ obd_export_nid2str(exp), atomic_read(&exp->exp_refcount),
+ atomic_read(&exp->exp_rpc_count),
+ atomic_read(&exp->exp_cb_count),
+ atomic_read(&exp->exp_locks_count),
exp->exp_disconnected, exp->exp_delayed, exp->exp_failed,
nreplies, first_reply, nreplies > 3 ? "..." : "",
exp->exp_last_committed);
"more than %d seconds. "
"The obd refcount = %d. Is it stuck?\n",
obd->obd_name, waited,
- cfs_atomic_read(&obd->obd_refcount));
+ atomic_read(&obd->obd_refcount));
dump_exports(obd, 1);
}
waited *= 2;
#else /* ! KERNEL */
-static cfs_atomic_t zombie_recur = CFS_ATOMIC_INIT(0);
+static atomic_t zombie_recur = ATOMIC_INIT(0);
static void *obd_zombie_impexp_work_cb;
static void *obd_zombie_impexp_idle_cb;
{
int rc = 0;
- if (cfs_atomic_inc_return(&zombie_recur) == 1) {
+ if (atomic_inc_return(&zombie_recur) == 1) {
obd_zombie_impexp_cull();
rc = 1;
}
- cfs_atomic_dec(&zombie_recur);
+ atomic_dec(&zombie_recur);
return rc;
}
#include <obd_support.h>
#define lustre_get_group_info(group_info) do { \
- cfs_atomic_inc(&(group_info)->usage); \
+ atomic_inc(&(group_info)->usage); \
} while (0)
#define lustre_put_group_info(group_info) do { \
- if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
+ if (atomic_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
return seq_printf(p, "%3d %s %s %s %s %d\n",
(int)index, status, obd->obd_type->typ_name,
obd->obd_name, obd->obd_uuid.uuid,
- cfs_atomic_read(&obd->obd_refcount));
+ atomic_read(&obd->obd_refcount));
}
struct seq_operations obd_device_list_sops = {
init_rwsem(&loghandle->lgh_lock);
spin_lock_init(&loghandle->lgh_hdr_lock);
CFS_INIT_LIST_HEAD(&loghandle->u.phd.phd_entry);
- cfs_atomic_set(&loghandle->lgh_refcount, 1);
+ atomic_set(&loghandle->lgh_refcount, 1);
return loghandle;
}
void llog_handle_get(struct llog_handle *loghandle)
{
- cfs_atomic_inc(&loghandle->lgh_refcount);
+ atomic_inc(&loghandle->lgh_refcount);
}
void llog_handle_put(struct llog_handle *loghandle)
{
- LASSERT(cfs_atomic_read(&loghandle->lgh_refcount) > 0);
- if (cfs_atomic_dec_and_test(&loghandle->lgh_refcount))
+ LASSERT(atomic_read(&loghandle->lgh_refcount) > 0);
+ if (atomic_dec_and_test(&loghandle->lgh_refcount))
llog_free_handle(loghandle);
}
return NULL;
ctxt->loc_obd = obd;
- cfs_atomic_set(&ctxt->loc_refcount, 1);
+ atomic_set(&ctxt->loc_refcount, 1);
return ctxt;
}
int rc = 0;
spin_lock(&olg->olg_lock);
- if (!cfs_atomic_dec_and_test(&ctxt->loc_refcount)) {
+ if (!atomic_dec_and_test(&ctxt->loc_refcount)) {
spin_unlock(&olg->olg_lock);
return rc;
}
/*
* Banlance the ctxt get when calling llog_cleanup()
*/
- LASSERT(cfs_atomic_read(&ctxt->loc_refcount) < LI_POISON);
- LASSERT(cfs_atomic_read(&ctxt->loc_refcount) > 1);
+ LASSERT(atomic_read(&ctxt->loc_refcount) < LI_POISON);
+ LASSERT(atomic_read(&ctxt->loc_refcount) > 1);
llog_ctxt_put(ctxt);
/*
cfs_list_for_each_entry(ls, &ls_list_head, ls_linkage) {
if (ls->ls_osd == dev) {
- cfs_atomic_inc(&ls->ls_refcount);
+ atomic_inc(&ls->ls_refcount);
ret = ls;
break;
}
if (ls == NULL)
GOTO(out_ls, ls = ERR_PTR(-ENOMEM));
- cfs_atomic_set(&ls->ls_refcount, 1);
+ atomic_set(&ls->ls_refcount, 1);
CFS_INIT_LIST_HEAD(&ls->ls_los_list);
mutex_init(&ls->ls_los_mutex);
void ls_device_put(const struct lu_env *env, struct ls_device *ls)
{
LASSERT(env);
- if (!cfs_atomic_dec_and_test(&ls->ls_refcount))
+ if (!atomic_dec_and_test(&ls->ls_refcount))
return;
mutex_lock(&ls_list_mutex);
- if (cfs_atomic_read(&ls->ls_refcount) == 0) {
+ if (atomic_read(&ls->ls_refcount) == 0) {
LASSERT(cfs_list_empty(&ls->ls_los_list));
cfs_list_del(&ls->ls_linkage);
lu_site_purge(env, ls->ls_top_dev.dd_lu_dev.ld_site, ~0);
cfs_list_for_each_entry(los, &ls->ls_los_list, los_list) {
if (los->los_seq == seq) {
- cfs_atomic_inc(&los->los_refcount);
+ atomic_inc(&los->los_refcount);
ret = los;
break;
}
void dt_los_put(struct local_oid_storage *los)
{
- if (cfs_atomic_dec_and_test(&los->los_refcount))
+ if (atomic_dec_and_test(&los->los_refcount))
/* should never happen, only local_oid_storage_fini should
* drop refcount to zero */
LBUG();
if (*los == NULL)
GOTO(out, rc = -ENOMEM);
- cfs_atomic_set(&(*los)->los_refcount, 1);
+ atomic_set(&(*los)->los_refcount, 1);
mutex_init(&(*los)->los_id_lock);
(*los)->los_dev = &ls->ls_top_dev;
- cfs_atomic_inc(&ls->ls_refcount);
+ atomic_inc(&ls->ls_refcount);
cfs_list_add(&(*los)->los_list, &ls->ls_los_list);
/* Use {seq, 0, 0} to create the LAST_ID file for every
out_los:
if (rc != 0) {
cfs_list_del(&(*los)->los_list);
- cfs_atomic_dec(&ls->ls_refcount);
+ atomic_dec(&ls->ls_refcount);
OBD_FREE_PTR(*los);
*los = NULL;
if (o != NULL && !IS_ERR(o))
{
struct ls_device *ls;
- if (!cfs_atomic_dec_and_test(&los->los_refcount))
+ if (!atomic_dec_and_test(&los->los_refcount))
return;
LASSERT(env);
ls = dt2ls_dev(los->los_dev);
mutex_lock(&ls->ls_los_mutex);
- if (cfs_atomic_read(&los->los_refcount) > 0) {
+ if (atomic_read(&los->los_refcount) > 0) {
mutex_unlock(&ls->ls_los_mutex);
return;
}
/* all initialized ls_devices on this node linked by this */
cfs_list_t ls_linkage;
/* how many handle's reference this local storage */
- cfs_atomic_t ls_refcount;
+ atomic_t ls_refcount;
/* underlaying OSD device */
struct dt_device *ls_osd;
/* list of all local OID storages */
struct job_stat {
cfs_hlist_node_t js_hash;
cfs_list_t js_list;
- cfs_atomic_t js_refcount;
+ atomic_t js_refcount;
char js_jobid[JOBSTATS_JOBID_SIZE];
time_t js_timestamp; /* seconds */
struct lprocfs_stats *js_stats;
{
struct job_stat *job;
job = cfs_hlist_entry(hnode, struct job_stat, js_hash);
- cfs_atomic_inc(&job->js_refcount);
+ atomic_inc(&job->js_refcount);
}
static void job_free(struct job_stat *job)
job->js_jobstats = jobs;
CFS_INIT_HLIST_NODE(&job->js_hash);
CFS_INIT_LIST_HEAD(&job->js_list);
- cfs_atomic_set(&job->js_refcount, 1);
+ atomic_set(&job->js_refcount, 1);
return job;
}
struct proc_dir_entry *dp = PDE(f->f_dentry->d_inode);
struct obd_device *obd = dp->data;
- cfs_atomic_inc(&obd->obd_evict_inprogress);
+ atomic_inc(&obd->obd_evict_inprogress);
return 0;
}
struct proc_dir_entry *dp = PDE(f->f_dentry->d_inode);
struct obd_device *obd = dp->data;
- cfs_atomic_dec(&obd->obd_evict_inprogress);
+ atomic_dec(&obd->obd_evict_inprogress);
wake_up(&obd->obd_evict_inprogress_waitq);
return 0;
int lprocfs_atomic_seq_show(struct seq_file *m, void *data)
{
- cfs_atomic_t *atom = data;
+ atomic_t *atom = data;
LASSERT(atom != NULL);
return seq_printf(m, "%d\n", atomic_read(atom));
}
lprocfs_atomic_seq_write(struct file *file, const char *buffer,
size_t count, loff_t *off)
{
- cfs_atomic_t *atm = ((struct seq_file *)file->private_data)->private;
+ atomic_t *atm = ((struct seq_file *)file->private_data)->private;
int val = 0;
int rc;
if (val <= 0)
return -ERANGE;
- cfs_atomic_set(atm, val);
+ atomic_set(atm, val);
return count;
}
EXPORT_SYMBOL(lprocfs_atomic_seq_write);
int lprocfs_rd_atomic(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- cfs_atomic_t *atom = data;
+ atomic_t *atom = data;
LASSERT(atom != NULL);
*eof = 1;
- return snprintf(page, count, "%d\n", cfs_atomic_read(atom));
+ return snprintf(page, count, "%d\n", atomic_read(atom));
}
EXPORT_SYMBOL(lprocfs_rd_atomic);
int lprocfs_wr_atomic(struct file *file, const char *buffer,
unsigned long count, void *data)
{
- cfs_atomic_t *atm = data;
+ atomic_t *atm = data;
int val = 0;
int rc;
if (val <= 0)
return -ERANGE;
- cfs_atomic_set(atm, val);
+ atomic_set(atm, val);
return count;
}
EXPORT_SYMBOL(lprocfs_wr_atomic);
libcfs_nid2str(imp->imp_connection->c_peer.nid),
imp->imp_conn_cnt,
imp->imp_generation,
- cfs_atomic_read(&imp->imp_inval_count));
+ atomic_read(&imp->imp_inval_count));
spin_unlock(&imp->imp_lock);
if (obd->obd_svc_stats == NULL)
" unregistering: %u\n"
" timeouts: %u\n"
" avg_waittime: "LPU64" %s\n",
- cfs_atomic_read(&imp->imp_inflight),
- cfs_atomic_read(&imp->imp_unregistering),
- cfs_atomic_read(&imp->imp_timeouts),
+ atomic_read(&imp->imp_inflight),
+ atomic_read(&imp->imp_unregistering),
+ atomic_read(&imp->imp_timeouts),
ret.lc_sum, header->lc_units);
k = 0;
CDEBUG(D_CONFIG, "stat %p - data %p/%p\n", client_stat,
client_stat->nid_proc, client_stat->nid_stats);
- LASSERTF(cfs_atomic_read(&client_stat->nid_exp_ref_count) == 0,
+ LASSERTF(atomic_read(&client_stat->nid_exp_ref_count) == 0,
"nid %s:count %d\n", libcfs_nid2str(client_stat->nid),
atomic_read(&client_stat->nid_exp_ref_count));
struct nid_stat *stat = obj;
ENTRY;
- CDEBUG(D_INFO,"refcnt %d\n", cfs_atomic_read(&stat->nid_exp_ref_count));
- if (cfs_atomic_read(&stat->nid_exp_ref_count) == 1) {
+ CDEBUG(D_INFO,"refcnt %d\n", atomic_read(&stat->nid_exp_ref_count));
+ if (atomic_read(&stat->nid_exp_ref_count) == 1) {
/* object has only hash references. */
spin_lock(&stat->nid_obd->obd_nid_lock);
cfs_list_move(&stat->nid_list, data);
new_stat->nid = *nid;
new_stat->nid_obd = exp->exp_obd;
/* we need set default refcount to 1 to balance obd_disconnect */
- cfs_atomic_set(&new_stat->nid_exp_ref_count, 1);
+ atomic_set(&new_stat->nid_exp_ref_count, 1);
old_stat = cfs_hash_findadd_unique(obd->obd_nid_stats_hash,
nid, &new_stat->nid_hash);
CDEBUG(D_INFO, "Found stats %p for nid %s - ref %d\n",
old_stat, libcfs_nid2str(*nid),
- cfs_atomic_read(&new_stat->nid_exp_ref_count));
+ atomic_read(&new_stat->nid_exp_ref_count));
/* Return -EALREADY here so that we know that the /proc
* entry already has been created */
cfs_time_current_sec()) <= 0)
goto out;
if (lprocfs_obd_snprintf(&page, size, &len,"connected_clients: %d/%d\n",
- cfs_atomic_read(&obd->obd_connected_clients),
+ atomic_read(&obd->obd_connected_clients),
obd->obd_max_recoverable_clients) <= 0)
goto out;
/* Number of clients that have completed recovery */
if (lprocfs_obd_snprintf(&page, size, &len,"req_replay_clients: %d\n",
- cfs_atomic_read(&obd->obd_req_replay_clients))
+ atomic_read(&obd->obd_req_replay_clients))
<= 0)
goto out;
if (lprocfs_obd_snprintf(&page, size, &len,"lock_repay_clients: %d\n",
- cfs_atomic_read(&obd->obd_lock_replay_clients))
+ atomic_read(&obd->obd_lock_replay_clients))
<=0)
goto out;
if (lprocfs_obd_snprintf(&page, size, &len,"completed_clients: %d\n",
- cfs_atomic_read(&obd->obd_connected_clients) -
- cfs_atomic_read(&obd->obd_lock_replay_clients))
+ atomic_read(&obd->obd_connected_clients) -
+ atomic_read(&obd->obd_lock_replay_clients))
<=0)
goto out;
if (lprocfs_obd_snprintf(&page, size, &len,"evicted_clients: %d\n",
LASSERT(top->loh_hash.next == NULL
&& top->loh_hash.pprev == NULL);
LASSERT(cfs_list_empty(&top->loh_lru));
- if (!cfs_atomic_dec_and_test(&top->loh_ref))
+ if (!atomic_dec_and_test(&top->loh_ref))
return;
cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_release != NULL)
bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
- LASSERT(cfs_atomic_read(&h->loh_ref) == 0);
+ LASSERT(atomic_read(&h->loh_ref) == 0);
cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
LASSERT(bd.bd_bucket == bd2.bd_bucket);
const struct lu_object_header *hdr)
{
(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
- hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
+ hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
PFID(&hdr->loh_fid),
cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? \
struct lu_object_header *h;
h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
- if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
+ if (atomic_add_return(1, &h->loh_ref) == 1) {
struct lu_site_bkt_data *bkt;
cfs_hash_bd_t bd;
*/
void lu_device_get(struct lu_device *d)
{
- cfs_atomic_inc(&d->ld_ref);
+ atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);
*/
void lu_device_put(struct lu_device *d)
{
- LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
- cfs_atomic_dec(&d->ld_ref);
+ LASSERT(atomic_read(&d->ld_ref) > 0);
+ atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);
if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
t->ldt_ops->ldto_start(t);
memset(d, 0, sizeof *d);
- cfs_atomic_set(&d->ld_ref, 0);
+ atomic_set(&d->ld_ref, 0);
d->ld_type = t;
lu_ref_init(&d->ld_reference);
CFS_INIT_LIST_HEAD(&d->ld_linkage);
}
lu_ref_fini(&d->ld_reference);
- LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
- "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
+ LASSERTF(atomic_read(&d->ld_ref) == 0,
+ "Refcount is %u\n", atomic_read(&d->ld_ref));
LASSERT(t->ldt_device_nr > 0);
if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
t->ldt_ops->ldto_stop(t);
int lu_object_header_init(struct lu_object_header *h)
{
memset(h, 0, sizeof *h);
- cfs_atomic_set(&h->loh_ref, 1);
+ atomic_set(&h->loh_ref, 1);
CFS_INIT_HLIST_NODE(&h->loh_hash);
CFS_INIT_LIST_HEAD(&h->loh_lru);
CFS_INIT_LIST_HEAD(&h->loh_layers);
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
if (lu_keys[i] == NULL) {
key->lct_index = i;
- cfs_atomic_set(&key->lct_used, 1);
+ atomic_set(&key->lct_used, 1);
lu_keys[i] = key;
lu_ref_init(&key->lct_reference);
result = 0;
key = lu_keys[index];
LASSERT(key != NULL);
LASSERT(key->lct_fini != NULL);
- LASSERT(cfs_atomic_read(&key->lct_used) > 1);
+ LASSERT(atomic_read(&key->lct_used) > 1);
key->lct_fini(ctx, key, ctx->lc_value[index]);
lu_ref_del(&key->lct_reference, "ctx", ctx);
- cfs_atomic_dec(&key->lct_used);
+ atomic_dec(&key->lct_used);
LASSERT(key->lct_owner != NULL);
if ((ctx->lc_tags & LCT_NOREF) == 0) {
*/
void lu_context_key_degister(struct lu_context_key *key)
{
- LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
+ LASSERT(atomic_read(&key->lct_used) >= 1);
LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
lu_context_key_quiesce(key);
}
spin_unlock(&lu_keys_guard);
- LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
+ LASSERTF(atomic_read(&key->lct_used) == 1,
"key has instances: %d\n",
- cfs_atomic_read(&key->lct_used));
+ atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
if (!(ctx->lc_tags & LCT_NOREF))
try_module_get(key->lct_owner);
lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
- cfs_atomic_inc(&key->lct_used);
+ atomic_inc(&key->lct_used);
/*
* This is the only place in the code, where an
* element of ctx->lc_value[] array is set to non-NULL
CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
i, key, key->lct_tags,
key->lct_init, key->lct_fini, key->lct_exit,
- key->lct_index, cfs_atomic_read(&key->lct_used),
+ key->lct_index, atomic_read(&key->lct_used),
key->lct_owner ? key->lct_owner->name : "",
key->lct_owner);
lu_ref_print(&key->lct_reference);
/* Detach drops this */
spin_lock(&obd->obd_dev_lock);
- cfs_atomic_set(&obd->obd_refcount, 1);
+ atomic_set(&obd->obd_refcount, 1);
spin_unlock(&obd->obd_dev_lock);
lu_ref_init(&obd->obd_reference);
lu_ref_add(&obd->obd_reference, "attach", obd);
obd->obd_attached = 1;
CDEBUG(D_IOCTL, "OBD: dev %d attached type %s with refcount %d\n",
- obd->obd_minor, typename, cfs_atomic_read(&obd->obd_refcount));
+ obd->obd_minor, typename, atomic_read(&obd->obd_refcount));
RETURN(0);
out:
if (obd != NULL) {
/* The three references that should be remaining are the
* obd_self_export and the attach and setup references. */
- if (cfs_atomic_read(&obd->obd_refcount) > 3) {
+ if (atomic_read(&obd->obd_refcount) > 3) {
/* refcounf - 3 might be the number of real exports
(excluding self export). But class_incref is called
by other things as well, so don't count on it. */
CDEBUG(D_IOCTL, "%s: forcing exports to disconnect: %d\n",
- obd->obd_name, cfs_atomic_read(&obd->obd_refcount) - 3);
+ obd->obd_name, atomic_read(&obd->obd_refcount) - 3);
dump_exports(obd, 0);
class_disconnect_exports(obd);
}
const char *scope, const void *source)
{
lu_ref_add_atomic(&obd->obd_reference, scope, source);
- cfs_atomic_inc(&obd->obd_refcount);
+ atomic_inc(&obd->obd_refcount);
CDEBUG(D_INFO, "incref %s (%p) now %d\n", obd->obd_name, obd,
- cfs_atomic_read(&obd->obd_refcount));
+ atomic_read(&obd->obd_refcount));
return obd;
}
int refs;
spin_lock(&obd->obd_dev_lock);
- cfs_atomic_dec(&obd->obd_refcount);
- refs = cfs_atomic_read(&obd->obd_refcount);
+ atomic_dec(&obd->obd_refcount);
+ refs = atomic_read(&obd->obd_refcount);
spin_unlock(&obd->obd_dev_lock);
lu_ref_del(&obd->obd_reference, scope, source);
GOTO(out_free, rc);
/* Re-using an existing MGC */
- cfs_atomic_inc(&obd->u.cli.cl_mgc_refcount);
+ atomic_inc(&obd->u.cli.cl_mgc_refcount);
/* IR compatibility check, only for clients */
if (lmd_is_client(lsi->lsi_lmd)) {
/* Keep a refcount of servers/clients who started with "mount",
so we know when we can get rid of the mgc. */
- cfs_atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
+ atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
/* Try all connections, but only once. */
recov_bk = 1;
lsi->lsi_mgc = NULL;
mutex_lock(&mgc_start_lock);
- LASSERT(cfs_atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
- if (!cfs_atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
+ LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
+ if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
/* This is not fatal, every client that stops
will call in here. */
CDEBUG(D_MOUNT, "mgc still has %d references.\n",
- cfs_atomic_read(&obd->u.cli.cl_mgc_refcount));
+ atomic_read(&obd->u.cli.cl_mgc_refcount));
GOTO(out, rc = -EBUSY);
}
lsi->lsi_lmd->lmd_recovery_time_hard = 0;
s2lsi_nocast(sb) = lsi;
/* we take 1 extra ref for our setup */
- cfs_atomic_set(&lsi->lsi_mounts, 1);
+ atomic_set(&lsi->lsi_mounts, 1);
/* Default umount style */
lsi->lsi_flags = LSI_UMOUNT_FAILOVER;
CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
/* someone didn't call server_put_mount. */
- LASSERT(cfs_atomic_read(&lsi->lsi_mounts) == 0);
+ LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
if (lsi->lsi_lmd != NULL) {
if (lsi->lsi_lmd->lmd_dev != NULL)
LASSERT(lsi != NULL);
- CDEBUG(D_MOUNT, "put %p %d\n", sb, cfs_atomic_read(&lsi->lsi_mounts));
- if (cfs_atomic_dec_and_test(&lsi->lsi_mounts)) {
+ CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
+ if (atomic_dec_and_test(&lsi->lsi_mounts)) {
if (IS_SERVER(lsi) && lsi->lsi_osd_exp) {
lu_device_put(&lsi->lsi_dt_dev->dd_lu_dev);
lsi->lsi_osd_exp->exp_obd->obd_lvfs_ctxt.dt = NULL;
}
lsi = s2lsi(lmi->lmi_sb);
- cfs_atomic_inc(&lsi->lsi_mounts);
+ atomic_inc(&lsi->lsi_mounts);
CDEBUG(D_MOUNT, "get mount %p from %s, refs=%d\n", lmi->lmi_sb,
- name, cfs_atomic_read(&lsi->lsi_mounts));
+ name, atomic_read(&lsi->lsi_mounts));
RETURN(lmi);
}
lsi = s2lsi(lmi->lmi_sb);
CDEBUG(D_MOUNT, "put mount %p from %s, refs=%d\n",
- lmi->lmi_sb, name, cfs_atomic_read(&lsi->lsi_mounts));
+ lmi->lmi_sb, name, atomic_read(&lsi->lsi_mounts));
if (lustre_put_lsi(lmi->lmi_sb))
CDEBUG(D_MOUNT, "Last put of mount %p from %s\n",
}
}
- cfs_atomic_add(*pages, &obd->u.echo.eo_prep);
+ atomic_add(*pages, &obd->u.echo.eo_prep);
if (cmd & OBD_BRW_READ)
lprocfs_counter_add(obd->obd_stats, LPROC_ECHO_READ_BYTES,
tot_bytes);
CDEBUG(D_PAGE, "%d pages allocated after prep\n",
- cfs_atomic_read(&obd->u.echo.eo_prep));
+ atomic_read(&obd->u.echo.eo_prep));
RETURN(0);
* lose the extra ref gained above */
OBD_PAGE_FREE(res[i].page);
res[i].page = NULL;
- cfs_atomic_dec(&obd->u.echo.eo_prep);
+ atomic_dec(&obd->u.echo.eo_prep);
}
return rc;
}
- cfs_atomic_sub(pgs, &obd->u.echo.eo_prep);
+ atomic_sub(pgs, &obd->u.echo.eo_prep);
CDEBUG(D_PAGE, "%d pages remain after commit\n",
- cfs_atomic_read(&obd->u.echo.eo_prep));
+ atomic_read(&obd->u.echo.eo_prep));
RETURN(rc);
commitrw_cleanup:
- cfs_atomic_sub(pgs, &obd->u.echo.eo_prep);
+ atomic_sub(pgs, &obd->u.echo.eo_prep);
CERROR("cleaning up %d pages (%d obdos)\n",
niocount - pgs - 1, objcount);
/* NB see comment above regarding persistent pages */
OBD_PAGE_FREE(page);
- cfs_atomic_dec(&obd->u.echo.eo_prep);
+ atomic_dec(&obd->u.echo.eo_prep);
}
return rc;
}
ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
obd->obd_namespace = NULL;
- leaked = cfs_atomic_read(&obd->u.echo.eo_prep);
+ leaked = atomic_read(&obd->u.echo.eo_prep);
if (leaked != 0)
CERROR("%d prep/commitrw pages leaked\n", leaked);
struct echo_device *eo_dev;
cfs_list_t eo_obj_chain;
struct lov_stripe_md *eo_lsm;
- cfs_atomic_t eo_npages;
+ atomic_t eo_npages;
int eo_deleted;
};
cfs_list_t el_chain;
struct echo_object *el_object;
__u64 el_cookie;
- cfs_atomic_t el_refcount;
+ atomic_t el_refcount;
};
static int echo_client_setup(const struct lu_env *env,
struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
ENTRY;
- cfs_atomic_dec(&eco->eo_npages);
+ atomic_dec(&eco->eo_npages);
page_cache_release(slice->cpl_page->cp_vmpage);
EXIT;
}
page_cache_get(page->cp_vmpage);
mutex_init(&ep->ep_lock);
cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
- cfs_atomic_inc(&eco->eo_npages);
+ atomic_inc(&eco->eo_npages);
RETURN(0);
}
cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
el->el_object = cl2echo_obj(obj);
CFS_INIT_LIST_HEAD(&el->el_chain);
- cfs_atomic_set(&el->el_refcount, 0);
+ atomic_set(&el->el_refcount, 0);
}
RETURN(el == NULL ? -ENOMEM : 0);
}
}
eco->eo_dev = ed;
- cfs_atomic_set(&eco->eo_npages, 0);
+ atomic_set(&eco->eo_npages, 0);
cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
spin_lock(&ec->ec_lock);
struct echo_client_obd *ec = eco->eo_dev->ed_ec;
ENTRY;
- LASSERT(cfs_atomic_read(&eco->eo_npages) == 0);
+ LASSERT(atomic_read(&eco->eo_npages) == 0);
spin_lock(&ec->ec_lock);
cfs_list_del_init(&eco->eo_obj_chain);
cfs_list_add(&el->el_chain, &ec->ec_locks);
el->el_cookie = ++ec->ec_unique;
}
- cfs_atomic_inc(&el->el_refcount);
+ atomic_inc(&el->el_refcount);
*cookie = el->el_cookie;
spin_unlock(&ec->ec_lock);
} else {
CDEBUG(D_INFO, "ecl: %p, cookie: "LPX64"\n", ecl, ecl->el_cookie);
found = (ecl->el_cookie == cookie);
if (found) {
- if (cfs_atomic_dec_and_test(&ecl->el_refcount))
+ if (atomic_dec_and_test(&ecl->el_refcount))
cfs_list_del_init(&ecl->el_chain);
else
still_used = 1;
RETURN(-EBUSY);
}
- LASSERT(cfs_atomic_read(&ec->ec_exp->exp_refcount) > 0);
+ LASSERT(atomic_read(&ec->ec_exp->exp_refcount) > 0);
rc = obd_disconnect(ec->ec_exp);
if (rc != 0)
CERROR("fail to disconnect device: %d\n", rc);