AC_MSG_CHECKING([whether to enable page state tracking])
AC_MSG_RESULT([$enable_pgstat_track])
if test x$enable_pgstat_track = xyes ; then
- AC_DEFINE([LUSTRE_PAGESTATE_TRACKING], 1,
+ AC_DEFINE([CONFIG_DEBUG_PAGESTATE_TRACKING], 1,
[enable page state tracking code])
fi
/* @} cl_req */
+enum cache_stats_item {
+ /** how many cache lookups were performed */
+ CS_lookup = 0,
+ /** how many times cache lookup resulted in a hit */
+ CS_hit,
+ /** how many entities are in the cache right now */
+ CS_total,
+ /** how many entities in the cache are actively used (and cannot be
+ * evicted) right now */
+ CS_busy,
+ /** how many entities were created at all */
+ CS_create,
+ CS_NR
+};
+
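+/* Human-readable counter names; must stay in sync with the order of
+ * enum cache_stats_item above. */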
+#define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
+
/**
* Stats for a generic cache (similar to inode, lu_object, etc. caches).
*/
struct cache_stats {
const char *cs_name;
- /** how many entities were created at all */
- cfs_atomic_t cs_created;
- /** how many cache lookups were performed */
- cfs_atomic_t cs_lookup;
- /** how many times cache lookup resulted in a hit */
- cfs_atomic_t cs_hit;
- /** how many entities are in the cache right now */
- cfs_atomic_t cs_total;
- /** how many entities in the cache are actively used (and cannot be
- * evicted) right now */
- cfs_atomic_t cs_busy;
+ cfs_atomic_t cs_stats[CS_NR];
};
/** These are not exported so far */
}
};
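+/* CS_LOCK_* update the generic lock cache counters; CS_LOCKSTATE_* track
+ * per-state lock counts. Both compile to no-ops unless
+ * CONFIG_DEBUG_PAGESTATE_TRACKING is set. */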
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CS_LOCK_INC(o, item) \
+ cfs_atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+#define CS_LOCK_DEC(o, item) \
+ cfs_atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+#define CS_LOCKSTATE_INC(o, state) \
+ cfs_atomic_inc(&cl_object_site(o)->cs_locks_state[state])
+#define CS_LOCKSTATE_DEC(o, state) \
+ cfs_atomic_dec(&cl_object_site(o)->cs_locks_state[state])
+#else
+#define CS_LOCK_INC(o, item)
+#define CS_LOCK_DEC(o, item)
+#define CS_LOCKSTATE_INC(o, state)
+#define CS_LOCKSTATE_DEC(o, state)
+#endif
+
/**
* Basic lock invariant that is maintained at all times. Caller either has a
* reference to \a lock, or somehow assures that \a lock cannot be freed.
cfs_list_del_init(lock->cll_layers.next);
slice->cls_ops->clo_fini(env, slice);
}
- cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
- cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
+ CS_LOCK_DEC(obj, total);
+ CS_LOCKSTATE_DEC(obj, lock->cll_state);
lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
cl_object_put(env, obj);
lu_ref_fini(&lock->cll_reference);
LASSERT(cfs_list_empty(&lock->cll_linkage));
cl_lock_free(env, lock);
}
- cfs_atomic_dec(&site->cs_locks.cs_busy);
+ CS_LOCK_DEC(obj, busy);
}
EXIT;
}
*/
void cl_lock_get_trust(struct cl_lock *lock)
{
- struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
-
CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
cfs_atomic_read(&lock->cll_ref), lock, RETIP);
if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
- cfs_atomic_inc(&site->cs_locks.cs_busy);
+ CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);
{
struct cl_lock *lock;
struct lu_object_header *head;
- struct cl_site *site = cl_object_site(obj);
ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
cfs_waitq_init(&lock->cll_wq);
head = obj->co_lu.lo_header;
- cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
- cfs_atomic_inc(&site->cs_locks.cs_total);
- cfs_atomic_inc(&site->cs_locks.cs_created);
+ CS_LOCKSTATE_INC(obj, CLS_NEW);
+ CS_LOCK_INC(obj, total);
+ CS_LOCK_INC(obj, create);
cl_lock_lockdep_init(lock);
cfs_list_for_each_entry(obj, &head->loh_layers,
co_lu.lo_linkage) {
{
struct cl_lock *lock;
struct cl_object_header *head;
- struct cl_site *site;
ENTRY;
head = cl_object_header(obj);
- site = cl_object_site(obj);
LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
- cfs_atomic_inc(&site->cs_locks.cs_lookup);
+ CS_LOCK_INC(obj, lookup);
cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
int matched;
matched);
if (matched) {
cl_lock_get_trust(lock);
- cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
+ CS_LOCK_INC(obj, hit);
RETURN(lock);
}
}
struct cl_object_header *head;
struct cl_object *obj;
struct cl_lock *lock;
- struct cl_site *site;
ENTRY;
obj = need->cld_obj;
head = cl_object_header(obj);
- site = cl_object_site(obj);
spin_lock(&head->coh_lock_guard);
lock = cl_lock_lookup(env, obj, io, need);
cfs_list_add_tail(&lock->cll_linkage,
&head->coh_locks);
spin_unlock(&head->coh_lock_guard);
- cfs_atomic_inc(&site->cs_locks.cs_busy);
+ CS_LOCK_INC(obj, busy);
} else {
spin_unlock(&head->coh_lock_guard);
/*
void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state)
{
- struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
-
ENTRY;
LASSERT(lock->cll_state <= state ||
(lock->cll_state == CLS_CACHED &&
lock->cll_state == CLS_INTRANSIT);
if (lock->cll_state != state) {
- cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
- cfs_atomic_inc(&site->cs_locks_state[state]);
+ CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
+ CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
cl_lock_state_signal(env, lock, state);
lock->cll_state = state;
void cache_stats_init(struct cache_stats *cs, const char *name)
{
+ int i;
+
cs->cs_name = name;
- cfs_atomic_set(&cs->cs_lookup, 0);
- cfs_atomic_set(&cs->cs_hit, 0);
- cfs_atomic_set(&cs->cs_total, 0);
- cfs_atomic_set(&cs->cs_busy, 0);
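+ /* zero every counter in the stats array */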
+ for (i = 0; i < CS_NR; i++)
+ cfs_atomic_set(&cs->cs_stats[i], 0);
}
int cache_stats_print(const struct cache_stats *cs,
char *page, int count, int h)
{
- int nob = 0;
-/*
- lookup hit total cached create
- env: ...... ...... ...... ...... ......
-*/
- if (h)
- nob += snprintf(page, count,
- " lookup hit total busy create\n");
-
- nob += snprintf(page + nob, count - nob,
- "%5.5s: %6u %6u %6u %6u %6u",
- cs->cs_name,
- cfs_atomic_read(&cs->cs_lookup),
- cfs_atomic_read(&cs->cs_hit),
- cfs_atomic_read(&cs->cs_total),
- cfs_atomic_read(&cs->cs_busy),
- cfs_atomic_read(&cs->cs_created));
- return nob;
+ int nob = 0;
+ int i;
+ /*
+ * lookup hit total busy create
+ * env: ...... ...... ...... ...... ......
+ */
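+ /* a non-zero 'h' requests a header row of column names */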
+ if (h) {
+ const char *names[CS_NR] = CS_NAMES;
+
+ nob += snprintf(page + nob, count - nob, "%6s", " ");
+ for (i = 0; i < CS_NR; i++)
+ nob += snprintf(page + nob, count - nob,
+ "%8s", names[i]);
+ nob += snprintf(page + nob, count - nob, "\n");
+ }
+
+ nob += snprintf(page + nob, count - nob, "%5.5s:", cs->cs_name);
+ for (i = 0; i < CS_NR; i++)
+ nob += snprintf(page + nob, count - nob, "%8u",
+ cfs_atomic_read(&cs->cs_stats[i]));
+ return nob;
}
/**
static struct cache_stats cl_env_stats = {
.cs_name = "envs",
- .cs_created = CFS_ATOMIC_INIT(0),
- .cs_lookup = CFS_ATOMIC_INIT(0),
- .cs_hit = CFS_ATOMIC_INIT(0),
- .cs_total = CFS_ATOMIC_INIT(0),
- .cs_busy = CFS_ATOMIC_INIT(0)
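+ /* the remaining array elements are implicitly zero-initialized */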
+ .cs_stats = { CFS_ATOMIC_INIT(0), }
};
/**
void *ce_debug;
};
-#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.counter)
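+/* Environment statistics likewise compile to no-ops unless
+ * CONFIG_DEBUG_PAGESTATE_TRACKING is set. */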
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CL_ENV_INC(counter) cfs_atomic_inc(&cl_env_stats.cs_stats[CS_##counter])
-#define CL_ENV_DEC(counter) \
- do { \
- LASSERT(cfs_atomic_read(&cl_env_stats.counter) > 0); \
- cfs_atomic_dec(&cl_env_stats.counter); \
- } while (0)
+#define CL_ENV_DEC(counter) do { \
+ LASSERT(cfs_atomic_read(&cl_env_stats.cs_stats[CS_##counter]) > 0); \
+ cfs_atomic_dec(&cl_env_stats.cs_stats[CS_##counter]); \
+} while (0)
+#else
+#define CL_ENV_INC(counter)
+#define CL_ENV_DEC(counter)
+#endif
static void cl_env_init0(struct cl_env *cle, void *debug)
{
cle->ce_ref = 1;
cle->ce_debug = debug;
- CL_ENV_INC(cs_busy);
+ CL_ENV_INC(busy);
}
OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
env = ERR_PTR(rc);
} else {
- CL_ENV_INC(cs_created);
- CL_ENV_INC(cs_total);
+ CL_ENV_INC(create);
+ CL_ENV_INC(total);
}
} else
env = ERR_PTR(-ENOMEM);
static void cl_env_fini(struct cl_env *cle)
{
- CL_ENV_DEC(cs_total);
+ CL_ENV_DEC(total);
lu_context_fini(&cle->ce_lu.le_ctx);
lu_context_fini(&cle->ce_ses);
OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
struct lu_env *env;
struct cl_env *cle;
- CL_ENV_INC(cs_lookup);
+ CL_ENV_INC(lookup);
/* check that we don't go far from untrusted pointer */
CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
env = NULL;
cle = cl_env_fetch();
if (cle != NULL) {
- CL_ENV_INC(cs_hit);
+ CL_ENV_INC(hit);
env = &cle->ce_lu;
*refcheck = ++cle->ce_ref;
}
CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
if (--cle->ce_ref == 0) {
- CL_ENV_DEC(cs_busy);
+ CL_ENV_DEC(busy);
cl_env_detach(cle);
cle->ce_debug = NULL;
cl_env_exit(cle);
((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !INVARIANT_CHECK */
+/* Page statistics are disabled by default due to their huge performance
+ * penalty. */
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CS_PAGE_INC(o, item) \
+ cfs_atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
+#define CS_PAGE_DEC(o, item) \
+ cfs_atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
+#define CS_PAGESTATE_INC(o, state) \
+ cfs_atomic_inc(&cl_object_site(o)->cs_pages_state[state])
+#define CS_PAGESTATE_DEC(o, state) \
+ cfs_atomic_dec(&cl_object_site(o)->cs_pages_state[state])
+#else
+#define CS_PAGE_INC(o, item)
+#define CS_PAGE_DEC(o, item)
+#define CS_PAGESTATE_INC(o, state)
+#define CS_PAGESTATE_DEC(o, state)
+#endif
+
/**
* Internal version of cl_page_top, it should be called with page referenced,
* or cp_lock held.
* Checkless version for trusted users.
*/
if (cfs_atomic_inc_return(&page->cp_ref) == 1)
- cfs_atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
+ CS_PAGE_INC(page->cp_obj, busy);
}
/**
static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
struct cl_object *obj = page->cp_obj;
- struct cl_site *site = cl_object_site(obj);
PASSERT(env, page, cfs_list_empty(&page->cp_batch));
PASSERT(env, page, page->cp_owner == NULL);
cfs_list_del_init(page->cp_layers.next);
slice->cpl_ops->cpo_fini(env, slice);
}
- cfs_atomic_dec(&site->cs_pages.cs_total);
-
-#ifdef LUSTRE_PAGESTATE_TRACKING
- cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
-#endif
+ CS_PAGE_DEC(obj, total);
+ CS_PAGESTATE_DEC(obj, page->cp_state);
lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
cl_object_put(env, obj);
lu_ref_fini(&page->cp_reference);
struct cl_page *page;
struct cl_page *err = NULL;
struct lu_object_header *head;
- struct cl_site *site = cl_object_site(o);
int result;
ENTRY;
}
}
if (err == NULL) {
- cfs_atomic_inc(&site->cs_pages.cs_busy);
- cfs_atomic_inc(&site->cs_pages.cs_total);
-
-#ifdef LUSTRE_PAGESTATE_TRACKING
- cfs_atomic_inc(&site->cs_pages_state[CPS_CACHED]);
-#endif
- cfs_atomic_inc(&site->cs_pages.cs_created);
+ CS_PAGE_INC(o, busy);
+ CS_PAGE_INC(o, total);
+ CS_PAGE_INC(o, create);
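+ /* a newly allocated page starts out in CPS_CACHED state */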
+ CS_PAGESTATE_INC(o, CPS_CACHED);
result = 0;
}
} else
struct cl_page *page = NULL;
struct cl_page *ghost = NULL;
struct cl_object_header *hdr;
- struct cl_site *site = cl_object_site(o);
int err;
LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
ENTRY;
hdr = cl_object_header(o);
- cfs_atomic_inc(&site->cs_pages.cs_lookup);
+ CS_PAGE_INC(o, lookup);
CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
}
if (page != NULL) {
- cfs_atomic_inc(&site->cs_pages.cs_hit);
+ CS_PAGE_INC(o, hit);
RETURN(page);
}
spin_unlock(&hdr->coh_page_guard);
if (unlikely(ghost != NULL)) {
- cfs_atomic_dec(&site->cs_pages.cs_busy);
+ CS_PAGE_DEC(o, busy);
cl_page_delete0(env, ghost, 0);
cl_page_free(env, ghost);
}
struct cl_page *page, enum cl_page_state state)
{
enum cl_page_state old;
-#ifdef LUSTRE_PAGESTATE_TRACKING
- struct cl_site *site = cl_object_site(page->cp_obj);
-#endif
/*
* Matrix of allowed state transitions [old][new], for sanity
PASSERT(env, page,
equi(state == CPS_OWNED, page->cp_owner != NULL));
-#ifdef LUSTRE_PAGESTATE_TRACKING
- cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
- cfs_atomic_inc(&site->cs_pages_state[state]);
-#endif
+ CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
+ CS_PAGESTATE_INC(page->cp_obj, state);
cl_page_state_set_trust(page, state);
}
EXIT;
*/
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
- struct cl_site *site = cl_object_site(page->cp_obj);
-
PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
ENTRY;
cfs_atomic_read(&page->cp_ref));
if (cfs_atomic_dec_and_lock(&page->cp_ref, &page->cp_lock)) {
- cfs_atomic_dec(&site->cs_pages.cs_busy);
+ CS_PAGE_DEC(page->cp_obj, busy);
/* We're going to access the page w/o a reference, but it's
* ok because we have grabbed the lock cp_lock, which
* means nobody is able to free this page behind us.