LASSERT(top->loh_hash.next == NULL
&& top->loh_hash.pprev == NULL);
LASSERT(cfs_list_empty(&top->loh_lru));
- if (!cfs_atomic_dec_and_test(&top->loh_ref))
+ if (!atomic_dec_and_test(&top->loh_ref))
return;
cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_release != NULL)
	o->lo_ops->loo_object_release(env, o);
}
cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
- if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
- if (lu_object_is_dying(top)) {
+ if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
+ if (lu_object_is_dying(top)) {
- /*
- * somebody may be waiting for this, currently only
- * used for cl_object, see cl_object_put_last().
- */
- cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
- }
- return;
- }
+ /*
+ * somebody may be waiting for this, currently only
+ * used for cl_object, see cl_object_put_last().
+ */
+ wake_up_all(&bkt->lsb_marche_funebre);
+ }
+ return;
+ }
LASSERT(bkt->lsb_busy > 0);
bkt->lsb_busy--;
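
For reference, the waiter that this wake_up_all() pairs with (cl_object_put_last() in cl_object.c, not part of this hunk) follows the classic open-coded wait loop. A minimal sketch; done() stands in for the real "loh_ref dropped to the last reference" test and is an assumption of this sketch:

	wait_queue_t waiter;

	init_waitqueue_entry_current(&waiter);
	add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
	for (;;) {
		/* set the task state before testing, to avoid a lost wakeup */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (done())			/* hypothetical condition */
			break;
		waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);	/* schedule() */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
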
*/
CFS_INIT_LIST_HEAD(&splice);
cfs_list_splice_init(layers, &splice);
- while (!cfs_list_empty(&splice)) {
- /*
- * Free layers in bottom-to-top order, so that object header
- * lives as long as possible and ->loo_object_free() methods
- * can look at its contents.
- */
- o = container_of0(splice.prev, struct lu_object, lo_linkage);
- cfs_list_del_init(&o->lo_linkage);
- LASSERT(o->lo_ops->loo_object_free != NULL);
- o->lo_ops->loo_object_free(env, o);
- }
+ while (!cfs_list_empty(&splice)) {
+ /*
+ * Free layers in bottom-to-top order, so that object header
+ * lives as long as possible and ->loo_object_free() methods
+ * can look at its contents.
+ */
+ o = container_of0(splice.prev, struct lu_object, lo_linkage);
+ cfs_list_del_init(&o->lo_linkage);
+ LASSERT(o->lo_ops->loo_object_free != NULL);
+ o->lo_ops->loo_object_free(env, o);
+ }
- if (cfs_waitq_active(&bkt->lsb_marche_funebre))
- cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
+ if (waitqueue_active(&bkt->lsb_marche_funebre))
+ wake_up_all(&bkt->lsb_marche_funebre);
}
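
The waitqueue_active()/wake_up_all() pair above is the usual lockless fast path: skip the wakeup entirely when nobody is queued on the bucket. The unlocked check is only safe because each sleeper sets its task state before re-testing its condition, and the waker orders its condition write before inspecting the queue; schematically (a sketch of the generic idiom, not code from this patch; mark_object_free() is hypothetical):

	/* waker side */
	mark_object_free(h);		/* the state the sleepers re-test */
	smp_mb();			/* pairs with the barrier in set_current_state() */
	if (waitqueue_active(&bkt->lsb_marche_funebre))
		wake_up_all(&bkt->lsb_marche_funebre);
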
/**
bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
- LASSERT(cfs_atomic_read(&h->loh_ref) == 0);
+ LASSERT(atomic_read(&h->loh_ref) == 0);
cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
LASSERT(bd.bd_bucket == bd2.bd_bucket);
cfs_hash_bd_del_locked(s->ls_obj_hash, &bd2, &h->loh_hash);
cfs_list_move(&h->loh_lru, &dispose);
if (count > 0 && --count == 0)
break;
- }
- cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
- cfs_cond_resched();
- /*
- * Free everything on the dispose list. This is safe against
- * races due to the reasons described in lu_object_put().
- */
+ }
+ cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
+ cond_resched();
+ /*
+ * Free everything on the dispose list. This is safe against
+ * races due to the reasons described in lu_object_put().
+ */
while (!cfs_list_empty(&dispose)) {
	h = container_of0(dispose.next,
			  struct lu_object_header, loh_lru);
	cfs_list_del_init(&h->loh_lru);
	lu_object_free(env, lu_object_top(h));
	lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
}
const struct lu_object_header *hdr)
{
(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
- hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
+ hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
PFID(&hdr->loh_fid),
cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? \
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
- cfs_hash_bd_t *bd,
- const struct lu_fid *f,
- cfs_waitlink_t *waiter,
- __u64 *version)
+ cfs_hash_bd_t *bd,
+ const struct lu_fid *f,
+ wait_queue_t *waiter,
+ __u64 *version)
{
struct lu_site_bkt_data *bkt;
struct lu_object_header *h;
* drained), and moreover, lookup has to wait until object is freed.
*/
- cfs_waitlink_init(waiter);
- cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
- cfs_set_current_state(CFS_TASK_UNINT);
- lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
- return ERR_PTR(-EAGAIN);
+ init_waitqueue_entry_current(waiter);
+ add_wait_queue(&bkt->lsb_marche_funebre, waiter);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
+ return ERR_PTR(-EAGAIN);
}
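
The ordering in the -EAGAIN path above matters: the waiter is queued and the task marked TASK_UNINTERRUPTIBLE while the hash-bucket lock is still held, but the actual sleep happens later, in the caller. If the final lu_object_put() fires in that window, wake_up_all() simply flips the task back to TASK_RUNNING, so the caller's later waitq_wait() returns immediately instead of hanging. In timeline form (a hypothetical interleaving; A = lookup thread, B = releasing thread):

	/* A: add_wait_queue(); set_current_state(TASK_UNINTERRUPTIBLE)  */
	/* A: return ERR_PTR(-EAGAIN), bucket lock dropped               */
	/* B: frees the dying object, wake_up_all() -> A is TASK_RUNNING */
	/* A: waitq_wait() == schedule() -> falls straight through       */
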
static struct lu_object *htable_lookup_nowait(struct lu_site *s,
* Core logic of lu_object_find*() functions.
*/
static struct lu_object *lu_object_find_try(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf,
- cfs_waitlink_t *waiter)
-{
- struct lu_object *o;
- struct lu_object *shadow;
- struct lu_site *s;
- cfs_hash_t *hs;
- cfs_hash_bd_t bd;
- __u64 version = 0;
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf,
+ wait_queue_t *waiter)
+{
+ struct lu_object *o;
+ struct lu_object *shadow;
+ struct lu_site *s;
+ cfs_hash_t *hs;
+ cfs_hash_bd_t bd;
+ __u64 version = 0;
/*
* This uses standard index maintenance protocol:
* objects of different "stacking" to be created within the same site.
*/
struct lu_object *lu_object_find_at(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- struct lu_site_bkt_data *bkt;
- struct lu_object *obj;
- cfs_waitlink_t wait;
-
- while (1) {
- obj = lu_object_find_try(env, dev, f, conf, &wait);
- if (obj != ERR_PTR(-EAGAIN))
- return obj;
- /*
- * lu_object_find_try() already added waiter into the
- * wait queue.
- */
- cfs_waitq_wait(&wait, CFS_TASK_UNINT);
- bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
- cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
- }
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ struct lu_site_bkt_data *bkt;
+ struct lu_object *obj;
+ wait_queue_t wait;
+
+ while (1) {
+ obj = lu_object_find_try(env, dev, f, conf, &wait);
+ if (obj != ERR_PTR(-EAGAIN))
+ return obj;
+ /*
+ * lu_object_find_try() already added waiter into the
+ * wait queue.
+ */
+ waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
+ bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
+ remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
+ }
}
EXPORT_SYMBOL(lu_object_find_at);
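
The two wait primitives this hunk keeps are thin libcfs compatibility shims over the stock kernel API, roughly as follows (from the libcfs headers of this vintage; exact definitions may differ slightly):

	#define init_waitqueue_entry_current(w)	init_waitqueue_entry(w, current)
	#define waitq_wait(w, state)		schedule()

so the loop above is the ordinary add-to-queue / schedule() / remove-from-queue pattern, with the enqueue having been done earlier inside lu_object_find_try().
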
*
* Size of lu_object is (arbitrary) taken as 1K (together with inode).
*/
- cache_size = num_physpages;
+ cache_size = totalram_pages;
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
}
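
Switching num_physpages to totalram_pages keeps the sizing math unchanged: the rest of lu_htable_order() (elided in this hunk) scales the page count by lu_cache_percent and by PAGE_SIZE/1024 before rounding up to a power of two. A back-of-envelope check, assuming 8 GiB of RAM, 4 KiB pages, and the default lu_cache_percent of 20 (all three figures are illustrative assumptions):

	unsigned long pages = (8UL << 30) >> 12;		/* totalram_pages = 2097152 */
	unsigned long cache = pages / 100 * 20 * (4096 / 1024);	/* = 1677680 entries */
	/* the first power of two >= 1677680 is 2^21, so the table order is 21 */
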
static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
- const void *key, unsigned mask)
+ const void *key, unsigned mask)
{
- struct lu_fid *fid = (struct lu_fid *)key;
- __u32 hash;
+ struct lu_fid *fid = (struct lu_fid *)key;
+ __u32 hash;
- hash = fid_flatten32(fid);
- hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
- hash = cfs_hash_long(hash, hs->hs_bkt_bits);
+ hash = fid_flatten32(fid);
+ hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
+ hash = hash_long(hash, hs->hs_bkt_bits);
- /* give me another random factor */
- hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
+ /* give me another random factor */
+ hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
- hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
- hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
+ hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
+ hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
- return hash & mask;
+ return hash & mask;
}
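
The packing at the end is worth spelling out: the hash_long() result is hs_bkt_bits wide and is shifted into the top of the hs_cur_bits-wide value, while the low hs_cur_bits - hs_bkt_bits bits carry a selector derived directly from the FID, so FID-adjacent objects stay close at that level. With hypothetical widths hs_cur_bits = 16 and hs_bkt_bits = 9 (so CFS_HASH_NBKT(hs) == 128):

	/* bits [15..7]: hash_long(mixed fid, 9)      - 9-bit mixed hash     */
	/* bits  [6..0]: (fid_seq + fid_oid) & 127    - 7-bit FID selector   */
	static unsigned pack(unsigned mixed, unsigned sel)
	{
		return (mixed << 7) | (sel & 127);
	}
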
static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
struct lu_object_header *h;
h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
- if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
+ if (atomic_add_return(1, &h->loh_ref) == 1) {
struct lu_site_bkt_data *bkt;
cfs_hash_bd_t bd;
return -ENOMEM;
}
- cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
- bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
- CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
- cfs_waitq_init(&bkt->lsb_marche_funebre);
- }
+ cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
+ bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
+ CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
+ init_waitqueue_head(&bkt->lsb_marche_funebre);
+ }
s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
if (s->ls_stats == NULL) {
*/
void lu_device_get(struct lu_device *d)
{
- cfs_atomic_inc(&d->ld_ref);
+ atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);
*/
void lu_device_put(struct lu_device *d)
{
- LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
- cfs_atomic_dec(&d->ld_ref);
+ LASSERT(atomic_read(&d->ld_ref) > 0);
+ atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);
if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
t->ldt_ops->ldto_start(t);
memset(d, 0, sizeof *d);
- cfs_atomic_set(&d->ld_ref, 0);
+ atomic_set(&d->ld_ref, 0);
d->ld_type = t;
lu_ref_init(&d->ld_reference);
CFS_INIT_LIST_HEAD(&d->ld_linkage);
}
lu_ref_fini(&d->ld_reference);
- LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
- "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
+ LASSERTF(atomic_read(&d->ld_ref) == 0,
+ "Refcount is %u\n", atomic_read(&d->ld_ref));
LASSERT(t->ldt_device_nr > 0);
if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
t->ldt_ops->ldto_stop(t);
int lu_object_header_init(struct lu_object_header *h)
{
memset(h, 0, sizeof *h);
- cfs_atomic_set(&h->loh_ref, 1);
+ atomic_set(&h->loh_ref, 1);
CFS_INIT_HLIST_NODE(&h->loh_hash);
CFS_INIT_LIST_HEAD(&h->loh_lru);
CFS_INIT_LIST_HEAD(&h->loh_layers);
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
if (lu_keys[i] == NULL) {
key->lct_index = i;
- cfs_atomic_set(&key->lct_used, 1);
+ atomic_set(&key->lct_used, 1);
lu_keys[i] = key;
lu_ref_init(&key->lct_reference);
result = 0;
key = lu_keys[index];
LASSERT(key != NULL);
LASSERT(key->lct_fini != NULL);
- LASSERT(cfs_atomic_read(&key->lct_used) > 1);
+ LASSERT(atomic_read(&key->lct_used) > 1);
key->lct_fini(ctx, key, ctx->lc_value[index]);
lu_ref_del(&key->lct_reference, "ctx", ctx);
- cfs_atomic_dec(&key->lct_used);
+ atomic_dec(&key->lct_used);
LASSERT(key->lct_owner != NULL);
if ((ctx->lc_tags & LCT_NOREF) == 0) {
- LINVRNT(cfs_module_refcount(key->lct_owner) > 0);
- cfs_module_put(key->lct_owner);
+ LINVRNT(module_refcount(key->lct_owner) > 0);
+ module_put(key->lct_owner);
}
ctx->lc_value[index] = NULL;
}
*/
void lu_context_key_degister(struct lu_context_key *key)
{
- LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
+ LASSERT(atomic_read(&key->lct_used) >= 1);
LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
lu_context_key_quiesce(key);
}
spin_unlock(&lu_keys_guard);
- LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
+ LASSERTF(atomic_read(&key->lct_used) == 1,
"key has instances: %d\n",
- cfs_atomic_read(&key->lct_used));
+ atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
void lu_context_key_quiesce(struct lu_context_key *key)
{
struct lu_context *ctx;
+ extern unsigned cl_env_cache_purge(unsigned nr);
if (!(key->lct_tags & LCT_QUIESCENT)) {
/*
* XXX layering violation.
*/
+ cl_env_cache_purge(~0);
key->lct_tags |= LCT_QUIESCENT;
/*
* XXX memory barrier has to go here.
if (unlikely(IS_ERR(value)))
return PTR_ERR(value);
- LASSERT(key->lct_owner != NULL);
- if (!(ctx->lc_tags & LCT_NOREF))
- cfs_try_module_get(key->lct_owner);
- lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
- cfs_atomic_inc(&key->lct_used);
+ LASSERT(key->lct_owner != NULL);
+ if (!(ctx->lc_tags & LCT_NOREF))
+ try_module_get(key->lct_owner);
+ lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
+ atomic_inc(&key->lct_used);
/*
* This is the only place in the code, where an
* element of ctx->lc_value[] array is set to non-NULL
CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
i, key, key->lct_tags,
key->lct_init, key->lct_fini, key->lct_exit,
- key->lct_index, cfs_atomic_read(&key->lct_used),
+ key->lct_index, atomic_read(&key->lct_used),
key->lct_owner ? key->lct_owner->name : "",
key->lct_owner);
lu_ref_print(&key->lct_reference);
* Output site statistical counters into a buffer. Suitable for
* lprocfs_rd_*()-style functions.
*/
+int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
+{
+ lu_site_stats_t stats;
+
+ memset(&stats, 0, sizeof(stats));
+ lu_site_stats_get(s->ls_obj_hash, &stats, 1);
+
+ return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
+ stats.lss_busy,
+ stats.lss_total,
+ stats.lss_populated,
+ CFS_HASH_NHLIST(s->ls_obj_hash),
+ stats.lss_max_search,
+ ls_stats_read(s->ls_stats, LU_SS_CREATED),
+ ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
+ ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
+ ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
+ ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
+ ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
+}
+EXPORT_SYMBOL(lu_site_stats_seq_print);
+
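
A minimal consumer of the new helper, using the standard single_open() boilerplate of this kernel generation; my_site and the proc wiring are assumptions for illustration, not part of this patch:

	static int my_site_stats_seq_show(struct seq_file *m, void *v)
	{
		return lu_site_stats_seq_print(my_site, m);
	}

	static int my_site_stats_seq_open(struct inode *inode, struct file *file)
	{
		return single_open(file, my_site_stats_seq_show, NULL);
	}

	static const struct file_operations my_site_stats_fops = {
		.owner   = THIS_MODULE,
		.open    = my_site_stats_seq_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};
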
int lu_site_stats_print(const struct lu_site *s, char *page, int count)
{
lu_site_stats_t stats;
struct lu_fid *old = &o->lo_header->loh_fid;
struct lu_site_bkt_data *bkt;
struct lu_object *shadow;
- cfs_waitlink_t waiter;
+ wait_queue_t waiter;
cfs_hash_t *hs;
cfs_hash_bd_t bd;
__u64 version = 0;