site = o->lo_dev->ld_site;
orig = o;
kill_it = 0;
- write_lock(&site->ls_guard);
- if (atomic_dec_and_test(&top->loh_ref)) {
+ cfs_write_lock(&site->ls_guard);
+ if (cfs_atomic_dec_and_test(&top->loh_ref)) {
/*
* When last reference is released, iterate over object
* layers, and notify them that object is no longer busy.
*/
- list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+ cfs_list_for_each_entry_reverse(o, &top->loh_layers,
+ lo_linkage) {
if (o->lo_ops->loo_object_release != NULL)
o->lo_ops->loo_object_release(env, o);
}
* object lookup is possible and we can safely destroy
* object below.
*/
- hlist_del_init(&top->loh_hash);
- list_del_init(&top->loh_lru);
+ cfs_hlist_del_init(&top->loh_hash);
+ cfs_list_del_init(&top->loh_lru);
-- site->ls_total;
kill_it = 1;
}
}
- write_unlock(&site->ls_guard);
+ cfs_write_unlock(&site->ls_guard);
if (kill_it)
/*
* Object was already removed from hash and lru above, can
{
struct lu_object *scan;
struct lu_object *top;
- struct list_head *layers;
+ cfs_list_t *layers;
int clean;
int result;
ENTRY;
* object slices are created.
*/
clean = 1;
- list_for_each_entry(scan, layers, lo_linkage) {
+ cfs_list_for_each_entry(scan, layers, lo_linkage) {
if (scan->lo_flags & LU_OBJECT_ALLOCATED)
continue;
clean = 0;
}
} while (!clean);
- list_for_each_entry_reverse(scan, layers, lo_linkage) {
+ cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
if (scan->lo_ops->loo_object_start != NULL) {
result = scan->lo_ops->loo_object_start(env, scan);
if (result != 0) {
*/
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
- struct list_head splice;
- struct lu_object *scan;
- struct lu_site *site;
- struct list_head *layers;
+ cfs_list_t splice;
+ struct lu_object *scan;
+ struct lu_site *site;
+ cfs_list_t *layers;
site = o->lo_dev->ld_site;
layers = &o->lo_header->loh_layers;
/*
* First call ->loo_object_delete() method to release all resources.
*/
- list_for_each_entry_reverse(scan, layers, lo_linkage) {
+ cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
if (scan->lo_ops->loo_object_delete != NULL)
scan->lo_ops->loo_object_delete(env, scan);
}
* top-level slice.
*/
CFS_INIT_LIST_HEAD(&splice);
- list_splice_init(layers, &splice);
- while (!list_empty(&splice)) {
+ cfs_list_splice_init(layers, &splice);
+ while (!cfs_list_empty(&splice)) {
/*
* Free layers in bottom-to-top order, so that object header
* lives as long as possible and ->loo_object_free() methods
* can look at its contents.
*/
o = container_of0(splice.prev, struct lu_object, lo_linkage);
- list_del_init(&o->lo_linkage);
+ cfs_list_del_init(&o->lo_linkage);
LASSERT(o->lo_ops->loo_object_free != NULL);
o->lo_ops->loo_object_free(env, o);
}
*/
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
- struct list_head dispose;
+ cfs_list_t dispose;
struct lu_object_header *h;
struct lu_object_header *temp;
* Under LRU list lock, scan LRU list and move unreferenced objects to
* the dispose list, removing them from LRU and hash table.
*/
- write_lock(&s->ls_guard);
- list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
+ cfs_write_lock(&s->ls_guard);
+ cfs_list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
/*
 * Objects are sorted in lru order, and "busy" objects (ones
 * with h->loh_ref > 0) naturally tend to live near the hot end
*/
if (nr-- == 0)
break;
- if (atomic_read(&h->loh_ref) > 0)
+ if (cfs_atomic_read(&h->loh_ref) > 0)
continue;
- hlist_del_init(&h->loh_hash);
- list_move(&h->loh_lru, &dispose);
+ cfs_hlist_del_init(&h->loh_hash);
+ cfs_list_move(&h->loh_lru, &dispose);
s->ls_total --;
}
- write_unlock(&s->ls_guard);
+ cfs_write_unlock(&s->ls_guard);
/*
* Free everything on the dispose list. This is safe against races due
* to the reasons described in lu_object_put().
*/
- while (!list_empty(&dispose)) {
+ while (!cfs_list_empty(&dispose)) {
h = container_of0(dispose.next,
struct lu_object_header, loh_lru);
- list_del_init(&h->loh_lru);
+ cfs_list_del_init(&h->loh_lru);
lu_object_free(env, lu_object_top(h));
s->ls_stats.s_lru_purged ++;
}
struct lu_cdebug_data *key;
int used;
int complete;
- va_list args;
+ va_list args;
va_start(args, format);
vsnprintf(key->lck_area + used,
ARRAY_SIZE(key->lck_area) - used, format, args);
if (complete) {
- libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
- (char *)info->lpi_file, info->lpi_fn,
- info->lpi_line, "%s", key->lck_area);
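+ /* Emit the accumulated line only if this debug mask and
+ * subsystem are currently enabled. */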
+ if (cfs_cdebug_show(info->lpi_mask, info->lpi_subsys))
+ libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
+ (char *)info->lpi_file, info->lpi_fn,
+ info->lpi_line, "%s", key->lck_area);
key->lck_area[0] = 0;
}
va_end(args);
}
EXPORT_SYMBOL(lu_cdebug_printer);
-/*
+/**
* Print object header.
*/
-static void lu_object_header_print(const struct lu_env *env,
- void *cookie, lu_printer_t printer,
- const struct lu_object_header *hdr)
+void lu_object_header_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer,
+ const struct lu_object_header *hdr)
{
(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
- hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
+ hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
PFID(&hdr->loh_fid),
- hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
- list_empty(&hdr->loh_lru) ? "" : " lru",
+ cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
+ cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ?
+ "" : " lru",
hdr->loh_attr & LOHA_EXISTS ? " exist":"");
}
+EXPORT_SYMBOL(lu_object_header_print);
-/*
- * Print human readable representation of the @o to the @printer.
+/**
+ * Print human readable representation of the \a o to the \a printer.
*/
void lu_object_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct lu_object *o)
top = o->lo_header;
lu_object_header_print(env, cookie, printer, top);
- (*printer)(env, cookie, "\n");
- list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+ (*printer)(env, cookie, "{\n");
+ cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
depth = o->lo_depth + 4;
- LASSERT(o->lo_ops->loo_object_print != NULL);
+
/*
- * print `.' @depth times.
+ * print `.' \a depth times followed by type name and address
*/
- (*printer)(env, cookie, "%*.*s", depth, depth, ruler);
- o->lo_ops->loo_object_print(env, cookie, printer, o);
+ (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
+ o->lo_dev->ld_type->ldt_name, o);
+ if (o->lo_ops->loo_object_print != NULL)
+ o->lo_ops->loo_object_print(env, cookie, printer, o);
(*printer)(env, cookie, "\n");
}
+ (*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
-/*
+/**
* Check object consistency.
*/
int lu_object_invariant(const struct lu_object *o)
struct lu_object_header *top;
top = o->lo_header;
- list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+ cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
if (o->lo_ops->loo_object_invariant != NULL &&
!o->lo_ops->loo_object_invariant(o))
return 0;
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
- const struct hlist_head *bucket,
- const struct lu_fid *f)
+ const cfs_hlist_head_t *bucket,
+ const struct lu_fid *f,
+ cfs_waitlink_t *waiter)
{
struct lu_object_header *h;
- struct hlist_node *scan;
+ cfs_hlist_node_t *scan;
- hlist_for_each_entry(h, scan, bucket, loh_hash) {
+ cfs_hlist_for_each_entry(h, scan, bucket, loh_hash) {
s->ls_stats.s_cache_check ++;
- if (likely(lu_fid_eq(&h->loh_fid, f) &&
- !lu_object_is_dying(h))) {
+ if (likely(lu_fid_eq(&h->loh_fid, f))) {
+ if (unlikely(lu_object_is_dying(h))) {
+ /*
+ * Lookup found an object being destroyed;
+ * such an object cannot be returned (to
+ * ensure that references to dying objects
+ * are eventually drained), and moreover,
+ * the lookup has to wait until the object
+ * is freed.
+ */
+ cfs_waitlink_init(waiter);
+ cfs_waitq_add(&s->ls_marche_funebre, waiter);
+ cfs_set_current_state(CFS_TASK_UNINT);
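+ /* Queue first, then set the task state: a wake-up
+ * arriving after this point is not lost. */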
+ s->ls_stats.s_cache_death_race ++;
+ return ERR_PTR(-EAGAIN);
+ }
/* bump reference count... */
- if (atomic_add_return(1, &h->loh_ref) == 1)
+ if (cfs_atomic_add_return(1, &h->loh_ref) == 1)
++ s->ls_busy;
/* and move to the head of the LRU */
/*
{
/* all objects with the same id and different versions will belong to
 * the same collision list. */
- return hash_long(fid_flatten(f), bits);
+ return cfs_hash_long(fid_flatten(f), bits);
}
-/*
- * Search cache for an object with the fid @f. If such object is found, return
- * it. Otherwise, create new object, insert it into cache and return it. In
- * any case, additional reference is acquired on the returned object.
+/**
+ * Search cache for an object with the fid \a f. If such object is found,
+ * return it. Otherwise, create new object, insert it into cache and return
+ * it. In any case, additional reference is acquired on the returned object.
*/
struct lu_object *lu_object_find(const struct lu_env *env,
- struct lu_site *s, const struct lu_fid *f)
+ struct lu_device *dev, const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
+}
+EXPORT_SYMBOL(lu_object_find);
+
+/**
+ * Core logic of lu_object_find*() functions.
+ */
+static struct lu_object *lu_object_find_try(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf,
+ cfs_waitlink_t *waiter)
{
- struct lu_object *o;
- struct lu_object *shadow;
- struct hlist_head *bucket;
+ struct lu_site *s;
+ struct lu_object *o;
+ struct lu_object *shadow;
+ cfs_hlist_head_t *bucket;
/*
* This uses standard index maintenance protocol:
* object just allocated.
* - unlock index;
* - return object.
+ *
+ * If a dying object is found during index search, add \a waiter to the
+ * site wait-queue and return ERR_PTR(-EAGAIN).
*/
+ s = dev->ld_site;
bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits);
- read_lock(&s->ls_guard);
- o = htable_lookup(s, bucket, f);
- read_unlock(&s->ls_guard);
+ cfs_read_lock(&s->ls_guard);
+ o = htable_lookup(s, bucket, f, waiter);
+ cfs_read_unlock(&s->ls_guard);
if (o != NULL)
return o;
* Allocate new object. This may result in rather complicated
* operations, including fld queries, inode loading, etc.
*/
- o = lu_object_alloc(env, s, f);
+ o = lu_object_alloc(env, dev, f, conf);
if (unlikely(IS_ERR(o)))
return o;
LASSERT(lu_fid_eq(lu_object_fid(o), f));
- write_lock(&s->ls_guard);
- shadow = htable_lookup(s, bucket, f);
+ cfs_write_lock(&s->ls_guard);
+ shadow = htable_lookup(s, bucket, f, waiter);
if (likely(shadow == NULL)) {
- hlist_add_head(&o->lo_header->loh_hash, bucket);
- list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
+ cfs_hlist_add_head(&o->lo_header->loh_hash, bucket);
+ cfs_list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
++ s->ls_busy;
++ s->ls_total;
shadow = o;
o = NULL;
} else
s->ls_stats.s_cache_race ++;
- write_unlock(&s->ls_guard);
+ cfs_write_unlock(&s->ls_guard);
if (o != NULL)
lu_object_free(env, o);
return shadow;
}
-EXPORT_SYMBOL(lu_object_find);
-/*
+/**
+ * Much like lu_object_find(), but top level device of object is specifically
+ * \a dev rather than top level device of the site. This interface allows
+ * objects of different "stacking" to be created within the same site.
+ */
+struct lu_object *lu_object_find_at(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ struct lu_object *obj;
+ cfs_waitlink_t wait;
+
+ while (1) {
+ obj = lu_object_find_try(env, dev, f, conf, &wait);
+ if (obj == ERR_PTR(-EAGAIN)) {
+ /*
+ * lu_object_find_try() already added waiter into the
+ * wait queue.
+ */
+ cfs_waitq_wait(&wait, CFS_TASK_UNINT);
+ cfs_waitq_del(&dev->ld_site->ls_marche_funebre, &wait);
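+ /* The dying object should have been freed by
+ * now; retry the lookup. */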
+ } else
+ break;
+ }
+ return obj;
+}
+EXPORT_SYMBOL(lu_object_find_at);
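+
+/*
+ * A typical caller pattern might look as follows (an illustrative
+ * sketch only; error handling is elided):
+ *
+ *        obj = lu_object_find_at(env, dev, fid, conf);
+ *        if (!IS_ERR(obj)) {
+ *                ... use the object ...
+ *                lu_object_put(env, obj);
+ *        }
+ */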
+
+/**
+ * Find object with given fid, and return its slice belonging to given device.
+ */
+struct lu_object *lu_object_find_slice(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ struct lu_object *top;
+ struct lu_object *obj;
+
+ top = lu_object_find(env, dev, f, conf);
+ if (!IS_ERR(top)) {
+ obj = lu_object_locate(top->lo_header, dev->ld_type);
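+ /* No slice of the requested device type exists in this
+ * object: drop the reference and return NULL. */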
+ if (obj == NULL)
+ lu_object_put(env, top);
+ } else
+ obj = top;
+ return obj;
+}
+EXPORT_SYMBOL(lu_object_find_slice);
+
/**
* Global list of all device types.
*/
CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
result = ldt->ldt_ops->ldto_init(ldt);
if (result == 0)
- list_add(&ldt->ldt_linkage, &lu_device_types);
+ cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
return result;
}
EXPORT_SYMBOL(lu_device_type_init);
void lu_device_type_fini(struct lu_device_type *ldt)
{
- list_del_init(&ldt->ldt_linkage);
+ cfs_list_del_init(&ldt->ldt_linkage);
ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);
{
struct lu_device_type *ldt;
- list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
+ cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
if (ldt->ldt_device_nr == 0)
ldt->ldt_ops->ldto_stop(ldt);
}
* Global list of all sites on this node
*/
static CFS_LIST_HEAD(lu_sites);
-static DECLARE_MUTEX(lu_sites_guard);
+static CFS_DECLARE_MUTEX(lu_sites_guard);
/**
* Global environment used by site shrinker.
for (i = 0; i < s->ls_hash_size; ++i) {
struct lu_object_header *h;
- struct hlist_node *scan;
+ cfs_hlist_node_t *scan;
- read_lock(&s->ls_guard);
- hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
+ cfs_read_lock(&s->ls_guard);
+ cfs_hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
- if (!list_empty(&h->loh_layers)) {
+ if (!cfs_list_empty(&h->loh_layers)) {
const struct lu_object *obj;
obj = lu_object_top(h);
} else
lu_object_header_print(env, cookie, printer, h);
}
- read_unlock(&s->ls_guard);
+ cfs_read_unlock(&s->ls_guard);
}
}
EXPORT_SYMBOL(lu_site_print);
*
 * Size of lu_object is (arbitrarily) taken as 1K (together with inode).
*/
- cache_size = num_physpages;
+ cache_size = cfs_num_physpages;
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
return bits;
}
-static struct lock_class_key lu_site_guard_class;
+static cfs_lock_class_key_t lu_site_guard_class;
/**
* Initialize site \a s, with \a d as the top level device.
ENTRY;
memset(s, 0, sizeof *s);
- rwlock_init(&s->ls_guard);
- lockdep_set_class(&s->ls_guard, &lu_site_guard_class);
+ cfs_rwlock_init(&s->ls_guard);
+ cfs_lockdep_set_class(&s->ls_guard, &lu_site_guard_class);
CFS_INIT_LIST_HEAD(&s->ls_lru);
CFS_INIT_LIST_HEAD(&s->ls_linkage);
cfs_waitq_init(&s->ls_marche_funebre);
s->ls_hash_mask = size - 1;
for (i = 0; i < size; i++)
- INIT_HLIST_HEAD(&s->ls_hash[i]);
+ CFS_INIT_HLIST_HEAD(&s->ls_hash[i]);
RETURN(0);
}
*/
void lu_site_fini(struct lu_site *s)
{
- LASSERT(list_empty(&s->ls_lru));
+ LASSERT(cfs_list_empty(&s->ls_lru));
LASSERT(s->ls_total == 0);
- down(&lu_sites_guard);
- list_del_init(&s->ls_linkage);
- up(&lu_sites_guard);
+ cfs_down(&lu_sites_guard);
+ cfs_list_del_init(&s->ls_linkage);
+ cfs_up(&lu_sites_guard);
if (s->ls_hash != NULL) {
int i;
for (i = 0; i < s->ls_hash_size; i++)
- LASSERT(hlist_empty(&s->ls_hash[i]));
+ LASSERT(cfs_hlist_empty(&s->ls_hash[i]));
cfs_free_large(s->ls_hash);
s->ls_hash = NULL;
}
int lu_site_init_finish(struct lu_site *s)
{
int result;
- down(&lu_sites_guard);
+ cfs_down(&lu_sites_guard);
result = lu_context_refill(&lu_shrink_env.le_ctx);
if (result == 0)
- list_add(&s->ls_linkage, &lu_sites);
- up(&lu_sites_guard);
+ cfs_list_add(&s->ls_linkage, &lu_sites);
+ cfs_up(&lu_sites_guard);
return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
*/
void lu_device_get(struct lu_device *d)
{
- atomic_inc(&d->ld_ref);
+ cfs_atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);
*/
void lu_device_put(struct lu_device *d)
{
- LASSERT(atomic_read(&d->ld_ref) > 0);
- atomic_dec(&d->ld_ref);
+ LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
+ cfs_atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);
if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
t->ldt_ops->ldto_start(t);
memset(d, 0, sizeof *d);
- atomic_set(&d->ld_ref, 0);
+ cfs_atomic_set(&d->ld_ref, 0);
d->ld_type = t;
lu_ref_init(&d->ld_reference);
return 0;
struct lu_device_type *t;
t = d->ld_type;
- if (d->ld_obd != NULL)
- /* finish lprocfs */
- lprocfs_obd_cleanup(d->ld_obd);
+ if (d->ld_obd != NULL) {
+ d->ld_obd->obd_lu_dev = NULL;
+ d->ld_obd = NULL;
+ }
lu_ref_fini(&d->ld_reference);
- LASSERTF(atomic_read(&d->ld_ref) == 0,
- "Refcount is %u\n", atomic_read(&d->ld_ref));
+ LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
+ "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
LASSERT(t->ldt_device_nr > 0);
if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
t->ldt_ops->ldto_stop(t);
{
struct lu_device *dev = o->lo_dev;
- LASSERT(list_empty(&o->lo_linkage));
+ LASSERT(cfs_list_empty(&o->lo_linkage));
if (dev != NULL) {
lu_ref_del_at(&dev->ld_reference,
*/
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
- list_move(&o->lo_linkage, &h->loh_layers);
+ cfs_list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);
*/
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
- list_move(&o->lo_linkage, &before->lo_linkage);
+ cfs_list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
int lu_object_header_init(struct lu_object_header *h)
{
memset(h, 0, sizeof *h);
- atomic_set(&h->loh_ref, 1);
- INIT_HLIST_NODE(&h->loh_hash);
+ cfs_atomic_set(&h->loh_ref, 1);
+ CFS_INIT_HLIST_NODE(&h->loh_hash);
CFS_INIT_LIST_HEAD(&h->loh_lru);
CFS_INIT_LIST_HEAD(&h->loh_layers);
lu_ref_init(&h->loh_reference);
*/
void lu_object_header_fini(struct lu_object_header *h)
{
- LASSERT(list_empty(&h->loh_layers));
- LASSERT(list_empty(&h->loh_lru));
- LASSERT(hlist_unhashed(&h->loh_hash));
+ LASSERT(cfs_list_empty(&h->loh_layers));
+ LASSERT(cfs_list_empty(&h->loh_lru));
+ LASSERT(cfs_hlist_unhashed(&h->loh_hash));
lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
{
struct lu_object *o;
- list_for_each_entry(o, &h->loh_layers, lo_linkage) {
+ cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
if (o->lo_dev->ld_type == dtype)
return o;
}
/**
* Finalize and free devices in the device stack.
- *
+ *
* Finalize device stack by purging object cache, and calling
* lu_device_type_operations::ldto_device_fini() and
* lu_device_type_operations::ldto_device_free() on all devices in the stack.
/* purge again. */
lu_site_purge(env, site, ~0);
- if (!list_empty(&site->ls_lru) || site->ls_total != 0) {
+ if (!cfs_list_empty(&site->ls_lru) || site->ls_total != 0) {
/*
* Uh-oh, objects still exist.
*/
next = ldt->ldt_ops->ldto_device_free(env, scan);
type = ldt->ldt_obd_type;
if (type != NULL) {
- type->typ_refcnt--;
- class_put_type(type);
- }
+ type->typ_refcnt--;
+ class_put_type(type);
+ }
}
}
EXPORT_SYMBOL(lu_stack_fini);
static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
-static spinlock_t lu_keys_guard = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;
/**
* Global counter incremented whenever key is registered, unregistered,
LASSERT(key->lct_owner != NULL);
result = -ENFILE;
- spin_lock(&lu_keys_guard);
+ cfs_spin_lock(&lu_keys_guard);
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
if (lu_keys[i] == NULL) {
key->lct_index = i;
- atomic_set(&key->lct_used, 1);
+ cfs_atomic_set(&key->lct_used, 1);
lu_keys[i] = key;
lu_ref_init(&key->lct_reference);
result = 0;
break;
}
}
- spin_unlock(&lu_keys_guard);
+ cfs_spin_unlock(&lu_keys_guard);
return result;
}
EXPORT_SYMBOL(lu_context_key_register);
static void key_fini(struct lu_context *ctx, int index)
{
- if (ctx->lc_value[index] != NULL) {
+ if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
struct lu_context_key *key;
key = lu_keys[index];
LASSERT(key != NULL);
LASSERT(key->lct_fini != NULL);
- LASSERT(atomic_read(&key->lct_used) > 1);
+ LASSERT(cfs_atomic_read(&key->lct_used) > 1);
key->lct_fini(ctx, key, ctx->lc_value[index]);
lu_ref_del(&key->lct_reference, "ctx", ctx);
- atomic_dec(&key->lct_used);
+ cfs_atomic_dec(&key->lct_used);
LASSERT(key->lct_owner != NULL);
if (!(ctx->lc_tags & LCT_NOREF)) {
- LASSERT(module_refcount(key->lct_owner) > 0);
- module_put(key->lct_owner);
+ LASSERT(cfs_module_refcount(key->lct_owner) > 0);
+ cfs_module_put(key->lct_owner);
}
ctx->lc_value[index] = NULL;
}
*/
void lu_context_key_degister(struct lu_context_key *key)
{
- LASSERT(atomic_read(&key->lct_used) >= 1);
+ LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
+ lu_context_key_quiesce(key);
+
++key_set_version;
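+ /* Release the shrinker environment's value for this key and
+ * clear the key's slot, both under lu_keys_guard. */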
+ cfs_spin_lock(&lu_keys_guard);
key_fini(&lu_shrink_env.le_ctx, key->lct_index);
+ if (lu_keys[key->lct_index]) {
+ lu_keys[key->lct_index] = NULL;
+ lu_ref_fini(&key->lct_reference);
+ }
+ cfs_spin_unlock(&lu_keys_guard);
- if (atomic_read(&key->lct_used) > 1)
- CERROR("key has instances.\n");
- spin_lock(&lu_keys_guard);
- lu_keys[key->lct_index] = NULL;
- spin_unlock(&lu_keys_guard);
+ LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
+ "key has instances: %d\n",
+ cfs_atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
{
LINVRNT(ctx->lc_state == LCS_ENTERED);
LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
+ LASSERT(lu_keys[key->lct_index] == key);
return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);
void lu_context_key_quiesce(struct lu_context_key *key)
{
struct lu_context *ctx;
+ extern unsigned cl_env_cache_purge(unsigned nr);
if (!(key->lct_tags & LCT_QUIESCENT)) {
+ /*
+ * XXX layering violation.
+ */
+ cl_env_cache_purge(~0);
key->lct_tags |= LCT_QUIESCENT;
/*
* XXX memory barrier has to go here.
*/
- spin_lock(&lu_keys_guard);
- list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
+ cfs_spin_lock(&lu_keys_guard);
+ cfs_list_for_each_entry(ctx, &lu_context_remembered,
+ lc_remember)
key_fini(ctx, key->lct_index);
- spin_unlock(&lu_keys_guard);
+ cfs_spin_unlock(&lu_keys_guard);
++key_set_version;
}
}
{
int i;
+ cfs_spin_lock(&lu_keys_guard);
if (ctx->lc_value != NULL) {
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
key_fini(ctx, i);
ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
ctx->lc_value = NULL;
}
+ cfs_spin_unlock(&lu_keys_guard);
}
static int keys_fill(struct lu_context *ctx)
value = key->lct_init(ctx, key);
if (unlikely(IS_ERR(value)))
return PTR_ERR(value);
+
LASSERT(key->lct_owner != NULL);
if (!(ctx->lc_tags & LCT_NOREF))
- try_module_get(key->lct_owner);
+ cfs_try_module_get(key->lct_owner);
lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
- atomic_inc(&key->lct_used);
+ cfs_atomic_inc(&key->lct_used);
/*
 * This is the only place in the code where an
* element of ctx->lc_value[] array is set to non-NULL
ctx->lc_state = LCS_INITIALIZED;
ctx->lc_tags = tags;
if (tags & LCT_REMEMBER) {
- spin_lock(&lu_keys_guard);
- list_add(&ctx->lc_remember, &lu_context_remembered);
- spin_unlock(&lu_keys_guard);
+ cfs_spin_lock(&lu_keys_guard);
+ cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
+ cfs_spin_unlock(&lu_keys_guard);
} else
CFS_INIT_LIST_HEAD(&ctx->lc_remember);
return keys_init(ctx);
LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
ctx->lc_state = LCS_FINALIZED;
keys_fini(ctx);
- spin_lock(&lu_keys_guard);
- list_del_init(&ctx->lc_remember);
- spin_unlock(&lu_keys_guard);
+ cfs_spin_lock(&lu_keys_guard);
+ cfs_list_del_init(&ctx->lc_remember);
+ cfs_spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_fini);
}
EXPORT_SYMBOL(lu_context_refill);
-static int lu_env_setup(struct lu_env *env, struct lu_context *ses,
- __u32 tags, int noref)
+int lu_env_init(struct lu_env *env, __u32 tags)
{
int result;
- LINVRNT(ergo(!noref, !(tags & LCT_NOREF)));
-
- env->le_ses = ses;
+ env->le_ses = NULL;
result = lu_context_init(&env->le_ctx, tags);
if (likely(result == 0))
lu_context_enter(&env->le_ctx);
return result;
}
-
-static int lu_env_init_noref(struct lu_env *env, struct lu_context *ses,
- __u32 tags)
-{
- return lu_env_setup(env, ses, tags, 1);
-}
-
-int lu_env_init(struct lu_env *env, struct lu_context *ses, __u32 tags)
-{
- return lu_env_setup(env, ses, tags, 0);
-}
EXPORT_SYMBOL(lu_env_init);
void lu_env_fini(struct lu_env *env)
}
EXPORT_SYMBOL(lu_env_refill);
-static struct shrinker *lu_site_shrinker = NULL;
+static struct cfs_shrinker *lu_site_shrinker = NULL;
#ifdef __KERNEL__
static int lu_cache_shrink(int nr, unsigned int gfp_mask)
int remain = nr;
CFS_LIST_HEAD(splice);
- if (nr != 0 && !(gfp_mask & __GFP_FS))
- return -1;
+ if (nr != 0) {
+ if (!(gfp_mask & __GFP_FS))
+ return -1;
+ CDEBUG(D_INODE, "Shrink %d objects\n", nr);
+ }
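+ /* nr == 0 means the VM only asks how many objects are cached. */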
- down(&lu_sites_guard);
- list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+ cfs_down(&lu_sites_guard);
+ cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
if (nr != 0) {
remain = lu_site_purge(&lu_shrink_env, s, remain);
/*
 * Move the just-shrunk site to the tail of the site list to
 * ensure shrinking fairness.
*/
- list_move_tail(&s->ls_linkage, &splice);
+ cfs_list_move_tail(&s->ls_linkage, &splice);
}
- read_lock(&s->ls_guard);
+ cfs_read_lock(&s->ls_guard);
cached += s->ls_total - s->ls_busy;
- read_unlock(&s->ls_guard);
- if (remain <= 0)
+ cfs_read_unlock(&s->ls_guard);
+ if (nr && remain <= 0)
break;
}
- list_splice(&splice, lu_sites.prev);
- up(&lu_sites_guard);
+ cfs_list_splice(&splice, lu_sites.prev);
+ cfs_up(&lu_sites_guard);
+
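+ /* Scale the estimate by vfs_cache_pressure, as other kernel
+ * caches do (100 is the neutral default). */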
+ cached = (cached / 100) * sysctl_vfs_cache_pressure;
+ if (nr == 0)
+ CDEBUG(D_INODE, "%d objects cached\n", cached);
return cached;
}
+/*
+ * Debugging stuff.
+ */
+
+/**
+ * Environment to be used in debugger, contains all tags.
+ */
+struct lu_env lu_debugging_env;
+
+/**
+ * Debugging printer function using printk().
+ */
+int lu_printk_printer(const struct lu_env *env,
+ void *unused, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vprintk(format, args);
+ va_end(args);
+ return 0;
+}
+
+void lu_debugging_setup(void)
+{
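+ /* ~0: enable every context tag, so that any key can be
+ * used from the debugger. */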
+ lu_env_init(&lu_debugging_env, ~0);
+}
+
+void lu_context_keys_dump(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
+ struct lu_context_key *key;
+
+ key = lu_keys[i];
+ if (key != NULL) {
+ CERROR("[%i]: %p %x (%p,%p,%p) %i %i \"%s\"@%p\n",
+ i, key, key->lct_tags,
+ key->lct_init, key->lct_fini, key->lct_exit,
+ key->lct_index, cfs_atomic_read(&key->lct_used),
+ key->lct_owner ? key->lct_owner->name : "",
+ key->lct_owner);
+ lu_ref_print(&key->lct_reference);
+ }
+ }
+}
+EXPORT_SYMBOL(lu_context_keys_dump);
#else /* !__KERNEL__ */
static int lu_cache_shrink(int nr, unsigned int gfp_mask)
{
}
#endif /* __KERNEL__ */
+int cl_global_init(void);
+void cl_global_fini(void);
int lu_ref_global_init(void);
void lu_ref_global_fini(void);
+int dt_global_init(void);
+void dt_global_fini(void);
+
+int llo_global_init(void);
+void llo_global_fini(void);
+
/**
* Initialization of global lu_* data.
*/
CDEBUG(D_CONSOLE, "Lustre LU module (%p).\n", &lu_keys);
+ result = lu_ref_global_init();
+ if (result != 0)
+ return result;
+
LU_CONTEXT_KEY_INIT(&lu_global_key);
result = lu_context_key_register(&lu_global_key);
if (result != 0)
return result;
- /*
+ /*
* At this level, we don't know what tags are needed, so allocate them
* conservatively. This should not be too bad, because this
* environment is global.
- */
- down(&lu_sites_guard);
- result = lu_env_init_noref(&lu_shrink_env, NULL, LCT_SHRINKER);
- up(&lu_sites_guard);
+ */
+ cfs_down(&lu_sites_guard);
+ result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
+ cfs_up(&lu_sites_guard);
if (result != 0)
return result;
- result = lu_ref_global_init();
- if (result != 0)
- return result;
- /*
+ /*
 * seeks estimation: 3 seeks to read a record from oi, one to read
 * inode, one for ea. Unfortunately, setting such a high value results
 * in the lu_object/inode cache consuming all the memory.
*/
- lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
+ lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
if (lu_site_shrinker == NULL)
return -ENOMEM;
- result = lu_time_global_init();
+ result = lu_time_global_init();
+ if (result)
+ GOTO(out, result);
+
+#ifdef __KERNEL__
+ result = dt_global_init();
+ if (result)
+ GOTO(out, result);
+
+ result = llo_global_init();
+ if (result)
+ GOTO(out, result);
+#endif
+ result = cl_global_init();
+out:
+
return result;
}
*/
void lu_global_fini(void)
{
+ cl_global_fini();
+#ifdef __KERNEL__
+ llo_global_fini();
+ dt_global_fini();
+#endif
lu_time_global_fini();
if (lu_site_shrinker != NULL) {
- remove_shrinker(lu_site_shrinker);
+ cfs_remove_shrinker(lu_site_shrinker);
lu_site_shrinker = NULL;
}
* Tear shrinker environment down _after_ de-registering
* lu_global_key, because the latter has a value in the former.
*/
- down(&lu_sites_guard);
+ cfs_down(&lu_sites_guard);
lu_env_fini(&lu_shrink_env);
- up(&lu_sites_guard);
+ cfs_up(&lu_sites_guard);
lu_ref_global_fini();
}
};
EXPORT_SYMBOL(LU_BUF_NULL);
-/*
- * XXX: Functions below logically belong to fid module, but they are used by
- * dt_store_open(). Put them here until better place is found.
+/**
+ * Output site statistical counters into a buffer. Suitable for
+ * lprocfs_rd_*()-style functions.
*/
-
-void fid_pack(struct lu_fid_pack *pack, const struct lu_fid *fid,
- struct lu_fid *befider)
+int lu_site_stats_print(const struct lu_site *s, char *page, int count)
{
- int recsize;
- __u64 seq;
- __u32 oid;
-
- seq = fid_seq(fid);
- oid = fid_oid(fid);
-
- /*
- * Two cases: compact 6 bytes representation for a common case, and
- * full 17 byte representation for "unusual" fid.
- */
+ int i;
+ int populated;
/*
- * Check that usual case is really usual.
+ * How many hash buckets are non-empty? Don't bother with locks: it's
+ * an estimate anyway.
*/
- CLASSERT(LUSTRE_SEQ_MAX_WIDTH < 0xffffull);
-
- if (fid_is_igif(fid) ||
- seq > 0xffffffull || oid > 0xffff || fid_ver(fid) != 0) {
- fid_cpu_to_be(befider, fid);
- recsize = sizeof *befider;
- } else {
- unsigned char *small_befider;
-
- small_befider = (char *)befider;
-
- small_befider[0] = seq >> 16;
- small_befider[1] = seq >> 8;
- small_befider[2] = seq;
-
- small_befider[3] = oid >> 8;
- small_befider[4] = oid;
-
- recsize = 5;
- }
- memcpy(pack->fp_area, befider, recsize);
- pack->fp_len = recsize + 1;
-}
-EXPORT_SYMBOL(fid_pack);
-
-int fid_unpack(const struct lu_fid_pack *pack, struct lu_fid *fid)
-{
- int result;
-
- result = 0;
- switch (pack->fp_len) {
- case sizeof *fid + 1:
- memcpy(fid, pack->fp_area, sizeof *fid);
- fid_be_to_cpu(fid, fid);
- break;
- case 6: {
- const unsigned char *area;
-
- area = pack->fp_area;
- fid->f_seq = (area[0] << 16) | (area[1] << 8) | area[2];
- fid->f_oid = (area[3] << 8) | area[4];
- fid->f_ver = 0;
- break;
- }
- default:
- CERROR("Unexpected packed fid size: %d\n", pack->fp_len);
- result = -EIO;
- }
- return result;
+ for (i = 0, populated = 0; i < s->ls_hash_size; i++)
+ populated += !cfs_hlist_empty(&s->ls_hash[i]);
+
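+ /* Columns: total busy populated/hash_size created cache_hit
+ * cache_miss cache_check cache_race cache_death_race lru_purged */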
+ return snprintf(page, count, "%d %d %d/%d %d %d %d %d %d %d %d\n",
+ s->ls_total,
+ s->ls_busy,
+ populated,
+ s->ls_hash_size,
+ s->ls_stats.s_created,
+ s->ls_stats.s_cache_hit,
+ s->ls_stats.s_cache_miss,
+ s->ls_stats.s_cache_check,
+ s->ls_stats.s_cache_race,
+ s->ls_stats.s_cache_death_race,
+ s->ls_stats.s_lru_purged);
}
-EXPORT_SYMBOL(fid_unpack);
+EXPORT_SYMBOL(lu_site_stats_print);
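+
+/*
+ * A read handler for lprocfs could be built on top of this along the
+ * following lines (a sketch; the handler name and how the site is
+ * obtained from \a data are illustrative only):
+ *
+ *        static int foo_rd_site_stats(char *page, char **start, off_t off,
+ *                                     int count, int *eof, void *data)
+ *        {
+ *                struct lu_site *s = data;
+ *
+ *                *eof = 1;
+ *                return lu_site_stats_print(s, page, count);
+ *        }
+ */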
const char *lu_time_names[LU_TIME_NR] = {
[LU_TIME_FIND_LOOKUP] = "find_lookup",
}
}
EXPORT_SYMBOL(lu_kmem_fini);
-