* Flags from enum lu_object_flags.
*/
unsigned long lo_flags;
+ /**
+ * Link to the device, for debugging.
+ */
+ struct lu_ref_link *lo_dev_ref;
};
enum lu_object_header_flags {
return o->lo_header->loh_attr;
}
+/**
+ * Take a named debug reference on the object's header reference set
+ * (\a scope / \a source identify the holder).  Returns the link so the
+ * reference can later be released precisely with lu_object_ref_del_at().
+ */
+static inline struct lu_ref_link *lu_object_ref_add(struct lu_object *o,
+ const char *scope,
+ const void *source)
+{
+ return lu_ref_add(&o->lo_header->loh_reference, scope, source);
+}
+
+/**
+ * Release a debug reference previously taken with lu_object_ref_add(),
+ * matched by \a scope and \a source.
+ */
+static inline void lu_object_ref_del(struct lu_object *o,
+ const char *scope, const void *source)
+{
+ lu_ref_del(&o->lo_header->loh_reference, scope, source);
+}
+
+/**
+ * Release a debug reference through its \a link handle (as returned by
+ * lu_object_ref_add()); \a scope and \a source are passed through for
+ * consistency checking by the lu_ref machinery.
+ */
+static inline void lu_object_ref_del_at(struct lu_object *o,
+ struct lu_ref_link *link,
+ const char *scope, const void *source)
+{
+ lu_ref_del_at(&o->lo_header->loh_reference, link, scope, source);
+}
+
struct lu_rdpg {
/* input params, should be filled out by mdt */
__u64 rp_hash; /* hash */
LU_CACHE_PERCENT = 20,
};
-/*
+/**
* Return desired hash table order.
*/
static int lu_htable_order(void)
return bits;
}
-/*
- * Initialize site @s, with @d as the top level device.
+static struct lock_class_key lu_site_guard_class;
+
+/**
+ * Initialize site \a s, with \a d as the top level device.
*/
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
memset(s, 0, sizeof *s);
rwlock_init(&s->ls_guard);
+ lockdep_set_class(&s->ls_guard, &lu_site_guard_class);
CFS_INIT_LIST_HEAD(&s->ls_lru);
CFS_INIT_LIST_HEAD(&s->ls_linkage);
+ cfs_waitq_init(&s->ls_marche_funebre);
s->ls_top_dev = top;
top->ld_site = s;
lu_device_get(top);
+ lu_ref_add(&top->ld_reference, "site-top", s);
for (bits = lu_htable_order(), size = 1 << bits;
(s->ls_hash =
}
EXPORT_SYMBOL(lu_site_init);
-/*
- * Finalize @s and release its resources.
+/**
+ * Finalize \a s and release its resources.
*/
void lu_site_fini(struct lu_site *s)
{
}
if (s->ls_top_dev != NULL) {
s->ls_top_dev->ld_site = NULL;
+ lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
lu_device_put(s->ls_top_dev);
s->ls_top_dev = NULL;
}
}
EXPORT_SYMBOL(lu_site_fini);
-/*
+/**
* Called when initialization of stack for this site is completed.
*/
int lu_site_init_finish(struct lu_site *s)
o->lo_header = h;
o->lo_dev = d;
lu_device_get(d);
+ o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
CFS_INIT_LIST_HEAD(&o->lo_linkage);
return 0;
}
EXPORT_SYMBOL(lu_object_init);
-/*
+/**
* Finalize object and release its resources.
*/
void lu_object_fini(struct lu_object *o)
{
+ struct lu_device *dev = o->lo_dev;
+
LASSERT(list_empty(&o->lo_linkage));
- if (o->lo_dev != NULL) {
- lu_device_put(o->lo_dev);
+ if (dev != NULL) {
+ /* Drop the "lu_object" debug reference taken in lu_object_init()
+ * before releasing the device reference itself. */
+ lu_ref_del_at(&dev->ld_reference,
+ o->lo_dev_ref, "lu_object", o);
+ lu_device_put(dev);
o->lo_dev = NULL;
}
}
EXPORT_SYMBOL(lu_object_fini);
-/*
- * Add object @o as first layer of compound object @h
+/**
+ * Add object \a o as first layer of compound object \a h
*
* This is typically called by the ->ldo_object_alloc() method of top-level
* device.
INIT_HLIST_NODE(&h->loh_hash);
CFS_INIT_LIST_HEAD(&h->loh_lru);
CFS_INIT_LIST_HEAD(&h->loh_layers);
+ lu_ref_init(&h->loh_reference);
return 0;
}
EXPORT_SYMBOL(lu_object_header_init);
-/*
+/**
* Finalize compound object.
*/
void lu_object_header_fini(struct lu_object_header *h)
LASSERT(list_empty(&h->loh_layers));
LASSERT(list_empty(&h->loh_lru));
LASSERT(hlist_unhashed(&h->loh_hash));
+ lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
-/*
+/**
* Given a compound object, find its slice, corresponding to the device type
- * @dtype.
+ * \a dtype.
*/
struct lu_object *lu_object_locate(struct lu_object_header *h,
- struct lu_device_type *dtype)
+ const struct lu_device_type *dtype)
{
struct lu_object *o;
-/*
+/**
* Finalize and free devices in the device stack.
*
* Finalize device stack by purging object cache, and calling
lu_site_purge(env, site, ~0);
for (scan = top; scan != NULL; scan = next) {
next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
+ lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
lu_device_put(scan);
}
struct thandle ot_super;
handle_t *ot_handle;
struct journal_callback ot_jcb;
+ /* Link to the device, for debugging. */
+ struct lu_ref_link *ot_dev_link;
+
};
/*
struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
struct thandle *th = &oh->ot_super;
struct dt_device *dev = th->th_dev;
+ struct lu_device *lud = &dev->dd_lu_dev;
LASSERT(dev != NULL);
LASSERT(oh->ot_handle == NULL);
lu_context_exit(&env->le_ctx);
}
- lu_device_put(&dev->dd_lu_dev);
+ lu_ref_del_at(&lud->ld_reference, oh->ot_dev_link, "osd-tx", th);
+ lu_device_put(lud);
th->th_dev = NULL;
lu_context_exit(&th->th_ctx);
if (osd_param_is_sane(dev, p)) {
OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
if (oh != NULL) {
+ struct osd_thread_info *oti = osd_oti_get(env);
+
/*
* XXX temporary stuff. Some abstraction layer should
* be used.
th->th_result = 0;
jh->h_sync = p->tp_sync;
lu_device_get(&d->dd_lu_dev);
+ oh->ot_dev_link = lu_ref_add
+ (&d->dd_lu_dev.ld_reference,
+ "osd-tx", th);
/* add commit callback */
lu_context_init(&th->th_ctx, LCT_TX_HANDLE);
lu_context_enter(&th->th_ctx);
journal_callback_set(jh, osd_trans_commit_cb,
(struct journal_callback *)&oh->ot_jcb);
-#if OSD_COUNTERS
- {
- struct osd_thread_info *oti =
- osd_oti_get(env);
-
LASSERT(oti->oti_txns == 0);
LASSERT(oti->oti_r_locks == 0);
LASSERT(oti->oti_w_locks == 0);
oti->oti_txns++;
- }
-#endif
} else {
OBD_FREE_PTR(oh);
th = (void *)jh;