struct list_head *layers;
int clean;
int result;
+ ENTRY;
/*
* Create top-level object slice. This will also create
* lu_object_header.
*/
top = s->ls_top_dev->ld_ops->ldo_object_alloc(env,
NULL, s->ls_top_dev);
- if (IS_ERR(top))
- RETURN(top);
+ if (top == NULL)
+ RETURN(ERR_PTR(-ENOMEM));
/*
* This is the only place where object fid is assigned. It's constant
* after this point.
*
* NOTE(review): the layer splice below appears to be
* necessary, because lu_object_header is freed together with the
* top-level slice.
*/
- INIT_LIST_HEAD(&splice);
+ CFS_INIT_LIST_HEAD(&splice);
list_splice_init(&o->lo_header->loh_layers, &splice);
while (!list_empty(&splice)) {
o = container_of0(splice.next, struct lu_object, lo_linkage);
struct lu_object_header *h;
struct lu_object_header *temp;
- INIT_LIST_HEAD(&dispose);
+ CFS_INIT_LIST_HEAD(&dispose);
/*
* Under LRU list lock, scan LRU list and move unreferenced objects to
* the dispose list, removing them from LRU and hash table.
/*
* Global list of all sites on this node
*/
-static LIST_HEAD(lu_sites);
+static CFS_LIST_HEAD(lu_sites);
static DECLARE_MUTEX(lu_sites_guard);
/*
*
* Size of lu_object is (arbitrary) taken as 1K (together with inode).
*/
- cache_size = nr_free_buffer_pages() / 100 *
+ cache_size = ll_nr_free_buffer_pages() / 100 *
LU_CACHE_PERCENT * (CFS_PAGE_SIZE / 1024);
for (bits = 1; (1 << bits) < cache_size; ++bits) {
}
EXPORT_SYMBOL(lu_object_locate);
+
+
+/*
+ * Finalize and free devices in the device stack.
+ *
+ * Finalize device stack by purging object cache, and calling
+ * lu_device_type_operations::ldto_device_fini() and
+ * lu_device_type_operations::ldto_device_free() on all devices in the stack.
+ *
+ * \param env execution environment passed through to the ldto callbacks
+ * \param top top-most device of the stack; the walk proceeds downward
+ *            from here via the value each ldto callback returns.
+ */
+void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
+{
+ struct lu_site *site = top->ld_site;
+ struct lu_device *scan;
+ struct lu_device *next;
+
+ /* Evict every cached object (~0 == all) so no slice still pins a
+ * device we are about to finalize. */
+ lu_site_purge(env, site, ~0);
+ for (scan = top; scan != NULL; scan = next) {
+ /* ldto_device_fini() returns the next device to process —
+ * presumably the one below in the stack; confirm against the
+ * ldto_device_fini contract. */
+ next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
+ lu_device_put(scan);
+ }
+
+ /* purge again. */
+ lu_site_purge(env, site, ~0);
+
+ if (!list_empty(&site->ls_lru) || site->ls_total != 0) {
+ /*
+ * Uh-oh, objects still exist. Dump the remaining objects at
+ * D_ERROR so the leak is diagnosable; teardown continues
+ * regardless.
+ */
+ static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);
+
+ lu_site_print(env, site, &cookie, lu_cdebug_printer);
+ }
+
+ /* Second pass: free each (already finalized) device and drop the
+ * obd_type reference taken when the stack was set up. */
+ for (scan = top; scan != NULL; scan = next) {
+ const struct lu_device_type *ldt = scan->ld_type;
+ struct obd_type *type;
+
+ next = ldt->ldt_ops->ldto_device_free(env, scan);
+ type = ldt->ldt_obd_type;
+ type->typ_refcnt--;
+ class_put_type(type);
+ }
+}
+EXPORT_SYMBOL(lu_stack_fini);
+
enum {
/*
* Maximal number of tld slots.
void *lu_context_key_get(const struct lu_context *ctx,
struct lu_context_key *key)
{
+ /* New life-cycle check: key values may only be fetched while the
+ * context is entered (between lu_context_enter()/_exit()). */
+ LASSERT(ctx->lc_state == LCS_ENTERED);
LASSERT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
return ctx->lc_value[key->lct_index];
}
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
memset(ctx, 0, sizeof *ctx);
+ /* Record life-cycle state; checked by the LASSERTs in
+ * lu_context_enter()/exit()/fini() and lu_context_key_get(). */
+ ctx->lc_state = LCS_INITIALIZED;
ctx->lc_tags = tags;
return keys_init(ctx);
}
*/
void lu_context_fini(struct lu_context *ctx)
{
+ /* A context may be finalized freshly initialized or after it has
+ * been exited — never while it is still entered. */
+ LASSERT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
+ ctx->lc_state = LCS_FINALIZED;
keys_fini(ctx);
}
EXPORT_SYMBOL(lu_context_fini);
*/
void lu_context_enter(struct lu_context *ctx)
{
+ /* Entering is legal from the initialized state or after a previous
+ * exit; re-entering an already-entered context is a bug. */
+ LASSERT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
+ ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);
{
int i;
+ LASSERT(ctx->lc_state == LCS_ENTERED);
+ ctx->lc_state = LCS_LEFT;
if (ctx->lc_value != NULL) {
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
if (ctx->lc_value[i] != NULL) {
struct lu_site *tmp;
int cached = 0;
int remain = nr;
- LIST_HEAD(splice);
+ CFS_LIST_HEAD(splice);
if (nr != 0 && !(gfp_mask & __GFP_FS))
return -1;