}
+/*
+ * cmm_device_shutdown(): forward a cleanup config to every child mdc.
+ *
+ * The old code detached, finalized and freed each target right here;
+ * that work moved to cmm_device_fini(), so shutdown now only hands
+ * @cfg (an LCFG_CLEANUP command) to each target through its
+ * ldo_process_config() method, under cmm_tgt_guard.
+ */
static void cmm_device_shutdown(const struct lu_env *env,
- struct cmm_device *cm)
+ struct cmm_device *cm,
+ struct lustre_cfg *cfg)
{
struct mdc_device *mc, *tmp;
ENTRY;
spin_lock(&cm->cmm_tgt_guard);
list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
struct lu_device *ld_m = mdc2lu_dev(mc);
-
- list_del_init(&mc->mc_linkage);
- lu_device_put(cmm2lu_dev(cm));
- ld_m->ld_type->ldt_ops->ldto_device_fini(env, ld_m);
- ld_m->ld_type->ldt_ops->ldto_device_free(env, ld_m);
- cm->cmm_tgt_count--;
+ /* NOTE(review): the return value of ldo_process_config() is
+ * dropped here — presumably errors are ignorable during
+ * cleanup; confirm against the other callers. */
+ ld_m->ld_ops->ldo_process_config(env, ld_m, cfg);
}
spin_unlock(&cm->cmm_tgt_guard);
}
case LCFG_CLEANUP:
{
- cmm_device_shutdown(env, m);
+ /* shutdown now also forwards the config to the child mdc devices */
+ cmm_device_shutdown(env, m, cfg);
}
+ /* fallthrough: LCFG_CLEANUP is passed on to the next layer below */
default:
err = next->ld_ops->ldo_process_config(env, next, cfg);
struct lu_device *ld)
{
struct cmm_device *cm = lu2cmm_dev(ld);
+ struct mdc_device *mc, *tmp;
ENTRY;
+ /* Detach, fini and free every registered mdc target — this teardown
+ * loop moved here from cmm_device_shutdown(). Each target pinned the
+ * cmm device, so one reference is dropped per target via
+ * lu_device_put(), and cmm_tgt_count is decremented to match. */
+ spin_lock(&cm->cmm_tgt_guard);
+ list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
+ struct lu_device *ld_m = mdc2lu_dev(mc);
+
+ list_del_init(&mc->mc_linkage);
+ lu_device_put(cmm2lu_dev(cm));
+ ld_m->ld_type->ldt_ops->ldto_device_fini(env, ld_m);
+ ld_m->ld_type->ldt_ops->ldto_device_free(env, ld_m);
+ cm->cmm_tgt_count--;
+ }
+ spin_unlock(&cm->cmm_tgt_guard);
+
RETURN (md2lu_dev(cm->cmm_child));
}
case LCFG_ADD_MDC:
rc = mdc_add_obd(env, mc, cfg);
break;
+ /* Cleanup now arrives as a config command (forwarded by
+ * cmm_device_shutdown) instead of being done in mdc_device_fini(),
+ * which no longer calls mdc_del_obd() itself. */
+ case LCFG_CLEANUP:
+ rc = mdc_del_obd(mc);
+ break;
default:
rc = -EOPNOTSUPP;
}
+/*
+ * mdc_device_fini(): nothing left to do — the obd teardown that used
+ * to happen here (mdc_del_obd) is now driven by the LCFG_CLEANUP case
+ * of process_config. Returns NULL: this device has no child to hand
+ * back to the caller.
+ */
static struct lu_device *mdc_device_fini(const struct lu_env *env,
struct lu_device *ld)
{
- struct mdc_device *mc = lu2mdc_dev(ld);
-
ENTRY;
-
- mdc_del_obd(mc);
-
RETURN (NULL);
}
{
struct mdc_device *mc = lu2mdc_dev(ld);
+ /* LASSERT -> LASSERTF so a refcount leak reports the actual count
+ * instead of only the failed expression, easing debugging. */
- LASSERT(atomic_read(&ld->ld_ref) == 0);
+ LASSERTF(atomic_read(&ld->ld_ref) == 0, "Refcount = %i\n", atomic_read(&ld->ld_ref));
LASSERT(list_empty(&mc->mc_linkage));
md_device_fini(&mc->mc_md_dev);
OBD_FREE_PTR(mc);