X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fcmm%2Fcmm_device.c;h=2105222e1e0fdc608f9036159fbe672b11a34106;hb=d2b8a0efaa4b5faea675bd4bd4bfe1f80dad4011;hp=a74fdf46217ed3e79b32b975c07032f85478269f;hpb=90d8e7fd28746a572c8de488222f5312fe927fc3;p=fs%2Flustre-release.git

diff --git a/lustre/cmm/cmm_device.c b/lustre/cmm/cmm_device.c
index a74fdf4..2105222 100644
--- a/lustre/cmm/cmm_device.c
+++ b/lustre/cmm/cmm_device.c
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -39,7 +39,10 @@
  *
  * Author: Mike Pershin
  */
-
+/**
+ * \addtogroup cmm
+ * @{
+ */
 #ifndef EXPORT_SYMTAB
 # define EXPORT_SYMTAB
 #endif
@@ -57,7 +60,7 @@
 # include
 #endif

-static struct obd_ops cmm_obd_device_ops = {
+struct obd_ops cmm_obd_device_ops = {
         .o_owner = THIS_MODULE
 };

@@ -65,7 +68,7 @@ static const struct lu_device_operations cmm_lu_ops;

 static inline int lu_device_is_cmm(struct lu_device *d)
 {
-        return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &cmm_lu_ops);
+        return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &cmm_lu_ops);
 }

 int cmm_root_get(const struct lu_env *env, struct md_device *md,
@@ -81,10 +84,10 @@ int cmm_root_get(const struct lu_env *env, struct md_device *md,
 }

 static int cmm_statfs(const struct lu_env *env, struct md_device *md,
-                      struct kstatfs *sfs)
+                      cfs_kstatfs_t *sfs)
 {
         struct cmm_device *cmm_dev = md2cmm_dev(md);
-        int rc;
+        int rc;
         ENTRY;

         rc = cmm_child_ops(cmm_dev)->mdo_statfs(env,
@@ -130,17 +133,29 @@ static int cmm_update_capa_key(const struct lu_env *env,
         RETURN(rc);
 }

+static int cmm_llog_ctxt_get(const struct lu_env *env, struct md_device *m,
+                             int idx, void **h)
+{
+        struct cmm_device *cmm_dev = md2cmm_dev(m);
+        int rc;
+        ENTRY;
+
+        rc = cmm_child_ops(cmm_dev)->mdo_llog_ctxt_get(env, cmm_dev->cmm_child,
+                                                       idx, h);
+        RETURN(rc);
+}
+
 #ifdef HAVE_QUOTA_SUPPORT
+/**
+ * \name Quota functions
+ * @{
+ */
 static int cmm_quota_notify(const struct lu_env *env, struct md_device *m)
 {
         struct cmm_device *cmm_dev = md2cmm_dev(m);
         int rc;
         ENTRY;

-        /* disable quota for CMD case temporary. */
-        if (cmm_dev->cmm_tgt_count)
-                RETURN(-EOPNOTSUPP);
-
         rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_notify(env,
                                                           cmm_dev->cmm_child);
         RETURN(rc);
@@ -153,10 +168,6 @@ static int cmm_quota_setup(const struct lu_env *env, struct md_device *m,
         int rc;
         ENTRY;

-        /* disable quota for CMD case temporary. */
-        if (cmm_dev->cmm_tgt_count)
-                RETURN(-EOPNOTSUPP);
-
         rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_setup(env,
                                                          cmm_dev->cmm_child,
                                                          data);
@@ -169,10 +180,6 @@ static int cmm_quota_cleanup(const struct lu_env *env, struct md_device *m)
         int rc;
         ENTRY;

-        /* disable quota for CMD case temporary. */
-        if (cmm_dev->cmm_tgt_count)
-                RETURN(-EOPNOTSUPP);
-
         rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_cleanup(env,
                                                            cmm_dev->cmm_child);
         RETURN(rc);
@@ -184,17 +191,13 @@ static int cmm_quota_recovery(const struct lu_env *env, struct md_device *m)
         int rc;
         ENTRY;

-        /* disable quota for CMD case temporary. */
-        if (cmm_dev->cmm_tgt_count)
-                RETURN(-EOPNOTSUPP);
-
         rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_recovery(env,
                                                             cmm_dev->cmm_child);
         RETURN(rc);
 }

 static int cmm_quota_check(const struct lu_env *env, struct md_device *m,
-                           struct obd_export *exp, __u32 type)
+                           __u32 type)
 {
         struct cmm_device *cmm_dev = md2cmm_dev(m);
         int rc;
@@ -206,12 +209,12 @@ static int cmm_quota_check(const struct lu_env *env, struct md_device *m,

         rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_check(env,
                                                          cmm_dev->cmm_child,
-                                                         exp, type);
+                                                         type);
         RETURN(rc);
 }

 static int cmm_quota_on(const struct lu_env *env, struct md_device *m,
-                        __u32 type, __u32 id)
+                        __u32 type)
 {
         struct cmm_device *cmm_dev = md2cmm_dev(m);
         int rc;
@@ -223,24 +226,20 @@ static int cmm_quota_on(const struct lu_env *env, struct md_device *m,

         rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_on(env,
                                                       cmm_dev->cmm_child,
-                                                      type, id);
+                                                      type);
         RETURN(rc);
 }

 static int cmm_quota_off(const struct lu_env *env, struct md_device *m,
-                         __u32 type, __u32 id)
+                         __u32 type)
 {
         struct cmm_device *cmm_dev = md2cmm_dev(m);
         int rc;
         ENTRY;

-        /* disable quota for CMD case temporary. */
-        if (cmm_dev->cmm_tgt_count)
-                RETURN(-EOPNOTSUPP);
-
         rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_off(env,
                                                        cmm_dev->cmm_child,
-                                                       type, id);
+                                                       type);
         RETURN(rc);
 }

@@ -383,14 +382,29 @@ static int cmm_quota_finvalidate(const struct lu_env *env, struct md_device *m,
                                                              type);
         RETURN(rc);
 }
+/** @} */
 #endif

+int cmm_iocontrol(const struct lu_env *env, struct md_device *m,
+                  unsigned int cmd, int len, void *data)
+{
+        struct md_device *next = md2cmm_dev(m)->cmm_child;
+        int rc;
+
+        ENTRY;
+        rc = next->md_ops->mdo_iocontrol(env, next, cmd, len, data);
+        RETURN(rc);
+}
+
+
 static const struct md_device_operations cmm_md_ops = {
         .mdo_statfs          = cmm_statfs,
         .mdo_root_get        = cmm_root_get,
         .mdo_maxsize_get     = cmm_maxsize_get,
         .mdo_init_capa_ctxt  = cmm_init_capa_ctxt,
         .mdo_update_capa_key = cmm_update_capa_key,
+        .mdo_llog_ctxt_get   = cmm_llog_ctxt_get,
+        .mdo_iocontrol       = cmm_iocontrol,
 #ifdef HAVE_QUOTA_SUPPORT
         .mdo_quota           = {
                 .mqo_notify      = cmm_quota_notify,
@@ -413,7 +427,9 @@ static const struct md_device_operations cmm_md_ops = {
 };

 extern struct lu_device_type mdc_device_type;
-
+/**
+ * Init MDC.
+ */
 static int cmm_post_init_mdc(const struct lu_env *env,
                              struct cmm_device *cmm)
 {
@@ -422,16 +438,16 @@ static int cmm_post_init_mdc(const struct lu_env *env,

         /* get the max mdsize and cookiesize from lower layer */
         rc = cmm_maxsize_get(env, &cmm->cmm_md_dev, &max_mdsize,
-                             &max_cookiesize);
+                             &max_cookiesize);
         if (rc)
                 RETURN(rc);

-        spin_lock(&cmm->cmm_tgt_guard);
-        list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets,
-                                 mc_linkage) {
+        cfs_spin_lock(&cmm->cmm_tgt_guard);
+        cfs_list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets,
+                                     mc_linkage) {
                 cmm_mdc_init_ea_size(env, mc, max_mdsize, max_cookiesize);
         }
-        spin_unlock(&cmm->cmm_tgt_guard);
+        cfs_spin_unlock(&cmm->cmm_tgt_guard);

         RETURN(rc);
 }
@@ -447,7 +463,11 @@ static int cmm_add_mdc(const struct lu_env *env,
         struct lu_device *ld;
         struct lu_device *cmm_lu = cmm2lu_dev(cm);
         mdsno_t mdc_num;
+        struct lu_site *site = cmm2lu_dev(cm)->ld_site;
         int rc;
+#ifdef HAVE_QUOTA_SUPPORT
+        int first;
+#endif
         ENTRY;

         /* find out that there is no such mdc */
@@ -458,20 +478,20 @@ static int cmm_add_mdc(const struct lu_env *env,
                 RETURN(-EINVAL);
         }

-        spin_lock(&cm->cmm_tgt_guard);
-        list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
-                                 mc_linkage) {
+        cfs_spin_lock(&cm->cmm_tgt_guard);
+        cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
+                                     mc_linkage) {
                 if (mc->mc_num == mdc_num) {
-                        spin_unlock(&cm->cmm_tgt_guard);
+                        cfs_spin_unlock(&cm->cmm_tgt_guard);
                         RETURN(-EEXIST);
                 }
         }
-        spin_unlock(&cm->cmm_tgt_guard);
+        cfs_spin_unlock(&cm->cmm_tgt_guard);

         ld = ldt->ldt_ops->ldto_device_alloc(env, ldt, cfg);
         if (IS_ERR(ld))
                 RETURN(PTR_ERR(ld));
-        ld->ld_site = cmm2lu_dev(cm)->ld_site;
+        ld->ld_site = site;

         rc = ldt->ldt_ops->ldto_device_init(env, ld, NULL, NULL);
         if (rc) {
@@ -486,20 +506,23 @@ static int cmm_add_mdc(const struct lu_env *env,
                 RETURN(rc);
         }

-        spin_lock(&cm->cmm_tgt_guard);
-        list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
-                                 mc_linkage) {
+        cfs_spin_lock(&cm->cmm_tgt_guard);
+        cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
+                                     mc_linkage) {
                 if (mc->mc_num == mdc_num) {
-                        spin_unlock(&cm->cmm_tgt_guard);
+                        cfs_spin_unlock(&cm->cmm_tgt_guard);
                         ldt->ldt_ops->ldto_device_fini(env, ld);
                         ldt->ldt_ops->ldto_device_free(env, ld);
                         RETURN(-EEXIST);
                 }
         }
         mc = lu2mdc_dev(ld);
-        list_add_tail(&mc->mc_linkage, &cm->cmm_targets);
+        cfs_list_add_tail(&mc->mc_linkage, &cm->cmm_targets);
         cm->cmm_tgt_count++;
-        spin_unlock(&cm->cmm_tgt_guard);
+#ifdef HAVE_QUOTA_SUPPORT
+        first = cm->cmm_tgt_count;
+#endif
+        cfs_spin_unlock(&cm->cmm_tgt_guard);

         lu_device_get(cmm_lu);
         lu_ref_add(&cmm_lu->ld_reference, "mdc-child", ld);
@@ -509,6 +532,20 @@ static int cmm_add_mdc(const struct lu_env *env,
         target.ft_exp = mc->mc_desc.cl_exp;
         fld_client_add_target(cm->cmm_fld, &target);

+        if (mc->mc_num == 0) {
+                /* this is mdt0 -> mc export, fld lookup need this export
+                   to forward fld lookup request. */
+                LASSERT(!lu_site2md(site)->ms_server_fld->lsf_control_exp);
+                lu_site2md(site)->ms_server_fld->lsf_control_exp =
+                                                 mc->mc_desc.cl_exp;
+        }
+#ifdef HAVE_QUOTA_SUPPORT
+        /* XXX: Disable quota for CMD case temporary. */
+        if (first == 1) {
+                CWARN("Disable quota for CMD case temporary!\n");
+                cmm_child_ops(cm)->mdo_quota.mqo_off(env, cm->cmm_child, UGQUOTA);
+        }
+#endif
         /* Set max md size for the mdc. */
         rc = cmm_post_init_mdc(env, cm);
         RETURN(rc);
@@ -525,13 +562,13 @@ static void cmm_device_shutdown(const struct lu_env *env,
         fld_client_del_target(cm->cmm_fld, cm->cmm_local_num);

         /* Finish all mdc devices. */
-        spin_lock(&cm->cmm_tgt_guard);
-        list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
+        cfs_spin_lock(&cm->cmm_tgt_guard);
+        cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
                 struct lu_device *ld_m = mdc2lu_dev(mc);
                 fld_client_del_target(cm->cmm_fld, mc->mc_num);
                 ld_m->ld_ops->ldo_process_config(env, ld_m, cfg);
         }
-        spin_unlock(&cm->cmm_tgt_guard);
+        cfs_spin_unlock(&cm->cmm_tgt_guard);

         /* remove upcall device*/
         md_upcall_fini(&cm->cmm_md_dev);
@@ -627,7 +664,7 @@ static int cmm_prepare(const struct lu_env *env,
 }

 static const struct lu_device_operations cmm_lu_ops = {
-        .ldo_object_alloc      = cmm_object_alloc,
+        .ldo_object_alloc      = cmm_object_alloc,
         .ldo_process_config    = cmm_process_config,
         .ldo_recovery_complete = cmm_recovery_complete,
         .ldo_prepare           = cmm_prepare,
@@ -635,7 +672,7 @@ static const struct lu_device_operations cmm_lu_ops = {
 /* --- lu_device_type operations --- */

 int cmm_upcall(const struct lu_env *env, struct md_device *md,
-               enum md_upcall_event ev)
+               enum md_upcall_event ev, void *data)
 {
         int rc;
         ENTRY;
@@ -647,7 +684,7 @@ int cmm_upcall(const struct lu_env *env, struct md_device *md,
                 CERROR("can not init md size %d\n", rc);
                 /* fall through */
         default:
-                rc = md_do_upcall(env, md, ev);
+                rc = md_do_upcall(env, md, ev, data);
         }
         RETURN(rc);
 }
@@ -660,7 +697,7 @@ static struct lu_device *cmm_device_free(const struct lu_env *env,
         ENTRY;

         LASSERT(m->cmm_tgt_count == 0);
-        LASSERT(list_empty(&m->cmm_targets));
+        LASSERT(cfs_list_empty(&m->cmm_targets));
         if (m->cmm_fld != NULL) {
                 OBD_FREE_PTR(m->cmm_fld);
                 m->cmm_fld = NULL;
@@ -715,6 +752,40 @@ struct cmm_thread_info *cmm_env_info(const struct lu_env *env)
 /* type constructor/destructor: cmm_type_init/cmm_type_fini */
 LU_TYPE_INIT_FINI(cmm, &cmm_thread_key);

+/*
+ * Kludge code : it should be moved mdc_device.c if mdc_(mds)_device
+ * is really stacked.
+ */
+ */ +static int __cmm_type_init(struct lu_device_type *t) +{ + int rc; + rc = lu_device_type_init(&mdc_device_type); + if (rc == 0) { + rc = cmm_type_init(t); + if (rc) + lu_device_type_fini(&mdc_device_type); + } + return rc; +} + +static void __cmm_type_fini(struct lu_device_type *t) +{ + lu_device_type_fini(&mdc_device_type); + cmm_type_fini(t); +} + +static void __cmm_type_start(struct lu_device_type *t) +{ + mdc_device_type.ldt_ops->ldto_start(&mdc_device_type); + cmm_type_start(t); +} + +static void __cmm_type_stop(struct lu_device_type *t) +{ + mdc_device_type.ldt_ops->ldto_stop(&mdc_device_type); + cmm_type_stop(t); +} + static int cmm_device_init(const struct lu_env *env, struct lu_device *d, const char *name, struct lu_device *next) { @@ -723,7 +794,7 @@ static int cmm_device_init(const struct lu_env *env, struct lu_device *d, int err = 0; ENTRY; - spin_lock_init(&m->cmm_tgt_guard); + cfs_spin_lock_init(&m->cmm_tgt_guard); CFS_INIT_LIST_HEAD(&m->cmm_targets); m->cmm_tgt_count = 0; m->cmm_child = lu2md_dev(next); @@ -752,19 +823,19 @@ static struct lu_device *cmm_device_fini(const struct lu_env *env, ENTRY; /* Finish all mdc devices */ - spin_lock(&cm->cmm_tgt_guard); - list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) { + cfs_spin_lock(&cm->cmm_tgt_guard); + cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) { struct lu_device *ld_m = mdc2lu_dev(mc); struct lu_device *ld_c = cmm2lu_dev(cm); - list_del_init(&mc->mc_linkage); + cfs_list_del_init(&mc->mc_linkage); lu_ref_del(&ld_c->ld_reference, "mdc-child", ld_m); lu_device_put(ld_c); ld_m->ld_type->ldt_ops->ldto_device_fini(env, ld_m); ld_m->ld_type->ldt_ops->ldto_device_free(env, ld_m); cm->cmm_tgt_count--; } - spin_unlock(&cm->cmm_tgt_guard); + cfs_spin_unlock(&cm->cmm_tgt_guard); fld_client_fini(cm->cmm_fld); ls = cmm2lu_dev(cm)->ld_site; @@ -775,11 +846,11 @@ static struct lu_device *cmm_device_fini(const struct lu_env *env, } static struct lu_device_type_operations cmm_device_type_ops = { - .ldto_init = cmm_type_init, - .ldto_fini = cmm_type_fini, + .ldto_init = __cmm_type_init, + .ldto_fini = __cmm_type_fini, - .ldto_start = cmm_type_start, - .ldto_stop = cmm_type_stop, + .ldto_start = __cmm_type_start, + .ldto_stop = __cmm_type_stop, .ldto_device_alloc = cmm_device_alloc, .ldto_device_free = cmm_device_free, @@ -808,6 +879,7 @@ static void lprocfs_cmm_init_vars(struct lprocfs_static_vars *lvars) lvars->module_vars = lprocfs_cmm_module_vars; lvars->obd_vars = lprocfs_cmm_obd_vars; } +/** @} */ static int __init cmm_mod_init(void) {