X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fcmm%2Fcmm_device.c;h=8867fd83c2e927bf0d8051168425a6999f5fa68a;hp=b4e03d266972f7829efc4fd8295544d535cdc3ef;hb=9587ede5df1cd99c74dd732b84d885106af57ca0;hpb=f2b264f2049554d69c1cc6f0c0ebc5d3e196a068 diff --git a/lustre/cmm/cmm_device.c b/lustre/cmm/cmm_device.c index b4e03d2..8867fd8 100644 --- a/lustre/cmm/cmm_device.c +++ b/lustre/cmm/cmm_device.c @@ -1,29 +1,43 @@ /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- * vim:expandtab:shiftwidth=8:tabstop=8: * - * lustre/cmm/cmm_device.c - * Lustre Cluster Metadata Manager (cmm) + * GPL HEADER START * - * Copyright (c) 2006 Cluster File Systems, Inc. - * Author: Mike Pershin + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This file is part of the Lustre file system, http://www.lustre.org - * Lustre is a trademark of Cluster File Systems, Inc. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. * - * You may have signed or agreed to another license before downloading - * this software. If so, you are bound by the terms and conditions - * of that agreement, and the following does not apply to you. See the - * LICENSE file included with this distribution for more information. + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). * - * If you did not agree to a different license, then this copy of Lustre - * is open source software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * - * In either case, Lustre is distributed in the hope that it will be - * useful, but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * license text for more details. + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Use is subject to license terms. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. 
+ * + * lustre/cmm/cmm_device.c + * + * Lustre Cluster Metadata Manager (cmm) + * + * Author: Mike Pershin */ #ifndef EXPORT_SYMTAB @@ -39,16 +53,19 @@ #include #include "cmm_internal.h" #include "mdc_internal.h" +#ifdef HAVE_QUOTA_SUPPORT +# include +#endif -static struct obd_ops cmm_obd_device_ops = { +struct obd_ops cmm_obd_device_ops = { .o_owner = THIS_MODULE }; -static struct lu_device_operations cmm_lu_ops; +static const struct lu_device_operations cmm_lu_ops; static inline int lu_device_is_cmm(struct lu_device *d) { - return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &cmm_lu_ops); + return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &cmm_lu_ops); } int cmm_root_get(const struct lu_env *env, struct md_device *md, @@ -67,7 +84,7 @@ static int cmm_statfs(const struct lu_env *env, struct md_device *md, struct kstatfs *sfs) { struct cmm_device *cmm_dev = md2cmm_dev(md); - int rc; + int rc; ENTRY; rc = cmm_child_ops(cmm_dev)->mdo_statfs(env, @@ -113,12 +130,292 @@ static int cmm_update_capa_key(const struct lu_env *env, RETURN(rc); } -static struct md_device_operations cmm_md_ops = { +static int cmm_llog_ctxt_get(const struct lu_env *env, struct md_device *m, + int idx, void **h) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + rc = cmm_child_ops(cmm_dev)->mdo_llog_ctxt_get(env, cmm_dev->cmm_child, + idx, h); + RETURN(rc); +} + +#ifdef HAVE_QUOTA_SUPPORT +static int cmm_quota_notify(const struct lu_env *env, struct md_device *m) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_notify(env, + cmm_dev->cmm_child); + RETURN(rc); +} + +static int cmm_quota_setup(const struct lu_env *env, struct md_device *m, + void *data) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_setup(env, + cmm_dev->cmm_child, + data); + RETURN(rc); +} + +static int cmm_quota_cleanup(const struct lu_env *env, struct md_device *m) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_cleanup(env, + cmm_dev->cmm_child); + RETURN(rc); +} + +static int cmm_quota_recovery(const struct lu_env *env, struct md_device *m) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_recovery(env, + cmm_dev->cmm_child); + RETURN(rc); +} + +static int cmm_quota_check(const struct lu_env *env, struct md_device *m, + __u32 type) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + /* disable quota for CMD case temporary. */ + if (cmm_dev->cmm_tgt_count) + RETURN(-EOPNOTSUPP); + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_check(env, + cmm_dev->cmm_child, + type); + RETURN(rc); +} + +static int cmm_quota_on(const struct lu_env *env, struct md_device *m, + __u32 type) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + /* disable quota for CMD case temporary. 
*/ + if (cmm_dev->cmm_tgt_count) + RETURN(-EOPNOTSUPP); + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_on(env, + cmm_dev->cmm_child, + type); + RETURN(rc); +} + +static int cmm_quota_off(const struct lu_env *env, struct md_device *m, + __u32 type) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_off(env, + cmm_dev->cmm_child, + type); + RETURN(rc); +} + +static int cmm_quota_setinfo(const struct lu_env *env, struct md_device *m, + __u32 type, __u32 id, struct obd_dqinfo *dqinfo) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + /* disable quota for CMD case temporary. */ + if (cmm_dev->cmm_tgt_count) + RETURN(-EOPNOTSUPP); + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_setinfo(env, + cmm_dev->cmm_child, + type, id, dqinfo); + RETURN(rc); +} + +static int cmm_quota_getinfo(const struct lu_env *env, + const struct md_device *m, + __u32 type, __u32 id, struct obd_dqinfo *dqinfo) +{ + struct cmm_device *cmm_dev = md2cmm_dev((struct md_device *)m); + int rc; + ENTRY; + + /* disable quota for CMD case temporary. */ + if (cmm_dev->cmm_tgt_count) + RETURN(-EOPNOTSUPP); + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_getinfo(env, + cmm_dev->cmm_child, + type, id, dqinfo); + RETURN(rc); +} + +static int cmm_quota_setquota(const struct lu_env *env, struct md_device *m, + __u32 type, __u32 id, struct obd_dqblk *dqblk) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + /* disable quota for CMD case temporary. */ + if (cmm_dev->cmm_tgt_count) + RETURN(-EOPNOTSUPP); + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_setquota(env, + cmm_dev->cmm_child, + type, id, dqblk); + RETURN(rc); +} + +static int cmm_quota_getquota(const struct lu_env *env, + const struct md_device *m, + __u32 type, __u32 id, struct obd_dqblk *dqblk) +{ + struct cmm_device *cmm_dev = md2cmm_dev((struct md_device *)m); + int rc; + ENTRY; + + /* disable quota for CMD case temporary. */ + if (cmm_dev->cmm_tgt_count) + RETURN(-EOPNOTSUPP); + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_getquota(env, + cmm_dev->cmm_child, + type, id, dqblk); + RETURN(rc); +} + +static int cmm_quota_getoinfo(const struct lu_env *env, + const struct md_device *m, + __u32 type, __u32 id, struct obd_dqinfo *dqinfo) +{ + struct cmm_device *cmm_dev = md2cmm_dev((struct md_device *)m); + int rc; + ENTRY; + + /* disable quota for CMD case temporary. */ + if (cmm_dev->cmm_tgt_count) + RETURN(-EOPNOTSUPP); + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_getoinfo(env, + cmm_dev->cmm_child, + type, id, dqinfo); + RETURN(rc); +} + +static int cmm_quota_getoquota(const struct lu_env *env, + const struct md_device *m, + __u32 type, __u32 id, struct obd_dqblk *dqblk) +{ + struct cmm_device *cmm_dev = md2cmm_dev((struct md_device *)m); + int rc; + ENTRY; + + /* disable quota for CMD case temporary. */ + if (cmm_dev->cmm_tgt_count) + RETURN(-EOPNOTSUPP); + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_getoquota(env, + cmm_dev->cmm_child, + type, id, dqblk); + RETURN(rc); +} + +static int cmm_quota_invalidate(const struct lu_env *env, struct md_device *m, + __u32 type) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + /* disable quota for CMD case temporary. 
*/ + if (cmm_dev->cmm_tgt_count) + RETURN(-EOPNOTSUPP); + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_invalidate(env, + cmm_dev->cmm_child, + type); + RETURN(rc); +} + +static int cmm_quota_finvalidate(const struct lu_env *env, struct md_device *m, + __u32 type) +{ + struct cmm_device *cmm_dev = md2cmm_dev(m); + int rc; + ENTRY; + + /* disable quota for CMD case temporary. */ + if (cmm_dev->cmm_tgt_count) + RETURN(-EOPNOTSUPP); + + rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_finvalidate(env, + cmm_dev->cmm_child, + type); + RETURN(rc); +} +#endif + +int cmm_iocontrol(const struct lu_env *env, struct md_device *m, + unsigned int cmd, int len, void *data) +{ + struct md_device *next = md2cmm_dev(m)->cmm_child; + int rc; + + ENTRY; + rc = next->md_ops->mdo_iocontrol(env, next, cmd, len, data); + RETURN(rc); +} + + +static const struct md_device_operations cmm_md_ops = { .mdo_statfs = cmm_statfs, .mdo_root_get = cmm_root_get, .mdo_maxsize_get = cmm_maxsize_get, .mdo_init_capa_ctxt = cmm_init_capa_ctxt, .mdo_update_capa_key = cmm_update_capa_key, + .mdo_llog_ctxt_get = cmm_llog_ctxt_get, + .mdo_iocontrol = cmm_iocontrol, +#ifdef HAVE_QUOTA_SUPPORT + .mdo_quota = { + .mqo_notify = cmm_quota_notify, + .mqo_setup = cmm_quota_setup, + .mqo_cleanup = cmm_quota_cleanup, + .mqo_recovery = cmm_quota_recovery, + .mqo_check = cmm_quota_check, + .mqo_on = cmm_quota_on, + .mqo_off = cmm_quota_off, + .mqo_setinfo = cmm_quota_setinfo, + .mqo_getinfo = cmm_quota_getinfo, + .mqo_setquota = cmm_quota_setquota, + .mqo_getquota = cmm_quota_getquota, + .mqo_getoinfo = cmm_quota_getoinfo, + .mqo_getoquota = cmm_quota_getoquota, + .mqo_invalidate = cmm_quota_invalidate, + .mqo_finvalidate = cmm_quota_finvalidate + } +#endif }; extern struct lu_device_type mdc_device_type; @@ -138,7 +435,7 @@ static int cmm_post_init_mdc(const struct lu_env *env, spin_lock(&cmm->cmm_tgt_guard); list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets, mc_linkage) { - mdc_init_ea_size(env, mc, max_mdsize, max_cookiesize); + cmm_mdc_init_ea_size(env, mc, max_mdsize, max_cookiesize); } spin_unlock(&cmm->cmm_tgt_guard); RETURN(rc); @@ -154,8 +451,13 @@ static int cmm_add_mdc(const struct lu_env *env, struct mdc_device *mc, *tmp; struct lu_fld_target target; struct lu_device *ld; + struct lu_device *cmm_lu = cmm2lu_dev(cm); mdsno_t mdc_num; + struct lu_site *site = cmm2lu_dev(cm)->ld_site; int rc; +#ifdef HAVE_QUOTA_SUPPORT + int first; +#endif ENTRY; /* find out that there is no such mdc */ @@ -176,17 +478,23 @@ static int cmm_add_mdc(const struct lu_env *env, } spin_unlock(&cm->cmm_tgt_guard); ld = ldt->ldt_ops->ldto_device_alloc(env, ldt, cfg); - ld->ld_site = cmm2lu_dev(cm)->ld_site; + if (IS_ERR(ld)) + RETURN(PTR_ERR(ld)); + + ld->ld_site = site; rc = ldt->ldt_ops->ldto_device_init(env, ld, NULL, NULL); if (rc) { ldt->ldt_ops->ldto_device_free(env, ld); - RETURN (rc); + RETURN(rc); } /* pass config to the just created MDC */ rc = ld->ld_ops->ldo_process_config(env, ld, cfg); - if (rc) + if (rc) { + ldt->ldt_ops->ldto_device_fini(env, ld); + ldt->ldt_ops->ldto_device_free(env, ld); RETURN(rc); + } spin_lock(&cm->cmm_tgt_guard); list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, @@ -201,15 +509,33 @@ static int cmm_add_mdc(const struct lu_env *env, mc = lu2mdc_dev(ld); list_add_tail(&mc->mc_linkage, &cm->cmm_targets); cm->cmm_tgt_count++; +#ifdef HAVE_QUOTA_SUPPORT + first = cm->cmm_tgt_count; +#endif spin_unlock(&cm->cmm_tgt_guard); - lu_device_get(cmm2lu_dev(cm)); + lu_device_get(cmm_lu); + lu_ref_add(&cmm_lu->ld_reference, "mdc-child", 
ld); target.ft_srv = NULL; target.ft_idx = mc->mc_num; target.ft_exp = mc->mc_desc.cl_exp; fld_client_add_target(cm->cmm_fld, &target); + if (mc->mc_num == 0) { + /* this is mdt0 -> mc export, fld lookup need this export + to forward fld lookup request. */ + LASSERT(!lu_site2md(site)->ms_server_fld->lsf_control_exp); + lu_site2md(site)->ms_server_fld->lsf_control_exp = + mc->mc_desc.cl_exp; + } +#ifdef HAVE_QUOTA_SUPPORT + /* XXX: Disable quota for CMD case temporary. */ + if (first == 1) { + CWARN("Disable quota for CMD case temporary!\n"); + cmm_child_ops(cm)->mdo_quota.mqo_off(env, cm->cmm_child, UGQUOTA); + } +#endif /* Set max md size for the mdc. */ rc = cmm_post_init_mdc(env, cm); RETURN(rc); @@ -272,7 +598,7 @@ static int cmm_process_config(const struct lu_env *env, struct lu_site *ls = cmm2lu_dev(m)->ld_site; struct lu_fld_target target; - target.ft_srv = ls->ls_server_fld; + target.ft_srv = lu_site2md(ls)->ms_server_fld; target.ft_idx = m->cmm_local_num; target.ft_exp = NULL; @@ -314,15 +640,29 @@ static int cmm_recovery_complete(const struct lu_env *env, RETURN(rc); } -static struct lu_device_operations cmm_lu_ops = { - .ldo_object_alloc = cmm_object_alloc, +static int cmm_prepare(const struct lu_env *env, + struct lu_device *pdev, + struct lu_device *dev) +{ + struct cmm_device *cmm = lu2cmm_dev(dev); + struct lu_device *next = md2lu_dev(cmm->cmm_child); + int rc; + + ENTRY; + rc = next->ld_ops->ldo_prepare(env, dev, next); + RETURN(rc); +} + +static const struct lu_device_operations cmm_lu_ops = { + .ldo_object_alloc = cmm_object_alloc, .ldo_process_config = cmm_process_config, - .ldo_recovery_complete = cmm_recovery_complete + .ldo_recovery_complete = cmm_recovery_complete, + .ldo_prepare = cmm_prepare, }; /* --- lu_device_type operations --- */ int cmm_upcall(const struct lu_env *env, struct md_device *md, - enum md_upcall_event ev) + enum md_upcall_event ev, void *data) { int rc; ENTRY; @@ -334,11 +674,29 @@ int cmm_upcall(const struct lu_env *env, struct md_device *md, CERROR("can not init md size %d\n", rc); /* fall through */ default: - rc = md_do_upcall(env, md, ev); + rc = md_do_upcall(env, md, ev, data); } RETURN(rc); } +static struct lu_device *cmm_device_free(const struct lu_env *env, + struct lu_device *d) +{ + struct cmm_device *m = lu2cmm_dev(d); + struct lu_device *next = md2lu_dev(m->cmm_child); + ENTRY; + + LASSERT(m->cmm_tgt_count == 0); + LASSERT(list_empty(&m->cmm_targets)); + if (m->cmm_fld != NULL) { + OBD_FREE_PTR(m->cmm_fld); + m->cmm_fld = NULL; + } + md_device_fini(&m->cmm_md_dev); + OBD_FREE_PTR(m); + RETURN(next); +} + static struct lu_device *cmm_device_alloc(const struct lu_env *env, struct lu_device_type *t, struct lustre_cfg *cfg) @@ -358,28 +716,12 @@ static struct lu_device *cmm_device_alloc(const struct lu_env *env, l->ld_ops = &cmm_lu_ops; OBD_ALLOC_PTR(m->cmm_fld); - if (!m->cmm_fld) - GOTO(out_free_cmm, l = ERR_PTR(-ENOMEM)); + if (!m->cmm_fld) { + cmm_device_free(env, l); + l = ERR_PTR(-ENOMEM); + } } - RETURN(l); -out_free_cmm: - OBD_FREE_PTR(m); - return l; -} - -static void cmm_device_free(const struct lu_env *env, struct lu_device *d) -{ - struct cmm_device *m = lu2cmm_dev(d); - - LASSERT(m->cmm_tgt_count == 0); - LASSERT(list_empty(&m->cmm_targets)); - if (m->cmm_fld != NULL) { - OBD_FREE_PTR(m->cmm_fld); - m->cmm_fld = NULL; - } - md_device_fini(&m->cmm_md_dev); - OBD_FREE_PTR(m); } /* context key constructor/destructor: cmm_key_init, cmm_key_fini */ @@ -400,6 +742,40 @@ struct cmm_thread_info *cmm_env_info(const struct lu_env *env) 
/* type constructor/destructor: cmm_type_init/cmm_type_fini */ LU_TYPE_INIT_FINI(cmm, &cmm_thread_key); +/* + * Kludge code : it should be moved mdc_device.c if mdc_(mds)_device + * is really stacked. + */ +static int __cmm_type_init(struct lu_device_type *t) +{ + int rc; + rc = lu_device_type_init(&mdc_device_type); + if (rc == 0) { + rc = cmm_type_init(t); + if (rc) + lu_device_type_fini(&mdc_device_type); + } + return rc; +} + +static void __cmm_type_fini(struct lu_device_type *t) +{ + lu_device_type_fini(&mdc_device_type); + cmm_type_fini(t); +} + +static void __cmm_type_start(struct lu_device_type *t) +{ + mdc_device_type.ldt_ops->ldto_start(&mdc_device_type); + cmm_type_start(t); +} + +static void __cmm_type_stop(struct lu_device_type *t) +{ + mdc_device_type.ldt_ops->ldto_stop(&mdc_device_type); + cmm_type_stop(t); +} + static int cmm_device_init(const struct lu_env *env, struct lu_device *d, const char *name, struct lu_device *next) { @@ -409,7 +785,7 @@ static int cmm_device_init(const struct lu_env *env, struct lu_device *d, ENTRY; spin_lock_init(&m->cmm_tgt_guard); - INIT_LIST_HEAD(&m->cmm_targets); + CFS_INIT_LIST_HEAD(&m->cmm_targets); m->cmm_tgt_count = 0; m->cmm_child = lu2md_dev(next); @@ -422,16 +798,16 @@ static int cmm_device_init(const struct lu_env *env, struct lu_device *d, /* Assign site's fld client ref, needed for asserts in osd. */ ls = cmm2lu_dev(m)->ld_site; - ls->ls_client_fld = m->cmm_fld; + lu_site2md(ls)->ms_client_fld = m->cmm_fld; err = cmm_procfs_init(m, name); - + RETURN(err); } static struct lu_device *cmm_device_fini(const struct lu_env *env, struct lu_device *ld) { - struct cmm_device *cm = lu2cmm_dev(ld); + struct cmm_device *cm = lu2cmm_dev(ld); struct mdc_device *mc, *tmp; struct lu_site *ls; ENTRY; @@ -440,9 +816,11 @@ static struct lu_device *cmm_device_fini(const struct lu_env *env, spin_lock(&cm->cmm_tgt_guard); list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) { struct lu_device *ld_m = mdc2lu_dev(mc); + struct lu_device *ld_c = cmm2lu_dev(cm); list_del_init(&mc->mc_linkage); - lu_device_put(cmm2lu_dev(cm)); + lu_ref_del(&ld_c->ld_reference, "mdc-child", ld_m); + lu_device_put(ld_c); ld_m->ld_type->ldt_ops->ldto_device_fini(env, ld_m); ld_m->ld_type->ldt_ops->ldto_device_free(env, ld_m); cm->cmm_tgt_count--; @@ -451,15 +829,18 @@ static struct lu_device *cmm_device_fini(const struct lu_env *env, fld_client_fini(cm->cmm_fld); ls = cmm2lu_dev(cm)->ld_site; - ls->ls_client_fld = NULL; + lu_site2md(ls)->ms_client_fld = NULL; cmm_procfs_fini(cm); RETURN (md2lu_dev(cm->cmm_child)); } static struct lu_device_type_operations cmm_device_type_ops = { - .ldto_init = cmm_type_init, - .ldto_fini = cmm_type_fini, + .ldto_init = __cmm_type_init, + .ldto_fini = __cmm_type_fini, + + .ldto_start = __cmm_type_start, + .ldto_stop = __cmm_type_stop, .ldto_device_alloc = cmm_device_alloc, .ldto_device_free = cmm_device_free, @@ -503,7 +884,7 @@ static void __exit cmm_mod_exit(void) class_unregister_type(LUSTRE_CMM_NAME); } -MODULE_AUTHOR("Cluster File Systems, Inc. "); +MODULE_AUTHOR("Sun Microsystems, Inc. "); MODULE_DESCRIPTION("Lustre Clustered Metadata Manager ("LUSTRE_CMM_NAME")"); MODULE_LICENSE("GPL");
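
The bulk of the new code above is the set of cmm_quota_* handlers, and they all share one shape: refuse the operation while more than one metadata target is configured (the CMD case, where quota is temporarily disabled) and otherwise forward the call to the child md_device. The standalone sketch below illustrates that guard-then-forward pattern under hypothetical toy_* names; it is not Lustre code, and only the control flow mirrors the patch.

/*
 * Illustration only: hypothetical toy_* types, not Lustre APIs.
 * Shows the guard-then-forward shape shared by the cmm_quota_* handlers.
 */
#include <errno.h>
#include <stdio.h>

struct toy_quota_ops {
        int (*qo_on)(void *child, unsigned int type);   /* enable quota */
};

struct toy_dev {
        void                       *td_child;        /* next device in the stack */
        const struct toy_quota_ops *td_child_quota;   /* child's quota methods */
        unsigned int                td_tgt_count;     /* remote metadata targets */
};

/* Forward "quota on" to the child unless clustered metadata (CMD) is in use. */
static int toy_quota_on(struct toy_dev *d, unsigned int type)
{
        if (d->td_tgt_count)            /* CMD case: quota disabled for now */
                return -EOPNOTSUPP;
        return d->td_child_quota->qo_on(d->td_child, type);
}

/* Child-level implementation used only by the demo below. */
static int child_quota_on(void *child, unsigned int type)
{
        (void)child;
        printf("child: quota type %u enabled\n", type);
        return 0;
}

int main(void)
{
        static const struct toy_quota_ops child_ops = { .qo_on = child_quota_on };
        struct toy_dev single = { NULL, &child_ops, 0 };  /* single-MDT setup */
        struct toy_dev cmd    = { NULL, &child_ops, 2 };  /* clustered (CMD) setup */

        printf("single: %d\n", toy_quota_on(&single, 0)); /* forwards, prints 0 */
        printf("cmd:    %d\n", toy_quota_on(&cmd, 0));    /* guarded, -EOPNOTSUPP */
        return 0;
}

In the patch each handler is written out explicitly rather than generated by a macro, which makes it easy to see that the CMD guard is applied to most, but not all, of the quota operations.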
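
cmm_add_mdc() also gains error handling: the result of ldto_device_alloc() is now checked with IS_ERR(), and a failure in ldo_process_config() finishes and frees the half-initialized child instead of leaking it. The sketch below shows that alloc/init/config staircase with unwinding on failure; the toy_* names are again hypothetical, not Lustre APIs.

/*
 * Illustration only: staged setup with unwinding on failure, mirroring the
 * error paths added to cmm_add_mdc() (alloc -> init -> process_config).
 * Hypothetical names throughout.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_child { int configured; };

static struct toy_child *toy_alloc(void)             { return malloc(sizeof(struct toy_child)); }
static void toy_free(struct toy_child *c)            { free(c); }
static int  toy_init(struct toy_child *c)            { c->configured = 0; return 0; }
static void toy_fini(struct toy_child *c)            { c->configured = 0; }
static int  toy_process_config(struct toy_child *c)  { c->configured = 1; return 0; }

/* Each later failure undoes exactly the steps that already succeeded. */
static int toy_add_child(struct toy_child **out)
{
        struct toy_child *c = toy_alloc();
        int rc;

        if (c == NULL)                  /* alloc failed: nothing to undo */
                return -1;

        rc = toy_init(c);
        if (rc != 0) {
                toy_free(c);            /* undo alloc */
                return rc;
        }

        rc = toy_process_config(c);
        if (rc != 0) {
                toy_fini(c);            /* undo init */
                toy_free(c);            /* undo alloc */
                return rc;
        }

        *out = c;
        return 0;
}

int main(void)
{
        struct toy_child *c = NULL;

        if (toy_add_child(&c) == 0) {
                printf("child configured: %d\n", c->configured);
                toy_fini(c);
                toy_free(c);
        }
        return 0;
}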
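
Near the end of the patch, __cmm_type_init()/__cmm_type_fini() (with the matching start/stop wrappers) bring up the embedded mdc device type together with the cmm type and undo the first step if the second fails. A minimal sketch of that "initialize both or neither" pattern follows; the inner_ and outer_ names are illustrative stand-ins, not identifiers from the tree.

/*
 * Illustration only: a generic "init B after A, undo A if B fails" wrapper,
 * mirroring the shape of __cmm_type_init()/__cmm_type_fini() in the patch.
 * All names here are hypothetical.
 */
#include <stdio.h>

static int  inner_init(void) { puts("inner type up");   return 0; }
static void inner_fini(void) { puts("inner type down"); }
static int  outer_init(void) { puts("outer type up");   return 0; }
static void outer_fini(void) { puts("outer type down"); }

/* Bring up the inner (mdc-like) type first, then the outer (cmm-like) one. */
static int combined_init(void)
{
        int rc = inner_init();

        if (rc != 0)
                return rc;
        rc = outer_init();
        if (rc != 0)
                inner_fini();           /* partial failure: unwind step one */
        return rc;
}

/* Tear both down; the patch finishes the inner type first, then the outer. */
static void combined_fini(void)
{
        inner_fini();
        outer_fini();
}

int main(void)
{
        if (combined_init() == 0)
                combined_fini();
        return 0;
}

The patch itself labels this "Kludge code": once mdc_(mds)_device is properly stacked, the nested type management is intended to move into mdc_device.c.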