1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2011, Whamcloud, Inc.
35 * This file is part of Lustre, http://www.lustre.org/
36 * Lustre is a trademark of Sun Microsystems, Inc.
38 * lustre/cmm/cmm_device.c
40 * Lustre Cluster Metadata Manager (cmm)
42 * Author: Mike Pershin <tappro@clusterfs.com>
49 # define EXPORT_SYMTAB
51 #define DEBUG_SUBSYSTEM S_MDS
53 #include <linux/module.h>
56 #include <obd_class.h>
57 #include <lprocfs_status.h>
58 #include <lustre_ver.h>
59 #include "cmm_internal.h"
60 #include "mdc_internal.h"
61 #ifdef HAVE_QUOTA_SUPPORT
62 # include <lustre_quota.h>
/* Minimal OBD operations for the CMM device: only module ownership is set.
 * NOTE(review): the closing "};" of this initializer is elided in this excerpt. */
65 struct obd_ops cmm_obd_device_ops = {
66 .o_owner = THIS_MODULE
/* Forward declaration; the ops table itself is defined later in this file. */
69 static const struct lu_device_operations cmm_lu_ops;
/* Return non-zero iff @d is a CMM lu_device, judged by its ops vector.
 * ergo(p, q) is vacuously true when d or d->ld_ops is NULL. */
71 static inline int lu_device_is_cmm(struct lu_device *d)
73 return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &cmm_lu_ops);
/* Obtain the filesystem root FID from the child MD device.
 * Only delegated on the master MDS (local index 0); the non-master path
 * is elided in this excerpt — presumably returns an error, TODO confirm. */
76 int cmm_root_get(const struct lu_env *env, struct md_device *md,
79 struct cmm_device *cmm_dev = md2cmm_dev(md);
80 /* valid only on master MDS */
81 if (cmm_dev->cmm_local_num == 0)
82 return cmm_child_ops(cmm_dev)->mdo_root_get(env,
83 cmm_dev->cmm_child, fid);
/* statfs: delegate directly to the child MD device's mdo_statfs. */
88 static int cmm_statfs(const struct lu_env *env, struct md_device *md,
91 struct cmm_device *cmm_dev = md2cmm_dev(md);
95 rc = cmm_child_ops(cmm_dev)->mdo_statfs(env,
96 cmm_dev->cmm_child, sfs);
/* Query the maximum MD attribute size and llog cookie size from the child. */
100 static int cmm_maxsize_get(const struct lu_env *env, struct md_device *md,
101 int *md_size, int *cookie_size)
103 struct cmm_device *cmm_dev = md2cmm_dev(md);
106 rc = cmm_child_ops(cmm_dev)->mdo_maxsize_get(env, cmm_dev->cmm_child,
107 md_size, cookie_size)&#59;
/* Initialize the capability context on the child device.
 * The child is required to implement mdo_init_capa_ctxt (asserted below). */
111 static int cmm_init_capa_ctxt(const struct lu_env *env, struct md_device *md,
112 int mode , unsigned long timeout, __u32 alg,
113 struct lustre_capa_key *keys)
115 struct cmm_device *cmm_dev = md2cmm_dev(md);
118 LASSERT(cmm_child_ops(cmm_dev)->mdo_init_capa_ctxt);
119 rc = cmm_child_ops(cmm_dev)->mdo_init_capa_ctxt(env, cmm_dev->cmm_child,
/* Propagate a capability key update to the child device. */
125 static int cmm_update_capa_key(const struct lu_env *env,
126 struct md_device *md,
127 struct lustre_capa_key *key)
129 struct cmm_device *cmm_dev = md2cmm_dev(md);
132 rc = cmm_child_ops(cmm_dev)->mdo_update_capa_key(env,
/* Fetch an llog context from the child MD device. */
138 static int cmm_llog_ctxt_get(const struct lu_env *env, struct md_device *m,
141 struct cmm_device *cmm_dev = md2cmm_dev(m);
145 rc = cmm_child_ops(cmm_dev)->mdo_llog_ctxt_get(env, cmm_dev->cmm_child,
150 #ifdef HAVE_QUOTA_SUPPORT
152 * \name Quota functions
/* Quota notify: forward to the child's mqo_notify. */
155 static int cmm_quota_notify(const struct lu_env *env, struct md_device *m)
157 struct cmm_device *cmm_dev = md2cmm_dev(m);
161 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_notify(env,
/* Quota setup: forward to the child's mqo_setup. */
166 static int cmm_quota_setup(const struct lu_env *env, struct md_device *m,
169 struct cmm_device *cmm_dev = md2cmm_dev(m);
173 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_setup(env,
/* Quota cleanup: forward to the child's mqo_cleanup. */
179 static int cmm_quota_cleanup(const struct lu_env *env, struct md_device *m)
181 struct cmm_device *cmm_dev = md2cmm_dev(m);
185 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_cleanup(env,
/* Quota recovery: forward to the child's mqo_recovery. */
190 static int cmm_quota_recovery(const struct lu_env *env, struct md_device *m)
192 struct cmm_device *cmm_dev = md2cmm_dev(m);
196 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_recovery(env,
/* Quota check: skipped when remote MDC targets exist (quota disabled for
 * the clustered-metadata case), otherwise forwarded to the child.
 * NOTE(review): the early-return value is elided in this excerpt. */
201 static int cmm_quota_check(const struct lu_env *env, struct md_device *m,
204 struct cmm_device *cmm_dev = md2cmm_dev(m);
208 /* disable quota for CMD case temporary. */
209 if (cmm_dev->cmm_tgt_count)
212 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_check(env,
/* Quota on: skipped when remote MDC targets exist (CMD case),
 * otherwise forwarded to the child's mqo_on. */
218 static int cmm_quota_on(const struct lu_env *env, struct md_device *m,
221 struct cmm_device *cmm_dev = md2cmm_dev(m);
225 /* disable quota for CMD case temporary. */
226 if (cmm_dev->cmm_tgt_count)
229 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_on(env,
/* Quota off: forward to the child's mqo_off. Unlike most other quota
 * wrappers here, no cmm_tgt_count guard is visible for this one. */
235 static int cmm_quota_off(const struct lu_env *env, struct md_device *m,
238 struct cmm_device *cmm_dev = md2cmm_dev(m);
242 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_off(env,
/* Quota setinfo: skipped when remote MDC targets exist (CMD case),
 * otherwise forwarded to the child's mqo_setinfo. */
248 static int cmm_quota_setinfo(const struct lu_env *env, struct md_device *m,
249 __u32 type, __u32 id, struct obd_dqinfo *dqinfo)
251 struct cmm_device *cmm_dev = md2cmm_dev(m);
255 /* disable quota for CMD case temporary. */
256 if (cmm_dev->cmm_tgt_count)
259 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_setinfo(env,
/* Quota getinfo: skipped when remote MDC targets exist (CMD case),
 * otherwise forwarded to the child's mqo_getinfo. The cast drops the
 * const qualifier on @m because md2cmm_dev() takes a non-const pointer. */
265 static int cmm_quota_getinfo(const struct lu_env *env,
266 const struct md_device *m,
267 __u32 type, __u32 id, struct obd_dqinfo *dqinfo)
269 struct cmm_device *cmm_dev = md2cmm_dev((struct md_device *)m);
273 /* disable quota for CMD case temporary. */
274 if (cmm_dev->cmm_tgt_count)
277 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_getinfo(env,
/* Quota setquota: skipped when remote MDC targets exist (CMD case),
 * otherwise forwarded to the child's mqo_setquota. */
283 static int cmm_quota_setquota(const struct lu_env *env, struct md_device *m,
284 __u32 type, __u32 id, struct obd_dqblk *dqblk)
286 struct cmm_device *cmm_dev = md2cmm_dev(m);
290 /* disable quota for CMD case temporary. */
291 if (cmm_dev->cmm_tgt_count)
294 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_setquota(env,
/* Quota getquota: skipped when remote MDC targets exist (CMD case),
 * otherwise forwarded to the child's mqo_getquota. Const cast as in
 * cmm_quota_getinfo(). */
300 static int cmm_quota_getquota(const struct lu_env *env,
301 const struct md_device *m,
302 __u32 type, __u32 id, struct obd_dqblk *dqblk)
304 struct cmm_device *cmm_dev = md2cmm_dev((struct md_device *)m);
308 /* disable quota for CMD case temporary. */
309 if (cmm_dev->cmm_tgt_count)
312 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_getquota(env,
/* Quota getoinfo: skipped when remote MDC targets exist (CMD case),
 * otherwise forwarded to the child's mqo_getoinfo. */
318 static int cmm_quota_getoinfo(const struct lu_env *env,
319 const struct md_device *m,
320 __u32 type, __u32 id, struct obd_dqinfo *dqinfo)
322 struct cmm_device *cmm_dev = md2cmm_dev((struct md_device *)m);
326 /* disable quota for CMD case temporary. */
327 if (cmm_dev->cmm_tgt_count)
330 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_getoinfo(env,
/* Quota getoquota: skipped when remote MDC targets exist (CMD case),
 * otherwise forwarded to the child's mqo_getoquota. */
336 static int cmm_quota_getoquota(const struct lu_env *env,
337 const struct md_device *m,
338 __u32 type, __u32 id, struct obd_dqblk *dqblk)
340 struct cmm_device *cmm_dev = md2cmm_dev((struct md_device *)m);
344 /* disable quota for CMD case temporary. */
345 if (cmm_dev->cmm_tgt_count)
348 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_getoquota(env,
/* Quota invalidate: skipped when remote MDC targets exist (CMD case),
 * otherwise forwarded to the child's mqo_invalidate. */
354 static int cmm_quota_invalidate(const struct lu_env *env, struct md_device *m,
357 struct cmm_device *cmm_dev = md2cmm_dev(m);
361 /* disable quota for CMD case temporary. */
362 if (cmm_dev->cmm_tgt_count)
365 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_invalidate(env,
/* Quota finvalidate: skipped when remote MDC targets exist (CMD case),
 * otherwise forwarded to the child's mqo_finvalidate. */
371 static int cmm_quota_finvalidate(const struct lu_env *env, struct md_device *m,
374 struct cmm_device *cmm_dev = md2cmm_dev(m);
378 /* disable quota for CMD case temporary. */
379 if (cmm_dev->cmm_tgt_count)
382 rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_finvalidate(env,
/* Device ioctl: pass straight through to the child MD device. */
390 int cmm_iocontrol(const struct lu_env *env, struct md_device *m,
391 unsigned int cmd, int len, void *data)
393 struct md_device *next = md2cmm_dev(m)->cmm_child;
397 rc = next->md_ops->mdo_iocontrol(env, next, cmd, len, data);
/* md_device operations vector for the CMM layer; each entry is one of the
 * delegating wrappers defined above.
 * NOTE(review): the ".mdo_quota = {" opener and closing braces of the quota
 * sub-struct appear to be elided in this excerpt. */
402 static const struct md_device_operations cmm_md_ops = {
403 .mdo_statfs = cmm_statfs,
404 .mdo_root_get = cmm_root_get,
405 .mdo_maxsize_get = cmm_maxsize_get,
406 .mdo_init_capa_ctxt = cmm_init_capa_ctxt,
407 .mdo_update_capa_key = cmm_update_capa_key,
408 .mdo_llog_ctxt_get = cmm_llog_ctxt_get,
409 .mdo_iocontrol = cmm_iocontrol,
410 #ifdef HAVE_QUOTA_SUPPORT
412 .mqo_notify = cmm_quota_notify,
413 .mqo_setup = cmm_quota_setup,
414 .mqo_cleanup = cmm_quota_cleanup,
415 .mqo_recovery = cmm_quota_recovery,
416 .mqo_check = cmm_quota_check,
417 .mqo_on = cmm_quota_on,
418 .mqo_off = cmm_quota_off,
419 .mqo_setinfo = cmm_quota_setinfo,
420 .mqo_getinfo = cmm_quota_getinfo,
421 .mqo_setquota = cmm_quota_setquota,
422 .mqo_getquota = cmm_quota_getquota,
423 .mqo_getoinfo = cmm_quota_getoinfo,
424 .mqo_getoquota = cmm_quota_getoquota,
425 .mqo_invalidate = cmm_quota_invalidate,
426 .mqo_finvalidate = cmm_quota_finvalidate
/* The MDC child device type lives in mdc_device.c. */
431 extern struct lu_device_type mdc_device_type;
/* Propagate the lower layer's max MD / cookie sizes to every attached MDC.
 * Walks the target list under cmm_tgt_guard; the error check after
 * cmm_maxsize_get() is elided in this excerpt. */
435 static int cmm_post_init_mdc(const struct lu_env *env,
436 struct cmm_device *cmm)
438 int max_mdsize, max_cookiesize, rc;
439 struct mdc_device *mc, *tmp;
441 /* get the max mdsize and cookiesize from lower layer */
442 rc = cmm_maxsize_get(env, &cmm->cmm_md_dev, &max_mdsize,
447 cfs_spin_lock(&cmm->cmm_tgt_guard);
448 cfs_list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets,
450 cmm_mdc_init_ea_size(env, mc, max_mdsize, max_cookiesize);
452 cfs_spin_unlock(&cmm->cmm_tgt_guard);
456 /* --- cmm_lu_operations --- */
457 /* add new MDC to the CMM, create MDC lu_device and connect it to mdc_obd */
/* Add a new MDC target to the CMM: parse the target index from the config,
 * allocate/init an mdc lu_device, register it in cmm_targets and with the
 * FLD client, then refresh EA sizes via cmm_post_init_mdc().
 * Duplicate-index detection is done twice: before allocation (fast path)
 * and again after, since the lock is dropped across device setup. */
458 static int cmm_add_mdc(const struct lu_env *env,
459 struct cmm_device *cm, struct lustre_cfg *cfg)
461 struct lu_device_type *ldt = &mdc_device_type;
462 char *p, *num = lustre_cfg_string(cfg, 2);
463 struct mdc_device *mc, *tmp;
464 struct lu_fld_target target;
465 struct lu_device *ld;
466 struct lu_device *cmm_lu = cmm2lu_dev(cm);
468 struct lu_site *site = cmm2lu_dev(cm)->ld_site;
470 #ifdef HAVE_QUOTA_SUPPORT
475 /* find out that there is no such mdc */
477 mdc_num = simple_strtol(num, &p, 10);
/* NOTE(review): "lustre_cgf" in the message below is a typo for "lustre_cfg". */
479 CERROR("Invalid index in lustre_cgf, offset 2\n");
483 cfs_spin_lock(&cm->cmm_tgt_guard);
484 cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
486 if (mc->mc_num == mdc_num) {
487 cfs_spin_unlock(&cm->cmm_tgt_guard);
491 cfs_spin_unlock(&cm->cmm_tgt_guard);
492 ld = ldt->ldt_ops->ldto_device_alloc(env, ldt, cfg);
498 rc = ldt->ldt_ops->ldto_device_init(env, ld, NULL, NULL);
/* On init failure the freshly allocated device is freed again. */
500 ldt->ldt_ops->ldto_device_free(env, ld);
503 /* pass config to the just created MDC */
504 rc = ld->ld_ops->ldo_process_config(env, ld, cfg);
506 ldt->ldt_ops->ldto_device_fini(env, ld);
507 ldt->ldt_ops->ldto_device_free(env, ld);
/* Re-check for a concurrent add of the same index under the lock. */
511 cfs_spin_lock(&cm->cmm_tgt_guard);
512 cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
514 if (mc->mc_num == mdc_num) {
515 cfs_spin_unlock(&cm->cmm_tgt_guard);
516 ldt->ldt_ops->ldto_device_fini(env, ld);
517 ldt->ldt_ops->ldto_device_free(env, ld);
522 cfs_list_add_tail(&mc->mc_linkage, &cm->cmm_targets);
524 #ifdef HAVE_QUOTA_SUPPORT
525 first = cm->cmm_tgt_count;
527 cfs_spin_unlock(&cm->cmm_tgt_guard);
/* The CMM holds a reference on itself for each attached MDC child. */
529 lu_device_get(cmm_lu);
530 lu_ref_add(&cmm_lu->ld_reference, "mdc-child", ld);
532 target.ft_srv = NULL;
533 target.ft_idx = mc->mc_num;
534 target.ft_exp = mc->mc_desc.cl_exp;
535 fld_client_add_target(cm->cmm_fld, &target);
537 if (mc->mc_num == 0) {
538 /* this is mdt0 -> mc export, fld lookup need this export
539 to forward fld lookup request. */
540 LASSERT(!lu_site2md(site)->ms_server_fld->lsf_control_exp);
541 lu_site2md(site)->ms_server_fld->lsf_control_exp =
544 #ifdef HAVE_QUOTA_SUPPORT
545 /* XXX: Disable quota for CMD case temporary. */
547 CWARN("Disable quota for CMD case temporary!\n");
548 cmm_child_ops(cm)->mdo_quota.mqo_off(env, cm->cmm_child, UGQUOTA);
551 /* Set max md size for the mdc. */
552 rc = cmm_post_init_mdc(env, cm);
/* Shut the CMM device down: drop the local target from the FLD client,
 * forward the shutdown config to every MDC target (dropping each from the
 * FLD as well), and tear down the upcall hook. */
556 static void cmm_device_shutdown(const struct lu_env *env,
557 struct cmm_device *cm,
558 struct lustre_cfg *cfg)
560 struct mdc_device *mc, *tmp;
563 /* Remove local target from FLD. */
564 fld_client_del_target(cm->cmm_fld, cm->cmm_local_num);
566 /* Finish all mdc devices. */
567 cfs_spin_lock(&cm->cmm_tgt_guard);
568 cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
569 struct lu_device *ld_m = mdc2lu_dev(mc);
570 fld_client_del_target(cm->cmm_fld, mc->mc_num);
571 ld_m->ld_ops->ldo_process_config(env, ld_m, cfg);
573 cfs_spin_unlock(&cm->cmm_tgt_guard);
575 /* remove upcall device*/
576 md_upcall_fini(&cm->cmm_md_dev);
/* Mount-time setup: parse this MDS's local index from config slot 2. */
581 static int cmm_device_mount(const struct lu_env *env,
582 struct cmm_device *m, struct lustre_cfg *cfg)
584 const char *index = lustre_cfg_string(cfg, 2);
587 LASSERT(index != NULL);
589 m->cmm_local_num = simple_strtol(index, &p, 10);
/* NOTE(review): "lustre_cgf" in the message below is a typo for "lustre_cfg". */
591 CERROR("Invalid index in lustre_cgf\n");
/* Dispatch a lustre_cfg command for the CMM layer.
 * ADD_MDC: register the local FLD target on the first add (this marks the
 * device CMM_INITIALIZED), then attach the new MDC.
 * SETUP: configure the lower layer first, then do CMM mount setup.
 * CLEANUP: shut this layer down, then forward to the next device.
 * NOTE(review): the case labels themselves are elided in this excerpt. */
598 static int cmm_process_config(const struct lu_env *env,
599 struct lu_device *d, struct lustre_cfg *cfg)
601 struct cmm_device *m = lu2cmm_dev(d);
602 struct lu_device *next = md2lu_dev(m->cmm_child);
606 switch(cfg->lcfg_command) {
608 /* On first ADD_MDC add also local target. */
609 if (!(m->cmm_flags & CMM_INITIALIZED)) {
610 struct lu_site *ls = cmm2lu_dev(m)->ld_site;
611 struct lu_fld_target target;
613 target.ft_srv = lu_site2md(ls)->ms_server_fld;
614 target.ft_idx = m->cmm_local_num;
615 target.ft_exp = NULL;
617 fld_client_add_target(m->cmm_fld, &target);
619 err = cmm_add_mdc(env, m, cfg);
621 /* The first ADD_MDC can be counted as setup is finished. */
622 if (!(m->cmm_flags & CMM_INITIALIZED))
623 m->cmm_flags |= CMM_INITIALIZED;
628 /* lower layers should be set up at first */
629 err = next->ld_ops->ldo_process_config(env, next, cfg);
631 err = cmm_device_mount(env, m, cfg);
636 cmm_device_shutdown(env, m, cfg);
639 err = next->ld_ops->ldo_process_config(env, next, cfg);
/* Recovery-complete notification: forward to the next (child) device. */
644 static int cmm_recovery_complete(const struct lu_env *env,
647 struct cmm_device *m = lu2cmm_dev(d);
648 struct lu_device *next = md2lu_dev(m->cmm_child);
651 rc = next->ld_ops->ldo_recovery_complete(env, next);
/* Prepare hook: delegate to the next device, passing ourselves as parent. */
655 static int cmm_prepare(const struct lu_env *env,
656 struct lu_device *pdev,
657 struct lu_device *dev)
659 struct cmm_device *cmm = lu2cmm_dev(dev);
660 struct lu_device *next = md2lu_dev(cmm->cmm_child);
664 rc = next->ld_ops->ldo_prepare(env, dev, next);
/* lu_device operations for the CMM layer (forward-declared earlier so
 * lu_device_is_cmm() can compare against it). */
668 static const struct lu_device_operations cmm_lu_ops = {
669 .ldo_object_alloc = cmm_object_alloc,
670 .ldo_process_config = cmm_process_config,
671 .ldo_recovery_complete = cmm_recovery_complete,
672 .ldo_prepare = cmm_prepare,
675 /* --- lu_device_type operations --- */
/* Upcall handler: refresh MDC EA sizes via cmm_post_init_mdc() (at least
 * for one event type whose case label is elided here), then propagate the
 * event upward with md_do_upcall(). */
676 int cmm_upcall(const struct lu_env *env, struct md_device *md,
677 enum md_upcall_event ev, void *data)
684 rc = cmm_post_init_mdc(env, md2cmm_dev(md));
686 CERROR("can not init md size %d\n", rc);
689 rc = md_do_upcall(env, md, ev, data);
/* Free the CMM device: requires all targets already detached, releases the
 * FLD client struct and finalizes the embedded md_device. Returns the next
 * (child) lu_device in the elided tail. */
694 static struct lu_device *cmm_device_free(const struct lu_env *env,
697 struct cmm_device *m = lu2cmm_dev(d);
698 struct lu_device *next = md2lu_dev(m->cmm_child);
701 LASSERT(m->cmm_tgt_count == 0);
702 LASSERT(cfs_list_empty(&m->cmm_targets));
703 if (m->cmm_fld != NULL) {
704 OBD_FREE_PTR(m->cmm_fld);
707 md_device_fini(&m->cmm_md_dev);
/* Allocate and initialize a CMM device: set up the embedded md_device,
 * install the ops vectors and upcall, and allocate the FLD client struct.
 * Returns ERR_PTR(-ENOMEM) on allocation failure (cleanup paths partially
 * elided in this excerpt). */
712 static struct lu_device *cmm_device_alloc(const struct lu_env *env,
713 struct lu_device_type *t,
714 struct lustre_cfg *cfg)
717 struct cmm_device *m;
722 l = ERR_PTR(-ENOMEM);
724 md_device_init(&m->cmm_md_dev, t);
725 m->cmm_md_dev.md_ops = &cmm_md_ops;
726 md_upcall_init(&m->cmm_md_dev, cmm_upcall);
728 l->ld_ops = &cmm_lu_ops;
730 OBD_ALLOC_PTR(m->cmm_fld);
732 cmm_device_free(env, l);
733 l = ERR_PTR(-ENOMEM);
739 /* context key constructor/destructor: cmm_key_init, cmm_key_fini */
740 LU_KEY_INIT_FINI(cmm, struct cmm_thread_info);
742 /* context key: cmm_thread_key */
743 LU_CONTEXT_KEY_DEFINE(cmm, LCT_MD_THREAD);
/* Fetch per-thread CMM scratch data from the lu_env context; must exist. */
745 struct cmm_thread_info *cmm_env_info(const struct lu_env *env)
747 struct cmm_thread_info *info;
749 info = lu_context_key_get(&env->le_ctx, &cmm_thread_key);
750 LASSERT(info != NULL);
754 /* type constructor/destructor: cmm_type_init/cmm_type_fini */
755 LU_TYPE_INIT_FINI(cmm, &cmm_thread_key);
758 * Kludge code : it should be moved mdc_device.c if mdc_(mds)_device
/* Init both device types: mdc first, then cmm; roll back mdc on failure. */
761 static int __cmm_type_init(struct lu_device_type *t)
764 rc = lu_device_type_init(&mdc_device_type);
766 rc = cmm_type_init(t);
768 lu_device_type_fini(&mdc_device_type);
/* Finalize both types; the cmm_type_fini() call is elided in this excerpt. */
773 static void __cmm_type_fini(struct lu_device_type *t)
775 lu_device_type_fini(&mdc_device_type);
/* Start the embedded mdc type; the cmm start call is elided in this excerpt. */
779 static void __cmm_type_start(struct lu_device_type *t)
781 mdc_device_type.ldt_ops->ldto_start(&mdc_device_type);
/* Stop the embedded mdc type; the cmm stop call is elided in this excerpt. */
785 static void __cmm_type_stop(struct lu_device_type *t)
787 mdc_device_type.ldt_ops->ldto_stop(&mdc_device_type);
/* Stack-time init: wire up the target list/lock, record the child device,
 * start the FLD client, publish it on the site, and create procfs entries. */
791 static int cmm_device_init(const struct lu_env *env, struct lu_device *d,
792 const char *name, struct lu_device *next)
794 struct cmm_device *m = lu2cmm_dev(d);
799 cfs_spin_lock_init(&m->cmm_tgt_guard);
800 CFS_INIT_LIST_HEAD(&m->cmm_targets);
801 m->cmm_tgt_count = 0;
802 m->cmm_child = lu2md_dev(next);
804 err = fld_client_init(m->cmm_fld, name,
805 LUSTRE_CLI_FLD_HASH_DHT);
807 CERROR("Can't init FLD, err %d\n", err);
811 /* Assign site's fld client ref, needed for asserts in osd. */
812 ls = cmm2lu_dev(m)->ld_site;
813 lu_site2md(ls)->ms_client_fld = m->cmm_fld;
814 err = cmm_procfs_init(m, name);
/* Tear the CMM device down: detach and free every MDC target (dropping the
 * per-child self-reference taken in cmm_add_mdc), shut the FLD client down,
 * and clear the site's client-FLD pointer. Returns the child device so the
 * generic code can continue finalizing the stack. */
819 static struct lu_device *cmm_device_fini(const struct lu_env *env,
820 struct lu_device *ld)
822 struct cmm_device *cm = lu2cmm_dev(ld);
823 struct mdc_device *mc, *tmp;
827 /* Finish all mdc devices */
828 cfs_spin_lock(&cm->cmm_tgt_guard);
829 cfs_list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
830 struct lu_device *ld_m = mdc2lu_dev(mc);
831 struct lu_device *ld_c = cmm2lu_dev(cm);
833 cfs_list_del_init(&mc->mc_linkage);
834 lu_ref_del(&ld_c->ld_reference, "mdc-child", ld_m);
836 ld_m->ld_type->ldt_ops->ldto_device_fini(env, ld_m);
837 ld_m->ld_type->ldt_ops->ldto_device_free(env, ld_m);
840 cfs_spin_unlock(&cm->cmm_tgt_guard);
842 fld_client_proc_fini(cm->cmm_fld);
843 fld_client_fini(cm->cmm_fld);
844 ls = cmm2lu_dev(cm)->ld_site;
845 lu_site2md(ls)->ms_client_fld = NULL;
848 RETURN (md2lu_dev(cm->cmm_child));
/* lu_device_type operations: lifecycle entry points registered with the
 * generic device framework. */
851 static struct lu_device_type_operations cmm_device_type_ops = {
852 .ldto_init = __cmm_type_init,
853 .ldto_fini = __cmm_type_fini,
855 .ldto_start = __cmm_type_start,
856 .ldto_stop = __cmm_type_stop,
858 .ldto_device_alloc = cmm_device_alloc,
859 .ldto_device_free = cmm_device_free,
861 .ldto_device_init = cmm_device_init,
862 .ldto_device_fini = cmm_device_fini
/* Device-type descriptor for CMM, registered in cmm_mod_init(). */
865 static struct lu_device_type cmm_device_type = {
866 .ldt_tags = LU_DEVICE_MD,
867 .ldt_name = LUSTRE_CMM_NAME,
868 .ldt_ops = &cmm_device_type_ops,
869 .ldt_ctx_tags = LCT_MD_THREAD | LCT_DT_THREAD
/* procfs variable tables; entries (if any) are elided in this excerpt. */
872 struct lprocfs_vars lprocfs_cmm_obd_vars[] = {
876 struct lprocfs_vars lprocfs_cmm_module_vars[] = {
/* Hand the module/obd procfs tables above to the lprocfs framework. */
880 static void lprocfs_cmm_init_vars(struct lprocfs_static_vars *lvars)
882 lvars->module_vars = lprocfs_cmm_module_vars;
883 lvars->obd_vars = lprocfs_cmm_obd_vars;
/* Module entry: register the CMM OBD type with its procfs variables. */
887 static int __init cmm_mod_init(void)
889 struct lprocfs_static_vars lvars;
891 lprocfs_cmm_init_vars(&lvars);
892 return class_register_type(&cmm_obd_device_ops, NULL, lvars.module_vars,
893 LUSTRE_CMM_NAME, &cmm_device_type);
/* Module exit: unregister the CMM OBD type. */
896 static void __exit cmm_mod_exit(void)
898 class_unregister_type(LUSTRE_CMM_NAME);
/* Kernel module metadata and registration of init/exit entry points. */
901 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
902 MODULE_DESCRIPTION("Lustre Clustered Metadata Manager ("LUSTRE_CMM_NAME")");
903 MODULE_LICENSE("GPL");
905 cfs_module(cmm, "0.1.0", cmm_mod_init, cmm_mod_exit);