/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lustre/cmm/cmm_device.c
 * Lustre Cluster Metadata Manager (cmm)
 *
 * Copyright (c) 2006 Cluster File Systems, Inc.
 * Author: Mike Pershin <tappro@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_MDS
34 #include <linux/module.h>
37 #include <obd_class.h>
38 #include <lprocfs_status.h>
39 #include <lustre_ver.h>
40 #include "cmm_internal.h"
41 #include "mdc_internal.h"
43 static struct obd_ops cmm_obd_device_ops = {
44 .o_owner = THIS_MODULE
47 static struct lu_device_operations cmm_lu_ops;
49 static inline int lu_device_is_cmm(struct lu_device *d)
51 return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &cmm_lu_ops);
54 int cmm_root_get(const struct lu_env *env, struct md_device *md,
57 struct cmm_device *cmm_dev = md2cmm_dev(md);
58 /* valid only on master MDS */
59 if (cmm_dev->cmm_local_num == 0)
60 return cmm_child_ops(cmm_dev)->mdo_root_get(env,
61 cmm_dev->cmm_child, fid);
66 static int cmm_statfs(const struct lu_env *env, struct md_device *md,
69 struct cmm_device *cmm_dev = md2cmm_dev(md);
73 rc = cmm_child_ops(cmm_dev)->mdo_statfs(env,
74 cmm_dev->cmm_child, sfs);
78 static int cmm_maxsize_get(const struct lu_env *env, struct md_device *md,
79 int *md_size, int *cookie_size)
81 struct cmm_device *cmm_dev = md2cmm_dev(md);
84 rc = cmm_child_ops(cmm_dev)->mdo_maxsize_get(env, cmm_dev->cmm_child,
85 md_size, cookie_size);
89 static int cmm_init_capa_ctxt(const struct lu_env *env, struct md_device *md,
90 int mode , unsigned long timeout, __u32 alg,
91 struct lustre_capa_key *keys)
93 struct cmm_device *cmm_dev = md2cmm_dev(md);
96 LASSERT(cmm_child_ops(cmm_dev)->mdo_init_capa_ctxt);
97 rc = cmm_child_ops(cmm_dev)->mdo_init_capa_ctxt(env, cmm_dev->cmm_child,
103 static int cmm_update_capa_key(const struct lu_env *env,
104 struct md_device *md,
105 struct lustre_capa_key *key)
107 struct cmm_device *cmm_dev = md2cmm_dev(md);
110 rc = cmm_child_ops(cmm_dev)->mdo_update_capa_key(env,
116 static struct md_device_operations cmm_md_ops = {
117 .mdo_statfs = cmm_statfs,
118 .mdo_root_get = cmm_root_get,
119 .mdo_maxsize_get = cmm_maxsize_get,
120 .mdo_init_capa_ctxt = cmm_init_capa_ctxt,
121 .mdo_update_capa_key= cmm_update_capa_key,
/* Device type of the MDC children; defined by the mdc layer and used by
 * cmm_add_mdc() below. */
extern struct lu_device_type mdc_device_type;
126 static int cmm_post_init_mdc(const struct lu_env *env,
127 struct cmm_device *cmm)
129 int max_mdsize, max_cookiesize, rc;
130 struct mdc_device *mc, *tmp;
132 /* get the max mdsize and cookiesize from lower layer */
133 rc = cmm_maxsize_get(env, &cmm->cmm_md_dev, &max_mdsize,
138 spin_lock(&cmm->cmm_tgt_guard);
139 list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets,
141 mdc_init_ea_size(env, mc, max_mdsize, max_cookiesize);
143 spin_unlock(&cmm->cmm_tgt_guard);
147 /* --- cmm_lu_operations --- */
148 /* add new MDC to the CMM, create MDC lu_device and connect it to mdc_obd */
149 static int cmm_add_mdc(const struct lu_env *env,
150 struct cmm_device *cm, struct lustre_cfg *cfg)
152 struct lu_device_type *ldt = &mdc_device_type;
153 char *p, *num = lustre_cfg_string(cfg, 2);
154 struct mdc_device *mc, *tmp;
155 struct lu_fld_target target;
156 struct lu_device *ld;
162 /* find out that there is no such mdc */
164 mdc_num = simple_strtol(num, &p, 10);
166 CERROR("Invalid index in lustre_cgf, offset 2\n");
170 spin_lock(&cm->cmm_tgt_guard);
171 list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
173 if (mc->mc_num == mdc_num) {
174 spin_unlock(&cm->cmm_tgt_guard);
178 spin_unlock(&cm->cmm_tgt_guard);
179 ld = ldt->ldt_ops->ldto_device_alloc(env, ldt, cfg);
180 ld->ld_site = cmm2lu_dev(cm)->ld_site;
182 rc = ldt->ldt_ops->ldto_device_init(env, ld, NULL);
184 ldt->ldt_ops->ldto_device_free(env, ld);
187 /* pass config to the just created MDC */
188 rc = ld->ld_ops->ldo_process_config(env, ld, cfg);
192 spin_lock(&cm->cmm_tgt_guard);
193 list_for_each_entry_safe(mc, tmp, &cm->cmm_targets,
195 if (mc->mc_num == mdc_num) {
196 spin_unlock(&cm->cmm_tgt_guard);
197 ldt->ldt_ops->ldto_device_fini(env, ld);
198 ldt->ldt_ops->ldto_device_free(env, ld);
203 list_add_tail(&mc->mc_linkage, &cm->cmm_targets);
205 spin_unlock(&cm->cmm_tgt_guard);
207 lu_device_get(cmm2lu_dev(cm));
209 ls = cm->cmm_md_dev.md_lu_dev.ld_site;
211 target.ft_srv = NULL;
212 target.ft_idx = mc->mc_num;
213 target.ft_exp = mc->mc_desc.cl_exp;
215 fld_client_add_target(ls->ls_client_fld, &target);
217 /* set max md size for the mdc */
218 rc = cmm_post_init_mdc(env, cm);
223 static void cmm_device_shutdown(const struct lu_env *env,
224 struct cmm_device *cm,
225 struct lustre_cfg *cfg)
227 struct mdc_device *mc, *tmp;
230 /* finish all mdc devices */
231 spin_lock(&cm->cmm_tgt_guard);
232 list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
233 struct lu_device *ld_m = mdc2lu_dev(mc);
234 ld_m->ld_ops->ldo_process_config(env, ld_m, cfg);
236 spin_unlock(&cm->cmm_tgt_guard);
240 static int cmm_device_mount(const struct lu_env *env,
241 struct cmm_device *m, struct lustre_cfg *cfg)
243 const char *index = lustre_cfg_string(cfg, 2);
246 LASSERT(index != NULL);
248 m->cmm_local_num = simple_strtol(index, &p, 10);
250 CERROR("Invalid index in lustre_cgf\n");
257 static int cmm_process_config(const struct lu_env *env,
258 struct lu_device *d, struct lustre_cfg *cfg)
260 struct cmm_device *m = lu2cmm_dev(d);
261 struct lu_device *next = md2lu_dev(m->cmm_child);
265 switch(cfg->lcfg_command) {
267 err = cmm_add_mdc(env, m, cfg);
268 /* the first ADD_MDC can be counted as setup is finished */
269 if ((m->cmm_flags & CMM_INITIALIZED) == 0)
270 m->cmm_flags |= CMM_INITIALIZED;
274 /* lower layers should be set up at first */
275 err = next->ld_ops->ldo_process_config(env, next, cfg);
277 err = cmm_device_mount(env, m, cfg);
282 cmm_device_shutdown(env, m, cfg);
285 err = next->ld_ops->ldo_process_config(env, next, cfg);
290 static int cmm_recovery_complete(const struct lu_env *env,
293 struct cmm_device *m = lu2cmm_dev(d);
294 struct lu_device *next = md2lu_dev(m->cmm_child);
297 rc = next->ld_ops->ldo_recovery_complete(env, next);
301 static struct lu_device_operations cmm_lu_ops = {
302 .ldo_object_alloc = cmm_object_alloc,
303 .ldo_process_config = cmm_process_config,
304 .ldo_recovery_complete = cmm_recovery_complete
307 /* --- lu_device_type operations --- */
308 int cmm_upcall(const struct lu_env *env, struct md_device *md,
309 enum md_upcall_event ev)
311 struct md_device *upcall_dev;
315 upcall_dev = md->md_upcall.mu_upcall_dev;
320 rc = cmm_post_init_mdc(env, md2cmm_dev(md));
322 CERROR("can not init md size %d\n", rc);
324 rc = upcall_dev->md_upcall.mu_upcall(env,
325 md->md_upcall.mu_upcall_dev, ev);
330 static struct lu_device *cmm_device_alloc(const struct lu_env *env,
331 struct lu_device_type *t,
332 struct lustre_cfg *cfg)
335 struct cmm_device *m;
341 l = ERR_PTR(-ENOMEM);
343 md_device_init(&m->cmm_md_dev, t);
344 m->cmm_md_dev.md_ops = &cmm_md_ops;
345 m->cmm_md_dev.md_upcall.mu_upcall = cmm_upcall;
347 l->ld_ops = &cmm_lu_ops;
353 static void cmm_device_free(const struct lu_env *env, struct lu_device *d)
355 struct cmm_device *m = lu2cmm_dev(d);
357 LASSERT(m->cmm_tgt_count == 0);
358 LASSERT(list_empty(&m->cmm_targets));
359 md_device_fini(&m->cmm_md_dev);
363 /* context key constructor/destructor */
364 static void *cmm_key_init(const struct lu_context *ctx,
365 struct lu_context_key *key)
367 struct cmm_thread_info *info;
369 CLASSERT(CFS_PAGE_SIZE >= sizeof *info);
372 info = ERR_PTR(-ENOMEM);
/* Release the per-thread cmm_thread_info allocated in cmm_key_init(). */
static void cmm_key_fini(const struct lu_context *ctx,
                         struct lu_context_key *key, void *data)
{
        struct cmm_thread_info *info = data;
        OBD_FREE_PTR(info);
}
383 static struct lu_context_key cmm_thread_key = {
384 .lct_tags = LCT_MD_THREAD,
385 .lct_init = cmm_key_init,
386 .lct_fini = cmm_key_fini
389 struct cmm_thread_info *cmm_env_info(const struct lu_env *env)
391 struct cmm_thread_info *info;
393 info = lu_context_key_get(&env->le_ctx, &cmm_thread_key);
394 LASSERT(info != NULL);
398 static int cmm_type_init(struct lu_device_type *t)
400 return lu_context_key_register(&cmm_thread_key);
403 static void cmm_type_fini(struct lu_device_type *t)
405 lu_context_key_degister(&cmm_thread_key);
408 static int cmm_device_init(const struct lu_env *env,
409 struct lu_device *d, struct lu_device *next)
411 struct cmm_device *m = lu2cmm_dev(d);
415 spin_lock_init(&m->cmm_tgt_guard);
416 INIT_LIST_HEAD(&m->cmm_targets);
417 m->cmm_tgt_count = 0;
418 m->cmm_child = lu2md_dev(next);
423 static struct lu_device *cmm_device_fini(const struct lu_env *env,
424 struct lu_device *ld)
426 struct cmm_device *cm = lu2cmm_dev(ld);
427 struct mdc_device *mc, *tmp;
429 /* finish all mdc devices */
430 spin_lock(&cm->cmm_tgt_guard);
431 list_for_each_entry_safe(mc, tmp, &cm->cmm_targets, mc_linkage) {
432 struct lu_device *ld_m = mdc2lu_dev(mc);
434 list_del_init(&mc->mc_linkage);
435 lu_device_put(cmm2lu_dev(cm));
436 ld_m->ld_type->ldt_ops->ldto_device_fini(env, ld_m);
437 ld_m->ld_type->ldt_ops->ldto_device_free(env, ld_m);
440 spin_unlock(&cm->cmm_tgt_guard);
442 RETURN (md2lu_dev(cm->cmm_child));
445 static struct lu_device_type_operations cmm_device_type_ops = {
446 .ldto_init = cmm_type_init,
447 .ldto_fini = cmm_type_fini,
449 .ldto_device_alloc = cmm_device_alloc,
450 .ldto_device_free = cmm_device_free,
452 .ldto_device_init = cmm_device_init,
453 .ldto_device_fini = cmm_device_fini
456 static struct lu_device_type cmm_device_type = {
457 .ldt_tags = LU_DEVICE_MD,
458 .ldt_name = LUSTRE_CMM_NAME,
459 .ldt_ops = &cmm_device_type_ops,
460 .ldt_ctx_tags = LCT_MD_THREAD | LCT_DT_THREAD
463 struct lprocfs_vars lprocfs_cmm_obd_vars[] = {
467 struct lprocfs_vars lprocfs_cmm_module_vars[] = {
/* Bind the lprocfs tables above to the "cmm" name. */
LPROCFS_INIT_VARS(cmm, lprocfs_cmm_module_vars, lprocfs_cmm_obd_vars);
473 static int __init cmm_mod_init(void)
475 struct lprocfs_static_vars lvars;
477 printk(KERN_INFO "Lustre: Clustered Metadata Manager; info@clusterfs.com\n");
479 lprocfs_init_vars(cmm, &lvars);
480 return class_register_type(&cmm_obd_device_ops, NULL, lvars.module_vars,
481 LUSTRE_CMM_NAME, &cmm_device_type);
484 static void __exit cmm_mod_exit(void)
486 class_unregister_type(LUSTRE_CMM_NAME);
489 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
490 MODULE_DESCRIPTION("Lustre Clustered Metadata Manager ("LUSTRE_CMM_NAME")");
491 MODULE_LICENSE("GPL");
493 cfs_module(cmm, "0.1.0", cmm_mod_init, cmm_mod_exit);