+ lovconfig_uuid = mds.get_first_ref('lovconfig')
+ if lovconfig_uuid:
+ lovconfig = mds.lookup(lovconfig_uuid)
+ obd_uuid = lovconfig.get_first_ref('lov')
+ else:
+ obd_uuid = fs.get_first_ref('obd')
+
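+        # create the client for this fs and record its setup in a config log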
+ client_uuid = generate_client_uuid(self.name)
+ client = VOSC(self.db.lookup(obd_uuid), client_uuid, self.name,
+ self.name)
+ config.record = 1
+ lctl.clear_log(self.name, self.name)
+ lctl.record(self.name, self.name)
+ client.prepare()
+ lctl.mount_option(self.name, client.get_name(), "")
+ lctl.end_record()
+ process_updates(self.db, self.name, self.name, client)
+
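+        # record the matching cleanup commands into a '<name>-clean' log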
+ config.cleanup = 1
+ lctl.clear_log(self.name, self.name + '-clean')
+ lctl.record(self.name, self.name + '-clean')
+ client.cleanup()
+ lctl.del_mount_option(self.name)
+ lctl.end_record()
+ process_updates(self.db, self.name, self.name + '-clean', client)
+ config.cleanup = 0
+ config.record = 0
+
+ # record logs for each client
+ if config.noexec:
+ noexec_opt = '-n'
+ else:
+ noexec_opt = ''
+ if config.ldapurl:
+ config_options = "--ldapurl " + config.ldapurl + " --config " + config.config
+ else:
+ config_options = CONFIG_FILE
+
+ for node_db in self.db.lookup_class('node'):
+ client_name = node_db.getName()
+ for prof_uuid in node_db.get_refs('profile'):
+ prof_db = node_db.lookup(prof_uuid)
+                # refactor this into a function to test "clientness"
+                # of a node (see the profile_is_client sketch below).
+ for ref_class, ref_uuid in prof_db.get_all_refs():
+                    if ref_class in ('mountpoint', 'echoclient'):
+ debug("recording", client_name)
+ old_noexec = config.noexec
+ config.noexec = 0
+                        ret, out = run (sys.argv[0], noexec_opt,
+                                        "-v --record --nomod",
+ "--record_log", client_name,
+ "--record_device", self.name,
+ "--node", client_name,
+ config_options)
+ if config.verbose:
+ for s in out: log("record> ", string.strip(s))
+ ret, out = run (sys.argv[0], noexec_opt,
+ "--cleanup -v --record --nomod",
+ "--record_log", client_name + "-clean",
+ "--record_device", self.name,
+ "--node", client_name,
+ config_options)
+ if config.verbose:
+ for s in out: log("record> ", string.strip(s))
+ config.noexec = old_noexec
+ if do_cleanup:
+ try:
+ lctl.cleanup(self.name, self.uuid, 0, 0)
+ except CommandError, e:
+ log(self.module_name, "cleanup failed: ", self.name)
+ e.dump()
+ cleanup_error(e.rc)
+ Module.cleanup(self)
+
+ if self.fstype == 'smfs':
+ clean_loop(self.backdevpath)
+ else:
+ clean_loop(self.devpath)
+
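+    # A hedged sketch of the "clientness" helper suggested in the record
+    # loop above (hypothetical; assumes profile dbs expose get_all_refs()
+    # exactly as they are used there):
+    def profile_is_client(self, prof_db):
+        # a profile marks a client node if it references a mountpoint or
+        # an echo client
+        for ref_class, ref_uuid in prof_db.get_all_refs():
+            if ref_class in ('mountpoint', 'echoclient'):
+                return 1
+        return 0
+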
+ def msd_remaining(self):
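+        # returns 1 if some mds device is still configured, None otherwise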
+ out = lctl.device_list()
+ for s in out:
+ if string.split(s)[2] in ('mds',):
+ return 1
+
+ def safe_to_clean(self):
+ return self.active
+
+ def safe_to_clean_modules(self):
+ return not self.msd_remaining()
+
+ def cleanup(self):
+ if not self.active:
+ debug(self.uuid, "not active")
+ return
+ self.info()
+ if is_prepared(self.name):
+ try:
+ lctl.cleanup(self.name, self.uuid, config.force,
+ config.failover)
+ except CommandError, e:
+ log(self.module_name, "cleanup failed: ", self.name)
+ e.dump()
+ cleanup_error(e.rc)
+ Module.cleanup(self)
+ # cleanup LMV
+ if self.master_mds:
+ self.master.cleanup()
+ if not self.msd_remaining() and is_prepared('MDT'):
+ try:
+ lctl.cleanup("MDT", "MDT_UUID", config.force,
+ config.failover)
+ except CommandError, e:
+                log(self.module_name, "cleanup failed: ", self.name)
+ e.dump()
+ cleanup_error(e.rc)
+
+ if self.fstype == 'smfs':
+ clean_loop(self.backdevpath)
+ else:
+ clean_loop(self.devpath)
+
+ def correct_level(self, level, op=None):
+ #if self.master_mds:
+ # level = level + 2
+ return level
+
+class OSD(Module):
+ def __init__(self, db):
+ Module.__init__(self, 'OSD', db)
+ self.osdtype = self.db.get_val('osdtype')
+ self.devpath = self.db.get_val('devpath', '')
+ self.backdevpath = self.db.get_val('backdevpath', '')
+ self.size = self.db.get_val_int('devsize', 0)
+ self.journal_size = self.db.get_val_int('journalsize', 0)
+ self.inode_size = self.db.get_val_int('inodesize', 0)
+ self.mkfsoptions = self.db.get_val('mkfsoptions', '')
+ self.mountfsoptions = self.db.get_val('mountfsoptions', '')
+ self.fstype = self.db.get_val('fstype', '')
+ self.backfstype = self.db.get_val('backfstype', '')
+ self.nspath = self.db.get_val('nspath', '')
+ target_uuid = self.db.get_first_ref('target')
+ ost = self.db.lookup(target_uuid)
+ self.name = ost.getName()
+ self.format = self.db.get_val('autoformat', 'yes')
+ if ost.get_val('failover', 0):
+ self.failover_ost = 'f'
+ else:
+ self.failover_ost = 'n'
+
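+        # this instance is active only if it is the currently active
+        # failover target (and matches --group, if one was given)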
+ active_uuid = get_active_target(ost)
+ if not active_uuid:
+ panic("No target device found:", target_uuid)
+ if active_uuid == self.uuid:
+ self.active = 1
+ else:
+ self.active = 0
+ if self.active and config.group and config.group != ost.get_val('group'):
+ self.active = 0
+
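+        # from here on this OSD is addressed by its target's uuid; the
+        # device's own uuid is kept in target_dev_uuid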
+ self.target_dev_uuid = self.uuid
+ self.uuid = target_uuid
+ # modules
+ self.add_lustre_module('ost', 'ost')
+ if self.fstype == 'smfs':
+ self.add_lustre_module('smfs', 'smfs')
+ # FIXME: should we default to ext3 here?
+ if self.fstype == 'ldiskfs':
+ self.add_lustre_module('ldiskfs', 'ldiskfs')
+ if self.fstype:
+            self.add_lustre_module('lvfs', 'fsfilt_%s' % (self.fstype))
+            if self.fstype == 'smfs':
+                self.add_lustre_module('lvfs', 'fsfilt_%s' % (self.backfstype))
+
+        # mountfsoptions is a comma-separated string, so split it first;
+        # iterating the raw string would only compare single characters
+        for option in string.split(self.mountfsoptions, ','):
+            if string.strip(option) == 'snap':
+                if self.fstype != 'smfs':
+                    panic("mountfsoptions with snap, but fstype is not smfs\n")
+ self.add_lustre_module('lvfs', 'fsfilt_snap_%s' % (self.fstype))
+ self.add_lustre_module('lvfs', 'fsfilt_snap_%s' % (self.backfstype))
+
+ self.add_lustre_module(self.osdtype, self.osdtype)
+
+ def load_module(self):
+ if self.active:
+ Module.load_module(self)
+
+ # need to check /proc/mounts and /etc/mtab before
+ # formatting anything.
+ # FIXME: check if device is already formatted.
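+    # A hedged sketch for the FIXME above (hypothetical helper, not part
+    # of this change): scan /proc/mounts and /etc/mtab for the device
+    # before formatting it.
+    def device_is_mounted(self, devpath):
+        # returns 1 when devpath appears as the source device of a mount
+        for tab in ('/proc/mounts', '/etc/mtab'):
+            try:
+                f = open(tab)
+                lines = f.readlines()
+                f.close()
+            except IOError:
+                continue
+            for line in lines:
+                fields = string.split(line)
+                if fields and fields[0] == devpath:
+                    return 1
+        return 0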