3 # Copyright (C) 2002 Cluster File Systems, Inc.
4 # Author: Robert Read <rread@clusterfs.com>
6 # This file is part of Lustre, http://www.lustre.org.
8 # Lustre is free software; you can redistribute it and/or
9 # modify it under the terms of version 2 of the GNU General Public
10 # License as published by the Free Software Foundation.
12 # Lustre is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
17 # You should have received a copy of the GNU General Public License
18 # along with Lustre; if not, write to the Free Software
19 # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 # lconf - lustre configuration tool
23 # lconf is the main driver script for starting and stopping
24 # lustre filesystem services.
26 # Based in part on the XML obdctl modifications done by Brian Behlendorf
29 import string, os, stat, popen2, socket, time
31 import xml.dom.minidom
# Maximum number of devices to search for.
# (the /dev/loop* nodes need to be created beforehand)
# Upper bound used by the loop-device helpers when probing /dev/loop*.
MAX_LOOP_DEVICES = 256
43 print """usage: lconf config.xml
45 config.xml Lustre configuration in xml format.
46 --get <url> URL to fetch a config file
47 --node <nodename> Load config for <nodename>
48 -d | --cleanup Cleans up config. (Shutdown)
49 -v | --verbose Print system commands as they are run
50 -h | --help Print this help
51 --gdb Prints message after creating gdb module script
52 and sleeps for 5 seconds.
53 -n | --noexec Prints the commands and steps that will be run for a
54 config without executing them. This can used to check if a
55 config file is doing what it should be doing. (Implies -v)
56 --nomod Skip load/unload module step.
57 --nosetup Skip device setup/cleanup step.
60 --ldap server LDAP server with lustre config database
61 --makeldiff Translate xml source to LDIFF
62 --reformat Reformat all devices (will confirm)
63 This are perhaps not needed:
64 --lustre="src dir" Base directory of lustre sources. Used to search
66 --portals=src Portals source
70 # ============================================================
71 # Config parameters, encapsulated in a class
# Tail of Config.__init__ (the opening lines are elided in this view):
# default locations of the generated gdb helper script and the debug log.
self._gdb_script = '/tmp/ogdb'
self._debug_path = '/tmp/lustre-log'
def verbose(self, flag = None):
    # Combined getter/setter: a truthy flag overwrites the stored value.
    # NOTE(review): the `return self._verbose` line is elided in this view.
    if flag: self._verbose = flag
def noexec(self, flag = None):
    # Get/set accessor for the dry-run flag (print commands, don't run them).
    # NOTE(review): the `return self._noexec` line is elided in this view.
    if flag: self._noexec = flag
def reformat(self, flag = None):
    """Get/set the reformat flag; a truthy argument updates it first."""
    if flag:
        self._reformat = flag
    return self._reformat
def cleanup(self, flag = None):
    # Get/set accessor: run the shutdown path instead of startup.
    # NOTE(review): the `return self._cleanup` line (and the corresponding
    # returns in the accessors below) are elided in this view.
    if flag: self._cleanup = flag
def gdb(self, flag = None):
    # Get/set accessor: pause after writing the gdb module script.
    if flag: self._gdb = flag
def nomod(self, flag = None):
    # Get/set accessor: skip the module load/unload step.
    if flag: self._nomod = flag
def nosetup(self, flag = None):
    # Get/set accessor: skip the device setup/cleanup step.
    if flag: self._nosetup = flag
def node(self, val = None):
    # Get/set accessor for the node name selected with --node.
    if val: self._node = val
def url(self, val = None):
    # Get/set accessor for the config URL given with --get.
    if val: self._url = val
def gdb_script(self):
    """Return the gdb script path, relocated under /r when that root exists."""
    path = self._gdb_script
    if os.path.isdir('/r'):
        path = '/r' + path
    return path
def debug_path(self):
    """Return the debug log path, relocated under /r when that root exists."""
    if not os.path.isdir('/r'):
        return self._debug_path
    return '/r' + self._debug_path
def src_dir(self, val = None):
    # Get/set accessor for the lustre source directory (--lustre /
    # setupModulePath()).
    # Bug fix: this previously assigned to self._url — a copy/paste from
    # url() above — so the source directory was never recorded and the
    # stored config URL was silently clobbered instead.
    if val: self._src_dir = val
144 # ============================================================
145 # debugging and error funcs
def fixme(msg = "this feature"):
    """Abort with a standard 'not implemented' error for the given feature."""
    # Bug fix: corrected the misspelled "implmemented" in the user-visible
    # message, and use the call form of raise (identical behavior in
    # Python 2, and forward-portable).
    raise LconfError(msg + ' not implemented yet.')
# Fragments of the error/logging helpers; their `def` lines are elided in
# this view.  panic() joins its args into one message and raises unless
# running in --noexec mode:
msg = string.join(map(str,args))
if not config.noexec():
    raise LconfError(msg)
# log()-style helper body: join args into a printable message.
msg = string.join(map(str,args))
# logall()-style helper body: print each captured output line stripped.
print string.strip(s)
# debug()-style helper body: join args; printed only when verbose is set.
msg = string.join(map(str,args))
169 # ============================================================
170 # locally defined exceptions
# Raised when an external command (lctl, insmod, mkfs, ...) fails.
class CommandError (exceptions.Exception):
    def __init__(self, cmd_name, cmd_err, rc=None):
        # cmd_name: the program that failed; cmd_err: its output, either a
        # single string or a list of lines; rc: exit status.
        # NOTE(review): the `self.rc = rc` assignment is elided in this
        # view, although the dump code below reads self.rc.
        self.cmd_name = cmd_name
        self.cmd_err = cmd_err
    # Body of the error-reporting method; its `def` line and the
    # `if self.rc:` / `else:` branch headers are elided in this view.
    # Prints cmd_err appropriately whether it is a string or a line list.
        if type(self.cmd_err) == types.StringType:
            print "! %s (%d): %s" % (self.cmd_name, self.rc, self.cmd_err)
            print "! %s: %s" % (self.cmd_name, self.cmd_err)
        elif type(self.cmd_err) == types.ListType:
            print "! %s (error %d):" % (self.cmd_name, self.rc)
            print "! %s:" % (self.cmd_name)
            for s in self.cmd_err:
                print "> %s" %(string.strip(s))
# Generic lconf failure, raised by panic() and fixme().
class LconfError (exceptions.Exception):
    def __init__(self, args):
        # NOTE(review): the body is elided in this view (presumably
        # `self.args = args`).
199 # ============================================================
200 # handle lctl interface
    Manage communication with lctl
    def __init__(self, cmd):
        Initialize class by finding the lctl binary.
        self.lctl = find_prog(cmd)
        # NOTE(review): the surrounding if/else is elided; on lookup failure
        # the original debugs in --noexec mode or raises otherwise.
        debug('! lctl not found')
        raise CommandError('lctl', "unable to find lctl binary.")
    # run(cmds) docstring fragment follows (its quotes are elided):
        the cmds are written to stdin of lctl
        lctl doesn't return errors when run in script mode, so
        should modify command line to accept multiple commands, or
        create complex command line options
        debug("+", self.lctl, cmds)
        if config.noexec(): return (0, [])
        # Feed the command script to lctl over stdin and collect its output.
        p = popen2.Popen3(self.lctl, 1)
        p.tochild.write(cmds + "\n")
        out = p.fromchild.readlines()
        err = p.childerr.readlines()
        # NOTE(review): `ret` comes from the elided close/wait handling.
        raise CommandError(self.lctl, err, ret)
    def network(self, net, nid):
        """ initialized network and add "self" """
        # Idea: "mynid" could be used for all network types to add "self," and then
        # this special case would be gone and the "self" hack would be hidden.
        quit""" % (net, nid, nid)
    # create a new connection
    def connect(self, net, nid, port, servuuid, send_buf, read_buf):
        # XXX: buf size params not used yet
        quit""" % (net, nid, port, servuuid, nid)
    # create a new connection
    def add_route(self, net, to, via):
    # disconnect one connection
    def disconnect(self, net, nid, port, servuuid):
        quit""" % (net, nid, servuuid)
    # disconnect all connections
    def disconnectAll(self, net):
    # create a new device with lctl
    def newdev(self, attach, setup = ""):
        quit""" % (attach, setup)
    # tear down the named device via lctl
    def cleanup(self, name, uuid):
    # configure a LOV device.  NOTE(review): the elided leading template
    # lines (device/attach) consume the extra format argument, which is why
    # seven values feed six visible %-specifiers here.
    def lovconfig(self, uuid, mdsuuid, stripe_cnt, stripe_sz, stripe_off, pattern, devlist):
        lovconfig %s %d %d %d %s %s
        quit""" % (mdsuuid, uuid, stripe_cnt, stripe_sz, stripe_off, pattern, devlist)
319 # ============================================================
320 # Various system-level functions
321 # (ideally moved to their own module)
# Run a command and return the output and status.
# stderr is sent to /dev/null, could use popen3 to
# save it if necessary
# -- run(*args) body fragment; its `def` line is elided in this view --
    cmd = string.join(map(str,args))
    if config.noexec(): return (0, [])
    f = os.popen(cmd + ' 2>&1')
# Run a command in the background.
def run_daemon(*args):
    cmd = string.join(map(str,args))
    if config.noexec(): return 0
    f = os.popen(cmd + ' 2>&1')
# Determine full path to use for an external command
# searches dirname(argv[0]) first, then PATH
# -- find_prog(cmd) body fragment; `def` line is elided.  Builds a search
# path preferring the script's own dir and the in-tree portals utils. --
    syspath = string.split(os.environ['PATH'], ':')
    cmdpath = os.path.dirname(sys.argv[0])
    syspath.insert(0, cmdpath);
    syspath.insert(0, os.path.join(cmdpath, '../../portals/linux/utils/'))
    # NOTE(review): the `for d in syspath:` loop header is elided here.
    prog = os.path.join(d,cmd)
    if os.access(prog, os.X_OK):
# Recursively look for file starting at base dir
def do_find_file(base, mod):
    fullname = os.path.join(base, mod)
    if os.access(fullname, os.R_OK):
    # NOTE(review): `return fullname` is elided here.
    for d in os.listdir(base):
        dir = os.path.join(base,d)
        if os.path.isdir(dir):
            module = do_find_file(dir, mod)
            # NOTE(review): `if module: return module` is elided here.
def find_module(src_dir, modname):
    # Look for the module's object file under both source trees.
    mod = '%s.o' % (modname)
    search = (src_dir + "/lustre", src_dir + "/portals/linux")
    # NOTE(review): the `for d in search:` loop header is elided here.
    module = do_find_file(d, mod)
# is the path a block device?
# -- is_block(dev) body fragment; the `def`/os.stat lines are elided --
    return stat.S_ISBLK(s[stat.ST_MODE])
# build fs according to type
def mkfs(fstype, dev):
    if(fstype in ('ext3', 'extN')):
        mkfs = 'mkfs.ext2 -j -b 4096'
    # NOTE(review): the `else:` branch header is elided before this line.
    print 'unsupported fs type: ', fstype
    if not is_block(dev):
    # NOTE(review): the force-flag handling is elided here.
    (ret, out) = run (mkfs, force, dev)
    panic("Unable to build fs:", dev)
    # enable hash tree indexing on fs
    htree = 'echo "feature FEATURE_C5" | debugfs -w'
    (ret, out) = run (htree, dev)
    panic("Unable to enable htree:", dev)
# some systems use /dev/loopN, some /dev/loop/N
# -- loop_base() body fragment; `def` and the first candidate assignment
# are elided in this view --
    if not os.access(loop + str(0), os.R_OK):
        # retry with the alternate /dev/loop naming scheme
        if not os.access(loop + str(0), os.R_OK):
            panic ("can't access loop devices")
# find loop device assigned to the file
# -- find_loop(file) body fragment; its `def` line is elided --
    for n in xrange(0, MAX_LOOP_DEVICES):
        # NOTE(review): `dev = loop + str(n)` is elided here.  Also note the
        # local `stat` below shadows the imported stat module (pre-existing
        # quirk, left as-is).
        if os.access(dev, os.R_OK):
            (stat, out) = run('losetup', dev)
            if (out and stat == 0):
                # losetup reports the backing file in parentheses
                m = re.search(r'\((.*)\)', out[0])
                if m and file == m.group(1):
# create file if necessary and assign the first free loop device
def init_loop(file, size, fstype):
    dev = find_loop(file)
    # NOTE(review): the guard around this warning is elided here.
    print 'WARNING file:', file, 'already mapped to', dev
    if not os.access(file, os.R_OK | os.W_OK):
        # sparse-create the backing file (count=0, seek to the target size)
        run("dd if=/dev/zero bs=1k count=0 seek=%d of=%s" %(size, file))
    # find next free loop
    for n in xrange(0, MAX_LOOP_DEVICES):
        # NOTE(review): `dev = loop + str(n)` is elided here.
        if os.access(dev, os.R_OK):
            (stat, out) = run('losetup', dev)
            # attach only when losetup reported the device as free
            run('losetup', dev, file)
    print "out of loop devices"
    print "out of loop devices"
# undo loop assignment
def clean_loop(file):
    dev = find_loop(file)
    # NOTE(review): the non-empty-dev guard and error check are elided.
    ret, out = run('losetup -d', dev)
    log('unable to clean loop device:', dev, 'for file:', file)
# initialize a block device if needed
def block_dev(dev, size, fstype, format):
    # In --noexec mode just echo the device path back unchanged.
    if config.noexec(): return dev
    if not is_block(dev):
        dev = init_loop(dev, size, fstype)
    if (format == 'yes'):
    # NOTE(review): the mkfs call and `return dev` are elided here.
def get_local_address(net_type):
    """Return the local address for the network type."""
    if net_type == 'tcp':
        host = socket.gethostname()
        local = socket.gethostbyname(host)
    elif net_type == 'elan':
        # awk '/NodeId/ { print $2 }' '/proc/elan/device0/position'
        fp = open('/proc/elan/device0/position', 'r')
        lines = fp.readlines()
    elif net_type == 'gm':
        fixme("automatic local address for GM")
511 # ============================================================
512 # Classes to prepare and cleanup the various objects
515 """ Base class for the rest of the modules. The default cleanup method is
516 defined here, as well as some utilitiy funcs.
518 def __init__(self, tag_name, node):
520 self.tag_name = tag_name
521 self.name = node.getAttribute('name')
522 self.uuid = node.getAttribute('uuid')
523 self.kmodule_list = []
525 def info(self, *args):
526 msg = string.join(map(str,args))
527 print self.tag_name + ":", self.name, self.uuid, msg
530 """ default cleanup, used for most modules """
533 lctl.cleanup(self.name, self.uuid)
535 print "cleanup failed: ", self.name
def add_module(self, modname):
    """Queue *modname* for loading; addition order is the load order."""
    self.kmodule_list += [modname]
def mod_loaded(self, modname):
    """Check if a module is already loaded. Look in /proc/modules for it."""
    fp = open('/proc/modules')
    lines = fp.readlines()
    # NOTE(review): fp.close() and `return ret` are elided in this view.
    # please forgive my tired fingers for this one
    ret = filter(lambda word, mod=modname: word == mod,
                 map(lambda line: string.split(line)[0], lines))
def load_module(self):
    """Load all the modules in the list in the order they appear."""
    for mod in self.kmodule_list:
        # (rc, out) = run ('/sbin/lsmod | grep -s', mod)
        if self.mod_loaded(mod) and not config.noexec():
        # NOTE(review): the `continue` for already-loaded modules is elided.
        log ('loading module:', mod)
        # NOTE(review): the src_dir()/modprobe branch structure is elided:
        # with a source dir, insmod the found object file, else modprobe.
        module = find_module(config.src_dir(), mod)
        panic('module not found:', mod)
        (rc, out) = run('/sbin/insmod', module)
        raise CommandError('insmod', out, rc)
        (rc, out) = run('/sbin/modprobe', mod)
        raise CommandError('modprobe', out, rc)
def cleanup_module(self):
    """Unload the modules in the list in reverse order."""
    # NOTE(review): this aliases (does not copy) the list; the elided
    # rev.reverse() reverses kmodule_list in place.  The `for mod in rev:`
    # loop header is also elided.
    rev = self.kmodule_list
    log('unloading module:', mod)
    run('/sbin/rmmod', mod)
class Network(Module):
    """Configure one network interface/NAL (tcp, elan or gm) for this node."""
    def __init__(self,node):
        Module.__init__(self, 'NETWORK', node)
        self.net_type = node.getAttribute('type')
        self.nid = getText(node, 'server', '*')
        self.port = int(getText(node, 'port', 0))
        self.send_buf = int(getText(node, 'send_buf', 0))
        self.read_buf = int(getText(node, 'read_buf', 0))
        # NOTE(review): the wildcard-nid guard around this fallback is
        # elided; with it, '*' means "discover my local address".
        self.nid = get_local_address(self.net_type)
        panic("unable to set nid for", self.net_type)
        # module stack: portals core, one NAL per net type, then obd/rpc
        self.add_module('portals')
        if self.net_type == 'tcp':
            self.add_module('ksocknal')
        if self.net_type == 'elan':
            self.add_module('kqswnal')
        if self.net_type == 'gm':
            self.add_module('kgmnal')
        self.add_module('obdclass')
        self.add_module('ptlrpc')
    # prepare() body fragment (its `def` line is elided in this view):
        self.info(self.net_type, self.nid, self.port)
        if self.net_type == 'tcp':
            ret = run_daemon(TCP_ACCEPTOR, self.port)
            # NOTE(review): the `if ret:` guard is elided here.
            raise CommandError(TCP_ACCEPTOR, 'failed', ret)
        lctl.network(self.net_type, self.nid)
        lctl.newdev(attach = "ptlrpc RPCDEV")
    # cleanup() body fragment (its `def` line and try/except are elided):
        self.info(self.net_type, self.nid, self.port)
        lctl.cleanup("RPCDEV", "")
        lctl.disconnectAll(self.net_type)
        print "cleanup failed: ", self.name
        if self.net_type == 'tcp':
            # yikes, this ugly! need to save pid in /var/something
            run("killall acceptor")
# LDLM lock-manager service (its enclosing class header is elided).
def __init__(self,node):
    Module.__init__(self, 'LDLM', node)
    self.add_module('ldlm')
# LDLM prepare() fragment: attach the ldlm device.
    lctl.newdev(attach="ldlm %s %s" % (self.name, self.uuid),
# LOV logical object volume (its enclosing class header is elided).
def __init__(self,node):
    Module.__init__(self, 'LOV', node)
    devs = node.getElementsByTagName('devices')[0]
    self.stripe_sz = int(devs.getAttribute('stripesize'))
    self.stripe_off = int(devs.getAttribute('stripeoffset'))
    self.pattern = int(devs.getAttribute('pattern'))
    mdsref = node.getElementsByTagName('mds_ref')[0]
    self.mdsuuid = mdsref.getAttribute('uuidref')
    mds= lookup(node.parentNode, self.mdsuuid)
    self.mdsname = getName(mds)
    # Collect osc_ref children into a space-separated uuid list and count.
    # NOTE(review): the devlist/stripe_cnt initializers are elided here.
    for child in devs.childNodes:
        if child.nodeName == 'osc_ref':
            devlist = devlist + child.getAttribute('uuidref') + " "
            stripe_cnt = stripe_cnt + 1
    self.devlist = devlist
    self.stripe_cnt = stripe_cnt
    self.add_module('osc')
    self.add_module('lov')
# LOV prepare() fragment: report and issue the lovconfig command.
    self.info(self.mdsuuid, self.stripe_cnt, self.stripe_sz, self.stripe_off, self.pattern,
              self.devlist, self.mdsname)
    lctl.lovconfig(self.uuid, self.mdsname, self.stripe_cnt,
                   self.stripe_sz, self.stripe_off, self.pattern,
# MDS metadata server (its enclosing class header is elided).
def __init__(self,node):
    Module.__init__(self, 'MDS', node)
    self.devname, self.size = getDevice(node)
    self.fstype = getText(node, 'fstype')
    self.format = getText(node, 'autoformat', "no")
    if self.fstype == 'extN':
        self.add_module('extN')
    self.add_module('mds')
    self.add_module('mds_%s' % (self.fstype))
# MDS prepare() fragment: prepare the block device, attach the mds.
    self.info(self.devname, self.fstype, self.format)
    blkdev = block_dev(self.devname, self.size, self.fstype, self.format)
    lctl.newdev(attach="mds %s %s" % (self.name, self.uuid),
                setup ="%s %s" %(blkdev, self.fstype))
# MDS cleanup() fragment: release any loop device backing the MDS.
    clean_loop(self.devname)
# MDC metadata client (its enclosing class header is elided).
def __init__(self,node):
    Module.__init__(self, 'MDC', node)
    ref = node.getElementsByTagName('mds_ref')[0]
    self.mds_uuid = ref.getAttribute('uuidref')
    self.add_module('mdc')
# MDC prepare() fragment: connect to the MDS network, attach the mdc.
    self.info(self.mds_uuid)
    mds = lookup(self.dom_node.parentNode, self.mds_uuid)
    # NOTE(review): this reads self.mdsuuid although __init__ above sets
    # self.mds_uuid — likely a latent AttributeError on this error path.
    panic(self.mdsuuid, "not found.")
    net = get_ost_net(self.dom_node.parentNode, self.mds_uuid)
    # NOTE(review): `srv = Network(net)` is elided before the connect.
    lctl.connect(srv.net_type, srv.nid, srv.port, srv.uuid, srv.send_buf, srv.read_buf)
    lctl.newdev(attach="mdc %s %s" % (self.name, self.uuid),
                setup ="%s %s" %(self.mds_uuid, srv.uuid))
# MDC cleanup() fragment: disconnect and tear down the mdc device.
    self.info(self.mds_uuid)
    net = get_ost_net(self.dom_node.parentNode, self.mds_uuid)
    lctl.disconnect(srv.net_type, srv.nid, srv.port, srv.uuid)
    lctl.cleanup(self.name, self.uuid)
    print "cleanup failed: ", self.name
# OBD object device (its enclosing class header is elided).
def __init__(self, node):
    Module.__init__(self, 'OBD', node)
    self.obdtype = node.getAttribute('type')
    self.devname, self.size = getDevice(node)
    self.fstype = getText(node, 'fstype')
    self.format = getText(node, 'autoformat', 'yes')
    if self.fstype == 'extN':
        self.add_module('extN')
    self.add_module(self.obdtype)
# need to check /proc/mounts and /etc/mtab before
# formatting anything.
# FIXME: check if device is already formatted.
# OBD prepare() fragment: prepare the block device and attach it.
    self.info(self.obdtype, self.devname, self.size, self.fstype, self.format)
    blkdev = block_dev(self.devname, self.size, self.fstype, self.format)
    lctl.newdev(attach="%s %s %s" % (self.obdtype, self.name, self.uuid),
                setup ="%s %s" %(blkdev, self.fstype))
# OBD cleanup() fragment: release any backing loop device.
    clean_loop(self.devname)
# OST object storage target (its enclosing class header is elided).
def __init__(self,node):
    Module.__init__(self, 'OST', node)
    ref = node.getElementsByTagName('obd_ref')[0]
    self.obd_uuid = ref.getAttribute('uuidref')
    self.add_module('ost')
# OST prepare() fragment: attach the ost on top of its obd.
    self.info(self.obd_uuid)
    lctl.newdev(attach="ost %s %s" % (self.name, self.uuid),
                setup ="%s" % (self.obd_uuid))
# OSC object storage client (its enclosing class header is elided).
def __init__(self,node):
    Module.__init__(self, 'OSC', node)
    ref = node.getElementsByTagName('obd_ref')[0]
    self.obd_uuid = ref.getAttribute('uuidref')
    ref = node.getElementsByTagName('ost_ref')[0]
    self.ost_uuid = ref.getAttribute('uuidref')
    self.add_module('osc')
# OSC prepare() fragment: connect to the OST network and attach the osc.
    self.info(self.obd_uuid, self.ost_uuid)
    net = get_ost_net(self.dom_node.parentNode, self.ost_uuid)
    # NOTE(review): `srv = Network(net)` is elided before the connect.
    lctl.connect(srv.net_type, srv.nid, srv.port, srv.uuid, srv.send_buf, srv.read_buf)
    lctl.newdev(attach="osc %s %s" % (self.name, self.uuid),
                setup ="%s %s" %(self.obd_uuid, srv.uuid))
# OSC cleanup() fragment: disconnect and tear down the osc device.
    self.info(self.obd_uuid, self.ost_uuid)
    # NOTE(review): despite its name, get_ost_net() returns a DOM node,
    # not a uuid, which Network() then wraps.
    net_uuid = get_ost_net(self.dom_node.parentNode, self.ost_uuid)
    srv = Network(net_uuid)
    lctl.disconnect(srv.net_type, srv.nid, srv.port, srv.uuid)
    lctl.cleanup(self.name, self.uuid)
    print "cleanup failed: ", self.name
class Mountpoint(Module):
    """Client mountpoint: assemble the mdc/lov stack and mount lustre_lite."""
    def __init__(self,node):
        Module.__init__(self, 'MTPT', node)
        self.path = getText(node, 'path')
        ref = node.getElementsByTagName('mdc_ref')[0]
        self.mdc_uuid = ref.getAttribute('uuidref')
        ref = node.getElementsByTagName('osc_ref')[0]
        self.lov_uuid = ref.getAttribute('uuidref')
        self.add_module('osc')
        self.add_module('llite')
    # prepare() fragment: bring up every OSC in the LOV, then the LOV,
    # then perform the mount (its `def` line is elided in this view).
        l = lookup(self.dom_node.parentNode, self.lov_uuid)
        if l.nodeName == 'lov':
            # NOTE(review): `lov = LOV(l)` is elided here.
            for osc_uuid in string.split(lov.devlist):
                osc = lookup(self.dom_node.parentNode, osc_uuid)
                # NOTE(review): the OSC(osc).prepare() branch is elided.
                panic('osc not found:', osc_uuid)
            lctl.newdev(attach="lov %s %s" % (lov.name, lov.uuid),
                        setup ="%s" % (self.mdc_uuid))
        self.info(self.path, self.mdc_uuid,self.lov_uuid)
        cmd = "mount -t lustre_lite -o osc=%s,mdc=%s none %s" % \
              (self.lov_uuid, self.mdc_uuid, self.path)
        run("mkdir", self.path)
        # NOTE(review): the run(cmd) call and its error check are elided.
        panic("mount failed:", self.path)
    # cleanup() fragment: unmount and tear down each OSC in the LOV.
        self.info(self.path, self.mdc_uuid,self.lov_uuid)
        run("umount", self.path)
        l = lookup(self.dom_node.parentNode, self.lov_uuid)
        if l.nodeName == 'lov':
            # NOTE(review): `lov = LOV(l)` is elided here.
            for osc_uuid in string.split(lov.devlist):
                osc = lookup(self.dom_node.parentNode, osc_uuid)
                # NOTE(review): the OSC(osc).cleanup() branch is elided.
                panic('osc not found:', osc_uuid)
828 # ============================================================
829 # XML processing and query
830 # TODO: Change query funcs to use XPath, which is muc cleaner
833 dev = obd.getElementsByTagName('device')[0]
836 size = int(dev.getAttribute('size'))
839 return dev.firstChild.data, size
841 # Get the text content from the first matching child
842 def getText(node, tag, default=""):
843 list = node.getElementsByTagName(tag)
847 return node.firstChild.data
851 def get_ost_net(node, uuid):
852 ost = lookup(node, uuid)
853 list = ost.getElementsByTagName('network_ref')
855 uuid = list[0].getAttribute('uuidref')
858 return lookup(node, uuid)
860 def lookup(node, uuid):
861 for n in node.childNodes:
862 if n.nodeType == n.ELEMENT_NODE:
863 if getUUID(n) == uuid:
870 # Get name attribute of node
872 return node.getAttribute('name')
875 return node.getAttribute('uuidref')
877 # Get name attribute of node
879 return node.getAttribute('uuid')
881 # the tag name is the service type
882 # fixme: this should do some checks to make sure the node is a service
883 def getServiceType(node):
887 # determine what "level" a particular node is at.
888 # the order of iniitailization is based on level. objects
889 # are assigned a level based on type:
890 # net,devices,ldlm:1, obd, mdd:2 mds,ost:3 osc,mdc:4 mounts:5
891 def getServiceLevel(node):
892 type = getServiceType(node)
893 if type in ('network',):
895 if type in ('device', 'ldlm'):
897 elif type in ('obd', 'mdd'):
899 elif type in ('mds','ost'):
901 elif type in ('mdc','osc'):
903 elif type in ('lov',):
905 elif type in ('mountpoint',):
910 # return list of services in a profile. list is a list of tuples
912 def getServices(lustreNode, profileNode):
914 for n in profileNode.childNodes:
915 if n.nodeType == n.ELEMENT_NODE:
916 servNode = lookup(lustreNode, getRef(n))
919 panic('service not found: ' + getRef(n))
920 level = getServiceLevel(servNode)
921 list.append((level, servNode))
925 def getByName(lustreNode, tag, name):
926 ndList = lustreNode.getElementsByTagName(tag)
928 if getName(nd) == name:
933 # ============================================================
936 def startService(node, clean_flag, module_flag):
937 type = getServiceType(node)
938 debug('Service:', type, getName(node), getUUID(node))
939 # there must be a more dynamic way of doing this...
945 elif type == 'network':
957 elif type == 'mountpoint':
960 panic ("unknown service type:", type)
978 # Prepare the system to run lustre using a particular profile
979 # in a the configuration.
980 # * load & the modules
981 # * setup networking for the current node
982 # * make sure partitions are in place and prepared
983 # * initialize devices with lctl
984 # Levels is important, and needs to be enforced.
985 def startProfile(lustreNode, profileNode, clean_flag, module_flag):
987 panic("profile:", profile, "not found.")
988 services = getServices(lustreNode, profileNode)
992 startService(s[1], clean_flag, module_flag)
996 def doHost(lustreNode, hosts, clean_flag):
999 node = getByName(lustreNode, 'node', h)
1004 print 'No host entry found.'
1007 # Two step process: (1) load modules, (2) setup lustre
1008 # if not cleaning, load modules first.
1009 module_flag = not clean_flag
1010 reflist = node.getElementsByTagName('profile')
1011 for profile in reflist:
1012 startProfile(lustreNode, profile, clean_flag, module_flag)
1016 script = config.gdb_script()
1017 run(lctl.lctl, ' modules >', script)
1019 # dump /tmp/ogdb and sleep/pause here
1020 log ("The GDB module script is in", script)
1023 module_flag = not module_flag
1024 for profile in reflist:
1025 startProfile(lustreNode, profile, clean_flag, module_flag)
# Command line processing
def parse_cmdline(argv):
    """Parse options, update the global config, return positional args."""
    long_opts = ["ldap", "reformat", "lustre=", "verbose", "gdb",
                 "portals=", "makeldiff", "cleanup", "noexec",
                 "help", "node=", "get=", "nomod", "nosetup"]
    # NOTE(review): the short_opts definition and the `try:` are elided.
    opts, args = getopt.getopt(argv, short_opts, long_opts)
    except getopt.error:
    # NOTE(review): the usage() call, the option loop header and most
    # config-setting bodies are elided between the branches below.
    if o in ("-h", "--help"):
    if o in ("-d","--cleanup"):
    if o in ("-v", "--verbose"):
    if o in ("-n", "--noexec"):
    if o == "--portals":
    if o == "--reformat":
    if o == "--nosetup":
# -- fetch(url) body fragment; its `def` line is elided --
    s = urllib.urlopen(url)
def setupModulePath(cmd):
    # Derive the lustre source tree from this script's location when it is
    # run out of a build tree (a Makefile sits next to the script).
    base = os.path.dirname(cmd)
    if os.access(base+"/Makefile", os.R_OK):
        config.src_dir(base + "/../../")
# -- sys_set_debug_path() body fragment; its `def` line is elided --
    debug("debug path: ", config.debug_path())
    # NOTE(review): the noexec guard and `try:` are elided here.
    fp = open('/proc/sys/portals/debug_path', 'w')
    fp.write(config.debug_path())
# -- device-node setup fragment; its `def` line is elided.  Creates the
# portals/obd character devices if missing. --
    if not os.access('/dev/portals', os.R_OK):
        run('mknod /dev/portals c 10 240')
    if not os.access('/dev/obd', os.R_OK):
        run('mknod /dev/obd c 10 241')
# Initialize or shutdown lustre according to a configuration file
# * prepare the system for lustre
# * configure devices with lctl
# Shutdown does steps in reverse
# -- main() body fragment; the `def main():` line is elided in this view --
    global TCP_ACCEPTOR, lctl
    args = parse_cmdline(sys.argv[1:])
    # NOTE(review): the `if args:` branch header is elided here; a config
    # file argument takes precedence over --get.
    if not os.access(args[0], os.R_OK | os.W_OK):
        print 'File not found:', args[0]
    dom = xml.dom.minidom.parse(args[0])
    # NOTE(review): the `elif config.url():` branch header is elided here.
    xmldata = fetch(config.url())
    dom = xml.dom.minidom.parseString(xmldata)
    # Build the node-name candidates: --node wins, otherwise fall back to
    # this host's name and 'localhost'.
    # NOTE(review): the node_list initializer and branch headers are elided.
    node_list.append(config.node())
    host = socket.gethostname()
    node_list.append(host)
    node_list.append('localhost')
    debug("configuring for host: ", node_list)
    TCP_ACCEPTOR = find_prog('acceptor')
    if not TCP_ACCEPTOR:
        # NOTE(review): in --noexec mode the original falls back to a
        # placeholder name; otherwise it panics.
        TCP_ACCEPTOR = 'acceptor'
        debug('! acceptor not found')
        panic('acceptor not found')
    lctl = LCTLInterface('lctl')
    setupModulePath(sys.argv[0])
    doHost(dom.documentElement, node_list, config.cleanup())
if __name__ == "__main__":
    # NOTE(review): the `try: main()` lines are elided here.  Also note the
    # class defined above is spelled LconfError, so catching LConfError
    # here looks like a latent NameError in the error path.
    except LConfError, e:
    except CommandError, e: