# Copyright (C) 2002-2003 Cluster File Systems, Inc.
# Authors: Robert Read <rread@clusterfs.com>
#          Mike Shaver <shaver@clusterfs.com>
# This file is part of Lustre, http://www.lustre.org.
#
# Lustre is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# Lustre is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lustre; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# lconf - lustre configuration tool
#
# lconf is the main driver script for starting and stopping
# lustre filesystem services.
#
# Based in part on the XML obdctl modifications done by Brian Behlendorf
import sys, getopt, types
import string, os, stat, popen2, socket, time, random, fcntl, select
import re, exceptions, signal, traceback
import xml.dom.minidom

if sys.version[0] == '1':
    from FCNTL import F_GETFL, F_SETFL
else:
    from fcntl import F_GETFL, F_SETFL

PYMOD_DIR = "/usr/lib/lustre/python"

def development_mode():
    base = os.path.dirname(sys.argv[0])
    if os.access(base+"/Makefile", os.R_OK):
        return 1
    return 0

if development_mode():
    sys.path.append('../utils')
else:
    sys.path.append(PYMOD_DIR)
DEFAULT_TCPBUF = 8388608

# Maximum number of devices to search for.
# (the /dev/loop* nodes need to be created beforehand)
MAX_LOOP_DEVICES = 256
PORTALS_DIR = '../portals'

# Needed to call lconf --record

# Please keep these in sync with the values in portals/kp30.h
    "warning" : (1 << 10),
    "portals" : (1 << 14),
    "dlmtrace" : (1 << 16),
    "rpctrace" : (1 << 20),
    "vfstrace" : (1 << 21),
    "console" : (1 << 25),

    "undefined" : (1 << 0),
    "portals" : (1 << 10),
    "pinger" : (1 << 12),
    "filter" : (1 << 13),
    "ptlrouter" : (1 << 18),
    "confobd" : (1 << 22),
first_cleanup_error = 0
def cleanup_error(rc):
    global first_cleanup_error
    if not first_cleanup_error:
        first_cleanup_error = rc

# ============================================================
# debugging and error funcs

def fixme(msg = "this feature"):
    raise Lustre.LconfError, msg + ' not implemented yet.'

    msg = string.join(map(str,args))
    if not config.noexec:
        raise Lustre.LconfError(msg)

    msg = string.join(map(str,args))

        print string.strip(s)

    msg = string.join(map(str,args))

# ack, python's builtin int() does not support '0x123' syntax.
# eval can do it, although what a hack!
            return eval(s, {}, {})
    except SyntaxError, e:
        raise ValueError("not a number")
        raise ValueError("not a number")
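
# --- illustrative sketch (added; not part of the original lconf) ---
# The comment above notes that int() in old Python cannot parse '0x123'.
# On Python 2.6 and later the same effect is available without eval();
# my_int_example below is a hypothetical equivalent, shown for clarity only.
def my_int_example(s):
    # base 0 lets int() auto-detect '0x' (hex), leading-'0' (octal) and decimal
    return int(s, 0)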
# ============================================================
# locally defined exceptions
class CommandError (exceptions.Exception):
    def __init__(self, cmd_name, cmd_err, rc=None):
        self.cmd_name = cmd_name
        self.cmd_err = cmd_err

        if type(self.cmd_err) == types.StringType:
                print "! %s (%d): %s" % (self.cmd_name, self.rc, self.cmd_err)
                print "! %s: %s" % (self.cmd_name, self.cmd_err)
        elif type(self.cmd_err) == types.ListType:
                print "! %s (error %d):" % (self.cmd_name, self.rc)
                print "! %s:" % (self.cmd_name)
            for s in self.cmd_err:
                print "> %s" %(string.strip(s))
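
# Illustrative usage (added; a hypothetical call site, not from the original):
# code that shells out typically raises and reports failures like this.
#
#     try:
#         rc, out = run('lctl', 'device_list')
#         if rc:
#             raise CommandError('lctl', out, rc)
#     except CommandError, e:
#         e.dump()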
# ============================================================
# handle daemons, like the acceptor
class DaemonHandler:
    """ Manage starting and stopping a daemon. Assumes daemon manages
    its own pid file. """

    def __init__(self, cmd):

            log(self.command, "already running.")
        self.path = find_prog(self.command)
            panic(self.command, "not found.")
        ret, out = runcmd(self.path +' '+ self.command_line())
            raise CommandError(self.path, out, ret)

        pid = self.read_pidfile()
                log ("killing process", pid)
                log("was unable to find pid of " + self.command)
            #time.sleep(1) # let daemon die
            log("unable to kill", self.command, e)
            log("unable to kill", self.command)

        pid = self.read_pidfile()
            log("was unable to find pid of " + self.command)

    def read_pidfile(self):
            fp = open(self.pidfile(), 'r')

    def clean_pidfile(self):
        """ Remove a stale pidfile """
        log("removing stale pidfile:", self.pidfile())
            os.unlink(self.pidfile())
            log(self.pidfile(), e)
class AcceptorHandler(DaemonHandler):
    def __init__(self, port, net_type):
        DaemonHandler.__init__(self, "acceptor")

        return "/var/run/%s-%d.pid" % (self.command, self.port)

    def command_line(self):
        return string.join(map(str,(self.flags, self.port)))

# start the acceptors
    if config.lctl_dump or config.record:
    for port in acceptors.keys():
        daemon = acceptors[port]
        if not daemon.running():

def run_one_acceptor(port):
    if config.lctl_dump or config.record:
    if acceptors.has_key(port):
        daemon = acceptors[port]
        if not daemon.running():
        panic("run_one_acceptor: No acceptor defined for port:", port)

def stop_acceptor(port):
    if acceptors.has_key(port):
        daemon = acceptors[port]
# ============================================================
# handle lctl interface

    Manage communication with lctl

    def __init__(self, cmd):
        Initialize by finding the lctl binary.

        self.lctl = find_prog(cmd)
        self.record_device = ''
            debug('! lctl not found')
                raise CommandError('lctl', "unable to find lctl binary.")
    def use_save_file(self, file):
        self.save_file = file

    def record(self, dev_name, logname):
        log("Recording log", logname, "on", dev_name)
        self.record_device = dev_name
        self.record_log = logname

    def end_record(self):
        log("End recording log", self.record_log, "on", self.record_device)
        self.record_device = None
        self.record_log = None

    def set_nonblock(self, fd):
        fl = fcntl.fcntl(fd, F_GETFL)
        fcntl.fcntl(fd, F_SETFL, fl | os.O_NDELAY)
        the cmds are written to stdin of lctl
        lctl doesn't return errors when run in script mode, so
        should modify command line to accept multiple commands, or
        create complex command line options

            cmds = '\n dump ' + self.save_file + '\n' + cmds
        elif self.record_device:
            %s""" % (self.record_device, self.record_log, cmds)

        debug("+", cmd_line, cmds)
        if config.noexec: return (0, [])

        child = popen2.Popen3(cmd_line, 1) # Capture stdout and stderr from command
        child.tochild.write(cmds + "\n")
        child.tochild.close()
        # print "LCTL:", cmds

        # From "Python Cookbook" from O'Reilly
        outfile = child.fromchild
        outfd = outfile.fileno()
        self.set_nonblock(outfd)
        errfile = child.childerr
        errfd = errfile.fileno()
        self.set_nonblock(errfd)

        outdata = errdata = ''
            ready = select.select([outfd,errfd],[],[]) # Wait for input
            if outfd in ready[0]:
                outchunk = outfile.read()
                if outchunk == '': outeof = 1
                outdata = outdata + outchunk
            if errfd in ready[0]:
                errchunk = errfile.read()
                if errchunk == '': erreof = 1
                errdata = errdata + errchunk
            if outeof and erreof: break
        # end of "borrowed" code

        if os.WIFEXITED(ret):
            rc = os.WEXITSTATUS(ret)
        if rc or len(errdata):
            raise CommandError(self.lctl, errdata, rc)
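
        # --- note added for clarity (not in the original) ---
        # The select() loop above drains stdout and stderr of the child
        # concurrently so neither pipe can fill up and deadlock lctl.
        # A hypothetical modern equivalent, assuming the subprocess module
        # were available, would be:
        #
        #     import subprocess
        #     child = subprocess.Popen(cmd_line, shell=True,
        #                              stdin=subprocess.PIPE,
        #                              stdout=subprocess.PIPE,
        #                              stderr=subprocess.PIPE)
        #     outdata, errdata = child.communicate(cmds + "\n")
        #     rc = child.returncode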
    def runcmd(self, *args):
        run lctl using the command line
        cmd = string.join(map(str,args))
        debug("+", self.lctl, cmd)
        rc, out = run(self.lctl, cmd)
            raise CommandError(self.lctl, out, rc)
    def clear_log(self, dev, log):
        """ clear an existing log """
        quit """ % (dev, log)

    def root_squash(self, name, uid, nid):
        quit""" % (name, uid, nid)

    def network(self, net, nid):
        quit """ % (net, nid)

    def add_interface(self, net, ip, netmask = ""):
        """ add an interface """
        quit """ % (net, ip, netmask)

    # delete an interface
    def del_interface(self, net, ip):
        """ delete an interface """

    # create a new connection
    def add_uuid(self, net_type, uuid, nid):
        cmds = "\n add_uuid %s %s %s" %(uuid, nid, net_type)

    def add_peer(self, net_type, nid, hostaddr, port):
        if net_type in ('tcp','openib','ra') and not config.lctl_dump:
                       nid, hostaddr, port )
        elif net_type in ('iib',) and not config.lctl_dump:
        elif net_type in ('vib',) and not config.lctl_dump:

    def connect(self, srv):
        self.add_uuid(srv.net_type, srv.nid_uuid, srv.nid)
        if srv.net_type in ('tcp','openib','iib','vib','ra') and not config.lctl_dump:
            hostaddr = string.split(srv.hostaddr[0], '/')[0]
            self.add_peer(srv.net_type, srv.nid, hostaddr, srv.port)

    def recover(self, dev_name, new_conn):
        recover %s""" %(dev_name, new_conn)

    # add a route to a range
    def add_route(self, net, gw, lo, hi):
        except CommandError, e:

    def del_route(self, net, gw, lo, hi):
        quit """ % (net, gw, lo, hi)

    # add a route to a host
    def add_route_host(self, net, uuid, gw, tgt):
        self.add_uuid(net, uuid, tgt)
        except CommandError, e:

    # delete a route to a host
    def del_route_host(self, net, uuid, gw, tgt):
        quit """ % (net, gw, tgt)

    def del_peer(self, net_type, nid, hostaddr):
        if net_type in ('tcp',) and not config.lctl_dump:
            del_peer %s %s single_share
        elif net_type in ('openib','iib','vib','ra') and not config.lctl_dump:
            del_peer %s single_share

    # disconnect one connection
    def disconnect(self, srv):
        self.del_uuid(srv.nid_uuid)
        if srv.net_type in ('tcp','openib','iib','vib','ra') and not config.lctl_dump:
            hostaddr = string.split(srv.hostaddr[0], '/')[0]
            self.del_peer(srv.net_type, srv.nid, hostaddr)

    def del_uuid(self, uuid):

    def disconnectAll(self, net):

    def attach(self, type, name, uuid):
        quit""" % (type, name, uuid)

    def detach(self, name):

    def set_security(self, name, key, value):
        quit""" % (name, key, value)

    def setup(self, name, setup = ""):
        quit""" % (name, setup)

    def add_conn(self, name, conn_uuid):
        quit""" % (name, conn_uuid)

    def start(self, name, conf_name):
        quit""" % (name, conf_name)
    # create a new device with lctl
    def newdev(self, type, name, uuid, setup = ""):
        self.attach(type, name, uuid);
            self.setup(name, setup)
        except CommandError, e:
            self.cleanup(name, uuid, 0)

    def cleanup(self, name, uuid, force, failover = 0):
        if failover: force = 1
        quit""" % (name, ('', 'force')[force],
                   ('', 'failover')[failover])

    def lov_setup(self, name, uuid, desc_uuid, stripe_cnt,
                  stripe_sz, stripe_off, pattern, devlist = None):
        lov_setup %s %d %d %d %s %s
        quit""" % (name, uuid, desc_uuid, stripe_cnt, stripe_sz, stripe_off,

    # add an OBD to a LOV
    def lov_add_obd(self, name, uuid, obd_uuid, index, gen):
        lov_modify_tgts add %s %s %s %s
        quit""" % (name, obd_uuid, index, gen)

    def lmv_setup(self, name, uuid, desc_uuid, devlist):
        quit""" % (name, uuid, desc_uuid, devlist)

    # delete an OBD from a LOV
    def lov_del_obd(self, name, uuid, obd_uuid, index, gen):
        lov_modify_tgts del %s %s %s %s
        quit""" % (name, obd_uuid, index, gen)

    def deactivate(self, name):

    def dump(self, dump_file):
        quit""" % (dump_file)

    # get list of devices
    def device_list(self):
        devices = '/proc/fs/lustre/devices'
        if os.access(devices, os.R_OK):
            fp = open(devices, 'r')

    def lustre_version(self):
        rc, out = self.runcmd('version')

    def mount_option(self, profile, osc, mdc):
        mount_option %s %s %s
        quit""" % (profile, osc, mdc)

    # delete mount options
    def del_mount_option(self, profile):

    def set_timeout(self, timeout):

    def set_lustre_upcall(self, upcall):
# ============================================================
# Various system-level functions
# (ideally moved to their own module)

# Run a command and return the output and status.
# stderr is sent to /dev/null, could use popen3 to
# save it if necessary
    if config.noexec: return (0, [])
    f = os.popen(cmd + ' 2>&1')

    cmd = string.join(map(str,args))

# Run a command in the background.
def run_daemon(*args):
    cmd = string.join(map(str,args))
    if config.noexec: return 0
    f = os.popen(cmd + ' 2>&1')

# Determine full path to use for an external command
# searches dirname(argv[0]) first, then PATH
    syspath = string.split(os.environ['PATH'], ':')
    cmdpath = os.path.dirname(sys.argv[0])
    syspath.insert(0, cmdpath);
    syspath.insert(0, os.path.join(config.portals, 'utils/'))
        prog = os.path.join(d,cmd)
        if os.access(prog, os.X_OK):

# Recursively look for file starting at base dir
def do_find_file(base, mod):
    fullname = os.path.join(base, mod)
    if os.access(fullname, os.R_OK):
    for d in os.listdir(base):
        dir = os.path.join(base,d)
        if os.path.isdir(dir):
            module = do_find_file(dir, mod)

# is the path a block device?
    return stat.S_ISBLK(s[stat.ST_MODE])

# find the journal device from mkfs options
    while i < len(x) - 1:
        if x[i] == '-J' and x[i+1].startswith('device='):
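    # Added illustration (not in the original): for hypothetical mkfs options
    # such as "-I 128 -J device=/dev/sdb1", the loop above would locate the
    # '-J' flag and return the text following 'device=', i.e. "/dev/sdb1".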
# build fs according to type
def mkfs(dev, devsize, fstype, jsize, isize, mkfsoptions, isblock=1):
        panic("size of filesystem on '%s' must be larger than 8MB, but is set to %s"%
    # devsize is in 1k, and fs block count is in 4k
    block_cnt = devsize/4

    if fstype in ('ext3', 'extN', 'ldiskfs'):
        # ext3 journal size is in megabytes
        # but don't set jsize if mkfsoptions indicates a separate journal device
        if jsize == 0 and jdev(mkfsoptions) == '':
            if not is_block(dev):
                ret, out = runcmd("ls -l %s" %dev)
                devsize = int(string.split(out[0])[4]) / 1024
            else:
                # sfdisk works for symlink, hardlink, and realdev
                ret, out = runcmd("sfdisk -s %s" %dev)
                devsize = int(out[0])
                # sfdisk -s will fail for a too-large block device;
                # in that case, read the size of the partition from /proc/partitions

                # get the realpath of the device
                # it may be the real device, such as /dev/hda7
                # or the hardlink created via mknod for a device
                if 'realpath' in dir(os.path):
                    real_dev = os.path.realpath(dev)

                while os.path.islink(real_dev) and (link_count < 20):
                    link_count = link_count + 1
                    dev_link = os.readlink(real_dev)
                    if os.path.isabs(dev_link):
                        real_dev = os.path.join(os.path.dirname(real_dev), dev_link)
                    panic("Encountered too many symbolic links resolving block device:", dev)

                # get the major and minor number of the realpath via ls
                # it seems python(os.stat) does not return
                # the st_rdev member of the stat structure
                ret, out = runcmd("ls -l %s" %real_dev)
                major = string.split(string.split(out[0])[4], ",")[0]
                minor = string.split(out[0])[5]

                # get the devsize from /proc/partitions with the major and minor number
                ret, out = runcmd("cat /proc/partitions")
                    if string.split(line)[0] == major and string.split(line)[1] == minor:
                        devsize = int(string.split(line)[2])

            if devsize > 1024 * 1024:
                jsize = ((devsize / 102400) * 4)
        if jsize: jopt = "-J size=%d" %(jsize,)
        if isize: iopt = "-I %d" %(isize,)
        mkfs = 'mkfs.ext2 -j -b 4096 '
        if not isblock or config.force:
        if jdev(mkfsoptions) != '':
            jmkfs = 'mkfs.ext2 -b 4096 -O journal_dev '
                jmkfs = jmkfs + '-F '
            jmkfs = jmkfs + jdev(mkfsoptions)
            (ret, out) = run (jmkfs)
                panic("Unable to format journal device:", jdev(mkfsoptions), string.join(out))
    elif fstype == 'reiserfs':
        # reiserfs journal size is in blocks
        if jsize: jopt = "--journal_size %d" %(jsize,)
        mkfs = 'mkreiserfs -ff'
        panic('unsupported fs type: ', fstype)

    if config.mkfsoptions != None:
        mkfs = mkfs + ' ' + config.mkfsoptions
    if mkfsoptions != None:
        mkfs = mkfs + ' ' + mkfsoptions
    (ret, out) = run (mkfs, jopt, iopt, dev, block_cnt)
        panic("Unable to build fs:", dev, string.join(out))

    # enable hash tree indexing on the fs
    if fstype in ('ext3', 'extN', 'ldiskfs'):
        htree = 'echo "feature FEATURE_C5" | debugfs -w'
        (ret, out) = run (htree, dev)
            panic("Unable to enable htree:", dev)
# some systems use /dev/loopN, some /dev/loop/N
    if not os.access(loop + str(0), os.R_OK):
        if not os.access(loop + str(0), os.R_OK):
            panic ("can't access loop devices")

# find loop device assigned to the file
def find_assigned_loop(file):
    for n in xrange(0, MAX_LOOP_DEVICES):
        if os.access(dev, os.R_OK):
            (stat, out) = run('losetup', dev)
            if out and stat == 0:
                m = re.search(r'\((.*)\)', out[0])
                if m and file == m.group(1):
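                    # Added note (not in the original): 'losetup /dev/loopN'
                    # prints a line such as
                    #     /dev/loop0: [0302]:12 (/tmp/ost1-data)
                    # (hypothetical output); the regex above captures the
                    # backing file name inside the parentheses for comparison.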
# find free loop device
def find_free_loop(file):
    # find next free loop
    for n in xrange(0, MAX_LOOP_DEVICES):
        if os.access(dev, os.R_OK):
            (stat, out) = run('losetup', dev)

# create file if necessary and assign the first free loop device
def init_loop(file, size, fstype, journal_size, inode_size,
              mkfsoptions, reformat, autoformat, backfstype, backfile):
        realfstype = backfstype
        if is_block(backfile):
            if reformat or (need_format(realfstype, backfile) and autoformat == 'yes'):
                mkfs(realfile, size, realfstype, journal_size, inode_size, mkfsoptions, isblock=0)

    dev = find_assigned_loop(realfile)
        print 'WARNING: file', realfile, 'already mapped to', dev

    if reformat or not os.access(realfile, os.R_OK | os.W_OK):
        (ret, out) = run("dd if=/dev/zero bs=1k count=0 seek=%d of=%s" %(size, realfile))
            panic("Unable to create backing store:", realfile)
        mkfs(realfile, size, realfstype, journal_size, inode_size,
             mkfsoptions, isblock=0)

    dev = find_free_loop(realfile)
        print "attach " + realfile + " <-> " + dev
        run('losetup', dev, realfile)
    print "out of loop devices"

# undo loop assignment
def clean_loop(dev, fstype, backfstype, backdev):
    if not is_block(realfile):
        dev = find_assigned_loop(realfile)
            print "detach " + dev + " <-> " + realfile
            ret, out = run('losetup -d', dev)
                log('unable to clean loop device', dev, 'for file', realfile)
# finalizes the passed device
def clean_dev(dev, fstype, backfstype, backdev):
    if fstype == 'smfs' or not is_block(dev):
        clean_loop(dev, fstype, backfstype, backdev)

# determine if dev is formatted as a <fstype> filesystem
def need_format(fstype, dev):
    # FIXME don't know how to implement this
    return 0

# initialize a block device if needed
def block_dev(dev, size, fstype, reformat, autoformat, journal_size,
              inode_size, mkfsoptions, backfstype, backdev):
    if fstype == 'smfs' or not is_block(dev):
        dev = init_loop(dev, size, fstype, journal_size, inode_size,
                        mkfsoptions, reformat, autoformat, backfstype, backdev)
    elif reformat or (need_format(fstype, dev) and autoformat == 'yes'):
        mkfs(dev, size, fstype, journal_size, inode_size, mkfsoptions,
#    panic("device:", dev,
#          "not prepared, and autoformat is not set.\n",
#          "Rerun with --reformat option to format ALL filesystems")
def if2addr(iface):
    """lookup IP address for an interface"""
    rc, out = run("/sbin/ifconfig", iface)
    addr = string.split(out[1])[1]
    ip = string.split(addr, ':')[1]
def def_mount_options(fstype, target):
    """returns default mount options for passed fstype and target (mds, ost)"""
    if fstype == 'ext3' or fstype == 'ldiskfs':
        mountfsoptions = "errors=remount-ro"

        if target == 'ost' and sys_get_branch() == '2.4':
            mountfsoptions = "%s,asyncdel" % (mountfsoptions)
        if target == 'ost' and sys_get_branch() == '2.6':
            mountfsoptions = "%s,extents,mballoc" % (mountfsoptions)

        return mountfsoptions
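
# Added illustration (not part of the original): on a 2.6 kernel an ext3 OST
# gets "errors=remount-ro,extents,mballoc" from this helper, while an MDS gets
# just "errors=remount-ro"; on a 2.4 kernel an OST gets
# "errors=remount-ro,asyncdel".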
def sys_get_elan_position_file():
    procfiles = ["/proc/elan/device0/position",
                 "/proc/qsnet/elan4/device0/position",
                 "/proc/qsnet/elan3/device0/position"]
        if os.access(p, os.R_OK):

def sys_get_local_nid(net_type, wildcard, cluster_id):
    """Return the local nid."""
    if sys_get_elan_position_file():
        local = sys_get_local_address('elan', '*', cluster_id)
    else:
        local = sys_get_local_address(net_type, wildcard, cluster_id)

def sys_get_local_address(net_type, wildcard, cluster_id):
    """Return the local address for the network type."""
    if net_type in ('tcp','openib','iib','vib','ra'):
            iface, star = string.split(wildcard, ':')
            local = if2addr(iface)
                panic ("unable to determine ip for:", wildcard)
            host = socket.gethostname()
            local = socket.gethostbyname(host)
    elif net_type == 'elan':
        # awk '/NodeId/ { print $2 }' 'sys_get_elan_position_file()'
        f = sys_get_elan_position_file()
            panic ("unable to determine local Elan ID")
            lines = fp.readlines()
                if a[0] == 'NodeId':
                nid = my_int(cluster_id) + my_int(elan_id)
                local = "%d" % (nid)
            except ValueError, e:
    elif net_type == 'lo':
        fixme("automatic local address for loopback")
    elif net_type == 'gm':
        fixme("automatic local address for GM")
def sys_get_branch():
    """Returns kernel release"""
    fp = open('/proc/sys/kernel/osrelease')
    lines = fp.readlines()

        version = string.split(l)
        a = string.split(version[0], '.')
        return a[0] + '.' + a[1]
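
# Added illustration (not in the original): for an osrelease string such as
# "2.6.9-42.EL" this returns "2.6"; for "2.4.21-40.EL" it returns "2.4",
# which is what def_mount_options() keys on above.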
# XXX: instead of device_list, ask for $name and see what we get
def is_prepared(name):
    """Return true if a device exists for the name"""
    if config.lctl_dump:
    if (config.noexec or config.record) and config.cleanup:
        # expect this format:
        # 1 UP ldlm ldlm ldlm_UUID 2
        out = lctl.device_list()
            if name == string.split(s)[3]:
    except CommandError, e:

def net_is_prepared():
    """If any device exists, then assume that all networking
    has been configured"""
    out = lctl.device_list()

def fs_is_mounted(path):
    """Return true if path is a mounted lustre filesystem"""
    fp = open('/proc/mounts')
    lines = fp.readlines()
        if a[1] == path and a[2] == 'lustre_lite':
def kmod_find(src_dir, dev_dir, modname):
    modbase = src_dir +'/'+ dev_dir +'/'+ modname
    for modext in '.ko', '.o':
        module = modbase + modext
        if os.access(module, os.R_OK):

def kmod_info(modname):
    """Returns reference count for passed module name."""
    fp = open('/proc/modules')
    lines = fp.readlines()

    # please forgive my tired fingers for this one
    ret = filter(lambda word, mod = modname: word[0] == mod,
                 map(lambda line: string.split(line), lines))
    except Exception, e:
class kmod:
    """Presents kernel module"""
    def __init__(self, src_dir, dev_dir, name):
        self.src_dir = src_dir
        self.dev_dir = dev_dir

        # FIXME we ignore a failure to load the gss module, because we might
        # not need it at all.
        log ('loading module:', self.name, 'srcdir',
             self.src_dir, 'devdir', self.dev_dir)
        module = kmod_find(self.src_dir, self.dev_dir,
        if not module and self.name != 'ptlrpcs_gss':
            panic('module not found:', self.name)
        (rc, out) = run('/sbin/insmod', module)
            if self.name == 'ptlrpcs_gss':
                print "Warning: gss security not supported!"
            raise CommandError('insmod', out, rc)
        (rc, out) = run('/sbin/modprobe', self.name)
            if self.name == 'ptlrpcs_gss':
                print "Warning: gss security not supported!"
            raise CommandError('modprobe', out, rc)

        log('unloading module:', self.name)
        (rc, out) = run('/sbin/rmmod', self.name)
            log('unable to unload module:', self.name +
                "(" + self.refcount() + ")")

        """Returns module info if any."""
        return kmod_info(self.name)

        """Returns 1 if module is loaded. Otherwise 0 is returned."""

        """Returns module refcount."""

        """Returns 1 if module is used, otherwise 0 is returned."""
        if users and users != '(unused)' and users != '-':

        """Returns 1 if module is busy, otherwise 0 is returned."""
        if self.loaded() and (self.used() or self.refcount() != '0'):
    """Manage kernel modules"""
    def __init__(self, lustre_dir, portals_dir):
        self.lustre_dir = lustre_dir
        self.portals_dir = portals_dir
        self.kmodule_list = []

    def find_module(self, modname):
        """Find module by module name"""
        for mod in self.kmodule_list:
            if mod.name == modname:

    def add_portals_module(self, dev_dir, modname):
        """Append a module to list of modules to load."""
        mod = self.find_module(modname)
            mod = kmod(self.portals_dir, dev_dir, modname)
            self.kmodule_list.append(mod)

    def add_lustre_module(self, dev_dir, modname):
        """Append a module to list of modules to load."""
        mod = self.find_module(modname)
            mod = kmod(self.lustre_dir, dev_dir, modname)
            self.kmodule_list.append(mod)

    def load_modules(self):
        """Load all the modules in the list in the order they appear."""
        for mod in self.kmodule_list:
            if mod.loaded() and not config.noexec:

    def cleanup_modules(self):
        """Unload the modules in the list in reverse order."""
        rev = self.kmodule_list
            if (not mod.loaded() or mod.busy()) and not config.noexec:
            if mod.name == 'portals' and config.dump:
                lctl.dump(config.dump)
# ============================================================
# Classes to prepare and cleanup the various objects

class Module:
    """ Base class for the rest of the modules. The default cleanup method is
    defined here, as well as some utility funcs.
    """
    def __init__(self, module_name, db):
        self.module_name = module_name
        self.name = self.db.getName()
        self.uuid = self.db.getUUID()

    def info(self, *args):
        msg = string.join(map(str,args))
        print self.module_name + ":", self.name, self.uuid, msg

        """ default cleanup, used for most modules """
            lctl.cleanup(self.name, self.uuid, config.force)
        except CommandError, e:
            log(self.module_name, "cleanup failed: ", self.name)

    def add_module(self, manager):
        """Adds all needed modules in the order they appear."""

    def safe_to_clean(self):

    def safe_to_clean_modules(self):
        return self.safe_to_clean()
class Network(Module):
    def __init__(self,db):
        Module.__init__(self, 'NETWORK', db)
        self.net_type = self.db.get_val('nettype')
        self.nid = self.db.get_val('nid', '*')
        self.cluster_id = self.db.get_val('clusterid', "0")
        self.port = self.db.get_val_int('port', 0)

            self.nid = sys_get_local_nid(self.net_type, self.nid, self.cluster_id)
                panic("unable to set nid for", self.net_type, self.nid, self.cluster_id)
            self.generic_nid = 1
            debug("nid:", self.nid)
            self.generic_nid = 0

        self.nid_uuid = self.nid_to_uuid(self.nid)
        self.hostaddr = self.db.get_hostaddr()
        if len(self.hostaddr) == 0:
            self.hostaddr.append(self.nid)
        if '*' in self.hostaddr[0]:
            self.hostaddr[0] = sys_get_local_address(self.net_type, self.hostaddr[0], self.cluster_id)
            if not self.hostaddr[0]:
                panic("unable to set hostaddr for", self.net_type, self.hostaddr[0], self.cluster_id)
            debug("hostaddr:", self.hostaddr[0])
    def add_module(self, manager):
        manager.add_portals_module("libcfs", 'libcfs')
        manager.add_portals_module("portals", 'portals')
        if node_needs_router():
            manager.add_portals_module("router", 'kptlrouter')
        if self.net_type == 'tcp':
            manager.add_portals_module("knals/socknal", 'ksocknal')
        if self.net_type == 'elan':
            manager.add_portals_module("knals/qswnal", 'kqswnal')
        if self.net_type == 'gm':
            manager.add_portals_module("knals/gmnal", 'kgmnal')
        if self.net_type == 'openib':
            manager.add_portals_module("knals/openibnal", 'kopenibnal')
        if self.net_type == 'iib':
            manager.add_portals_module("knals/iibnal", 'kiibnal')
        if self.net_type == 'vib':
            manager.add_portals_module("knals/vibnal", 'kvibnal')
        if self.net_type == 'lo':
            manager.add_portals_module("knals/lonal", 'klonal')
        if self.net_type == 'ra':
            manager.add_portals_module("knals/ranal", 'kranal')
    def nid_to_uuid(self, nid):
        return "NID_%s_UUID" %(nid,)

        if not config.record and net_is_prepared():
        self.info(self.net_type, self.nid, self.port)
        if not (config.record and self.generic_nid):
            lctl.network(self.net_type, self.nid)
        if self.net_type == 'tcp':
            for hostaddr in self.db.get_hostaddr():
                ip = string.split(hostaddr, '/')[0]
                if len(string.split(hostaddr, '/')) == 2:
                    netmask = string.split(hostaddr, '/')[1]
                lctl.add_interface(self.net_type, ip, netmask)
        if self.net_type == 'elan':
        if self.port and node_is_router():
            run_one_acceptor(self.port)
            self.connect_peer_gateways()

    def connect_peer_gateways(self):
        for router in self.db.lookup_class('node'):
            if router.get_val_int('router', 0):
                for netuuid in router.get_networks():
                    net = self.db.lookup(netuuid)
                    if (gw.cluster_id == self.cluster_id and
                        gw.net_type == self.net_type):
                        if gw.nid != self.nid:

    def disconnect_peer_gateways(self):
        for router in self.db.lookup_class('node'):
            if router.get_val_int('router', 0):
                for netuuid in router.get_networks():
                    net = self.db.lookup(netuuid)
                    if (gw.cluster_id == self.cluster_id and
                        gw.net_type == self.net_type):
                        if gw.nid != self.nid:
                except CommandError, e:
                    print "disconnect failed: ", self.name

    def safe_to_clean(self):
        return not net_is_prepared()

        self.info(self.net_type, self.nid, self.port)
            stop_acceptor(self.port)
        if node_is_router():
            self.disconnect_peer_gateways()
        if self.net_type == 'tcp':
            for hostaddr in self.db.get_hostaddr():
                ip = string.split(hostaddr, '/')[0]
                lctl.del_interface(self.net_type, ip)

    def correct_level(self, level, op=None):
class RouteTable(Module):
    def __init__(self,db):
        Module.__init__(self, 'ROUTES', db)

    def server_for_route(self, net_type, gw, gw_cluster_id, tgt_cluster_id,
        # only setup connections for the tcp, openib, iib, vib and ra NALs
        if not net_type in ('tcp','openib','iib','vib','ra'):

        # connect to target if route is to single node and this node is the gw
        if lo == hi and local_interface(net_type, gw_cluster_id, gw):
            if not local_cluster(net_type, tgt_cluster_id):
                panic("target", lo, " not on the local cluster")
            srvdb = self.db.nid2server(lo, net_type, gw_cluster_id)
        # connect to gateway if this node is not the gw
        elif (local_cluster(net_type, gw_cluster_id)
              and not local_interface(net_type, gw_cluster_id, gw)):
            srvdb = self.db.nid2server(gw, net_type, gw_cluster_id)

            panic("no server for nid", lo)
        return Network(srvdb)

        if not config.record and net_is_prepared():
        for net_type, gw, gw_cluster_id, tgt_cluster_id, lo, hi in self.db.get_route_tbl():
            lctl.add_route(net_type, gw, lo, hi)
            srv = self.server_for_route(net_type, gw, gw_cluster_id, tgt_cluster_id, lo, hi)

    def safe_to_clean(self):
        return not net_is_prepared()

        if net_is_prepared():
            # the network is still being used, don't clean it up
        for net_type, gw, gw_cluster_id, tgt_cluster_id, lo, hi in self.db.get_route_tbl():
            srv = self.server_for_route(net_type, gw, gw_cluster_id, tgt_cluster_id, lo, hi)
                    lctl.disconnect(srv)
                except CommandError, e:
                    print "disconnect failed: ", self.name
                    lctl.del_route(net_type, gw, lo, hi)
                except CommandError, e:
                    print "del_route failed: ", self.name
class Management(Module):
    def __init__(self, db):
        Module.__init__(self, 'MGMT', db)

    def add_module(self, manager):
        manager.add_lustre_module('lvfs', 'lvfs')
        manager.add_lustre_module('obdclass', 'obdclass')
        manager.add_lustre_module('ptlrpc', 'ptlrpc')
        manager.add_lustre_module('mgmt', 'mgmt_svc')

        if not config.record and is_prepared(self.name):
        lctl.newdev("mgmt", self.name, self.uuid)

    def safe_to_clean(self):

        if is_prepared(self.name):
            Module.cleanup(self)

    def correct_level(self, level, op=None):
# This is only needed to load the modules; the LDLM device
# is now created automatically.
    def __init__(self,db):
        Module.__init__(self, 'LDLM', db)

    def add_module(self, manager):
        manager.add_lustre_module('lvfs', 'lvfs')
        manager.add_lustre_module('obdclass', 'obdclass')
        manager.add_lustre_module('sec', 'ptlrpcs')
        manager.add_lustre_module('ptlrpc', 'ptlrpc')
        manager.add_lustre_module('sec/gss', 'ptlrpcs_gss')

    def correct_level(self, level, op=None):
class LOV(Module):
    def __init__(self, db, uuid, fs_name, name_override = None, config_only = None):
        Module.__init__(self, 'LOV', db)
        if name_override != None:
            self.name = "lov_%s" % name_override
        self.mds_uuid = self.db.get_first_ref('mds')
        self.stripe_sz = self.db.get_val_int('stripesize', 1048576)
        self.stripe_off = self.db.get_val_int('stripeoffset', 0)
        self.pattern = self.db.get_val_int('stripepattern', 0)
        self.devlist = self.db.get_lov_tgts('lov_tgt')
        self.stripe_cnt = self.db.get_val_int('stripecount', len(self.devlist))
        self.desc_uuid = self.uuid
        self.uuid = generate_client_uuid(self.name)
        self.fs_name = fs_name
            self.config_only = 1
        self.config_only = None
        mds = self.db.lookup(self.mds_uuid)
        self.mds_name = mds.getName()
        for (obd_uuid, index, gen, active) in self.devlist:
            self.obdlist.append(obd_uuid)
            obd = self.db.lookup(obd_uuid)
            osc = get_osc(obd, self.uuid, fs_name)
                self.osclist.append((osc, index, gen, active))
                panic('osc not found:', obd_uuid)

        if not config.record and is_prepared(self.name):
        self.info(self.mds_uuid, self.stripe_cnt, self.stripe_sz,
                  self.stripe_off, self.pattern, self.devlist,
        lctl.lov_setup(self.name, self.uuid, self.desc_uuid, self.stripe_cnt,
                       self.stripe_sz, self.stripe_off, self.pattern,
                       string.join(self.obdlist))
        for (osc, index, gen, active) in self.osclist:
            target_uuid = osc.target_uuid
                # Only ignore connect failures with --force, which
                # isn't implemented here yet.
                osc.prepare(ignore_connect_failure=0)
            except CommandError, e:
                print "Error preparing OSC %s\n" % osc.uuid
            lctl.lov_add_obd(self.name, self.uuid, target_uuid, index, gen)

        for (osc, index, gen, active) in self.osclist:
            target_uuid = osc.target_uuid
        if is_prepared(self.name):
            Module.cleanup(self)
        if self.config_only:
            panic("Can't clean up config_only LOV ", self.name)

    def add_module(self, manager):
        if self.config_only:
            panic("Can't load modules for config_only LOV ", self.name)
        for (osc, index, gen, active) in self.osclist:
            osc.add_module(manager)
        manager.add_lustre_module('lov', 'lov')

    def correct_level(self, level, op=None):
class LMV(Module):
    def __init__(self, db, uuid, fs_name, name_override = None):
        Module.__init__(self, 'LMV', db)
        if name_override != None:
            self.name = "lmv_%s" % name_override

        self.devlist = self.db.get_lmv_tgts('lmv_tgt')
        if self.devlist == None:
            self.devlist = self.db.get_refs('mds')

        self.desc_uuid = self.uuid
        self.fs_name = fs_name
        for mds_uuid in self.devlist:
            mds = self.db.lookup(mds_uuid)
                panic("MDS not found!")
            mdc = MDC(mds, self.uuid, fs_name)
                self.mdclist.append(mdc)
                panic('mdc not found:', mds_uuid)

        if is_prepared(self.name):
        for mdc in self.mdclist:
                # Only ignore connect failures with --force, which
                # isn't implemented here yet.
                mdc.prepare(ignore_connect_failure=0)
            except CommandError, e:
                print "Error preparing LMV %s\n" % mdc.uuid
        lctl.lmv_setup(self.name, self.uuid, self.desc_uuid,
                       string.join(self.devlist))

        for mdc in self.mdclist:
        if is_prepared(self.name):
            Module.cleanup(self)

    def add_module(self, manager):
        for mdc in self.mdclist:
            mdc.add_module(manager)
        manager.add_lustre_module('lmv', 'lmv')

    def correct_level(self, level, op=None):
class CONFDEV(Module):
    def __init__(self, db, name, target_uuid, uuid):
        Module.__init__(self, 'CONFDEV', db)
        self.devpath = self.db.get_val('devpath','')
        self.backdevpath = self.db.get_val('devpath','')
        self.size = self.db.get_val_int('devsize', 0)
        self.journal_size = self.db.get_val_int('journalsize', 0)
        self.fstype = self.db.get_val('fstype', '')
        self.backfstype = self.db.get_val('backfstype', '')
        self.mkfsoptions = self.db.get_val('mkfsoptions', '')
        self.mountfsoptions = self.db.get_val('mountfsoptions', '')
        self.target = self.db.lookup(target_uuid)
        self.name = "conf_%s" % self.target.getName()
        self.client_uuids = self.target.get_refs('client')
        self.obdtype = self.db.get_val('obdtype', '')

        self.mds_sec = self.db.get_val('mds_sec', '')
        self.oss_sec = self.db.get_val('oss_sec', '')
        self.deny_sec = self.db.get_val('deny_sec', '')

        if config.mds_mds_sec:
            self.mds_sec = config.mds_mds_sec
        if config.mds_oss_sec:
            self.oss_sec = config.mds_oss_sec
        if config.mds_deny_sec:
                self.deny_sec = "%s,%s" %(self.deny_sec, config.mds_deny_sec)
                self.deny_sec = config.mds_deny_sec

        if self.obdtype == None:
            self.obdtype = 'dumb'

        self.conf_name = name
        self.conf_uuid = uuid
        self.realdev = self.devpath

        lmv_uuid = self.db.get_first_ref('lmv')
        if lmv_uuid != None:
            self.lmv = self.db.lookup(lmv_uuid)
            if self.lmv != None:
                self.client_uuids = self.lmv.get_refs('client')

        if self.target.get_class() == 'mds':
            if self.target.get_val('failover', 0):
                self.failover_mds = 'f'
                self.failover_mds = 'n'
            self.format = self.db.get_val('autoformat', "no")

            self.format = self.db.get_val('autoformat', "yes")
            self.osdtype = self.db.get_val('osdtype')
            ost = self.db.lookup(target_uuid)
            if ost.get_val('failover', 0):
                self.failover_ost = 'f'
                self.failover_ost = 'n'

        self.inode_size = self.get_inode_size()

        if self.lmv != None:
            client_uuid = self.name + "_lmv_UUID"
            self.master = LMV(self.lmv, client_uuid,
                              self.conf_name, self.conf_name)
    def get_inode_size(self):
        inode_size = self.db.get_val_int('inodesize', 0)
        if inode_size == 0 and self.target.get_class() == 'mds':
            # default inode size for the case when neither LOV nor
            # LMV is accessible.
            self.inode_size = 256

            # find the LOV for this MDS
            lovconfig_uuid = self.target.get_first_ref('lovconfig')
            if lovconfig_uuid or self.lmv != None:
                if self.lmv != None:
                    lovconfig_uuid = self.lmv.get_first_ref('lovconfig')
                    lovconfig = self.lmv.lookup(lovconfig_uuid)
                    lov_uuid = lovconfig.get_first_ref('lov')
                    if lov_uuid == None:
                        panic(self.target.getName() + ": No LOV found for lovconfig ",
                    lovconfig = self.target.lookup(lovconfig_uuid)
                    lov_uuid = lovconfig.get_first_ref('lov')
                    if lov_uuid == None:
                        panic(self.target.getName() + ": No LOV found for lovconfig ",
                if self.lmv != None:
                    lovconfig_uuid = self.lmv.get_first_ref('lovconfig')
                    lovconfig = self.lmv.lookup(lovconfig_uuid)
                    lov_uuid = lovconfig.get_first_ref('lov')

                lov = LOV(self.db.lookup(lov_uuid), lov_uuid, self.name,

                # default stripe count controls default inode_size
                if lov.stripe_cnt > 0:
                    stripe_count = lov.stripe_cnt
                    stripe_count = len(lov.devlist)

                if stripe_count > 77:
                elif stripe_count > 35:
                elif stripe_count > 13:
                #elif stripe_count > 3:
    def get_mount_options(self, blkdev):
        options = def_mount_options(self.fstype,
                                    self.target.get_class())

        if config.mountfsoptions:
                options = "%s,%s" %(options, config.mountfsoptions)
                options = config.mountfsoptions
            if self.mountfsoptions:
                options = "%s,%s" %(options, self.mountfsoptions)
            if self.mountfsoptions:
                options = "%s,%s" %(options, self.mountfsoptions)
                options = self.mountfsoptions

        if self.fstype == 'smfs':
                options = "%s,type=%s,dev=%s" %(options, self.backfstype,
                options = "type=%s,dev=%s" %(self.backfstype,

        if self.target.get_class() == 'mds':
                options = "%s,acl,user_xattr,iopen_nopriv" %(options)
                options = "iopen_nopriv"

        if is_prepared(self.name):

        blkdev = block_dev(self.devpath, self.size, self.fstype,
                           config.reformat, self.format, self.journal_size,
                           self.inode_size, self.mkfsoptions, self.backfstype,

        if self.fstype == 'smfs':

        mountfsoptions = self.get_mount_options(blkdev)

        self.info(self.target.get_class(), realdev, mountfsoptions,
                  self.fstype, self.size, self.format)

        lctl.newdev("confobd", self.name, self.uuid,
                    setup ="%s %s %s" %(realdev, self.fstype,

        self.mountfsoptions = mountfsoptions
        self.realdev = realdev
    def add_module(self, manager):
        manager.add_lustre_module('obdclass', 'confobd')

    def write_conf(self):
        if self.target.get_class() == 'ost':
            lctl.clear_log(self.name, self.target.getName() + '-conf')
            lctl.record(self.name, self.target.getName() + '-conf')
            lctl.newdev(self.osdtype, self.conf_name, self.conf_uuid,
                        setup ="%s %s %s %s" %(self.realdev, self.fstype,
                                               self.mountfsoptions))
            lctl.clear_log(self.name, 'OSS-conf')
            lctl.record(self.name, 'OSS-conf')
            lctl.newdev("ost", 'OSS', 'OSS_UUID', setup ="")

        if self.target.get_class() == 'mds':
            if self.master != None:
                master_name = self.master.name
                master_name = 'dumb'

            lctl.clear_log(self.name, self.target.getName() + '-conf')
            lctl.record(self.name, self.target.getName() + '-conf')
            lctl.attach("mds", self.conf_name, self.conf_uuid)
                lctl.set_security(self.conf_name, "mds_sec", self.mds_sec)
                lctl.set_security(self.conf_name, "oss_sec", self.oss_sec)
                for flavor in string.split(self.deny_sec, ','):
                    lctl.set_security(self.conf_name, "deny_sec", flavor)
            lctl.newdev("mds", self.conf_name, self.conf_uuid,
                        setup ="%s %s %s %s %s %s" %(self.realdev, self.fstype,
                                                     self.conf_name, self.mountfsoptions,
                                                     master_name, self.obdtype))

        if not self.client_uuids:

        for uuid in self.client_uuids:
            log("recording client:", uuid)
            client_uuid = generate_client_uuid(self.name)
            client = VOSC(self.db.lookup(uuid), client_uuid,
                          self.target.getName(), self.name)
            lctl.clear_log(self.name, self.target.getName())
            lctl.record(self.name, self.target.getName())
            lctl.mount_option(self.target.getName(), client.get_name(), "")
            lctl.clear_log(self.name, self.target.getName() + '-clean')
            lctl.record(self.name, self.target.getName() + '-clean')
            lctl.del_mount_option(self.target.getName())
        # record logs for each client
            config_options = "--ldapurl " + config.ldapurl + " --config " + config.config
            config_options = CONFIG_FILE

        for node_db in self.db.lookup_class('node'):
            client_name = node_db.getName()
            for prof_uuid in node_db.get_refs('profile'):
                prof_db = node_db.lookup(prof_uuid)
                # refactor this into a function to test "clientness"
                for ref_class, ref_uuid in prof_db.get_all_refs():
                    if ref_class in ('mountpoint','echoclient'):
                        debug("recording", client_name)
                        old_noexec = config.noexec
                        noexec_opt = ('', '-n')
                        ret, out = run (sys.argv[0],
                                        noexec_opt[old_noexec == 1],
                                        " -v --record --nomod",
                                        "--record_log", client_name,
                                        "--record_device", self.name,
                                        "--node", client_name,
                        for s in out: log("record> ", string.strip(s))
                        ret, out = run (sys.argv[0],
                                        noexec_opt[old_noexec == 1],
                                        "--cleanup -v --record --nomod",
                                        "--record_log", client_name + "-clean",
                                        "--record_device", self.name,
                                        "--node", client_name,
                        for s in out: log("record> ", string.strip(s))
                        config.noexec = old_noexec
            lctl.start(self.name, self.conf_name)
        except CommandError, e:
        if self.target.get_class() == 'ost':
            if not is_prepared('OSS'):
                    lctl.start(self.name, 'OSS')
                except CommandError, e:

        if is_prepared(self.name):
                lctl.cleanup(self.name, self.uuid, 0, 0)
                clean_dev(self.devpath, self.fstype,
                          self.backfstype, self.backdevpath)
            except CommandError, e:
                log(self.module_name, "cleanup failed: ", self.name)

        Module.cleanup(self)
class MDSDEV(Module):
    def __init__(self,db):
        Module.__init__(self, 'MDSDEV', db)
        self.devpath = self.db.get_val('devpath','')
        self.backdevpath = self.db.get_val('devpath','')
        self.size = self.db.get_val_int('devsize', 0)
        self.journal_size = self.db.get_val_int('journalsize', 0)
        self.fstype = self.db.get_val('fstype', '')
        self.backfstype = self.db.get_val('backfstype', '')
        self.nspath = self.db.get_val('nspath', '')
        self.mkfsoptions = self.db.get_val('mkfsoptions', '')
        self.mountfsoptions = self.db.get_val('mountfsoptions', '')
        self.obdtype = self.db.get_val('obdtype', '')
        self.root_squash = self.db.get_val('root_squash', '')
        self.no_root_squash = self.db.get_val('no_root_squash', '')

        target_uuid = self.db.get_first_ref('target')
        self.target = self.db.lookup(target_uuid)
        self.name = self.target.getName()

        lmv_uuid = self.db.get_first_ref('lmv')
        if lmv_uuid != None:
            self.lmv = self.db.lookup(lmv_uuid)

        active_uuid = get_active_target(self.target)
            panic("No target device found:", target_uuid)
        if active_uuid == self.uuid:

        group = self.target.get_val('group')
        if config.group and config.group != group:

        self.uuid = target_uuid

        if self.lmv != None:
            client_uuid = self.name + "_lmv_UUID"
            self.master = LMV(self.lmv, client_uuid,
                              self.name, self.name)

        self.confobd = CONFDEV(self.db, self.name,
                               target_uuid, self.uuid)
    def add_module(self, manager):
        manager.add_lustre_module('mdc', 'mdc')
        manager.add_lustre_module('osc', 'osc')
        manager.add_lustre_module('ost', 'ost')
        manager.add_lustre_module('lov', 'lov')
        manager.add_lustre_module('mds', 'mds')

        if self.fstype == 'smfs' or self.fstype == 'ldiskfs':
            manager.add_lustre_module(self.fstype, self.fstype)

            manager.add_lustre_module('lvfs', 'fsfilt_%s' % (self.fstype))

        # if fstype is smfs, then we should also take care of the backing
        # store filesystem.
        if self.fstype == 'smfs':
            manager.add_lustre_module(self.backfstype, self.backfstype)
            manager.add_lustre_module('lvfs', 'fsfilt_%s' % (self.backfstype))

            for option in string.split(self.mountfsoptions, ','):
                if option == 'snap':
                    if not self.fstype == 'smfs':
                        panic("mountoptions has 'snap', but fstype is not smfs.")
                    manager.add_lustre_module('lvfs', 'fsfilt_snap_%s' % (self.fstype))
                    manager.add_lustre_module('lvfs', 'fsfilt_snap_%s' % (self.backfstype))

        if self.master != None:
            self.master.add_module(manager)

        # add CONFOBD modules
        if self.confobd != None:
            self.confobd.add_module(manager)
    def write_conf(self):
        if is_prepared(self.name):
            debug(self.uuid, "not active")
        self.confobd.prepare()
        self.confobd.write_conf()
        self.confobd.cleanup()

        if is_prepared(self.name):
            debug(self.uuid, "not active")

        self.confobd.prepare()
            self.confobd.write_conf()

        if self.master != None:
            self.master.prepare()

        if not config.record:
            self.confobd.start()

        if not is_prepared('MDT'):
            lctl.newdev("mdt", 'MDT', 'MDT_UUID', setup ="")

        if development_mode():
            procentry = "/proc/fs/lustre/mds/lsd_upcall"
            upcall = os.path.abspath(os.path.dirname(sys.argv[0]) + "/lsd_upcall")
            if not (os.access(procentry, os.R_OK) and os.access(upcall, os.R_OK)):
                print "MDS Warning: failed to set lsd cache upcall"
                run("echo ", upcall, " > ", procentry)

        if config.root_squash == None:
            config.root_squash = self.root_squash
        if config.no_root_squash == None:
            config.no_root_squash = self.no_root_squash
        if config.root_squash:
            if config.no_root_squash:
                nsnid = config.no_root_squash
            lctl.root_squash(self.name, config.root_squash, nsnid)
    def msd_remaining(self):
        out = lctl.device_list()
            if string.split(s)[2] in ('mds',):

    def safe_to_clean(self):

    def safe_to_clean_modules(self):
        return not self.msd_remaining()

            debug(self.uuid, "not active")
        if is_prepared(self.name):
                lctl.cleanup(self.name, self.uuid, config.force,
            except CommandError, e:
                log(self.module_name, "cleanup failed: ", self.name)
                Module.cleanup(self)

        if self.master != None:
            self.master.cleanup()
        if not self.msd_remaining() and is_prepared('MDT'):
                lctl.cleanup("MDT", "MDT_UUID", config.force,
            except CommandError, e:
                print "cleanup failed: ", self.name

        self.confobd.cleanup()

    def correct_level(self, level, op=None):
        #if self.master != None:
    def __init__(self, db):
        Module.__init__(self, 'OSD', db)
        self.osdtype = self.db.get_val('osdtype')
        self.devpath = self.db.get_val('devpath', '')
        self.backdevpath = self.db.get_val('devpath', '')
        self.size = self.db.get_val_int('devsize', 0)
        self.journal_size = self.db.get_val_int('journalsize', 0)
        self.inode_size = self.db.get_val_int('inodesize', 0)
        self.mkfsoptions = self.db.get_val('mkfsoptions', '')
        self.mountfsoptions = self.db.get_val('mountfsoptions', '')
        self.fstype = self.db.get_val('fstype', '')
        self.backfstype = self.db.get_val('backfstype', '')
        self.nspath = self.db.get_val('nspath', '')
        target_uuid = self.db.get_first_ref('target')
        ost = self.db.lookup(target_uuid)
        self.name = ost.getName()
        self.format = self.db.get_val('autoformat', 'yes')
        if ost.get_val('failover', 0):
            self.failover_ost = 'f'
            self.failover_ost = 'n'

        self.deny_sec = self.db.get_val('deny_sec', '')

        if config.ost_deny_sec:
                self.deny_sec = "%s,%s" %(self.deny_sec, config.ost_deny_sec)
                self.deny_sec = config.ost_deny_sec

        active_uuid = get_active_target(ost)
            panic("No target device found:", target_uuid)
        if active_uuid == self.uuid:

        group = ost.get_val('group')
        if config.group and config.group != group:

        self.uuid = target_uuid
        self.confobd = CONFDEV(self.db, self.name,
                               target_uuid, self.uuid)
    def add_module(self, manager):
        manager.add_lustre_module('ost', 'ost')

        if self.fstype == 'smfs' or self.fstype == 'ldiskfs':
            manager.add_lustre_module(self.fstype, self.fstype)

            manager.add_lustre_module('lvfs' , 'fsfilt_%s' % (self.fstype))

        if self.fstype == 'smfs':
            manager.add_lustre_module(self.backfstype, self.backfstype)
            manager.add_lustre_module('lvfs' , 'fsfilt_%s' % (self.backfstype))

            for option in string.split(self.mountfsoptions, ','):
                if option == 'snap':
                    if not self.fstype == 'smfs':
                        panic("mountoptions has 'snap', but fstype is not smfs.")
                    manager.add_lustre_module('lvfs', 'fsfilt_snap_%s' % (self.fstype))
                    manager.add_lustre_module('lvfs', 'fsfilt_snap_%s' % (self.backfstype))

        manager.add_lustre_module(self.osdtype, self.osdtype)

        # add CONFOBD modules
        if self.confobd != None:
            self.confobd.add_module(manager)
        if is_prepared(self.name):
            debug(self.uuid, "not active")

        if self.osdtype == 'obdecho':
            self.info(self.osdtype)
            lctl.newdev("obdecho", self.name, self.uuid)
            if not is_prepared('OSS'):
                lctl.newdev("ost", 'OSS', 'OSS_UUID', setup="")

            self.confobd.prepare()
                self.confobd.write_conf()
            if not config.record:
                self.confobd.start()

            for flavor in string.split(self.deny_sec, ','):
                lctl.set_security(self.name, "deny_sec", flavor)

    def write_conf(self):
        if is_prepared(self.name):
            debug(self.uuid, "not active")

        if self.osdtype != 'obdecho':
            self.confobd.prepare()
            self.confobd.write_conf()
            if not config.write_conf:
                self.confobd.start()
            self.confobd.cleanup()

    def osd_remaining(self):
        out = lctl.device_list()
            if string.split(s)[2] in ('obdfilter', 'obdecho'):

    def safe_to_clean(self):

    def safe_to_clean_modules(self):
        return not self.osd_remaining()

            debug(self.uuid, "not active")
        if is_prepared(self.name):
                lctl.cleanup(self.name, self.uuid, config.force,
            except CommandError, e:
                log(self.module_name, "cleanup failed: ", self.name)

        if not self.osd_remaining() and is_prepared('OSS'):
                lctl.cleanup("OSS", "OSS_UUID", config.force,
            except CommandError, e:
                print "cleanup failed: ", self.name

        if self.osdtype != 'obdecho':
            self.confobd.cleanup()

    def correct_level(self, level, op=None):
# Generic client module, used by OSC and MDC
class Client(Module):
    def __init__(self, tgtdb, uuid, module, fs_name,
                 self_name=None, module_dir=None):
        self.target_name = tgtdb.getName()
        self.target_uuid = tgtdb.getUUID()
        self.module_dir = module_dir
        self.backup_targets = []
        self.module = module

        self.tgt_dev_uuid = get_active_target(tgtdb)
        if not self.tgt_dev_uuid:
            panic("No target device found for target(1):", self.target_name)

        self.module = module
        self.module_name = string.upper(module)
            self.name = '%s_%s_%s_%s' % (self.module_name, socket.gethostname(),
                                         self.target_name, fs_name)
            self.name = self_name
        self.lookup_server(self.tgt_dev_uuid)
        self.lookup_backup_targets()
        self.fs_name = fs_name
        if not self.module_dir:
            self.module_dir = module

    def add_module(self, manager):
        manager.add_lustre_module(self.module_dir, self.module)

    def lookup_server(self, srv_uuid):
        """ Lookup a server's network information """
        self._server_nets = get_ost_net(self.db, srv_uuid)
        if len(self._server_nets) == 0:
            panic ("Unable to find a server for:", srv_uuid)

    def get_servers(self):
        return self._server_nets

    def lookup_backup_targets(self):
        """ Lookup alternative network information """
        prof_list = toplustreDB.get_refs('profile')
        for prof_uuid in prof_list:
            prof_db = toplustreDB.lookup(prof_uuid)
                panic("profile:", prof_uuid, "not found.")
            for ref_class, ref_uuid in prof_db.get_all_refs():
                if ref_class in ('osd', 'mdsdev'):
                    devdb = toplustreDB.lookup(ref_uuid)
                    uuid = devdb.get_first_ref('target')
                    if self.target_uuid == uuid and self.tgt_dev_uuid != ref_uuid:
                        self.backup_targets.append(ref_uuid)
    def prepare(self, ignore_connect_failure = 0):
        self.info(self.target_uuid)
        if not config.record and is_prepared(self.name):
        srv = choose_local_server(self.get_servers())
            routes = find_route(self.get_servers())
            if len(routes) == 0:
                panic ("no route to", self.target_uuid)
            for (srv, r) in routes:
                lctl.add_route_host(r[0], srv.nid_uuid, r[1], r[3])
        except CommandError, e:
            if not ignore_connect_failure:

        if self.target_uuid in config.inactive and self.permits_inactive():
            debug("%s inactive" % self.target_uuid)
            inactive_p = "inactive"
            debug("%s active" % self.target_uuid)
        lctl.newdev(self.module, self.name, self.uuid,
                    setup ="%s %s %s" % (self.target_uuid, srv.nid_uuid,
        for tgt_dev_uuid in self.backup_targets:
            this_nets = get_ost_net(toplustreDB, tgt_dev_uuid)
            if len(this_nets) == 0:
                panic ("Unable to find a server for:", tgt_dev_uuid)
            srv = choose_local_server(this_nets)
                routes = find_route(this_nets);
                if len(routes) == 0:
                    panic("no route to", tgt_dev_uuid)
                for (srv, r) in routes:
                    lctl.add_route_host(r[0], srv.nid_uuid, r[1], r[3])
                lctl.add_conn(self.name, srv.nid_uuid);
        if is_prepared(self.name):
            Module.cleanup(self)

            srv = choose_local_server(self.get_servers())
                lctl.disconnect(srv)
            for (srv, r) in find_route(self.get_servers()):
                lctl.del_route_host(r[0], srv.nid_uuid, r[1], r[3])
        except CommandError, e:
            log(self.module_name, "cleanup failed: ", self.name)

        for tgt_dev_uuid in self.backup_targets:
            this_net = get_ost_net(toplustreDB, tgt_dev_uuid)
            srv = choose_local_server(this_net)
                lctl.disconnect(srv)
            for (srv, r) in find_route(this_net):
                lctl.del_route_host(r[0], srv.nid_uuid, r[1], r[3])

    def correct_level(self, level, op=None):

    def deactivate(self):
            lctl.deactivate(self.name)
        except CommandError, e:
            log(self.module_name, "deactivate failed: ", self.name)
class MDC(Client):
    def __init__(self, db, uuid, fs_name):
        Client.__init__(self, db, uuid, 'mdc', fs_name)

    def permits_inactive(self):

class OSC(Client):
    def __init__(self, db, uuid, fs_name):
        Client.__init__(self, db, uuid, 'osc', fs_name)

    def permits_inactive(self):
2531 class CMOBD(Module):
2532 def __init__(self, db):
2533 Module.__init__(self, 'CMOBD', db)
2534 self.name = self.db.getName();
2535 self.uuid = generate_client_uuid(self.name)
2536 self.master_uuid = self.db.get_first_ref('masterobd')
2537 self.cache_uuid = self.db.get_first_ref('cacheobd')
2539 master_obd = self.db.lookup(self.master_uuid)
2541 panic('master obd not found:', self.master_uuid)
2543 cache_obd = self.db.lookup(self.cache_uuid)
2545 panic('cache obd not found:', self.cache_uuid)
2550 master_class = master_obd.get_class()
2551 cache_class = cache_obd.get_class()
2553 if master_class == 'ost' or master_class == 'lov':
2554 client_uuid = "%s_lov_master_UUID" % (self.name)
2555 self.master = LOV(master_obd, client_uuid, self.name);
2556 elif master_class == 'mds':
2557 self.master = get_mdc(db, self.name, self.master_uuid)
2558 elif master_class == 'lmv':
2559 # temporary fix: cobd and cmobd would otherwise use the same uuid, so use a constant name here
2560 client_uuid = "%s_lmv_master_UUID" % "master"
2561 self.master = LMV(master_obd, client_uuid, self.name);
2563 panic("unknown master obd class '%s'" %(master_class))
2565 if cache_class == 'ost' or cache_class == 'lov':
2566 client_uuid = "%s_lov_cache_UUID" % (self.name)
2567 self.cache = LOV(cache_obd, client_uuid, self.name);
2568 elif cache_class == 'mds':
2569 self.cache = get_mdc(db, self.name, self.cache_uuid)
2570 elif cache_class == 'lmv':
2571 client_uuid = "%s_lmv_cache_UUID" % (self.name)
2572 self.cache = LMV(cache_obd, client_uuid, self.name);
2574 panic("unknown cache obd class '%s'" %(cache_class))
2577 self.master.prepare()
2578 if not config.record and is_prepared(self.name):
2580 self.info(self.master_uuid, self.cache_uuid)
2581 lctl.newdev("cmobd", self.name, self.uuid,
2582 setup ="%s %s" %(self.master.uuid,
2591 def get_master_name(self):
2592 return self.master.name
2594 def get_cache_name(self):
2595 return self.cache.name
2598 if is_prepared(self.name):
2599 Module.cleanup(self)
2601 self.master.cleanup()
2603 def add_module(self, manager):
2604 manager.add_lustre_module('smfs', 'smfs')
2605 manager.add_lustre_module('cmobd', 'cmobd')
2606 self.master.add_module(manager)
2608 def correct_level(self, level, op=None):
2612 def __init__(self, db, uuid, name):
2613 Module.__init__(self, 'COBD', db)
2614 self.name = self.db.getName();
2615 self.uuid = generate_client_uuid(self.name)
2616 self.master_uuid = self.db.get_first_ref('masterobd')
2617 self.cache_uuid = self.db.get_first_ref('cacheobd')
2619 master_obd = self.db.lookup(self.master_uuid)
2621 panic('master obd not found:', self.master_uuid)
2623 cache_obd = self.db.lookup(self.cache_uuid)
2625 panic('cache obd not found:', self.cache_uuid)
2630 master_class = master_obd.get_class()
2631 cache_class = cache_obd.get_class()
2633 if master_class == 'ost' or master_class == 'lov':
2634 client_uuid = "%s_lov_master_UUID" % (self.name)
2635 self.master = LOV(master_obd, client_uuid, name);
2636 elif master_class == 'mds':
2637 self.master = get_mdc(db, name, self.master_uuid)
2638 elif master_class == 'lmv':
2639 # temporary fix: cobd and cmobd would otherwise use the same uuid, so use a constant name here
2640 client_uuid = "%s_lmv_master_UUID" % "master"
2641 self.master = LMV(master_obd, client_uuid, self.name);
2643 panic("unknown master obd class '%s'" %(master_class))
2645 if cache_class == 'ost' or cache_class == 'lov':
2646 client_uuid = "%s_lov_cache_UUID" % (self.name)
2647 self.cache = LOV(cache_obd, client_uuid, name);
2648 elif cache_class == 'mds':
2649 self.cache = get_mdc(db, name, self.cache_uuid)
2650 elif cache_class == 'lmv':
2651 client_uuid = "%s_lmv_cache_UUID" % "cache"
2652 self.cache = LMV(cache_obd, client_uuid, self.name);
2654 panic("unknown cache obd class '%s'" %(cache_class))
2662 def get_master_name(self):
2663 return self.master.name
2665 def get_cache_name(self):
2666 return self.cache.name
2669 if not config.record and is_prepared(self.name):
2671 self.master.prepare()
2672 self.cache.prepare()
2673 self.info(self.master_uuid, self.cache_uuid)
2674 lctl.newdev("cobd", self.name, self.uuid,
2675 setup ="%s %s" %(self.master.name,
2679 if is_prepared(self.name):
2680 Module.cleanup(self)
2681 self.master.cleanup()
2682 self.cache.cleanup()
2684 def add_module(self, manager):
2685 manager.add_lustre_module('cobd', 'cobd')
2686 self.master.add_module(manager)
2688 # virtual interface for OSC and LOV
2690 def __init__(self, db, client_uuid, name, name_override = None):
2691 Module.__init__(self, 'VOSC', db)
2692 if db.get_class() == 'lov':
2693 self.osc = LOV(db, client_uuid, name, name_override)
2695 elif db.get_class() == 'cobd':
2696 self.osc = COBD(db, client_uuid, name)
2699 self.osc = OSC(db, client_uuid, name)
2703 return self.osc.get_uuid()
2706 return self.osc.get_name()
2714 def add_module(self, manager):
2715 self.osc.add_module(manager)
2717 def correct_level(self, level, op=None):
2718 return self.osc.correct_level(level, op)
2720 # virtual interface for MDC and LMV
2722 def __init__(self, db, client_uuid, name, name_override = None):
2723 Module.__init__(self, 'VMDC', db)
2724 if db.get_class() == 'lmv':
2725 self.mdc = LMV(db, client_uuid, name, name_override)
2726 elif db.get_class() == 'cobd':
2727 self.mdc = COBD(db, client_uuid, name)
2729 self.mdc = MDC(db, client_uuid, name)
2732 return self.mdc.uuid
2735 return self.mdc.name
2743 def add_module(self, manager):
2744 self.mdc.add_module(manager)
2746 def correct_level(self, level, op=None):
2747 return self.mdc.correct_level(level, op)
2749 class ECHO_CLIENT(Module):
2750 def __init__(self,db):
2751 Module.__init__(self, 'ECHO_CLIENT', db)
2752 self.obd_uuid = self.db.get_first_ref('obd')
2753 obd = self.db.lookup(self.obd_uuid)
2754 self.uuid = generate_client_uuid(self.name)
2755 self.osc = VOSC(obd, self.uuid, self.name)
2758 if not config.record and is_prepared(self.name):
2761 self.osc.prepare() # XXX This is so cheating. -p
2762 self.info(self.obd_uuid)
2764 lctl.newdev("echo_client", self.name, self.uuid,
2765 setup = self.osc.get_name())
2768 if is_prepared(self.name):
2769 Module.cleanup(self)
2772 def add_module(self, manager):
2773 self.osc.add_module(manager)
2774 manager.add_lustre_module('obdecho', 'obdecho')
2776 def correct_level(self, level, op=None):
2779 def generate_client_uuid(name):
2780 client_uuid = '%05x_%.19s_%05x%05x' % (int(random.random() * 1048576),
2782 int(random.random() * 1048576),
2783 int(random.random() * 1048576))
2784 return client_uuid[:36]
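# Editor's illustration (hedged): for a hypothetical mountpoint named "mymount",
# the format string above produces a value shaped like
#
#     >>> generate_client_uuid('mymount')   # random, value differs every call
#     '0a1b2_mymount_3c4d5e6f70'            # 5 hex, name (max 19 chars), 10 hex
#
# and the [:36] slice guarantees it fits a 36-character UUID field.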
2786 class Mountpoint(Module):
2787 def __init__(self,db):
2788 Module.__init__(self, 'MTPT', db)
2789 self.path = self.db.get_val('path')
2790 self.clientoptions = self.db.get_val('clientoptions', '')
2791 self.fs_uuid = self.db.get_first_ref('filesystem')
2792 fs = self.db.lookup(self.fs_uuid)
2793 self.mds_uuid = fs.get_first_ref('lmv')
2794 if not self.mds_uuid:
2795 self.mds_uuid = fs.get_first_ref('mds')
2796 self.obd_uuid = fs.get_first_ref('obd')
2797 client_uuid = generate_client_uuid(self.name)
2799 self.oss_sec = self.db.get_val('oss_sec','null')
2800 self.mds_sec = self.db.get_val('mds_sec','null')
2802 self.mds_sec = config.mds_sec
2804 self.oss_sec = config.oss_sec
2806 ost = self.db.lookup(self.obd_uuid)
2808 panic("no ost: ", self.obd_uuid)
2810 mds = self.db.lookup(self.mds_uuid)
2812 panic("no mds: ", self.mds_uuid)
2814 self.vosc = VOSC(ost, client_uuid, self.name, self.name)
2815 self.vmdc = VMDC(mds, client_uuid, self.name, self.name)
2818 if not config.record and fs_is_mounted(self.path):
2819 log(self.path, "already mounted.")
2826 self.info(self.path, self.mds_uuid, self.obd_uuid)
2827 if config.record or config.lctl_dump:
2828 lctl.mount_option(local_node_name, self.vosc.get_name(),
2829 self.vmdc.get_name())
2832 if config.clientoptions:
2833 if self.clientoptions:
2834 self.clientoptions = self.clientoptions + ',' + config.clientoptions
2836 self.clientoptions = config.clientoptions
2837 if self.clientoptions:
2838 self.clientoptions = ',' + self.clientoptions
2839 # The Linux kernel consumes the "async" mount option itself and does not pass it
2840 # to ll_fill_super, so replace it with the Lustre-specific "lasync"
2841 self.clientoptions = string.replace(self.clientoptions, "async", "lasync")
2843 cmd = "mount -t lustre_lite -o osc=%s,mdc=%s,mds_sec=%s,oss_sec=%s%s %s %s" % \
2844 (self.vosc.get_name(), self.vmdc.get_name(), self.mds_sec,
2845 self.oss_sec, self.clientoptions, config.config, self.path)
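# Editor's illustration (hedged): with hypothetical device names "fs1-clientlov"
# and "fs1-clientmdc", the default "null" security flavors and no extra client
# options, the command assembled above would look roughly like:
#
#   mount -t lustre_lite -o osc=fs1-clientlov,mdc=fs1-clientmdc,mds_sec=null,oss_sec=null \
#         <config name> /mnt/lustre
#
# where <config name> comes from config.config and /mnt/lustre stands in for self.path.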
2846 run("mkdir", self.path)
2851 panic("mount failed:", self.path, ":", string.join(val))
2854 self.info(self.path, self.mds_uuid,self.obd_uuid)
2856 if config.record or config.lctl_dump:
2857 lctl.del_mount_option(local_node_name)
2859 if fs_is_mounted(self.path):
2861 (rc, out) = run("umount", "-f", self.path)
2863 (rc, out) = run("umount", self.path)
2865 raise CommandError('umount', out, rc)
2867 if fs_is_mounted(self.path):
2868 panic("fs is still mounted:", self.path)
2873 def add_module(self, manager):
2874 self.vosc.add_module(manager)
2875 self.vmdc.add_module(manager)
2876 manager.add_lustre_module('llite', 'llite')
2878 def correct_level(self, level, op=None):
2881 # ============================================================
2882 # misc query functions
2884 def get_ost_net(self, osd_uuid):
2888 osd = self.lookup(osd_uuid)
2889 node_uuid = osd.get_first_ref('node')
2890 node = self.lookup(node_uuid)
2892 panic("unable to find node for osd_uuid:", osd_uuid,
2893 " node_ref:", node_uuid_)
2894 for net_uuid in node.get_networks():
2895 db = node.lookup(net_uuid)
2896 srv_list.append(Network(db))
2899 # the order of initialization is based on level.
2900 def getServiceLevel(self):
2901 type = self.get_class()
2903 if type in ('network',):
2905 elif type in ('routetbl',):
2907 elif type in ('ldlm',):
2909 elif type in ('osd',):
2911 elif type in ('mdsdev',):
2913 elif type in ('lmv',):
2915 elif type in ('cmobd', 'cobd',):
2917 elif type in ('mountpoint', 'echoclient'):
2920 panic("Unknown type: ", type)
2922 if ret < config.minlevel or ret > config.maxlevel:
2927 # return list of services in a profile. list is a list of tuples
2928 # [(level, db_object),]
2929 def getServices(self):
2931 for ref_class, ref_uuid in self.get_all_refs():
2932 servdb = self.lookup(ref_uuid)
2934 level = getServiceLevel(servdb)
2936 list.append((level, servdb))
2938 panic('service not found: ' + ref_uuid)
2944 ############################################################
2946 # FIXME: clean this mess up!
2948 # OSC is no longer in the xml, so we have to fake it.
2949 # this is getting ugly and begging for another refactoring
2950 def get_osc(ost_db, uuid, fs_name):
2951 osc = OSC(ost_db, uuid, fs_name)
2954 def get_mdc(db, fs_name, mds_uuid):
2955 mds_db = db.lookup(mds_uuid);
2957 error("no mds:", mds_uuid)
2958 mdc = MDC(mds_db, mds_uuid, fs_name)
2961 ############################################################
2962 # routing ("rooting")
2964 # list of (nettype, cluster_id, nid)
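# Editor's illustration (hedged): an entry in this list might look like
#   ('tcp', 0, '192.168.16.34')
# i.e. (nettype, cluster_id, nid); the concrete values are invented.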
2967 def find_local_clusters(node_db):
2968 global local_clusters
2969 for netuuid in node_db.get_networks():
2970 net = node_db.lookup(netuuid)
2972 debug("add_local", netuuid)
2973 local_clusters.append((srv.net_type, srv.cluster_id, srv.nid))
2975 if not acceptors.has_key(srv.port):
2976 acceptors[srv.port] = AcceptorHandler(srv.port, srv.net_type)
2978 # This node is a gateway.
2980 def node_is_router():
2983 # If there are any routers found in the config, then this will be true
2984 # and all nodes will load kptlrouter.
2986 def node_needs_router():
2987 return needs_router or is_router
2989 # list of (nettype, gw, tgt_cluster_id, lo, hi)
2990 # Currently, these local routes are only added to kptlrouter route
2991 # table if they are needed to connect to a specific server. This
2992 # should be changed so all available routes are loaded, and the
2993 # ptlrouter can make all the decisions.
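# Editor's illustration (hedged): a route entry of the form
# (nettype, gw, tgt_cluster_id, lo, hi) might look like
#   ('tcp', '192.168.16.1', 1, '10.0.0.1', '10.0.0.254')
# i.e. nids from lo to hi in target cluster 1 are reached through the tcp
# gateway 192.168.16.1; all values here are invented for illustration.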
2996 def find_local_routes(lustre):
2997 """ Scan the lustre config looking for routers . Build list of
2999 global local_routes, needs_router
3001 list = lustre.lookup_class('node')
3003 if router.get_val_int('router', 0):
3005 for (local_type, local_cluster_id, local_nid) in local_clusters:
3007 for netuuid in router.get_networks():
3008 db = router.lookup(netuuid)
3009 if (local_type == db.get_val('nettype') and
3010 local_cluster_id == db.get_val('clusterid')):
3011 gw = db.get_val('nid')
3014 debug("find_local_routes: gw is", gw)
3015 for route in router.get_local_routes(local_type, gw):
3016 local_routes.append(route)
3017 debug("find_local_routes:", local_routes)
3020 def choose_local_server(srv_list):
3021 for srv in srv_list:
3022 if local_cluster(srv.net_type, srv.cluster_id):
3025 def local_cluster(net_type, cluster_id):
3026 for cluster in local_clusters:
3027 if net_type == cluster[0] and cluster_id == cluster[1]:
3031 def local_interface(net_type, cluster_id, nid):
3032 for cluster in local_clusters:
3033 if (net_type == cluster[0] and cluster_id == cluster[1]
3034 and nid == cluster[2]):
3038 def find_route(srv_list):
3040 frm_type = local_clusters[0][0]
3041 for srv in srv_list:
3042 debug("find_route: srv:", srv.nid, "type: ", srv.net_type)
3043 to_type = srv.net_type
3045 cluster_id = srv.cluster_id
3046 debug ('looking for route to', to_type, to)
3047 for r in local_routes:
3048 debug("find_route: ", r)
3049 if (r[3] <= to and to <= r[4]) and cluster_id == r[2]:
3050 result.append((srv, r))
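# Editor's note (hedged): find_route() therefore returns a list of
# (server, route) pairs, pairing each reachable remote server with a matching
# entry from local_routes; an empty list means no route was found.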
3053 def get_active_target(db):
3054 target_uuid = db.getUUID()
3055 target_name = db.getName()
3056 node_name = get_select(target_name)
3058 tgt_dev_uuid = db.get_node_tgt_dev(node_name, target_uuid)
3060 tgt_dev_uuid = db.get_first_ref('active')
3063 def get_server_by_nid_uuid(db, nid_uuid):
3064 for n in db.lookup_class("network"):
3066 if net.nid_uuid == nid_uuid:
3070 ############################################################
3074 type = db.get_class()
3075 debug('Service:', type, db.getName(), db.getUUID())
3080 n = LOV(db, "YOU_SHOULD_NEVER_SEE_THIS_UUID")
3081 elif type == 'network':
3083 elif type == 'routetbl':
3087 elif type == 'cobd':
3088 n = COBD(db, "YOU_SHOULD_NEVER_SEE_THIS_UUID")
3089 elif type == 'cmobd':
3091 elif type == 'mdsdev':
3093 elif type == 'mountpoint':
3095 elif type == 'echoclient':
3100 panic ("unknown service type:", type)
3104 # Prepare the system to run lustre using a particular profile
3105 # in the configuration.
3106 # * load the modules
3107 # * setup networking for the current node
3108 # * make sure partitions are in place and prepared
3109 # * initialize devices with lctl
3110 # Levels are important, and need to be enforced.
3111 def for_each_profile(db, prof_list, operation):
3112 for prof_uuid in prof_list:
3113 prof_db = db.lookup(prof_uuid)
3115 panic("profile:", prof_uuid, "not found.")
3116 services = getServices(prof_db)
3119 def magic_get_osc(db, rec, lov):
3121 lov_uuid = lov.get_uuid()
3122 lov_name = lov.osc.fs_name
3124 lov_uuid = rec.getAttribute('lov_uuidref')
3125 # FIXME: better way to find the mountpoint?
3126 filesystems = db.root_node.getElementsByTagName('filesystem')
3128 for fs in filesystems:
3129 ref = fs.getElementsByTagName('obd_ref')
3130 if ref[0].getAttribute('uuidref') == lov_uuid:
3131 fsuuid = fs.getAttribute('uuid')
3135 panic("malformed xml: lov uuid '" + lov_uuid + "' referenced in 'add' record is not used by any filesystems.")
3137 mtpts = db.root_node.getElementsByTagName('mountpoint')
3140 ref = fs.getElementsByTagName('filesystem_ref')
3141 if ref[0].getAttribute('uuidref') == fsuuid:
3142 lov_name = fs.getAttribute('name')
3146 panic("malformed xml: 'add' record references lov uuid '" + lov_uuid + "', which references filesystem uuid '" + fsuuid + "', which does not reference a mountpoint.")
3148 print "lov_uuid: " + lov_uuid + "; lov_name: " + lov_name
3150 ost_uuid = rec.getAttribute('ost_uuidref')
3151 obd = db.lookup(ost_uuid)
3154 panic("malformed xml: 'add' record references ost uuid '" + ost_uuid + "' which cannot be found.")
3156 osc = get_osc(obd, lov_uuid, lov_name)
3158 panic('osc not found:', ost_uuid)
3161 # Write logs for update records. Sadly, logs of all types, and updates in
3162 # particular, are something of an afterthought; lconf needs to be rewritten with
3163 # these as core concepts, so this is a pretty big hack.
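# Editor's illustration (hedged): the <update> elements handled below contain
# 'add', 'deactivate' and 'delete' child records. The attribute names are taken
# from the code that follows; the element layout shown here is an assumption:
#
#   <update version="2">
#     <add lov_uuidref="lov1_UUID" ost_uuidref="ost3_UUID"
#          index="3" generation="2"/>
#   </update>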
3164 def process_update_record(db, update, lov):
3165 for rec in update.childNodes:
3166 if rec.nodeType != rec.ELEMENT_NODE:
3169 log("found "+rec.nodeName+" record in update version " +
3170 str(update.getAttribute('version')))
3172 lov_uuid = rec.getAttribute('lov_uuidref')
3173 ost_uuid = rec.getAttribute('ost_uuidref')
3174 index = rec.getAttribute('index')
3175 gen = rec.getAttribute('generation')
3177 if not lov_uuid or not ost_uuid or not index or not gen:
3178 panic("malformed xml: 'update' record requires lov_uuid, ost_uuid, index, and generation.")
3181 tmplov = db.lookup(lov_uuid)
3183 panic("malformed xml: 'delete' record contains lov UUID '" + lov_uuid + "', which cannot be located.")
3184 lov_name = tmplov.getName()
3186 lov_name = lov.osc.name
3188 # ------------------------------------------------------------- add
3189 if rec.nodeName == 'add':
3191 lctl.lov_del_obd(lov_name, lov_uuid, ost_uuid, index, gen)
3194 osc = magic_get_osc(db, rec, lov)
3197 # Only ignore connect failures with --force, which
3198 # isn't implemented here yet.
3199 osc.prepare(ignore_connect_failure=0)
3200 except CommandError, e:
3201 print "Error preparing OSC %s\n" % osc.uuid
3204 lctl.lov_add_obd(lov_name, lov_uuid, ost_uuid, index, gen)
3206 # ------------------------------------------------------ deactivate
3207 elif rec.nodeName == 'deactivate':
3211 osc = magic_get_osc(db, rec, lov)
3215 except CommandError, e:
3216 print "Error deactivating OSC %s\n" % osc.uuid
3219 # ---------------------------------------------------------- delete
3220 elif rec.nodeName == 'delete':
3224 osc = magic_get_osc(db, rec, lov)
3230 except CommandError, e:
3231 print "Error cleaning up OSC %s\n" % osc.uuid
3234 lctl.lov_del_obd(lov_name, lov_uuid, ost_uuid, index, gen)
3236 def process_updates(db, log_device, log_name, lov = None):
3237 updates = db.root_node.getElementsByTagName('update')
3239 if not u.childNodes:
3240 log("ignoring empty update record (version " +
3241 str(u.getAttribute('version')) + ")")
3244 version = u.getAttribute('version')
3245 real_name = "%s-%s" % (log_name, version)
3246 lctl.clear_log(log_device, real_name)
3247 lctl.record(log_device, real_name)
3249 process_update_record(db, u, lov)
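# Editor's note (hedged): each update version gets its own config log named
# "<log_name>-<version>", e.g. a log_name of "client" and update version "3"
# would be recorded as "client-3" (names here are illustrative).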
3253 def doWriteconf(services):
3257 if s[1].get_class() == 'mdsdev' or s[1].get_class() == 'osd':
3258 n = newService(s[1])
3262 def doSetup(services):
3267 n = newService(s[1])
3269 slist.append((n.level, n))
3272 nl = n[1].correct_level(n[0])
3273 nlist.append((nl, n[1]))
3278 def doLoadModules(services):
3282 # adding all needed modules from all services
3284 n = newService(s[1])
3285 n.add_module(mod_manager)
3287 # loading all registered modules
3288 mod_manager.load_modules()
3290 def doUnloadModules(services):
3294 # adding all needed modules from all services
3296 n = newService(s[1])
3297 if n.safe_to_clean_modules():
3298 n.add_module(mod_manager)
3300 # unloading all registered modules
3301 mod_manager.cleanup_modules()
3303 def doCleanup(services):
3309 n = newService(s[1])
3311 slist.append((n.level, n))
3314 nl = n[1].correct_level(n[0])
3315 nlist.append((nl, n[1]))
3320 if n[1].safe_to_clean():
3325 def doHost(lustreDB, hosts):
3326 global is_router, local_node_name
3329 node_db = lustreDB.lookup_name(h, 'node')
3333 panic('No host entry found.')
3335 local_node_name = node_db.get_val('name', 0)
3336 is_router = node_db.get_val_int('router', 0)
3337 lustre_upcall = node_db.get_val('lustreUpcall', '')
3338 portals_upcall = node_db.get_val('portalsUpcall', '')
3339 timeout = node_db.get_val_int('timeout', 0)
3340 ptldebug = node_db.get_val('ptldebug', '')
3341 subsystem = node_db.get_val('subsystem', '')
3343 find_local_clusters(node_db)
3345 find_local_routes(lustreDB)
3347 # Two step process: (1) load modules, (2) setup lustre
3348 # if not cleaning, load modules first.
3349 prof_list = node_db.get_refs('profile')
3351 if config.write_conf:
3352 for_each_profile(node_db, prof_list, doLoadModules)
3354 for_each_profile(node_db, prof_list, doWriteconf)
3355 for_each_profile(node_db, prof_list, doUnloadModules)
3358 elif config.recover:
3359 if not (config.tgt_uuid and config.client_uuid and config.conn_uuid):
3360 raise Lustre.LconfError( "--recovery requires --tgt_uuid <UUID> " +
3361 "--client_uuid <UUID> --conn_uuid <UUID>")
3362 doRecovery(lustreDB, lctl, config.tgt_uuid, config.client_uuid,
3364 elif config.cleanup:
3366 # the command line can override this value
3368 # ugly hack, only need to run lctl commands for --dump
3369 if config.lctl_dump or config.record:
3370 for_each_profile(node_db, prof_list, doCleanup)
3373 sys_set_timeout(timeout)
3374 sys_set_ptldebug(ptldebug)
3375 sys_set_subsystem(subsystem)
3376 sys_set_lustre_upcall(lustre_upcall)
3377 sys_set_portals_upcall(portals_upcall)
3379 for_each_profile(node_db, prof_list, doCleanup)
3380 for_each_profile(node_db, prof_list, doUnloadModules)
3384 # ugly hack, only need to run lctl commands for --dump
3385 if config.lctl_dump or config.record:
3386 sys_set_timeout(timeout)
3387 sys_set_lustre_upcall(lustre_upcall)
3388 for_each_profile(node_db, prof_list, doSetup)
3392 sys_set_netmem_max('/proc/sys/net/core/rmem_max', MAXTCPBUF)
3393 sys_set_netmem_max('/proc/sys/net/core/wmem_max', MAXTCPBUF)
3395 for_each_profile(node_db, prof_list, doLoadModules)
3397 sys_set_debug_path()
3398 sys_set_ptldebug(ptldebug)
3399 sys_set_subsystem(subsystem)
3400 script = config.gdb_script
3401 run(lctl.lctl, ' modules >', script)
3403 log ("The GDB module script is in", script)
3404 # pause, so user has time to break and
3407 sys_set_timeout(timeout)
3408 sys_set_lustre_upcall(lustre_upcall)
3409 sys_set_portals_upcall(portals_upcall)
3411 for_each_profile(node_db, prof_list, doSetup)
3414 def doRecovery(lustreDB, lctl, tgt_uuid, client_uuid, nid_uuid):
3415 tgt = lustreDB.lookup(tgt_uuid)
3417 raise Lustre.LconfError("doRecovery: "+ tgt_uuid +" not found.")
3418 new_uuid = get_active_target(tgt)
3420 raise Lustre.LconfError("doRecovery: no active target found for: " +
3422 net = choose_local_server(get_ost_net(lustreDB, new_uuid))
3424 raise Lustre.LconfError("Unable to find a connection to:" + new_uuid)
3426 log("Reconnecting", tgt_uuid, " to ", net.nid_uuid);
3428 oldnet = get_server_by_nid_uuid(lustreDB, nid_uuid)
3431 lctl.disconnect(oldnet)
3432 except CommandError, e:
3433 log("recover: disconnect", nid_uuid, "failed: ")
3438 except CommandError, e:
3439 log("recover: connect failed")
3442 lctl.recover(client_uuid, net.nid_uuid)
3445 def setupModulePath(cmd, portals_dir = PORTALS_DIR):
3446 base = os.path.dirname(cmd)
3447 if development_mode():
3448 if not config.lustre:
3449 debug('using objdir module paths')
3450 config.lustre = (os.path.join(base, ".."))
3451 # normalize the portals dir, using command line arg if set
3453 portals_dir = config.portals
3454 dir = os.path.join(config.lustre, portals_dir)
3455 config.portals = dir
3456 debug('config.portals', config.portals)
3457 elif config.lustre and config.portals:
3459 # if --lustre and --portals, normalize portals
3460 # can ignore PORTALS_DIR here, since it is probably useless here
3461 config.portals = os.path.join(config.lustre, config.portals)
3462 debug('config.portals B', config.portals)
3464 def sysctl(path, val):
3465 debug("+ sysctl", path, val)
3469 fp = open(os.path.join('/proc/sys', path), 'w')
3476 def sys_set_debug_path():
3477 sysctl('portals/debug_path', config.debug_path)
3479 def sys_set_lustre_upcall(upcall):
3480 # the command line option overrides the value in the node config
3481 if config.lustre_upcall:
3482 upcall = config.lustre_upcall
3484 upcall = config.upcall
3486 lctl.set_lustre_upcall(upcall)
3488 def sys_set_portals_upcall(upcall):
3489 # the command line option overrides the value in the node config
3490 if config.portals_upcall:
3491 upcall = config.portals_upcall
3493 upcall = config.upcall
3495 sysctl('portals/upcall', upcall)
3497 def sys_set_timeout(timeout):
3498 # the command line option overrides the value in the node config
3499 if config.timeout and config.timeout > 0:
3500 timeout = config.timeout
3501 if timeout != None and timeout > 0:
3502 lctl.set_timeout(timeout)
3504 def sys_tweak_socknal ():
3505 # reserve at least 8MB, or we run out of RAM in skb_alloc under read
3506 if sys_get_branch() == '2.6':
3507 fp = open('/proc/meminfo')
3508 lines = fp.readlines()
3513 if a[0] == 'MemTotal:':
3515 debug("memtotal" + memtotal)
3516 if int(memtotal) < 262144:
3517 minfree = int(memtotal) / 16
3520 debug("+ minfree ", minfree)
3521 sysctl("vm/min_free_kbytes", minfree)
3522 if config.single_socket:
3523 sysctl("socknal/typed", 0)
3525 def sys_optimize_elan ():
3526 procfiles = ["/proc/elan/config/eventint_punt_loops",
3527 "/proc/qsnet/elan3/config/eventint_punt_loops",
3528 "/proc/qsnet/elan4/config/elan4_mainint_punt_loops"]
3530 if os.access(p, os.W_OK):
3531 run ("echo 1 > " + p)
3533 def sys_set_ptldebug(ptldebug):
3535 ptldebug = config.ptldebug
3538 val = eval(ptldebug, ptldebug_names)
3539 val = "0x%x" % (val & 0xffffffffL)
3540 sysctl('portals/debug', val)
3541 except NameError, e:
3544 def sys_set_subsystem(subsystem):
3545 if config.subsystem:
3546 subsystem = config.subsystem
3549 val = eval(subsystem, subsystem_names)
3550 val = "0x%x" % (val & 0xffffffffL)
3551 sysctl('portals/subsystem_debug', val)
3552 except NameError, e:
3555 def sys_set_netmem_max(path, max):
3556 debug("setting", path, "to at least", max)
3564 fp = open(path, 'w')
3565 fp.write('%d\n' %(max))
3568 def sys_make_devices():
3569 if not os.access('/dev/portals', os.R_OK):
3570 run('mknod /dev/portals c 10 240')
3571 if not os.access('/dev/obd', os.R_OK):
3572 run('mknod /dev/obd c 10 241')
3574 # Add dir to the global PATH, if not already there.
3575 def add_to_path(new_dir):
3576 syspath = string.split(os.environ['PATH'], ':')
3577 if new_dir in syspath:
3579 os.environ['PATH'] = os.environ['PATH'] + ':' + new_dir
3581 def default_debug_path():
3582 path = '/tmp/lustre-log'
3583 if os.path.isdir('/r'):
3588 def default_gdb_script():
3589 script = '/tmp/ogdb'
3590 if os.path.isdir('/r'):
3591 return '/r' + script
3595 DEFAULT_PATH = ('/sbin', '/usr/sbin', '/bin', '/usr/bin')
3596 # ensure basic elements are in the system path
3597 def sanitise_path():
3598 for dir in DEFAULT_PATH:
3601 # global hack for the --select handling
3603 def init_select(args):
3604 # args = [service=nodeA,service2=nodeB service3=nodeC]
3607 list = string.split(arg, ',')
3609 srv, node = string.split(entry, '=')
3610 tgt_select[srv] = node
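# Editor's illustration (hedged): init_select(["mds1=nodeA,ost1=nodeB"]) leaves
# tgt_select == {'mds1': 'nodeA', 'ost1': 'nodeB'}; the service and node names
# are invented, and get_select() below returns the chosen node for a service.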
3612 def get_select(srv):
3613 if tgt_select.has_key(srv):
3614 return tgt_select[srv]
3618 FLAG = Lustre.Options.FLAG
3619 PARAM = Lustre.Options.PARAM
3620 INTPARAM = Lustre.Options.INTPARAM
3621 PARAMLIST = Lustre.Options.PARAMLIST
3623 ('verbose,v', "Print system commands as they are run"),
3624 ('ldapurl', "LDAP server URL, e.g. ldap://localhost", PARAM),
3625 ('config', "Cluster config name used for LDAP query", PARAM),
3626 ('select', "service=nodeA,service2=nodeB ", PARAMLIST),
3627 ('node', "Load config for <nodename>", PARAM),
3628 ('sec', "security flavor <null|krb5i|krb5p> between this client and the mds", PARAM),
3629 ('mds_sec', "security flavor <null|krb5i|krb5p> between this client and the mds", PARAM),
3630 ('oss_sec', "security flavor <null|krb5i|krb5p> between this client and the ost", PARAM),
3631 ('mds_mds_sec', "security flavor <null|krb5i|krb5p> between this mds and other mds", PARAM),
3632 ('mds_oss_sec', "security flavor <null|krb5i|krb5p> between this mds and the ost", PARAM),
3633 ('mds_deny_sec', "security flavor <null|krb5i|krb5p> denied by this mds", PARAM),
3634 ('ost_deny_sec', "security flavor <null|krb5i|krb5p> denied by this ost", PARAM),
3635 ('cleanup,d', "Cleans up config. (Shutdown)"),
3636 ('force,f', "Forced unmounting and/or obd detach during cleanup",
3638 ('single_socket', "socknal option: only use one socket instead of bundle",
3640 ('failover',"""Used to shut down without saving state.
3641 This will allow this node to "give up" a service to
3642 another node for failover purposes. This will not
3643 be a clean shutdown.""",
3645 ('gdb', """Prints message after creating gdb module script
3646 and sleeps for 5 seconds."""),
3647 ('noexec,n', """Prints the commands and steps that will be run for a
3648 config without executing them. This can be used to check if a
3649 config file is doing what it should be doing."""),
3650 ('nomod', "Skip load/unload module step."),
3651 ('nosetup', "Skip device setup/cleanup step."),
3652 ('reformat', "Reformat all devices (without question)"),
3653 ('mkfsoptions', "Additional options for the mk*fs command line", PARAM),
3654 ('mountfsoptions', "Additional options for mount fs command line", PARAM),
3655 ('clientoptions', "Additional mount options for the Lustre client", PARAM),
3656 ('dump', "Dump the kernel debug log to file before portals is unloaded",
3658 ('write_conf', "Save all the client config information on mds."),
3659 ('record', "Write config information on mds."),
3660 ('record_log', "Name of config record log.", PARAM),
3661 ('record_device', "MDS device name that will record the config commands",
3663 ('root_squash', "MDS squash root to appointed uid",
3665 ('no_root_squash', "Don't squash root for appointed nid",
3667 ('minlevel', "Minimum level of services to configure/cleanup",
3669 ('maxlevel', """Maximum level of services to configure/cleanup
3670 Levels are approximately like:
3675 70 - mountpoint, echo_client, osc, mdc, lov""",
3677 ('lustre', """Base directory of lustre sources. This parameter will
3678 cause lconf to load modules from a source tree.""", PARAM),
3679 ('portals', """Portals source directory. If this is a relative path,
3680 then it is assumed to be relative to lustre. """, PARAM),
3681 ('timeout', "Set recovery timeout", INTPARAM),
3682 ('upcall', "Set both portals and lustre upcall script", PARAM),
3683 ('lustre_upcall', "Set lustre upcall script", PARAM),
3684 ('portals_upcall', "Set portals upcall script", PARAM),
3685 ('lctl_dump', "Save lctl ioctls to the dumpfile argument", PARAM),
3686 ('ptldebug', "Set the portals debug level", PARAM),
3687 ('subsystem', "Set the portals debug subsystem", PARAM),
3688 ('gdb_script', "Fullname of gdb debug script", PARAM, default_gdb_script()),
3689 ('debug_path', "Path to save debug dumps", PARAM, default_debug_path()),
3690 # Client recovery options
3691 ('recover', "Recover a device"),
3692 ('group', "The group of devices to configure or cleanup", PARAM),
3693 ('tgt_uuid', "The failed target (required for recovery)", PARAM),
3694 ('client_uuid', "The failed client (required for recovery)", PARAM),
3695 ('conn_uuid', "The failed connection (required for recovery)", PARAM),
3697 ('inactive', """The name of an inactive service, to be ignored during
3698 mounting (currently OST-only). Can be repeated.""",
3703 global lctl, config, toplustreDB, CONFIG_FILE, mod_manager
3705 # in the upcall this is set to SIG_IGN
3706 signal.signal(signal.SIGCHLD, signal.SIG_DFL)
3708 cl = Lustre.Options("lconf", "config.xml", lconf_options)
3710 config, args = cl.parse(sys.argv[1:])
3711 except Lustre.OptionError, e:
3715 setupModulePath(sys.argv[0])
3717 host = socket.gethostname()
3719 # the PRNG is normally seeded with time(), which is not so good for starting
3720 # time-synchronized clusters
3721 input = open('/dev/urandom', 'r')
3723 print 'Unable to open /dev/urandom!'
3725 seed = input.read(32)
3731 init_select(config.select)
3734 # allow config to be fetched via HTTP, but only with python2
3735 if sys.version[0] != '1' and args[0].startswith('http://'):
3738 config_file = urllib2.urlopen(args[0])
3739 except (urllib2.URLError, socket.error), err:
3740 if hasattr(err, 'args'):
3742 print "Could not access '%s': %s" %(args[0], err)
3744 elif not os.access(args[0], os.R_OK):
3745 print 'File not found or readable:', args[0]
3749 config_file = open(args[0], 'r')
3751 dom = xml.dom.minidom.parse(config_file)
3753 panic("%s does not appear to be a config file." % (args[0]))
3754 sys.exit(1) # make sure to die here, even in debug mode.
3756 CONFIG_FILE = args[0]
3757 lustreDB = Lustre.LustreDB_XML(dom.documentElement, dom.documentElement)
3758 if not config.config:
3759 config.config = os.path.basename(args[0])  # use full path?
3760 if config.config[-4:] == '.xml':
3761 config.config = config.config[:-4]
3762 elif config.ldapurl:
3763 if not config.config:
3764 panic("--ldapurl requires --config name")
3765 dn = "config=%s,fs=lustre" % (config.config)
3766 lustreDB = Lustre.LustreDB_LDAP('', {}, base=dn, url = config.ldapurl)
3767 elif config.ptldebug or config.subsystem:
3768 sys_set_ptldebug(None)
3769 sys_set_subsystem(None)
3772 print 'Missing config file or ldap URL.'
3773 print 'see lconf --help for command summary'
3776 toplustreDB = lustreDB
3778 ver = lustreDB.get_version()
3780 panic("No version found in config data, please recreate.")
3781 if ver != Lustre.CONFIG_VERSION:
3782 panic("Config version", ver, "does not match lconf version",
3783 Lustre.CONFIG_VERSION)
3787 node_list.append(config.node)
3790 node_list.append(host)
3791 node_list.append('localhost')
3793 debug("configuring for host: ", node_list)
3796 config.debug_path = config.debug_path + '-' + host
3797 config.gdb_script = config.gdb_script + '-' + host
3799 lctl = LCTLInterface('lctl')
3801 if config.lctl_dump:
3802 lctl.use_save_file(config.lctl_dump)
3805 if not (config.record_device and config.record_log):
3806 panic("When recording, both --record_log and --record_device must be specified.")
3807 lctl.clear_log(config.record_device, config.record_log)
3808 lctl.record(config.record_device, config.record_log)
3810 # init module manager
3811 mod_manager = kmod_manager(config.lustre, config.portals)
3813 doHost(lustreDB, node_list)
3815 if not config.record:
3820 process_updates(lustreDB, config.record_device, config.record_log)
3822 if __name__ == "__main__":
3825 except Lustre.LconfError, e:
3827 # traceback.print_exc(file=sys.stdout)
3829 except CommandError, e:
3833 if first_cleanup_error:
3834 sys.exit(first_cleanup_error)