- fixed typo in lconf LMV:cleanup_module()
[fs/lustre-release.git] / lustre / utils / lconf
index 9f57bb0..ff9c71b 100755 (executable)
@@ -23,7 +23,7 @@
 # lconf is the main driver script for starting and stopping
 # lustre filesystem services.
 #
-# Based in part on the XML obdctl modifications done by Brian Behlendorf 
+# Based in part on the XML obdctl modifications done by Brian Behlendorf
 
 import sys, getopt, types
 import string, os, stat, popen2, socket, time, random, fcntl, select
@@ -39,11 +39,13 @@ PYMOD_DIR = "/usr/lib/lustre/python"
 
 def development_mode():
     base = os.path.dirname(sys.argv[0])
-    if os.access(base+"/Makefile.am", os.R_OK):
+    if os.access(base+"/Makefile", os.R_OK):
         return 1
     return 0
 
-if not development_mode():
+if development_mode():
+    sys.path.append('../utils')
+else:
     sys.path.append(PYMOD_DIR)
 
 import Lustre
@@ -59,10 +61,10 @@ MAX_LOOP_DEVICES = 256
 PORTALS_DIR = 'portals'
 
 # Needed to call lconf --record
-CONFIG_FILE = "" 
+CONFIG_FILE = ""
 
 # Please keep these in sync with the values in portals/kp30.h
-ptldebug_names = { 
+ptldebug_names = {
     "trace" :     (1 << 0),
     "inode" :     (1 << 1),
     "super" :     (1 << 2),
@@ -85,34 +87,35 @@ ptldebug_names = {
     "ha" :        (1 << 19),
     "rpctrace" :  (1 << 20),
     "vfstrace" :  (1 << 21),
+    "reada" :     (1 << 22),
+    "config" :    (1 << 23),
     }
 
 subsystem_names = {
-    "undefined" :    (0 << 24),
-    "mdc" :          (1 << 24),
-    "mds" :          (2 << 24),
-    "osc" :          (3 << 24),
-    "ost" :          (4 << 24),
-    "class" :        (5 << 24),
-    "obdfs" :        (6 << 24),
-    "llite" :        (7 << 24),
-    "rpc" :          (8 << 24),
-    "ext2obd" :      (9 << 24),
-    "portals" :     (10 << 24),
-    "socknal" :     (11 << 24),
-    "qswnal" :      (12 << 24),
-    "pinger" :      (13 << 24),
-    "filter" :      (14 << 24),
-    "trace" :       (15 << 24),
-    "echo" :        (16 << 24),
-    "ldlm" :        (17 << 24),
-    "lov" :         (18 << 24),
-    "gmnal" :       (19 << 24),
-    "ptlrouter" :   (20 << 24),
-    "cobd" :        (21 << 24),
-    "ptlbd" :       (22 << 24),
-    "log" :         (23 << 24),
-    "mgmt" :        (24 << 24),
+    "undefined" :    (1 << 0),
+    "mdc" :          (1 << 1),
+    "mds" :          (1 << 2),
+    "osc" :          (1 << 3),
+    "ost" :          (1 << 4),
+    "class" :        (1 << 5),
+    "log" :          (1 << 6),
+    "llite" :        (1 << 7),
+    "rpc" :          (1 << 8),
+    "mgmt" :         (1 << 9),
+    "portals" :      (1 << 10),
+    "socknal" :      (1 << 11),
+    "qswnal" :       (1 << 12),
+    "pinger" :       (1 << 13),
+    "filter" :       (1 << 14),
+    "ptlbd" :        (1 << 15),
+    "echo" :         (1 << 16),
+    "ldlm" :         (1 << 17),
+    "lov" :          (1 << 18),
+    "gmnal" :        (1 << 19),
+    "ptlrouter" :    (1 << 20),
+    "cobd" :         (1 << 21),
+    "ibnal" :        (1 << 22),
+    "cmobd" :        (1 << 23),
     }
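
Both tables map flag names to bit values, so a set of flags is simply the bitwise OR of the chosen entries. A minimal sketch of folding such a table into one mask (the helper below is illustrative only, not a function of lconf):

    # Sketch only: combine a comma-separated list of names into a single mask
    # using a table shaped like ptldebug_names / subsystem_names above.
    import string

    def names2mask(namestr, table):
        mask = 0
        for name in string.split(namestr, ','):
            mask = mask | table[string.strip(name)]
        return mask

    # e.g. names2mask("mdc,mds,ldlm", subsystem_names) == (1 << 1) | (1 << 2) | (1 << 17)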
 
 
@@ -122,11 +125,11 @@ def cleanup_error(rc):
     if not first_cleanup_error:
         first_cleanup_error = rc
 
-# ============================================================ 
+# ============================================================
 # debugging and error funcs
 
 def fixme(msg = "this feature"):
-    raise Lustre.LconfError, msg + ' not implmemented yet.'
+    raise Lustre.LconfError, msg + ' not implemented yet.'
 
 def panic(*args):
     msg = string.join(map(str,args))
@@ -239,7 +242,7 @@ class DaemonHandler:
             return pid
         except IOError:
             return 0
-        
+
     def clean_pidfile(self):
         """ Remove a stale pidfile """
         log("removing stale pidfile:", self.pidfile())
@@ -247,7 +250,7 @@ class DaemonHandler:
             os.unlink(self.pidfile())
         except OSError, e:
             log(self.pidfile(), e)
-            
+
 class AcceptorHandler(DaemonHandler):
     def __init__(self, port, net_type, send_mem, recv_mem, irq_aff):
         DaemonHandler.__init__(self, "acceptor")
@@ -256,8 +259,6 @@ class AcceptorHandler(DaemonHandler):
         self.send_mem = send_mem
         self.recv_mem = recv_mem
 
-        if net_type == 'toe':
-            self.flags = self.flags + ' -N 4'
         if irq_aff:
             self.flags = self.flags + ' -i'
 
@@ -266,7 +267,7 @@ class AcceptorHandler(DaemonHandler):
 
     def command_line(self):
         return string.join(map(str,('-s', self.send_mem, '-r', self.recv_mem, self.flags, self.port)))
-    
+
 acceptors = {}
 
 # start the acceptors
@@ -286,14 +287,14 @@ def run_one_acceptor(port):
         if not daemon.running():
             daemon.start()
     else:
-         panic("run_one_acceptor: No acceptor defined for port:", port)   
-        
+         panic("run_one_acceptor: No acceptor defined for port:", port)
+
 def stop_acceptor(port):
     if acceptors.has_key(port):
         daemon = acceptors[port]
         if daemon.running():
             daemon.stop()
-        
+
 
 # ============================================================
 # handle lctl interface
@@ -318,7 +319,7 @@ class LCTLInterface:
 
     def use_save_file(self, file):
         self.save_file = file
-        
+
     def record(self, dev_name, logname):
         log("Recording log", logname, "on", dev_name)
         self.record_device = dev_name
@@ -350,13 +351,14 @@ class LCTLInterface:
     device $%s
     record %s
     %s""" % (self.record_device, self.record_log, cmds)
-            
+
         debug("+", cmd_line, cmds)
         if config.noexec: return (0, [])
 
         child = popen2.Popen3(cmd_line, 1) # Capture stdout and stderr from command
         child.tochild.write(cmds + "\n")
         child.tochild.close()
+#      print "LCTL:", cmds
 
         # From "Python Cookbook" from O'Reilly
         outfile = child.fromchild
@@ -401,7 +403,16 @@ class LCTLInterface:
             raise CommandError(self.lctl, out, rc)
         return rc, out
 
-            
+
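+    # remove an existing config log from a device so a fresh one can be
+    # recorded in its place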
+    def clear_log(self, dev, log):
+        """ clear an existing log """
+        cmds =  """
+  device $%s
+  probe
+  clear_log %s
+  quit """ % (dev, log)
+        self.run(cmds)
+
     def network(self, net, nid):
         """ set mynid """
         cmds =  """
@@ -417,7 +428,7 @@ class LCTLInterface:
 
     def add_autoconn(self, net_type, send_mem, recv_mem, nid, hostaddr,
                      port, flags):
-        if net_type  in ('tcp', 'toe') and not config.lctl_dump:
+        if net_type  in ('tcp',) and not config.lctl_dump:
             cmds =  """
   network %s
   send_mem %d
@@ -428,10 +439,10 @@ class LCTLInterface:
              recv_mem,
              nid, hostaddr, port, flags )
             self.run(cmds)
-    
+
     def connect(self, srv):
         self.add_uuid(srv.net_type, srv.nid_uuid, srv.nid)
-        if srv.net_type  in ('tcp', 'toe') and not config.lctl_dump:
+        if srv.net_type  in ('tcp',) and not config.lctl_dump:
             flags = 's'
             if srv.irq_affinity:
                 flags = flags + 'i'
@@ -444,7 +455,7 @@ class LCTLInterface:
     device $%s
     recover %s""" %(dev_name, new_conn)
         self.run(cmds)
-                
+
     # add a route to a range
     def add_route(self, net, gw, lo, hi):
         cmds =  """
@@ -457,7 +468,7 @@ class LCTLInterface:
         except CommandError, e:
             log ("ignore: ")
             e.dump()
-                
+
     def del_route(self, net, gw, lo, hi):
         cmds =  """
   ignore_errors
@@ -492,7 +503,7 @@ class LCTLInterface:
 
 
     def del_autoconn(self, net_type, nid, hostaddr):
-        if net_type  in ('tcp', 'toe') and not config.lctl_dump:
+        if net_type  in ('tcp',) and not config.lctl_dump:
                 cmds =  """
   ignore_errors
   network %s
@@ -500,11 +511,11 @@ class LCTLInterface:
   quit""" % (net_type,
              nid, hostaddr)
                 self.run(cmds)
-        
+
     # disconnect one connection
     def disconnect(self, srv):
         self.del_uuid(srv.nid_uuid)
-        if srv.net_type  in ('tcp', 'toe') and not config.lctl_dump:
+        if srv.net_type  in ('tcp',) and not config.lctl_dump:
             self.del_autoconn(srv.net_type, srv.nid, srv.hostaddr)
 
     def del_uuid(self, uuid):
@@ -529,13 +540,13 @@ class LCTLInterface:
   quit""" % (type, name, uuid)
         self.run(cmds)
         
-    def setup(self,  name, setup = ""):
+    def setup(self, name, setup = ""):
         cmds = """
   cfg_device %s
   setup %s
   quit""" % (name, setup)
         self.run(cmds)
-        
+
 
     # create a new device with lctl
     def newdev(self, type, name, uuid, setup = ""):
@@ -545,7 +556,7 @@ class LCTLInterface:
         except CommandError, e:
             self.cleanup(name, uuid, 0)
             raise e
-        
+
 
     # cleanup a device
     def cleanup(self, name, uuid, force, failover = 0):
@@ -560,23 +571,42 @@ class LCTLInterface:
         self.run(cmds)
 
     # create an lov
-    def lov_setup(self, name, uuid, desc_uuid, mdsuuid, stripe_cnt,
-                  stripe_sz, stripe_off,
-                      pattern, devlist):
+    def lov_setup(self, name, uuid, desc_uuid, stripe_cnt,
+                  stripe_sz, stripe_off, pattern):
         cmds = """
   attach lov %s %s
-  lov_setup %s %d %d %d %s %s
-  quit""" % (name, uuid, desc_uuid, stripe_cnt, stripe_sz, stripe_off,
-             pattern, devlist)
+  lov_setup %s %d %d %d %s
+  quit""" % (name, uuid, desc_uuid, stripe_cnt, stripe_sz, stripe_off, pattern)
         self.run(cmds)
 
-    # create an lov
-    def lov_setconfig(self, uuid, mdsuuid, stripe_cnt, stripe_sz, stripe_off,
-                      pattern, devlist):
+    # add an OBD to a LOV
+    def lov_add_obd(self, name, uuid, obd_uuid, index, gen):
         cmds = """
-  cfg_device $%s
-  lov_setconfig %s %d %d %d %s %s
-  quit""" % (mdsuuid, uuid, stripe_cnt, stripe_sz, stripe_off, pattern, devlist)
+  lov_modify_tgts add %s %s %s %s
+  quit""" % (name, obd_uuid, index, gen)
+        self.run(cmds)
+
+    # create an lmv
+    def lmv_setup(self, name, uuid, desc_uuid, devlist):
+        cmds = """
+  attach lmv %s %s
+  lmv_setup %s %s
+  quit""" % (name, uuid, desc_uuid, devlist)
+        self.run(cmds)
+
+    # delete an OBD from a LOV
+    def lov_del_obd(self, name, uuid, obd_uuid, index, gen):
+        cmds = """
+  lov_modify_tgts del %s %s %s %s
+  quit""" % (name, obd_uuid, index, gen)
+        self.run(cmds)
+
+    # deactivate an OBD
+    def deactivate(self, name):
+        cmds = """
+  device $%s
+  deactivate
+  quit""" % (name)
         self.run(cmds)
 
     # dump the log file
@@ -624,7 +654,6 @@ class LCTLInterface:
   quit""" % (timeout,)
         self.run(cmds)
 
-    # delete mount options
     def set_lustre_upcall(self, upcall):
         cmds = """
   set_lustre_upcall %s
@@ -693,13 +722,14 @@ def do_find_file(base, mod):
                 return module
 
 def find_module(src_dir, dev_dir, modname):
-    mod = '%s.o' % (modname)
-    module = src_dir +'/'+ dev_dir +'/'+ mod
-    try: 
-       if os.access(module, os.R_OK):
-            return module
-    except OSError:
-        pass
+    modbase = src_dir +'/'+ dev_dir +'/'+ modname
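+    # 2.6 kernels build modules as .ko, 2.4 kernels as .o; try both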
+    for modext in '.ko', '.o':
+        module = modbase + modext
+        try:
+            if os.access(module, os.R_OK):
+                return module
+        except OSError:
+            pass
     return None
 
 # is the path a block device?
@@ -710,12 +740,13 @@ def is_block(path):
     except OSError:
         return 0
     return stat.S_ISBLK(s[stat.ST_MODE])
-
+    
 # build fs according to type
 # fixme: dangerous
-def mkfs(dev, devsize, fstype, jsize, mkfsoptions, isblock=1):
+def mkfs(dev, devsize, fstype, jsize, isize, mkfsoptions, isblock=1):
     block_cnt = ''
     jopt = ''
+    iopt = ''
     if devsize:
         if devsize < 8000:
             panic("size of filesystem on '%s' must be larger than 8MB, but is set to %s"%
@@ -723,9 +754,22 @@ def mkfs(dev, devsize, fstype, jsize, mkfsoptions, isblock=1):
         # devsize is in 1k, and fs block count is in 4k
         block_cnt = devsize/4
 
-    if fstype in ('ext3', 'extN'):
+    if fstype in ('ext3', 'extN', 'ldiskfs'):
         # ext3 journal size is in megabytes
+        if jsize == 0:
+            if devsize == 0:
+                if not is_block(dev):
+                    ret, out = runcmd("ls -l %s" %dev)
+                    devsize = int(string.split(out[0])[4]) / 1024
+                else:
+                    ret, out = runcmd("sfdisk -s %s" %dev)
+                    devsize = int(out[0])
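+            # heuristic: for devices over 1GB (devsize is in 1k blocks), use
+            # about 4MB of journal per 100MB of device, capped at 400MB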
+            if devsize > 1024 * 1024:
+                jsize = ((devsize / 102400) * 4)
+            if jsize > 400:
+                jsize = 400
         if jsize:  jopt = "-J size=%d" %(jsize,)
+        if isize:  iopt = "-I %d" %(isize,)
         mkfs = 'mkfs.ext2 -j -b 4096 '
         if not isblock or config.force:
             mkfs = mkfs + ' -F '
@@ -740,11 +784,11 @@ def mkfs(dev, devsize, fstype, jsize, mkfsoptions, isblock=1):
         mkfs = mkfs + ' ' + config.mkfsoptions
     if mkfsoptions != None:
         mkfs = mkfs + ' ' + mkfsoptions
-    (ret, out) = run (mkfs, jopt, dev, block_cnt)
+    (ret, out) = run (mkfs, jopt, iopt, dev, block_cnt)
     if ret:
         panic("Unable to build fs:", dev, string.join(out))
     # enable hash tree indexing on fsswe
-    if fstype in ('ext3', 'extN'):
+    if fstype in ('ext3', 'extN', 'ldiskfs'):
         htree = 'echo "feature FEATURE_C5" | debugfs -w'
         (ret, out) = run (htree, dev)
         if ret:
@@ -760,8 +804,8 @@ def loop_base():
             panic ("can't access loop devices")
     return loop
     
-# find loop device assigned to thefile
-def find_loop(file):
+# find loop device assigned to the file
+def find_assigned_loop(file):
     loop = loop_base()
     for n in xrange(0, MAX_LOOP_DEVICES):
         dev = loop + str(n)
@@ -776,19 +820,33 @@ def find_loop(file):
     return ''
 
 # create file if necessary and assign the first free loop device
-def init_loop(file, size, fstype, journal_size, mkfsoptions, reformat):
-    dev = find_loop(file)
+def init_loop(file, size, fstype, journal_size, inode_size, 
+              mkfsoptions, reformat, autoformat, backfstype, backfile):
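+    # for smfs the backing file/device is what actually gets formatted and,
+    # if it is not already a block device, attached to a loop device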
+    if fstype == 'smfs':
+        realfile = backfile
+        realfstype = backfstype
+        if is_block(backfile):
+            if reformat or (need_format(realfstype, backfile) and autoformat == 'yes'):
+                mkfs(realfile, size, realfstype, journal_size, inode_size, mkfsoptions, isblock=0)
+            return realfile
+    else:
+        realfile = file
+        realfstype = fstype
+            
+    dev = find_assigned_loop(realfile)
     if dev:
-        print 'WARNING file:', file, 'already mapped to', dev
+        print 'WARNING file:', realfile, 'already mapped to', dev
         return dev
-    if reformat or not os.access(file, os.R_OK | os.W_OK):
+            
+    if reformat or not os.access(realfile, os.R_OK | os.W_OK):
         if size < 8000:
-            panic("size of loopback file '%s' must be larger than 8MB, but is set to %s" % (file,size))
-        (ret, out) = run("dd if=/dev/zero bs=1k count=0 seek=%d of=%s" %(size,
-                                                                         file))
+            panic("size of loopback file '%s' must be larger than 8MB, but is set to %s" % (realfile, size))
+        (ret, out) = run("dd if=/dev/zero bs=1k count=0 seek=%d of=%s" %(size, realfile))
         if ret:
-            panic("Unable to create backing store:", file)
-        mkfs(file, size, fstype, journal_size, mkfsoptions, isblock=0)
+            panic("Unable to create backing store:", realfile)
+            
+        mkfs(realfile, size, realfstype, journal_size, inode_size, 
+             mkfsoptions, isblock=0)
 
     loop = loop_base()
     # find next free loop
@@ -797,7 +855,7 @@ def init_loop(file, size, fstype, journal_size, mkfsoptions, reformat):
         if os.access(dev, os.R_OK):
             (stat, out) = run('losetup', dev)
             if stat:
-                run('losetup', dev, file)
+                run('losetup', dev, realfile)
                 return dev
         else:
             print "out of loop devices"
@@ -807,7 +865,7 @@ def init_loop(file, size, fstype, journal_size, mkfsoptions, reformat):
 
 # undo loop assignment
 def clean_loop(file):
-    dev = find_loop(file)
+    dev = find_assigned_loop(file)
     if dev:
         ret, out = run('losetup -d', dev)
         if ret:
@@ -816,23 +874,26 @@ def clean_loop(file):
 
 # determine if dev is formatted as a <fstype> filesystem
 def need_format(fstype, dev):
-    # FIXME don't know how to implement this    
+    # FIXME don't know how to implement this
     return 0
 
 # initialize a block device if needed
 def block_dev(dev, size, fstype, reformat, autoformat, journal_size,
-              mkfsoptions):
-    if config.noexec: return dev
-    if not is_block(dev):
-        dev = init_loop(dev, size, fstype, journal_size, mkfsoptions, reformat)
+              inode_size, mkfsoptions, backfstype, backdev):
+    if config.noexec: 
+        return dev
+        
+    if fstype == 'smfs' or not is_block(dev):
+        dev = init_loop(dev, size, fstype, journal_size, inode_size,
+                        mkfsoptions, reformat, autoformat, backfstype, backdev)
     elif reformat or (need_format(fstype, dev) and autoformat == 'yes'):
-        mkfs(dev, size, fstype, journal_size, mkfsoptions, isblock=0)
-
+        mkfs(dev, size, fstype, journal_size, inode_size, mkfsoptions,
+             isblock=0)
 #    else:
 #        panic("device:", dev,
 #              "not prepared, and autoformat is not set.\n",
 #              "Rerun with --reformat option to format ALL filesystems")
-        
+
     return dev
 
 def if2addr(iface):
@@ -844,19 +905,37 @@ def if2addr(iface):
     ip = string.split(addr, ':')[1]
     return ip
 
+def def_mount_options(fstype, target):
+    """returns default mount options for passed fstype and target (mds, ost)"""
+    if fstype == 'ext3' or fstype == 'ldiskfs':
+        mountfsoptions = "errors=remount-ro"
+        if target == 'ost' and sys_get_branch() == '2.4':
+            mountfsoptions = "%s,asyncdel" % (mountfsoptions)
+        return mountfsoptions
+    return ""
+        
+def sys_get_elan_position_file():
+    procfiles = ["/proc/elan/device0/position",
+                 "/proc/qsnet/elan4/device0/position",
+                 "/proc/qsnet/elan3/device0/position"]
+    for p in procfiles:
+        if os.access(p, os.R_OK):
+            return p
+    return ""
+
 def sys_get_local_nid(net_type, wildcard, cluster_id):
     """Return the local nid."""
     local = ""
-    if os.access('/proc/elan/device0/position', os.R_OK):
+    if sys_get_elan_position_file():
         local = sys_get_local_address('elan', '*', cluster_id)
     else:
         local = sys_get_local_address(net_type, wildcard, cluster_id)
     return local
-        
+
 def sys_get_local_address(net_type, wildcard, cluster_id):
     """Return the local address for the network type."""
     local = ""
-    if net_type in ('tcp', 'toe'):
+    if net_type in ('tcp',):
         if  ':' in wildcard:
             iface, star = string.split(wildcard, ':')
             local = if2addr(iface)
@@ -866,9 +945,12 @@ def sys_get_local_address(net_type, wildcard, cluster_id):
             host = socket.gethostname()
             local = socket.gethostbyname(host)
     elif net_type == 'elan':
-        # awk '/NodeId/ { print $2 }' '/proc/elan/device0/position'
+        # awk '/NodeId/ { print $2 }' 'sys_get_elan_position_file()'
+        f = sys_get_elan_position_file()
+        if not f:
+            panic ("unable to determine local Elan ID")
         try:
-            fp = open('/proc/elan/device0/position', 'r')
+            fp = open(f, 'r')
             lines = fp.readlines()
             fp.close()
             for l in lines:
@@ -877,7 +959,7 @@ def sys_get_local_address(net_type, wildcard, cluster_id):
                     elan_id = a[1]
                     break
             try:
-                nid = my_int(cluster_id) + my_int(elan_id) 
+                nid = my_int(cluster_id) + my_int(elan_id)
                 local = "%d" % (nid)
             except ValueError, e:
                 local = elan_id
@@ -885,18 +967,25 @@ def sys_get_local_address(net_type, wildcard, cluster_id):
             log(e)
     elif net_type == 'gm':
         fixme("automatic local address for GM")
-    elif net_type == 'scimac':
-        scinode="/opt/scali/sbin/scinode"
-        if os.path.exists(scinode):
-            (rc,local) = run(scinode)
-        else:
-            panic (scinode, " not found on node with scimac networking")
-        if rc:
-            panic (scinode, " failed")
-        local=string.rstrip(local[0])
 
     return local
 
+def sys_get_branch():
+    """Returns kernel release"""
+    try:
+        fp = open('/proc/sys/kernel/osrelease')
+        lines = fp.readlines()
+        fp.close()
+        
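+        # osrelease looks like '2.4.21-...'; return just the 'major.minor' part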
+        for l in lines:
+            version = string.split(l)
+            a = string.split(version[0], '.')
+            return a[0] + '.' + a[1]
+    except IOError, e:
+        log(e)
+    return ""
+
+
 def mod_loaded(modname):
     """Check if a module is already loaded. Look in /proc/modules for it."""
     try:
@@ -947,7 +1036,7 @@ def fs_is_mounted(path):
     except IOError, e:
         log(e)
     return 0
-        
+
 
 class kmod:
     """Manage kernel modules"""
@@ -1013,7 +1102,7 @@ class Module:
         self._server = None
         self._connected = 0
         self.kmod = kmod(config.lustre, config.portals)
-        
+
     def info(self, *args):
         msg = string.join(map(str,args))
         print self.module_name + ":", self.name, self.uuid, msg
@@ -1027,7 +1116,7 @@ class Module:
             log(self.module_name, "cleanup failed: ", self.name)
             e.dump()
             cleanup_error(e.rc)
-            
+
     def add_portals_module(self, dev_dir, modname):
         """Append a module to list of modules to load."""
         self.kmod.add_portals_module(dev_dir, modname)
@@ -1039,7 +1128,7 @@ class Module:
     def load_module(self):
         """Load all the modules in the list in the order they appear."""
         self.kmod.load_module()
-            
+
     def cleanup_module(self):
         """Unload the modules in the list in reverse order."""
         if self.safe_to_clean():
@@ -1047,10 +1136,10 @@ class Module:
 
     def safe_to_clean(self):
         return 1
-        
+
     def safe_to_clean_modules(self):
         return self.safe_to_clean()
-        
+
 class Network(Module):
     def __init__(self,db):
         Module.__init__(self, 'NETWORK', db)
@@ -1080,25 +1169,22 @@ class Network(Module):
                 panic("unable to set hostaddr for", self.net_type, self.hostaddr, self.cluster_id)
             debug("hostaddr:", self.hostaddr)
 
-        self.add_portals_module("libcfs", 'portals')
+        self.add_portals_module("libcfs", 'libcfs')
+        self.add_portals_module("portals", 'portals')
         if node_needs_router():
             self.add_portals_module("router", 'kptlrouter')
         if self.net_type == 'tcp':
             self.add_portals_module("knals/socknal", 'ksocknal')
-        if self.net_type == 'toe':
-            self.add_portals_module("knals/toenal", 'ktoenal')
         if self.net_type == 'elan':
             self.add_portals_module("knals/qswnal", 'kqswnal')
         if self.net_type == 'gm':
             self.add_portals_module("knals/gmnal", 'kgmnal')
-        if self.net_type == 'scimac':
-            self.add_portals_module("knals/scimacnal", 'kscimacnal')
 
     def nid_to_uuid(self, nid):
         return "NID_%s_UUID" %(nid,)
 
     def prepare(self):
-        if is_network_prepared():
+        if not config.record and is_network_prepared():
             return
         self.info(self.net_type, self.nid, self.port)
         if not (config.record and self.generic_nid):
@@ -1148,6 +1234,9 @@ class Network(Module):
         if  node_is_router():
             self.disconnect_peer_gateways()
 
+    def correct_level(self, level, op=None):
+        return level
+
 class RouteTable(Module):
     def __init__(self,db):
         Module.__init__(self, 'ROUTES', db)
@@ -1156,7 +1245,7 @@ class RouteTable(Module):
                          lo, hi):
         # only setup connections for tcp NALs
         srvdb = None
-        if not net_type in ('tcp', 'toe'):
+        if not net_type in ('tcp',):
             return None
 
         # connect to target if route is to single node and this node is the gw
@@ -1176,9 +1265,9 @@ class RouteTable(Module):
             return None
 
         return Network(srvdb)
-        
+
     def prepare(self):
-        if is_network_prepared():
+        if not config.record and is_network_prepared():
             return
         self.info()
         for net_type, gw, gw_cluster_id, tgt_cluster_id, lo, hi in self.db.get_route_tbl():
@@ -1220,7 +1309,7 @@ class Management(Module):
         self.add_lustre_module('mgmt', 'mgmt_svc')
 
     def prepare(self):
-        if is_prepared(self.name):
+        if not config.record and is_prepared(self.name):
             return
         self.info()
         lctl.newdev("mgmt", self.name, self.uuid)
@@ -1232,6 +1321,9 @@ class Management(Module):
         if is_prepared(self.name):
             Module.cleanup(self)
 
+    def correct_level(self, level, op=None):
+        return level
+
 # This is only needed to load the modules; the LDLM device
 # is now created automatically.
 class LDLM(Module):
@@ -1247,82 +1339,180 @@ class LDLM(Module):
     def cleanup(self):
         return
 
+    def correct_level(self, level, op=None):
+        return level
+
+
 class LOV(Module):
-    def __init__(self, db, uuid, fs_name, name_override = None):
+    def __init__(self, db, uuid, fs_name, name_override = None, config_only = None):
         Module.__init__(self, 'LOV', db)
         if name_override != None:
             self.name = "lov_%s" % name_override
         self.add_lustre_module('lov', 'lov')
         self.mds_uuid = self.db.get_first_ref('mds')
-        mds= self.db.lookup(self.mds_uuid)
-        self.mds_name = mds.getName()
-        self.stripe_sz = self.db.get_val_int('stripesize', 65536)
+        self.stripe_sz = self.db.get_val_int('stripesize', 1048576)
         self.stripe_off = self.db.get_val_int('stripeoffset', 0)
         self.pattern = self.db.get_val_int('stripepattern', 0)
-        self.devlist = self.db.get_refs('obd')
+        self.devlist = self.db.get_lov_tgts('lov_tgt')
         self.stripe_cnt = self.db.get_val_int('stripecount', len(self.devlist))
         self.osclist = []
         self.desc_uuid = self.uuid
         self.uuid = generate_client_uuid(self.name)
         self.fs_name = fs_name
-        for obd_uuid in self.devlist:
+        if config_only:
+            self.config_only = 1
+            return
+        self.config_only = None
+        mds = self.db.lookup(self.mds_uuid)
+        self.mds_name = mds.getName()
+        for (obd_uuid, index, gen, active) in self.devlist:
+            if obd_uuid == '':
+                continue
             obd = self.db.lookup(obd_uuid)
             osc = get_osc(obd, self.uuid, fs_name)
             if osc:
-                self.osclist.append(osc)
+                self.osclist.append((osc, index, gen, active))
             else:
                 panic('osc not found:', obd_uuid)
-            
+    def get_uuid(self):
+        return self.uuid
+    def get_name(self):
+        return self.name
     def prepare(self):
-        if is_prepared(self.name):
+        if not config.record and is_prepared(self.name):
             return
-        for osc in self.osclist:
+        self.info(self.mds_uuid, self.stripe_cnt, self.stripe_sz,
+                  self.stripe_off, self.pattern, self.devlist,
+                  self.mds_name)
+        lctl.lov_setup(self.name, self.uuid, self.desc_uuid,  self.stripe_cnt,
+                       self.stripe_sz, self.stripe_off, self.pattern)
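+        # each OSC is then prepared and attached to the LOV, with its target
+        # index and generation, via lov_modify_tgts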
+        for (osc, index, gen, active) in self.osclist:
+            target_uuid = osc.target_uuid
             try:
                 # Only ignore connect failures with --force, which
                 # isn't implemented here yet.
+                osc.active = active
                 osc.prepare(ignore_connect_failure=0)
             except CommandError, e:
                 print "Error preparing OSC %s\n" % osc.uuid
                 raise e
-        self.info(self.mds_uuid, self.stripe_cnt, self.stripe_sz,
-                  self.stripe_off, self.pattern, self.devlist, self.mds_name)
-        lctl.lov_setup(self.name, self.uuid,
-                       self.desc_uuid, self.mds_name, self.stripe_cnt,
-                       self.stripe_sz, self.stripe_off, self.pattern,
-                       string.join(self.devlist))
+            lctl.lov_add_obd(self.name, self.uuid, target_uuid, index, gen)
 
     def cleanup(self):
+        for (osc, index, gen, active) in self.osclist:
+            target_uuid = osc.target_uuid
+            osc.cleanup()
         if is_prepared(self.name):
             Module.cleanup(self)
-        for osc in self.osclist:
-            osc.cleanup()
+        if self.config_only:
+            panic("Can't clean up config_only LOV ", self.name)
 
     def load_module(self):
-        for osc in self.osclist:
+        if self.config_only:
+            panic("Can't load modules for config_only LOV ", self.name)
+        for (osc, index, gen, active) in self.osclist:
             osc.load_module()
             break
         Module.load_module(self)
 
     def cleanup_module(self):
+        if self.config_only:
+            panic("Can't cleanup modules for config_only LOV ", self.name)
         Module.cleanup_module(self)
-        for osc in self.osclist:
-            osc.cleanup_module()
+        for (osc, index, gen, active) in self.osclist:
+            if active:
+                osc.cleanup_module()
             break
 
+    def correct_level(self, level, op=None):
+        return level
+
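+# LMV aggregates several MDS targets behind a single metadata client, much as
+# LOV does for OSTs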
+class LMV(Module):
+    def __init__(self, db, uuid, fs_name, name_override = None):
+        Module.__init__(self, 'LMV', db)
+        if name_override != None:
+            self.name = "lmv_%s" % name_override
+        self.add_lustre_module('lmv', 'lmv')
+        self.devlist = self.db.get_refs('mds')
+        self.mdclist = []
+        self.desc_uuid = self.uuid
+        self.uuid = uuid
+        self.fs_name = fs_name
+        for mds_uuid in self.devlist:
+            mds = self.db.lookup(mds_uuid)
+            if not mds:
+                panic("MDS not found!")
+            mdc = MDC(mds, self.uuid, fs_name)
+            if mdc:
+                self.mdclist.append(mdc)
+            else:
+                panic('mdc not found:', mds_uuid)
+
+    def prepare(self):
+        if is_prepared(self.name):
+            return
+        for mdc in self.mdclist:
+            try:
+                # Only ignore connect failures with --force, which
+                # isn't implemented here yet.
+                mdc.prepare(ignore_connect_failure=0)
+            except CommandError, e:
+                print "Error preparing LMV %s\n" % mdc.uuid
+                raise e
+        lctl.lmv_setup(self.name, self.uuid, self.desc_uuid,
+                       string.join(self.devlist))
+
+    def cleanup(self):
+        for mdc in self.mdclist:
+            mdc.cleanup()
+        if is_prepared(self.name):
+            Module.cleanup(self)
+
+    def load_module(self):
+        for mdc in self.mdclist:
+            mdc.load_module()
+            break
+        Module.load_module(self)
+
+    def cleanup_module(self):
+        Module.cleanup_module(self)
+        for mdc in self.mdclist:
+            mdc.cleanup_module()
+            break
+
+    def correct_level(self, level, op=None):
+        return level
+
 class MDSDEV(Module):
     def __init__(self,db):
         Module.__init__(self, 'MDSDEV', db)
         self.devpath = self.db.get_val('devpath','')
+        self.backdevpath = self.db.get_val('backdevpath','')
         self.size = self.db.get_val_int('devsize', 0)
         self.journal_size = self.db.get_val_int('journalsize', 0)
         self.fstype = self.db.get_val('fstype', '')
+        self.backfstype = self.db.get_val('backfstype', '')
         self.nspath = self.db.get_val('nspath', '')
-       self.mkfsoptions = self.db.get_val('mkfsoptions', '')
-        # overwrite the orignal MDSDEV name and uuid with the MDS name and uuid
+        self.mkfsoptions = self.db.get_val('mkfsoptions', '')
+        self.mountfsoptions = self.db.get_val('mountfsoptions', '')
+        self.cachetype = self.db.get_val('cachetype', '')
+        # overwrite the original MDSDEV name and uuid with the MDS name and uuid
         target_uuid = self.db.get_first_ref('target')
         mds = self.db.lookup(target_uuid)
         self.name = mds.getName()
         self.filesystem_uuids = mds.get_refs('filesystem')
+        self.lmv_uuid = ''
+        self.lmv = ''
+        self.master_mds = ""
+        if not self.filesystem_uuids:
+            self.lmv_uuid = self.db.get_first_ref('lmv')
+            if not self.lmv_uuid:
+                panic("ALERT: can't find lmv uuid")
+            if self.lmv_uuid:
+                self.lmv = self.db.lookup(self.lmv_uuid)
+                if self.lmv:
+                    self.filesystem_uuids = self.lmv.get_refs('filesystem')
+                    self.master_mds = self.lmv_uuid
         # FIXME: if fstype not set, then determine based on kernel version
         self.format = self.db.get_val('autoformat', "no")
         if mds.get_val('failover', 0):
@@ -1336,26 +1526,94 @@ class MDSDEV(Module):
             self.active = 1
         else:
             self.active = 0
-        if self.active and config.group and config.group != ost.get_val('group'):
+        if self.active and config.group and config.group != mds.get_val('group'):
             self.active = 0
 
+        self.inode_size = self.db.get_val_int('inodesize', 0)
+        if self.inode_size == 0:
+            # find the LOV for this MDS
+            lovconfig_uuid = mds.get_first_ref('lovconfig')
+            if not lovconfig_uuid:
+                if not self.lmv_uuid:
+                    panic("No lovconfig found and no LMV configured for MDS ", self.name)
+
+                if not self.lmv:
+                    panic("No LMV initialized and no lovconfig_uuid found")
+
+                lovconfig_uuid = self.lmv.get_first_ref('lovconfig')
+                lovconfig = self.lmv.lookup(lovconfig_uuid)
+                lov_uuid = lovconfig.get_first_ref('lov')
+                if not lov_uuid:
+                    panic("No LOV found for lovconfig ", lovconfig.name)
+            else:
+                lovconfig = mds.lookup(lovconfig_uuid)
+                lov_uuid = lovconfig.get_first_ref('lov')
+                if not lov_uuid:
+                    panic("No LOV found for lovconfig ", lovconfig.name)
+
+                if self.lmv:
+                    lovconfig_uuid = self.lmv.get_first_ref('lovconfig')
+                    lovconfig = self.lmv.lookup(lovconfig_uuid)
+                    lov_uuid = lovconfig.get_first_ref('lov')
+
+            lov = LOV(self.db.lookup(lov_uuid), lov_uuid, 'FS_name', config_only = 1)
+
+            # default stripe count controls default inode_size
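+            # a larger default stripe count means a larger striping EA stored
+            # in each MDS inode, hence the bigger default inode size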
+            stripe_count = lov.stripe_cnt
+            if stripe_count > 77:
+                self.inode_size = 4096
+            elif stripe_count > 35:
+                self.inode_size = 2048
+            elif stripe_count > 13:
+                self.inode_size = 1024
+            elif stripe_count > 3:
+                self.inode_size = 512
+            else:
+                self.inode_size = 256
+
         self.target_dev_uuid = self.uuid
         self.uuid = target_uuid
+        # setup LMV
+        if self.master_mds:
+            client_uuid = generate_client_uuid(self.name)
+            client_uuid = self.name + "_lmv_" + "UUID"
+            self.master = LMV(self.db.lookup(self.lmv_uuid), client_uuid, self.name, self.name)
+            self.master_mds = self.master.name
+
         # modules
         self.add_lustre_module('mdc', 'mdc')
         self.add_lustre_module('osc', 'osc')
         self.add_lustre_module('lov', 'lov')
+        self.add_lustre_module('lmv', 'lmv')
+        self.add_lustre_module('ost', 'ost')
         self.add_lustre_module('mds', 'mds')
+
+        if self.fstype == 'smfs':
+            self.add_lustre_module('smfs', 'smfs')
+        
+        if self.fstype == 'ldiskfs':
+            self.add_lustre_module('ldiskfs', 'ldiskfs')
+
         if self.fstype:
             self.add_lustre_module('lvfs', 'fsfilt_%s' % (self.fstype))
             
-
+        # if fstype is smfs, we also need fsfilt support for the backing
+        # store filesystem.
+        if self.fstype == 'smfs':
+            self.add_lustre_module('lvfs', 'fsfilt_%s' % (self.backfstype))
+
+        for options in string.split(self.mountfsoptions, ','):
+            if options == 'snap':
+                if not self.fstype == 'smfs':
+                    panic("mount options include snap, but fstype is not smfs\n")
+                self.add_lustre_module('lvfs', 'fsfilt_snap_%s' % (self.fstype))
+                self.add_lustre_module('lvfs', 'fsfilt_snap_%s' % (self.backfstype))
     def load_module(self):
         if self.active:
             Module.load_module(self)
-            
+
     def prepare(self):
-        if is_prepared(self.name):
+        if not config.record and is_prepared(self.name):
             return
         if not self.active:
             debug(self.uuid, "not active")
@@ -1365,14 +1623,56 @@ class MDSDEV(Module):
             self.write_conf()
         self.info(self.devpath, self.fstype, self.size, self.format)
         run_acceptors()
+        # prepare LMV
+        if self.master_mds:
+            self.master.prepare()
         # never reformat here
         blkdev = block_dev(self.devpath, self.size, self.fstype, 0,
-                           self.format, self.journal_size, self.mkfsoptions)
+                           self.format, self.journal_size, self.inode_size,
+                           self.mkfsoptions, self.backfstype, self.backdevpath)
+        
         if not is_prepared('MDT'):
             lctl.newdev("mdt", 'MDT', 'MDT_UUID', setup ="")
         try: 
-            lctl.newdev("mds", self.name, self.uuid,
-                        setup ="%s %s %s" %(blkdev, self.fstype, self.name))
+            mountfsoptions = def_mount_options(self.fstype, 'mds')
+            
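+            # merge mount options: per-fstype defaults first, then the global
+            # --mountfsoptions, then per-device mountfsoptions from the config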
+            if config.mountfsoptions:
+                if mountfsoptions:
+                    mountfsoptions = mountfsoptions + ',' + config.mountfsoptions
+                else:
+                    mountfsoptions = config.mountfsoptions
+                if self.mountfsoptions:
+                    mountfsoptions = mountfsoptions + ',' + self.mountfsoptions
+            else:
+                if self.mountfsoptions:
+                    if mountfsoptions:
+                        mountfsoptions = mountfsoptions + ',' + self.mountfsoptions
+                    else:
+                        mountfsoptions = self.mountfsoptions
+            
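+            # for smfs the device name handed to the MDS is the fstype itself;
+            # the real backing store is given via the type= and dev= options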
+            if self.fstype == 'smfs':
+                realdev = self.fstype
+                
+                if mountfsoptions:
+                    mountfsoptions = "%s,type=%s,dev=%s" % (mountfsoptions, 
+                                                            self.backfstype, 
+                                                            blkdev)
+                else:
+                    mountfsoptions = "type=%s,dev=%s" % (self.backfstype, 
+                                                         blkdev)
+            else:
+                realdev = blkdev
+                
+            print 'MDS mount options: ' + mountfsoptions
+            
+            if not self.master_mds:
+                self.master_mds = 'dumb'
+            if not self.cachetype:
+                self.cachetype = 'dumb'
+            lctl.newdev("mds", self.name, self.uuid,
+                        setup ="%s %s %s %s %s %s" %(realdev, self.fstype,
+                                                     self.name, mountfsoptions,
+                                                     self.master_mds, self.cachetype))
         except CommandError, e:
             if e.rc == 2:
                 panic("MDS is missing the config log. Need to run " +
@@ -1381,38 +1681,103 @@ class MDSDEV(Module):
                 raise e
 
     def write_conf(self):
-        if is_prepared(self.name):
-            return
-        self.info(self.devpath, self.fstype, self.format)
-        blkdev = block_dev(self.devpath, self.size, self.fstype,
-                           config.reformat, self.format, self.journal_size,
-                           self.mkfsoptions)
-        lctl.newdev("mds", self.name, self.uuid,
-                    setup ="%s %s" %(blkdev, self.fstype))
+        do_cleanup = 0
+        if not is_prepared(self.name):
+            self.info(self.devpath, self.fstype, self.format)
+
+            blkdev = block_dev(self.devpath, self.size, self.fstype,
+                               config.reformat, self.format, self.journal_size,
+                               self.inode_size, self.mkfsoptions,
+                               self.backfstype, self.backdevpath)
+
+            # Even for writing logs we mount mds with supplied mount options
+            # because it will not mount smfs (if used) otherwise.
+
+            mountfsoptions = def_mount_options(self.fstype, 'mds')
+
+            if config.mountfsoptions:
+                if mountfsoptions:
+                    mountfsoptions = mountfsoptions + ',' + config.mountfsoptions
+                else:
+                    mountfsoptions = config.mountfsoptions
+                if self.mountfsoptions:
+                    mountfsoptions = mountfsoptions + ',' + self.mountfsoptions
+            else:
+                if self.mountfsoptions:
+                    if mountfsoptions:
+                        mountfsoptions = mountfsoptions + ',' + self.mountfsoptions
+                    else:
+                        mountfsoptions = self.mountfsoptions
+
+            if self.fstype == 'smfs':
+                realdev = self.fstype
                 
+                if mountfsoptions:
+                    mountfsoptions = "%s,type=%s,dev=%s" % (mountfsoptions, 
+                                                            self.backfstype, 
+                                                            blkdev)
+                else:
+                    mountfsoptions = "type=%s,dev=%s" % (self.backfstype, 
+                                                         blkdev)
+            else:
+                realdev = blkdev
+
+            print 'MDS mount options: ' + mountfsoptions
+
+            # Mount options are passed as the 4th parameter to the config
+            # tool, so we have to pass something as the 3rd parameter. We do
+            # not want that 3rd parameter to be treated as a profile name when
+            # the log is read on MDS setup, so we pass the predefined token
+            # 'dumb', which the MDS code recognizes and skips. A cleaner
+            # approach would be to pass an empty string, detect it in the
+            # config tool and pass null as the 4th parameter.
+            lctl.newdev("mds", self.name, self.uuid,
+                        setup ="%s %s %s %s" %(realdev, self.fstype, 
+                                               'dumb', mountfsoptions))
+            do_cleanup = 1
+
         # record logs for the MDS lov
         for uuid in self.filesystem_uuids:
             log("recording clients for filesystem:", uuid)
             fs = self.db.lookup(uuid)
-            obd_uuid = fs.get_first_ref('obd')
+
+            # this is ugly and should be reorganized later.
+            target_uuid = self.db.get_first_ref('target')
+            mds = self.db.lookup(target_uuid)
+            
+            lovconfig_uuid = mds.get_first_ref('lovconfig')
+            if lovconfig_uuid:
+                lovconfig = mds.lookup(lovconfig_uuid)
+                obd_uuid = lovconfig.get_first_ref('lov')
+            else:
+                obd_uuid = fs.get_first_ref('obd')
+                
             client_uuid = generate_client_uuid(self.name)
             client = VOSC(self.db.lookup(obd_uuid), client_uuid, self.name,
                           self.name)
             config.record = 1
+            lctl.clear_log(self.name, self.name)
             lctl.record(self.name, self.name)
             client.prepare()
             lctl.mount_option(self.name, client.get_name(), "")
             lctl.end_record()
+            process_updates(self.db, self.name, self.name, client)
 
             config.cleanup = 1
+            lctl.clear_log(self.name, self.name + '-clean')
             lctl.record(self.name, self.name + '-clean')
             client.cleanup()
             lctl.del_mount_option(self.name)
             lctl.end_record()
+            process_updates(self.db, self.name, self.name + '-clean', client)
             config.cleanup = 0
             config.record = 0
 
         # record logs for each client
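+        # each per-client log is generated by re-running lconf itself with
+        # --record for that client node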
+        if config.noexec:
+            noexec_opt = '-n'
+        else:
+            noexec_opt = ''
         if config.ldapurl:
             config_options = "--ldapurl " + config.ldapurl + " --config " + config.config
         else:
@@ -1429,9 +1794,7 @@ class MDSDEV(Module):
                         debug("recording", client_name)
                         old_noexec = config.noexec
                         config.noexec = 0
-                        noexec_opt = ('', '-n')
-                        ret, out = run (sys.argv[0],
-                                        noexec_opt[old_noexec == 1],
+                        ret, out = run (sys.argv[0], noexec_opt,
                                         " -v --record --nomod",
                                         "--record_log", client_name,
                                         "--record_device", self.name,
@@ -1439,8 +1802,7 @@ class MDSDEV(Module):
                                         config_options)
                         if config.verbose:
                             for s in out: log("record> ", string.strip(s))
-                        ret, out = run (sys.argv[0],
-                                        noexec_opt[old_noexec == 1],
+                        ret, out = run (sys.argv[0], noexec_opt,
                                         "--cleanup -v --record --nomod",
                                         "--record_log", client_name + "-clean",
                                         "--record_device", self.name,
@@ -1449,15 +1811,20 @@ class MDSDEV(Module):
                         if config.verbose:
                             for s in out: log("record> ", string.strip(s))
                         config.noexec = old_noexec
-        try:
-            lctl.cleanup(self.name, self.uuid, 0, 0)
-        except CommandError, e:
-            log(self.module_name, "cleanup failed: ", self.name)
-            e.dump()
-            cleanup_error(e.rc)
-            Module.cleanup(self)
-        clean_loop(self.devpath)
+        if do_cleanup:
+            try:
+                lctl.cleanup(self.name, self.uuid, 0, 0)
+            except CommandError, e:
+                log(self.module_name, "cleanup failed: ", self.name)
+                e.dump()
+                cleanup_error(e.rc)
+                Module.cleanup(self)
+        
+            if self.fstype == 'smfs':
+                clean_loop(self.backdevpath)
+            else:
+                clean_loop(self.devpath)
+
     def msd_remaining(self):
         out = lctl.device_list()
         for s in out:
@@ -1484,6 +1851,9 @@ class MDSDEV(Module):
                 e.dump()
                 cleanup_error(e.rc)
                 Module.cleanup(self)
+            # cleanup LMV
+            if self.master_mds:
+                self.master.cleanup()
         if not self.msd_remaining() and is_prepared('MDT'):
             try:
                 lctl.cleanup("MDT", "MDT_UUID", config.force,
@@ -1492,17 +1862,30 @@ class MDSDEV(Module):
                 print "cleanup failed: ", self.name
                 e.dump()
                 cleanup_error(e.rc)
-        clean_loop(self.devpath)
+        
+        if self.fstype == 'smfs':
+            clean_loop(self.backdevpath)
+        else:
+            clean_loop(self.devpath)
+
+    def correct_level(self, level, op=None):
+        #if self.master_mds:
+        #    level = level + 2
+        return level
 
 class OSD(Module):
     def __init__(self, db):
         Module.__init__(self, 'OSD', db)
         self.osdtype = self.db.get_val('osdtype')
         self.devpath = self.db.get_val('devpath', '')
+        self.backdevpath = self.db.get_val('backdevpath', '')
         self.size = self.db.get_val_int('devsize', 0)
         self.journal_size = self.db.get_val_int('journalsize', 0)
-        self.mkfsoptions = self.db.get_val_int('mkfsoptions', '')
+        self.inode_size = self.db.get_val_int('inodesize', 0)
+        self.mkfsoptions = self.db.get_val('mkfsoptions', '')
+        self.mountfsoptions = self.db.get_val('mountfsoptions', '')
         self.fstype = self.db.get_val('fstype', '')
+        self.backfstype = self.db.get_val('backfstype', '')
         self.nspath = self.db.get_val('nspath', '')
         target_uuid = self.db.get_first_ref('target')
         ost = self.db.lookup(target_uuid)
@@ -1522,14 +1905,28 @@ class OSD(Module):
             self.active = 0
         if self.active and config.group and config.group != ost.get_val('group'):
             self.active = 0
-            
+
         self.target_dev_uuid = self.uuid
         self.uuid = target_uuid
         # modules
         self.add_lustre_module('ost', 'ost')
-       # FIXME: should we default to ext3 here?
+        if self.fstype == 'smfs':
+            self.add_lustre_module('smfs', 'smfs')
+        # FIXME: should we default to ext3 here?
+        if self.fstype == 'ldiskfs':
+            self.add_lustre_module('ldiskfs', 'ldiskfs')
         if self.fstype:
             self.add_lustre_module('lvfs' , 'fsfilt_%s' % (self.fstype))
+        if self.fstype == 'smfs':
+            self.add_lustre_module('lvfs' , 'fsfilt_%s' % (self.backfstype))
+
+        for options in string.split(self.mountfsoptions, ','):
+            if options == 'snap':
+                if not self.fstype == 'smfs':
+                    panic("mount options include snap, but fstype is not smfs\n")
+                self.add_lustre_module('lvfs', 'fsfilt_snap_%s' % (self.fstype))
+                self.add_lustre_module('lvfs', 'fsfilt_snap_%s' % (self.backfstype))
+
         self.add_lustre_module(self.osdtype, self.osdtype)
 
     def load_module(self):
@@ -1546,17 +1943,51 @@ class OSD(Module):
             debug(self.uuid, "not active")
             return
         self.info(self.osdtype, self.devpath, self.size, self.fstype,
-                  self.format, self.journal_size)
+                  self.format, self.journal_size, self.inode_size)
         run_acceptors()
         if self.osdtype == 'obdecho':
             blkdev = ''
         else:
             blkdev = block_dev(self.devpath, self.size, self.fstype,
                                config.reformat, self.format, self.journal_size,
-                               self.mkfsoptions)
+                               self.inode_size, self.mkfsoptions, self.backfstype,
+                               self.backdevpath)
+
+        mountfsoptions = def_mount_options(self.fstype, 'ost')
+            
+        if config.mountfsoptions:
+            if mountfsoptions:
+                mountfsoptions = mountfsoptions + ',' + config.mountfsoptions
+            else:
+                mountfsoptions = config.mountfsoptions
+            if self.mountfsoptions:
+                mountfsoptions = mountfsoptions + ',' + self.mountfsoptions
+        else:
+            if self.mountfsoptions:
+                if mountfsoptions:
+                    mountfsoptions = mountfsoptions + ',' + self.mountfsoptions
+                else:
+                    mountfsoptions = self.mountfsoptions
+            
+        if self.fstype == 'smfs':
+            realdev = self.fstype
+                
+            if mountfsoptions:
+                mountfsoptions = "%s,type=%s,dev=%s" % (mountfsoptions, 
+                                                        self.backfstype, 
+                                                        blkdev)
+            else:
+                mountfsoptions = "type=%s,dev=%s" % (self.backfstype, 
+                                                     blkdev)
+        else:
+            realdev = blkdev
+                
+        print 'OSD mount options: ' + mountfsoptions
+        
         lctl.newdev(self.osdtype, self.name, self.uuid,
-                    setup ="%s %s %s" %(blkdev, self.fstype,
-                                           self.failover_ost))
+                    setup ="%s %s %s %s" %(realdev, self.fstype,
+                                           self.failover_ost, 
+                                           mountfsoptions))
         if not is_prepared('OSS'):
             lctl.newdev("ost", 'OSS', 'OSS_UUID', setup ="")
 
@@ -1594,14 +2025,20 @@ class OSD(Module):
                 e.dump()
                 cleanup_error(e.rc)
         if not self.osdtype == 'obdecho':
-            clean_loop(self.devpath)
+            if self.fstype == 'smfs':
+                clean_loop(self.backdevpath)
+            else:
+                clean_loop(self.devpath)
+
+    def correct_level(self, level, op=None):
+        return level
 
 def mgmt_uuid_for_fs(mtpt_name):
     if not mtpt_name:
         return ''
-    mtpt_db = toplevel.lookup_name(mtpt_name)
+    mtpt_db = toplustreDB.lookup_name(mtpt_name)
     fs_uuid = mtpt_db.get_first_ref('filesystem')
-    fs = toplevel.lookup(fs_uuid)
+    fs = toplustreDB.lookup(fs_uuid)
     if not fs:
         return ''
     return fs.get_first_ref('mgmt')
@@ -1613,11 +2050,12 @@ class Client(Module):
         self.target_name = tgtdb.getName()
         self.target_uuid = tgtdb.getUUID()
         self.db = tgtdb
+        self.active = 1
 
         self.tgt_dev_uuid = get_active_target(tgtdb)
         if not self.tgt_dev_uuid:
-            panic("No target device found for target:", self.target_name)
-            
+            panic("No target device found for target(1):", self.target_name)
+
         self.kmod = kmod(config.lustre, config.portals)
         self._server = None
         self._connected = 0
@@ -1646,13 +2084,14 @@ class Client(Module):
         self._server_nets = get_ost_net(self.db, srv_uuid)
         if len(self._server_nets) == 0:
             panic ("Unable to find a server for:", srv_uuid)
-
+    def get_name(self):
+        return self.name
     def get_servers(self):
         return self._server_nets
 
     def prepare(self, ignore_connect_failure = 0):
         self.info(self.target_uuid)
-        if is_prepared(self.name):
+        if not config.record and is_prepared(self.name):
             self.cleanup()
         try:
             srv = choose_local_server(self.get_servers())
@@ -1668,7 +2107,7 @@ class Client(Module):
             if not ignore_connect_failure:
                 raise e
         if srv:
-            if self.target_uuid in config.inactive and self.permits_inactive():
+            if self.permits_inactive() and (self.target_uuid in config.inactive or self.active == 0):
                 debug("%s inactive" % self.target_uuid)
                 inactive_p = "inactive"
             else:
@@ -1693,20 +2132,30 @@ class Client(Module):
                 e.dump()
                 cleanup_error(e.rc)
 
+    def correct_level(self, level, op=None):
+        return level
+
+    def deactivate(self):
+        try:
+            lctl.deactivate(self.name)
+        except CommandError, e:
+            log(self.module_name, "deactivate failed: ", self.name)
+            e.dump()
+            cleanup_error(e.rc)
 
 class MDC(Client):
     def __init__(self, db, uuid, fs_name):
          Client.__init__(self, db, uuid, 'mdc', fs_name)
 
     def permits_inactive(self):
-       return 0
+        return 0
 
 class OSC(Client):
     def __init__(self, db, uuid, fs_name):
          Client.__init__(self, db, uuid, 'osc', fs_name)
 
     def permits_inactive(self):
-       return 1
+        return 1
 
 def mgmtcli_name_for_uuid(uuid):
     return 'MGMTCLI_%s' % uuid
@@ -1716,37 +2165,191 @@ class ManagementClient(Client):
         Client.__init__(self, db, uuid, 'mgmt_cli', '',
                         self_name = mgmtcli_name_for_uuid(db.getUUID()),
                         module_dir = 'mgmt')
-            
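+
+# A one-stripe LOV wrapped around a single OSC.  CMOBD uses this when its
+# master device is an OST, so the master side can be driven through the
+# same prepare/cleanup/load_module interface as a regular LOV.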
+class VLOV(Module):
+    def __init__(self, db, uuid, fs_name, name_override = None, config_only = None):
+        Module.__init__(self, 'VLOV', db)
+        if name_override != None:
+            self.name = "lov_%s" % name_override
+        self.add_lustre_module('lov', 'lov')
+        self.stripe_sz = 65536
+        self.stripe_off = 0
+        self.pattern = 0
+        self.stripe_cnt = 1
+        self.desc_uuid = self.uuid
+        self.uuid = generate_client_uuid(self.name)
+        self.fs_name = fs_name
+        self.osc = get_osc(db, self.uuid, fs_name)
+        if not self.osc:
+            panic('osc not found:', self.uuid)
+        if config_only:
+            self.config_only = 1
+            return
+        self.config_only = None
+    def get_uuid(self):
+        return self.uuid
+    def get_name(self):
+        return self.name
+    def prepare(self):
+        if not config.record and is_prepared(self.name):
+            return
+        lctl.lov_setup(self.name, self.uuid, self.desc_uuid, self.stripe_cnt,
+                       self.stripe_sz, self.stripe_off, self.pattern)
+        target_uuid = self.osc.target_uuid
+        try:
+            self.osc.active = 1
+            self.osc.prepare(ignore_connect_failure=0)
+        except CommandError, e:
+            print "Error preparing OSC %s\n" % self.osc.uuid
+            raise e
+        lctl.lov_add_obd(self.name, self.uuid, target_uuid, 0, 1)
+
+    def cleanup(self):
+        target_uuid = self.osc.target_uuid
+        self.osc.cleanup()
+        if is_prepared(self.name):
+            Module.cleanup(self)
+        if self.config_only:
+            panic("Can't clean up config_only LOV ", self.name)
+
+    def load_module(self):
+        if self.config_only:
+            panic("Can't load modules for config_only LOV ", self.name)
+        self.osc.load_module()
+        Module.load_module(self)
+
+    def cleanup_module(self):
+        if self.config_only:
+            panic("Can't cleanup modules for config_only LOV ", self.name)
+        Module.cleanup_module(self)
+        self.osc.cleanup_module()
+
+    def correct_level(self, level, op=None):
+        return level
+
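+# CMOBD pairs a "master" device with a "cache" device.  An OST master is
+# reached through a VLOV; anything else gets an MDC client via get_mdc().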
+class CMOBD(Module):
+    def __init__(self, db):
+        Module.__init__(self, 'CMOBD', db)
+        self.name = self.db.getName()
+        self.uuid = generate_client_uuid(self.name)
+        self.master_uuid = self.db.get_first_ref('masterobd')
+        self.cache_uuid = self.db.get_first_ref('cacheobd')
+        self.add_lustre_module('cmobd', 'cmobd')
+        master_obd = self.db.lookup(self.master_uuid)
+        if not master_obd:
+            panic('master obd not found:', self.master_uuid)
+        cache_obd = self.db.lookup(self.cache_uuid)
+        if not cache_obd:
+            panic('cache obd not found:', self.cache_uuid)
+
+        if master_obd.get_class() == 'ost':
+            self.client_uuid = generate_client_uuid(self.name)
+            self.master = VLOV(master_obd, self.client_uuid, self.name,
+                               "%s_master" % (self.name))
+            self.master_uuid = self.master.get_uuid()
+        else:
+            self.master = get_mdc(db, self.name, self.master_uuid)
+    # need to check /proc/mounts and /etc/mtab before
+    # formatting anything.
+    # FIXME: check if device is already formatted.
+    def prepare(self):
+        self.master.prepare()
+        if not config.record and is_prepared(self.name):
+            return
+        self.info(self.master_uuid, self.cache_uuid)
+        lctl.newdev("cmobd", self.name, self.uuid,
+                    setup ="%s %s" %(self.master_uuid,
+                                     self.cache_uuid))
+
+    def cleanup(self):
+        if is_prepared(self.name):
+            Module.cleanup(self)
+        self.master.cleanup()
+
+    def load_module(self):
+        self.master.load_module()
+        Module.load_module(self)
+
+    def cleanup_module(self):
+        Module.cleanup_module(self)
+        self.master.cleanup_module()
+
+    def correct_level(self, level, op=None):
+        return level
+
 class COBD(Module):
-    def __init__(self, db):
+    def __init__(self, db, uuid, name, type, name_override = None):
         Module.__init__(self, 'COBD', db)
+        self.name = self.db.getName()
+        self.uuid = generate_client_uuid(self.name)
         self.real_uuid = self.db.get_first_ref('realobd')
         self.cache_uuid = self.db.get_first_ref('cacheobd')
-        self.add_lustre_module('cobd' , 'cobd')
-
+        self.add_lustre_module('cobd', 'cobd')
+        real_obd = self.db.lookup(self.real_uuid)
+        if not real_obd:
+            panic('real obd not found:', self.real_uuid)
+        cache_obd = self.db.lookup(self.cache_uuid)
+        if not cache_obd:
+            panic('cache obd not found:', self.cache_uuid)
+        if type == 'obd':
+            self.real = LOV(real_obd, self.real_uuid, name,
+                            "%s_real" % (self.name))
+            self.cache = LOV(cache_obd, self.cache_uuid, name,
+                             "%s_cache" % (self.name))
+        else:
+            self.real = get_mdc(db, name, self.real_uuid)
+            self.cache = get_mdc(db, name, self.cache_uuid)
     # need to check /proc/mounts and /etc/mtab before
     # formatting anything.
     # FIXME: check if device is already formatted.
+    def get_uuid(self):
+        return self.uuid
+    def get_name(self):
+        return self.name
+    def get_real_name(self):
+        return self.real.name
+    def get_cache_name(self):
+        return self.cache.name
     def prepare(self):
-        if is_prepared(self.name):
+        self.real.prepare()
+        self.cache.prepare()
+        if not config.record and is_prepared(self.name):
             return
         self.info(self.real_uuid, self.cache_uuid)
         lctl.newdev("cobd", self.name, self.uuid,
-                    setup ="%s %s" %(self.real_uuid, self.cache_uuid))
+                    setup ="%s %s" %(self.real.name,
+                                     self.cache.name))
 
+    def cleanup(self):
+        if is_prepared(self.name):
+            Module.cleanup(self)
+        self.real.cleanup()
+        self.cache.cleanup()
+
+    def load_module(self):
+        self.real.load_module()
+        Module.load_module(self)
+
+    def cleanup_module(self):
+        Module.cleanup_module(self)
+        self.real.cleanup_module()
 
 # virtual interface for  OSC and LOV
 class VOSC(Module):
-    def __init__(self, db, uuid, fs_name, name_override = None):
+    def __init__(self, db, client_uuid, name, name_override = None):
         Module.__init__(self, 'VOSC', db)
         if db.get_class() == 'lov':
-            self.osc = LOV(db, uuid, fs_name, name_override)
+            self.osc = LOV(db, client_uuid, name, name_override)
+            self.type = 'lov'
+        elif db.get_class() == 'cobd':
+            self.osc = COBD(db, client_uuid, name, 'obd')
+            self.type = 'cobd'
         else:
-            self.osc = get_osc(db, uuid, fs_name)
+            self.osc = OSC(db, client_uuid, name)
+            self.type = 'osc'
     def get_uuid(self):
-        return self.osc.uuid
+        return self.osc.get_uuid()
     def get_name(self):
-        return self.osc.name
+        return self.osc.get_name()
     def prepare(self):
         self.osc.prepare()
     def cleanup(self):
@@ -1755,7 +2358,33 @@ class VOSC(Module):
         self.osc.load_module()
     def cleanup_module(self):
         self.osc.cleanup_module()
-
+    def correct_level(self, level, op=None):
+        return self.osc.correct_level(level, op)
+
+# virtual interface for MDC and LMV
+class VMDC(Module):
+    def __init__(self, db, client_uuid, name, name_override = None):
+        Module.__init__(self, 'VMDC', db)
+        if db.get_class() == 'lmv':
+            self.mdc = LMV(db, client_uuid, name)
+        elif db.get_class() == 'cobd':
+            self.mdc = COBD(db, client_uuid, name, 'mds')
+        else:
+            self.mdc = MDC(db, client_uuid, name)
+    def get_uuid(self):
+        return self.mdc.uuid
+    def get_name(self):
+        return self.mdc.name
+    def prepare(self):
+        self.mdc.prepare()
+    def cleanup(self):
+        self.mdc.cleanup()
+    def load_module(self):
+        self.mdc.load_module()
+    def cleanup_module(self):
+        self.mdc.cleanup_module()
+    def correct_level(self, level, op=None):
+        return self.mdc.correct_level(level, op)
 
 class ECHO_CLIENT(Module):
     def __init__(self,db):
@@ -1767,7 +2396,7 @@ class ECHO_CLIENT(Module):
         self.osc = VOSC(obd, self.uuid, self.name)
 
     def prepare(self):
-        if is_prepared(self.name):
+        if not config.record and is_prepared(self.name):
             return
         run_acceptors()
         self.osc.prepare() # XXX This is so cheating. -p
@@ -1789,6 +2418,8 @@ class ECHO_CLIENT(Module):
         Module.cleanup_module(self)
         self.osc.cleanup_module()
 
+    def correct_level(self, level, op=None):
+        return level
 
 def generate_client_uuid(name):
         client_uuid = '%05x_%.19s_%05x%05x' % (int(random.random() * 1048576),
@@ -1797,23 +2428,35 @@ def generate_client_uuid(name):
                                                int(random.random() * 1048576))
         return client_uuid[:36]
 
-
 class Mountpoint(Module):
     def __init__(self,db):
         Module.__init__(self, 'MTPT', db)
         self.path = self.db.get_val('path')
+        self.clientoptions = self.db.get_val('clientoptions', '')
         self.fs_uuid = self.db.get_first_ref('filesystem')
         fs = self.db.lookup(self.fs_uuid)
-        self.mds_uuid = fs.get_first_ref('mds')
+        self.mds_uuid = fs.get_first_ref('lmv')
+        if not self.mds_uuid:
+            self.mds_uuid = fs.get_first_ref('mds')
         self.obd_uuid = fs.get_first_ref('obd')
         self.mgmt_uuid = fs.get_first_ref('mgmt')
-        obd = self.db.lookup(self.obd_uuid)
         client_uuid = generate_client_uuid(self.name)
-        self.vosc = VOSC(obd, client_uuid, self.name)
-        self.mdc = get_mdc(db, client_uuid, self.name, self.mds_uuid)
 
+        ost = self.db.lookup(self.obd_uuid)
+        if not ost:
+            panic("no ost: ", self.obd_uuid)
+
+        mds = self.db.lookup(self.mds_uuid)
+        if not mds:
+            panic("no mds: ", self.mds_uuid)
+
         self.add_lustre_module('mdc', 'mdc')
+        self.add_lustre_module('lmv', 'lmv')
         self.add_lustre_module('llite', 'llite')
+
+        self.vosc = VOSC(ost, client_uuid, self.name)
+        self.vmdc = VMDC(mds, client_uuid, self.name)
+
         if self.mgmt_uuid:
             self.mgmtcli = ManagementClient(db.lookup(self.mgmt_uuid),
                                             client_uuid)
@@ -1821,26 +2464,41 @@ class Mountpoint(Module):
             self.mgmtcli = None
 
     def prepare(self):
-        if fs_is_mounted(self.path):
+        if not config.record and fs_is_mounted(self.path):
             log(self.path, "already mounted.")
             return
         run_acceptors()
         if self.mgmtcli:
             self.mgmtcli.prepare()
         self.vosc.prepare()
-        self.mdc.prepare()
-        mdc_name = self.mdc.name
+        self.vmdc.prepare()
+        vmdc_name = self.vmdc.get_name()
 
         self.info(self.path, self.mds_uuid, self.obd_uuid)
         if config.record or config.lctl_dump:
-            lctl.mount_option(local_node_name, self.vosc.get_name(), mdc_name)
+            lctl.mount_option(local_node_name, self.vosc.get_name(), vmdc_name)
             return
-        cmd = "mount -t lustre_lite -o osc=%s,mdc=%s %s %s" % \
-              (self.vosc.get_name(), mdc_name, config.config, self.path)
+
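+        # merge the per-mountpoint clientoptions from the XML with any
+        # --clientoptions given on the lconf command line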
+        if config.clientoptions:
+            if self.clientoptions:
+                self.clientoptions = self.clientoptions + ',' + \
+                                    config.clientoptions
+            else:
+                self.clientoptions = config.clientoptions
+        if self.clientoptions:
+            self.clientoptions = ',' + self.clientoptions
+            # the Linux kernel consumes "async" itself and never passes it to
+            # ll_fill_super, so rename it to the Lustre-specific "lasync"
+            self.clientoptions = string.replace(self.clientoptions, "async",
+                                                "lasync")
+
+        cmd = "mount -t lustre_lite -o osc=%s,mdc=%s%s %s %s" % \
+              (self.vosc.get_name(), vmdc_name, self.clientoptions, 
+              config.config, self.path)
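+        # e.g. (hypothetical names):
+        #   mount -t lustre_lite -o osc=lov1_mtpt,mdc=MDC_mtpt,lasync \
+        #         lustre-config /mnt/lustre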
         run("mkdir", self.path)
         ret, val = run(cmd)
         if ret:
-            self.mdc.cleanup()            
+            self.vmdc.cleanup()
             self.vosc.cleanup()
             panic("mount failed:", self.path, ":", string.join(val))
 
@@ -1861,7 +2519,7 @@ class Mountpoint(Module):
             if fs_is_mounted(self.path):
                 panic("fs is still mounted:", self.path)
 
-        self.mdc.cleanup()
+        self.vmdc.cleanup()
         self.vosc.cleanup()
         if self.mgmtcli:
             self.mgmtcli.cleanup()
@@ -1878,6 +2536,8 @@ class Mountpoint(Module):
         if self.mgmtcli:
             self.mgmtcli.cleanup_module()
 
+    def correct_level(self, level, op=None):
+        return level
 
 # ============================================================
 # misc query functions
@@ -1891,14 +2551,14 @@ def get_ost_net(self, osd_uuid):
     node = self.lookup(node_uuid)
     if not node:
         panic("unable to find node for osd_uuid:", osd_uuid,
               " node_ref:", node_uuid)
     for net_uuid in node.get_networks():
         db = node.lookup(net_uuid)
         srv_list.append(Network(db))
     return srv_list
 
 
-# the order of iniitailization is based on level. 
+# the order of initialization is based on level.
 def getServiceLevel(self):
     type = self.get_class()
     ret=0;
@@ -1914,13 +2574,17 @@ def getServiceLevel(self):
         ret = 30
     elif type in ('mdsdev',):
         ret = 40
+    elif type in ('lmv',):
+        ret = 45
+    elif type in ('cmobd',):
+        ret = 50
     elif type in ('mountpoint', 'echoclient'):
         ret = 70
     else:
         panic("Unknown type: ", type)
 
     if ret < config.minlevel or ret > config.maxlevel:
-        ret = 0 
+        ret = 0
     return ret
 
 #
@@ -1928,7 +2592,7 @@ def getServiceLevel(self):
 # [(level, db_object),]
 def getServices(self):
     list = []
-    for ref_class, ref_uuid in self.get_all_refs(): 
+    for ref_class, ref_uuid in self.get_all_refs():
             servdb = self.lookup(ref_uuid)
             if  servdb:
                 level = getServiceLevel(servdb)
@@ -1942,7 +2606,7 @@ def getServices(self):
 
 
 ############################################################
-# MDC UUID hack - 
+# MDC UUID hack -
 # FIXME: clean this mess up!
 #
 # OSC is no longer in the xml, so we have to fake it.
@@ -1951,16 +2615,15 @@ def get_osc(ost_db, uuid, fs_name):
     osc = OSC(ost_db, uuid, fs_name)
     return osc
 
-def get_mdc(db, uuid, fs_name, mds_uuid):
+def get_mdc(db, fs_name, mds_uuid):
     mds_db = db.lookup(mds_uuid);
     if not mds_db:
-        panic("no mds:", mds_uuid)
-    mdc = MDC(mds_db, uuid, fs_name)
+        error("no mds:", mds_uuid)
+    mdc = MDC(mds_db, mds_uuid, fs_name)
     return mdc
 
 ############################################################
 # routing ("rooting")
-
 # list of (nettype, cluster_id, nid)
 local_clusters = []
 
@@ -2052,7 +2715,7 @@ def find_route(srv_list):
             if  (r[3] <= to and to <= r[4]) and cluster_id == r[2]:
                 result.append((srv, r))
     return result
-           
+
 def get_active_target(db):
     target_uuid = db.getUUID()
     target_name = db.getName()
@@ -2068,7 +2731,7 @@ def get_server_by_nid_uuid(db,  nid_uuid):
         net = Network(n)
         if net.nid_uuid == nid_uuid:
             return net
-        
+
 
 ############################################################
 # lconf level logic
@@ -2088,7 +2751,9 @@ def newService(db):
     elif type == 'osd':
         n = OSD(db)
     elif type == 'cobd':
-        n = COBD(db)
+        n = COBD(db, "YOU_SHOULD_NEVER_SEE_THIS_UUID")
+    elif type == 'cmobd':
+        n = CMOBD(db)
     elif type == 'mdsdev':
         n = MDSDEV(db)
     elif type == 'mountpoint':
@@ -2097,13 +2762,15 @@ def newService(db):
         n = ECHO_CLIENT(db)
     elif type == 'mgmt':
         n = Management(db)
+    elif type == 'lmv':
+        n = LMV(db)
     else:
         panic ("unknown service type:", type)
     return n
 
 #
 # Prepare the system to run lustre using a particular profile
-# in a the configuration. 
+# in the configuration.
 #  * load & the modules
 #  * setup networking for the current node
 #  * make sure partitions are in place and prepared
@@ -2116,10 +2783,144 @@ def for_each_profile(db, prof_list, operation):
             panic("profile:", profile, "not found.")
         services = getServices(prof_db)
         operation(services)
-        
+
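+# Resolve the OSC referenced by an update record.  With an in-memory lov the
+# uuid/name come straight from it; otherwise the record's lov_uuidref is
+# chased through the <filesystem> and <mountpoint> elements to recover the
+# client name.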
+def magic_get_osc(db, rec, lov):
+    if lov:
+        lov_uuid = lov.get_uuid()
+        lov_name = lov.osc.fs_name
+    else:
+        lov_uuid = rec.getAttribute('lov_uuidref')
+        # FIXME: better way to find the mountpoint?
+        filesystems = db.root_node.getElementsByTagName('filesystem')
+        fsuuid = None
+        for fs in filesystems:
+            ref = fs.getElementsByTagName('obd_ref')
+            if ref[0].getAttribute('uuidref') == lov_uuid:
+                fsuuid = fs.getAttribute('uuid')
+                break
+
+        if not fsuuid:
+            panic("malformed xml: lov uuid '" + lov_uuid + "' referenced in 'add' record is not used by any filesystems.")
+
+        mtpts = db.root_node.getElementsByTagName('mountpoint')
+        lov_name = None
+        for fs in mtpts:
+            ref = fs.getElementsByTagName('filesystem_ref')
+            if ref[0].getAttribute('uuidref') == fsuuid:
+                lov_name = fs.getAttribute('name')
+                break
+
+        if not lov_name:
+            panic("malformed xml: 'add' record references lov uuid '" + lov_uuid + "', which references filesystem uuid '" + fsuuid + "', which does not reference a mountpoint.")
+
+    print "lov_uuid: " + lov_uuid + "; lov_name: " + lov_name
+
+    ost_uuid = rec.getAttribute('ost_uuidref')
+    obd = db.lookup(ost_uuid)
+
+    if not obd:
+        panic("malformed xml: 'add' record references ost uuid '" + ost_uuid + "' which cannot be found.")
+
+    osc = get_osc(obd, lov_uuid, lov_name)
+    if not osc:
+        panic('osc not found:', ost_uuid)
+    return osc
+
+# write logs for update records.  sadly, logs of all types -- and updates in
+# particular -- are something of an afterthought.  lconf needs to be rewritten with
+# these as core concepts.  so this is a pretty big hack.
+def process_update_record(db, update, lov):
+    for rec in update.childNodes:
+        if rec.nodeType != rec.ELEMENT_NODE:
+            continue
+
+        log("found "+rec.nodeName+" record in update version " +
+            str(update.getAttribute('version')))
+
+        lov_uuid = rec.getAttribute('lov_uuidref')
+        ost_uuid = rec.getAttribute('ost_uuidref')
+        index = rec.getAttribute('index')
+        gen = rec.getAttribute('generation')
+
+        if not lov_uuid or not ost_uuid or not index or not gen:
+            panic("malformed xml: 'update' record requires lov_uuid, ost_uuid, index, and generation.")
+
+        if not lov:
+            tmplov = db.lookup(lov_uuid)
+            if not tmplov:
+                panic("malformed xml: update record references lov uuid '" + lov_uuid + "', which cannot be located.")
+            lov_name = tmplov.getName()
+        else:
+            lov_name = lov.osc.name
+
+        # ------------------------------------------------------------- add
+        if rec.nodeName == 'add':
+            if config.cleanup:
+                lctl.lov_del_obd(lov_name, lov_uuid, ost_uuid, index, gen)
+                continue
+
+            osc = magic_get_osc(db, rec, lov)
+
+            try:
+                # Only ignore connect failures with --force, which
+                # isn't implemented here yet.
+                osc.prepare(ignore_connect_failure=0)
+            except CommandError, e:
+                print "Error preparing OSC %s\n" % osc.uuid
+                raise e
+
+            lctl.lov_add_obd(lov_name, lov_uuid, ost_uuid, index, gen)
+
+        # ------------------------------------------------------ deactivate
+        elif rec.nodeName == 'deactivate':
+            if config.cleanup:
+                continue
+
+            osc = magic_get_osc(db, rec, lov)
+
+            try:
+                osc.deactivate()
+            except CommandError, e:
+                print "Error deactivating OSC %s\n" % osc.uuid
+                raise e
+
+        # ---------------------------------------------------------- delete
+        elif rec.nodeName == 'delete':
+            if config.cleanup:
+                continue
+
+            osc = magic_get_osc(db, rec, lov)
+
+            try:
+                config.cleanup = 1
+                osc.cleanup()
+                config.cleanup = 0
+            except CommandError, e:
+                print "Error cleaning up OSC %s\n" % osc.uuid
+                raise e
+
+            lctl.lov_del_obd(lov_name, lov_uuid, ost_uuid, index, gen)
+
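+# Write one config log per <update> element found in the XML: each version
+# gets its own log named "<log_name>-<version>", bracketed by
+# clear_log/record ... end_record.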
+def process_updates(db, log_device, log_name, lov = None):
+    updates = db.root_node.getElementsByTagName('update')
+    for u in updates:
+        if not u.childNodes:
+            log("ignoring empty update record (version " +
+                str(u.getAttribute('version')) + ")")
+            continue
+
+        version = u.getAttribute('version')
+        real_name = "%s-%s" % (log_name, version)
+        lctl.clear_log(log_device, real_name)
+        lctl.record(log_device, real_name)
+
+        process_update_record(db, u, lov)
+
+        lctl.end_record()
+
 def doWriteconf(services):
-    if config.nosetup:
-        return
+    #if config.nosetup:
+    #    return
     for s in services:
         if s[1].get_class() == 'mdsdev':
             n = newService(s[1])
@@ -2128,10 +2929,19 @@ def doWriteconf(services):
 def doSetup(services):
     if config.nosetup:
         return
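+    # pair each service with its level, let the service adjust it via
+    # correct_level(), then prepare in ascending level order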
+    slist = []
     for s in services:
         n = newService(s[1])
-        n.prepare()
-    
+        n.level = s[0]
+        slist.append((n.level, n))
+    nlist = []
+    for n in slist:
+        nl = n[1].correct_level(n[0])
+        nlist.append((nl, n[1]))
+    nlist.sort()
+    for n in nlist:
+        n[1].prepare()
+
 def doModules(services):
     if config.nomod:
         return
@@ -2142,11 +2952,20 @@ def doModules(services):
 def doCleanup(services):
     if config.nosetup:
         return
-    services.reverse()
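+    # same level correction as doSetup, but tear down in descending order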
+    slist = []
     for s in services:
         n = newService(s[1])
-        if n.safe_to_clean():
-            n.cleanup()
+        n.level = s[0]
+        slist.append((n.level, n))
+    nlist = []
+    for n in slist:
+        nl = n[1].correct_level(n[0])
+        nlist.append((nl, n[1]))
+    nlist.sort()
+    nlist.reverse()
+    for n in nlist:
+        if n[1].safe_to_clean():
+            n[1].cleanup()
 
 def doUnloadModules(services):
     if config.nomod:
@@ -2158,7 +2977,7 @@ def doUnloadModules(services):
             n.cleanup_module()
 
 #
-# Load profile for 
+# Load profile for
 def doHost(lustreDB, hosts):
     global is_router, local_node_name
     node_db = None
@@ -2167,15 +2986,16 @@ def doHost(lustreDB, hosts):
         if node_db:
             break
     if not node_db:
-        print 'No host entry found.'
-        return
+        panic('No host entry found.')
 
     local_node_name = node_db.get_val('name', 0)
     is_router = node_db.get_val_int('router', 0)
     lustre_upcall = node_db.get_val('lustreUpcall', '')
     portals_upcall = node_db.get_val('portalsUpcall', '')
     timeout = node_db.get_val_int('timeout', 0)
-    
+    ptldebug = node_db.get_val('ptldebug', '')
+    subsystem = node_db.get_val('subsystem', '')
+
     find_local_clusters(node_db)
     if not is_router:
         find_local_routes(lustreDB)
@@ -2185,6 +3005,7 @@ def doHost(lustreDB, hosts):
     prof_list = node_db.get_refs('profile')
 
     if config.write_conf:
+        lustreDB.close()
         for_each_profile(node_db, prof_list, doModules)
         sys_make_devices()
         for_each_profile(node_db, prof_list, doWriteconf)
@@ -2206,13 +3027,14 @@ def doHost(lustreDB, hosts):
             return
 
         sys_set_timeout(timeout)
-        sys_set_ptldebug()
-        sys_set_subsystem()
+        sys_set_ptldebug(ptldebug)
+        sys_set_subsystem(subsystem)
         sys_set_lustre_upcall(lustre_upcall)
         sys_set_portals_upcall(portals_upcall)
 
         for_each_profile(node_db, prof_list, doCleanup)
         for_each_profile(node_db, prof_list, doUnloadModules)
+        lustreDB.close()
 
     else:
         # ugly hack, only need to run lctl commands for --dump
@@ -2229,8 +3051,8 @@ def doHost(lustreDB, hosts):
         for_each_profile(node_db, prof_list, doModules)
 
         sys_set_debug_path()
-        sys_set_ptldebug()
-        sys_set_subsystem()
+        sys_set_ptldebug(ptldebug)
+        sys_set_subsystem(subsystem)
         script = config.gdb_script
         run(lctl.lctl, ' modules >', script)
         if config.gdb:
@@ -2243,22 +3065,24 @@ def doHost(lustreDB, hosts):
         sys_set_portals_upcall(portals_upcall)
 
         for_each_profile(node_db, prof_list, doSetup)
+        lustreDB.close()
 
-def doRecovery(db, lctl, tgt_uuid, client_uuid, nid_uuid):
-    tgt = db.lookup(tgt_uuid)
+def doRecovery(lustreDB, lctl, tgt_uuid, client_uuid, nid_uuid):
+    tgt = lustreDB.lookup(tgt_uuid)
     if not tgt:
         raise Lustre.LconfError("doRecovery: "+ tgt_uuid +" not found.")
     new_uuid = get_active_target(tgt)
     if not new_uuid:
         raise Lustre.LconfError("doRecovery: no active target found for: " +
                                 tgt_uuid)
-    net = choose_local_server(get_ost_net(db, new_uuid))
+    net = choose_local_server(get_ost_net(lustreDB, new_uuid))
     if not net:
         raise Lustre.LconfError("Unable to find a connection to:" + new_uuid)
 
     log("Reconnecting", tgt_uuid, " to ",  net.nid_uuid);
     try:
-        oldnet = get_server_by_nid_uuid(db, nid_uuid)
+        oldnet = get_server_by_nid_uuid(lustreDB, nid_uuid)
+        lustreDB.close()
         if oldnet:
             lctl.disconnect(oldnet)
     except CommandError, e:
@@ -2278,6 +3102,7 @@ def setupModulePath(cmd, portals_dir = PORTALS_DIR):
     base = os.path.dirname(cmd)
     if development_mode():
         if not config.lustre:
+            debug('using objdir module paths')
             config.lustre = (os.path.join(base, ".."))
         # normalize the portals dir, using command line arg if set
         if config.portals:
@@ -2287,7 +3112,7 @@ def setupModulePath(cmd, portals_dir = PORTALS_DIR):
         debug('config.portals', config.portals)
     elif config.lustre and config.portals:
         # production mode
-        # if --lustre and --portals, normalize portals 
+        # if --lustre and --portals, normalize portals
         # can ignore PORTALS_DIR here, since it is probably useless here
         config.portals = os.path.join(config.lustre, config.portals)
         debug('config.portals B', config.portals)
@@ -2337,22 +3162,31 @@ def sys_tweak_socknal ():
         sysctl("socknal/typed", 0)
 
 def sys_optimize_elan ():
-        run ("echo 0 > /proc/elan/config/eventint_punt_loops")
-
-def sys_set_ptldebug():
-    if config.ptldebug != None:
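+    # poke whichever of the known Elan punt-loops proc files exists here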
+    procfiles = ["/proc/elan/config/eventint_punt_loops",
+                 "/proc/qsnet/elan3/config/eventint_punt_loops",
+                 "/proc/qsnet/elan4/config/elan4_mainint_punt_loops"]
+    for p in procfiles:
+        if os.access(p, os.R_OK):
+            run ("echo 1 > " + p)
+
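+# command-line --ptldebug/--subsystem, when given, override the per-node
+# values that doHost() reads from the config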
+def sys_set_ptldebug(ptldebug):
+    if config.ptldebug:
+        ptldebug = config.ptldebug
+    if ptldebug:
         try:
-            val = eval(config.ptldebug, ptldebug_names)
-            val = "0x%x" % (val,)
+            val = eval(ptldebug, ptldebug_names)
+            val = "0x%x" % (val)
             sysctl('portals/debug', val)
         except NameError, e:
             panic(str(e))
 
-def sys_set_subsystem():
-    if config.subsystem != None:
+def sys_set_subsystem(subsystem):
+    if config.subsystem:
+        subsystem = config.subsystem
+    if subsystem:
         try:
-            val = eval(config.subsystem, subsystem_names)
-            val = "0x%x" % (val,)
+            val = eval(subsystem, subsystem_names)
+            val = "0x%x" % (val)
             sysctl('portals/subsystem_debug', val)
         except NameError, e:
             panic(str(e))
@@ -2369,8 +3203,8 @@ def sys_set_netmem_max(path, max):
         fp = open(path, 'w')
         fp.write('%d\n' %(max))
         fp.close()
-    
-    
+
+
 def sys_make_devices():
     if not os.access('/dev/portals', os.R_OK):
         run('mknod /dev/portals c 10 240')
@@ -2384,7 +3218,7 @@ def add_to_path(new_dir):
     if new_dir in syspath:
         return
     os.environ['PATH'] = os.environ['PATH'] + ':' + new_dir
-    
+
 def default_debug_path():
     path = '/tmp/lustre-log'
     if os.path.isdir('/r'):
@@ -2452,6 +3286,8 @@ lconf_options = [
     ('nosetup', "Skip device setup/cleanup step."),
     ('reformat', "Reformat all devices (without question)"),
     ('mkfsoptions', "Additional options for the mk*fs command line", PARAM),
+    ('mountfsoptions', "Additional options for mount fs command line", PARAM),
+    ('clientoptions', "Additional options for Lustre", PARAM),
     ('dump',  "Dump the kernel debug log to file before portals is unloaded",
                PARAM),
     ('write_conf', "Save all the client config information on mds."),
@@ -2461,7 +3297,7 @@ lconf_options = [
               PARAM),
     ('minlevel', "Minimum level of services to configure/cleanup",
                  INTPARAM, 0),
-    ('maxlevel', """Maximum level of services to configure/cleanup 
+    ('maxlevel', """Maximum level of services to configure/cleanup
                     Levels are approximately like:
                             10 - network
                             20 - device, ldlm
@@ -2492,14 +3328,14 @@ lconf_options = [
     ('inactive', """The name of an inactive service, to be ignored during
                     mounting (currently OST-only). Can be repeated.""",
                 PARAMLIST),
-    ]      
+    ]
 
 def main():
-    global lctl, config, toplevel, CONFIG_FILE
+    global lctl, config, toplustreDB, CONFIG_FILE
 
     # in the upcall this is set to SIG_IGN
     signal.signal(signal.SIGCHLD, signal.SIG_DFL)
-    
+
     cl = Lustre.Options("lconf", "config.xml", lconf_options)
     try:
         config, args = cl.parse(sys.argv[1:])
@@ -2522,20 +3358,34 @@ def main():
     random.seed(seed)
 
     sanitise_path()
-    
+
     init_select(config.select)
 
     if len(args) > 0:
-        if not os.access(args[0], os.R_OK):
+        # allow config to be fetched via HTTP, but only with python2
+        if sys.version[0] != '1' and args[0].startswith('http://'):
+            import urllib2
+            try:
+                config_file = urllib2.urlopen(args[0])
+            except (urllib2.URLError, socket.error), err:
+                if hasattr(err, 'args'):
+                    err = err.args[1]
+                print "Could not access '%s': %s" %(args[0], err)
+                sys.exit(1)
+        elif not os.access(args[0], os.R_OK):
             print 'File not found or readable:', args[0]
             sys.exit(1)
+        else:
+            # regular file
+            config_file = open(args[0], 'r')
         try:
-            dom = xml.dom.minidom.parse(args[0])
+            dom = xml.dom.minidom.parse(config_file)
         except Exception:
             panic("%s does not appear to be a config file." % (args[0]))
             sys.exit(1) # make sure to die here, even in debug mode.
+        config_file.close()
         CONFIG_FILE = args[0]
-        db = Lustre.LustreDB_XML(dom.documentElement, dom.documentElement)
+        lustreDB = Lustre.LustreDB_XML(dom.documentElement, dom.documentElement)
         if not config.config:
             config.config = os.path.basename(args[0])# use full path?
             if config.config[-4:] == '.xml':
@@ -2544,15 +3394,19 @@ def main():
         if not config.config:
             panic("--ldapurl requires --config name")
         dn = "config=%s,fs=lustre" % (config.config)
-        db = Lustre.LustreDB_LDAP('', {}, base=dn, url = config.ldapurl)
+        lustreDB = Lustre.LustreDB_LDAP('', {}, base=dn, url = config.ldapurl)
+    elif config.ptldebug or config.subsystem:
+        sys_set_ptldebug(None)
+        sys_set_subsystem(None)
+        sys.exit(0)
     else:
         print 'Missing config file or ldap URL.'
         print 'see lconf --help for command summary'
         sys.exit(1)
 
-    toplevel = db
+    toplustreDB = lustreDB
 
-    ver = db.get_version()
+    ver = lustreDB.get_version()
     if not ver:
         panic("No version found in config data, please recreate.")
     if ver != Lustre.CONFIG_VERSION:
@@ -2581,12 +3435,17 @@ def main():
     if config.record:
         if not (config.record_device and config.record_log):
             panic("When recording, both --record_log and --record_device must be specified.")
+        lctl.clear_log(config.record_device, config.record_log)
         lctl.record(config.record_device, config.record_log)
 
-    doHost(db, node_list)
+    doHost(lustreDB, node_list)
 
-    if config.record:
-        lctl.end_record()
+    if not config.record:
+        return
+
+    lctl.end_record()
+
+    process_updates(lustreDB, config.record_device, config.record_log)
 
 if __name__ == "__main__":
     try: