Remove the unused clownfish RPM.
Test-Parameters: trivial testlist=hot-pools,sanity-lipe
Signed-off-by: John L. Hammond <jhammond@whamcloud.com>
Change-Id: I0e858e431189fa134dc7979db7bed89e611c9e0a
Reviewed-on: https://review.whamcloud.com/46392
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
*.checked
*~
/build
-/example_configs/clownfish/clownfish_test.conf
/lipe.spec
/lipe-*.tar.bz2
/lipe-*.tar.gz
-/pyclownfish/clownfish_pb2.py
/pylipe/lipe_constant.py
/src/*.o
/src/ext4_inode2path
/src/generate_definition
/src/laudit
/src/laudit-report
-/src/lcreatemany
/src/ldumpstripe
/src/lfill
/src/lipe_expression_test
$(PYTHON_COMMANDS) \
detect-distro.sh \
lipe-revision.sh \
- example_configs/clownfish/seperate_mgs/clownfish.conf \
example_configs/lipe/lipe_install.conf \
example_configs/lipe/lipe_launch.json \
example_configs/loris/loris.conf \
lpcc.conf \
laudit.conf.example \
pybuild/*.py \
- pyclownfish/*.py \
pylipe/.pylintrc \
pylipe/*.py \
pyloris/*.py \
man/* \
.pylintrc
-PYTHON_LIB_FILES = $(wildcard pyclownfish/*.py pylustre/*.py pyloris/*.py)
+PYTHON_LIB_FILES = $(wildcard pylustre/*.py pyloris/*.py)
PYTHON_FILES = $(PYTHON_LIB_FILES) $(PYTHON_COMMANDS)
PYTHON_CHECKS = $(PYTHON_FILES:%=%.python_checked)
CHECKS = $(PYTHON_CHECKS)
rm -f compile depcomp install-sh missing
PYLUSTRE_RPM = build/RPMS/x86_64/lipe-pylustre-$(PACKAGE_VERSION)-$(LIPE_RELEASE).el$(DISTRO_RELEASE)*.x86_64.rpm
-CLOWNFISH_RPM = build/RPMS/x86_64/lipe-clownfish-$(PACKAGE_VERSION)-$(LIPE_RELEASE).el$(DISTRO_RELEASE)*.x86_64.rpm
LIPE_RPM = build/RPMS/x86_64/lipe-$(PACKAGE_VERSION)-$(LIPE_RELEASE).el$(DISTRO_RELEASE)*.x86_64.rpm
LIPE_DEBUGINFO_RPM = build/RPMS/x86_64/lipe-debuginfo-$(PACKAGE_VERSION)-$(LIPE_RELEASE).el$(DISTRO_RELEASE)*.x86_64.rpm
LORIS_RPM = build/RPMS/x86_64/lipe-loris-$(PACKAGE_VERSION)-$(LIPE_RELEASE).el$(DISTRO_RELEASE)*.x86_64.rpm
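Each *_RPM variable above is a glob that should resolve to exactly one versioned RPM under build/RPMS/x86_64. A quick sanity check of such a pattern from Python (a hypothetical illustration, not part of the build scripts):

    import glob

    # Hypothetical: verify the pylustre glob matches exactly one built RPM.
    pattern = "build/RPMS/x86_64/lipe-pylustre-*.el*.x86_64.rpm"
    matches = glob.glob(pattern)
    assert len(matches) == 1, "expected exactly one RPM, got %r" % matches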
rm -f lipe-*.md5
cp -a $(CACHED_ISO_PATH) $(ISO_PATH)
mkdir -p $(PACKAGE_PATH)
- cp $(CLOWNFISH_RPM) $(PACKAGE_PATH)
cp $(LIPE_RPM) $(PACKAGE_PATH)
cp $(LIPE_DEBUGINFO_RPM) $(PACKAGE_PATH)
cp $(PYLUSTRE_RPM) $(PACKAGE_PATH)
+++ /dev/null
-# Configuration file of Clownfish
-#
-# Configuration Guide:
-#
-#
-high_availability: true # Whether to enable automatic HA
-clownfish_port: 3002 # Port of Clownfish server
-ssh_hosts: # Array of hosts
- - host_id: server17-el7-vm1 # ID of this SSH host
- hostname: server17-el7-vm1 # The host name
- ssh_identity_file: /root/.ssh/id_dsa # The SSH key to connect to the host
- - host_id: server17-el7-vm2
- hostname: server17-el7-vm2
- - host_id: server17-el7-vm3
- hostname: server17-el7-vm3
- - host_id: server17-el7-vm4
- hostname: server17-el7-vm4
- - host_id: server17-el7-vm5
- hostname: server17-el7-vm5
- - host_id: server17-el7-vm6
- hostname: server17-el7-vm6
- - host_id: server17-el7-vm7
- hostname: server17-el7-vm7
- - host_id: server17-el7-vm8
- hostname: server17-el7-vm8
- - host_id: server17-el7-vm9
- hostname: server17-el7-vm9
-lazy_prepare: true # Whether to lazily prepare the servers
-lustres: # Lustre file systems
- - fsname: lipe0 # Name of Lustre
- lustre_server_rpm_dir: /work/lustre_rpms/es3_2/RPMS/x86_64 # Directory for Lustre RPMs
- e2fsprogs_rpm_dir: /work/e2fsprogs_rpms/rhel7 # Directory for E2fsprogs RPMs
- mdts: # MDTs
- - is_mgs: true # Whether this MDT is also MGS
- index: 0 # Index of this MDT
- instances:
- - host_id: server17-el7-vm1 # Hostid on which MDT is hosted
- device: /dev/mapper/lipe0_mdt0 # Device name of MDT
- nid: 10.0.1.148@tcp # NID of this MDS
- - host_id: server17-el7-vm2
- device: /dev/mapper/lipe0_mdt0
- nid: 10.0.1.149@tcp
- - is_mgs: false
- index: 1
- instances:
- - host_id: server17-el7-vm1
- device: /dev/mapper/lipe0_mdt1
- nid: 10.0.1.148@tcp
- - host_id: server17-el7-vm2
- device: /dev/mapper/lipe0_mdt1
- nid: 10.0.1.149@tcp
- osts: # OSTs
- - index: 0 # Index of this OST
- instances:
- - host_id: server17-el7-vm3 # Hostid on which OST is hosted
- device: /dev/mapper/lipe0_ost0 # Device name of OST
- nid: 10.0.1.251@tcp
- - host_id: server17-el7-vm4
- device: /dev/mapper/lipe0_ost0
- nid: 10.0.1.252@tcp
- - index: 1
- instances:
- - host_id: server17-el7-vm3
- device: /dev/mapper/lipe0_ost1
- nid: 10.0.1.251@tcp
- - host_id: server17-el7-vm4
- device: /dev/mapper/lipe0_ost1
- nid: 10.0.1.252@tcp
- - index: 2
- instances:
- - host_id: server17-el7-vm3
- device: /dev/mapper/lipe0_ost2
- nid: 10.0.1.251@tcp
- - host_id: server17-el7-vm4
- device: /dev/mapper/lipe0_ost2
- nid: 10.0.1.252@tcp
- - index: 3
- instances:
- - host_id: server17-el7-vm3
- device: /dev/mapper/lipe0_ost3
- nid: 10.0.1.251@tcp
- - host_id: server17-el7-vm4
- device: /dev/mapper/lipe0_ost3
- nid: 10.0.1.252@tcp
- clients:
- - host_id: server17-el7-vm9 # Hostid on which client is hosted
- mnt: /mnt/lustre_lipe0 # Mount point of Lustre client
- lustre_client_rpm_dir: /work/lustre_rpms/es4/RPMS/x86_64/
- - fsname: lipe1
- lustre_server_rpm_dir: /work/lustre_rpms/es3_2/RPMS/x86_64
- e2fsprogs_rpm_dir: /work/e2fsprogs_rpms/rhel7
- mdts:
- - is_mgs: true
- index: 0
- instances:
- - host_id: server17-el7-vm5
- device: /dev/mapper/lipe1_mdt0
- nid: 10.0.1.253@tcp
- - host_id: server17-el7-vm6
- device: /dev/mapper/lipe1_mdt0
- nid: 10.0.1.254@tcp
- - is_mgs: false
- index: 1
- instances:
- - host_id: server17-el7-vm5
- device: /dev/mapper/lipe1_mdt1
- nid: 10.0.1.253@tcp
- - host_id: server17-el7-vm6
- device: /dev/mapper/lipe1_mdt1
- nid: 10.0.1.254@tcp
- osts:
- - index: 0
- instances:
- - host_id: server17-el7-vm7
- device: /dev/mapper/lipe1_ost0
- nid: 10.0.1.255@tcp
- - host_id: server17-el7-vm8
- device: /dev/mapper/lipe1_ost0
- nid: 10.0.2.197@tcp
- - index: 1
- instances:
- - host_id: server17-el7-vm7
- device: /dev/mapper/lipe1_ost1
- nid: 10.0.1.255@tcp
- - host_id: server17-el7-vm8
- device: /dev/mapper/lipe1_ost1
- nid: 10.0.2.197@tcp
- - index: 2
- instances:
- - host_id: server17-el7-vm7
- device: /dev/mapper/lipe1_ost2
- nid: 10.0.1.255@tcp
- - host_id: server17-el7-vm8
- device: /dev/mapper/lipe1_ost2
- nid: 10.0.2.197@tcp
- - index: 3
- instances:
- - host_id: server17-el7-vm7
- device: /dev/mapper/lipe1_ost3
- nid: 10.0.1.255@tcp
- - host_id: server17-el7-vm8
- device: /dev/mapper/lipe1_ost3
- nid: 10.0.2.197@tcp
- clients:
- - host_id: server17-el7-vm9
- mnt: /mnt/lustre_lipe1
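The deleted clownfish.conf examples all share this YAML layout. For reference, a minimal PyYAML sketch (an illustration, not the removed pyclownfish parser) that walks it and lists every target instance:

    import yaml

    # Minimal sketch: walk the deleted clownfish.conf layout with PyYAML.
    with open("clownfish.conf") as conf_file:
        config = yaml.safe_load(conf_file)

    for lustre_fs in config.get("lustres", []):
        print("fsname: %s" % lustre_fs["fsname"])
        for kind in ("mdts", "osts"):
            for target in lustre_fs.get(kind, []):
                # Each target lists one instance per failover host.
                for instance in target["instances"]:
                    print("  %s %s on %s: %s (nid %s)"
                          % (kind[:-1], target["index"], instance["host_id"],
                             instance["device"], instance["nid"]))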
+++ /dev/null
-../seperate_mgs/esmon_install.conf
\ No newline at end of file
+++ /dev/null
-# Configuration file of Clownfish
-#
-# Configuration Guide:
-#
-#
-high_availability: false # Whether to enable automatic HA
-clownfish_port: 3002 # Port of Clownfish server
-lustre_distributions: # Distributions of Lustre
- - lustre_distribution_id: es3_2 # Distribution ID
- lustre_rpm_dir: /work/lustre_rpms/es3_2/RPMS/x86_64 # Directory for Lustre RPMs
- e2fsprogs_rpm_dir: /work/e2fsprogs_rpms/rhel7 # Directory for E2fsprogs RPMs
- - lustre_distribution_id: es4_0
- lustre_rpm_dir: /work/lustre_rpms/es4/RPMS/x86_64/
- e2fsprogs_rpm_dir: /work/e2fsprogs_rpms/rhel7
-ssh_hosts: # Array of hosts
- - host_id: server17-el7-vm1 # ID of this SSH host
- hostname: server17-el7-vm1 # The host name
- ssh_identity_file: /root/.ssh/id_dsa # The SSH key to connect to the host
- lustre_distribution_id: es3_2 # Lustre Distribution ID
- - host_id: server17-el7-vm2
- hostname: server17-el7-vm2
- lustre_distribution_id: es3_2
- - host_id: server17-el7-vm3
- hostname: server17-el7-vm3
- lustre_distribution_id: es3_2
- - host_id: server17-el7-vm4
- hostname: server17-el7-vm4
- lustre_distribution_id: es3_2
- - host_id: server17-el7-vm5
- hostname: server17-el7-vm5
- lustre_distribution_id: es3_2
- - host_id: server17-el7-vm6
- hostname: server17-el7-vm6
- lustre_distribution_id: es3_2
- - host_id: server17-el7-vm7
- hostname: server17-el7-vm7
- lustre_distribution_id: es3_2
- - host_id: server17-el7-vm8
- hostname: server17-el7-vm8
- lustre_distribution_id: es3_2
- - host_id: server17-el7-vm9
- hostname: server17-el7-vm9
- lustre_distribution_id: es4_0
-lazy_prepare: true # Whether to lazily prepare the servers
-mgs_list:
- - mgs_id: lipe_mgs
- backfstype: ldiskfs # Backfs type
- instances:
- - host_id: server17-el7-vm1 # Hostid on which MGS is hosted
- device: /dev/mapper/lipe_mgs # Device name of MGS
- nid: 10.0.1.148@tcp # NID of this MGS
- - host_id: server17-el7-vm2
- device: /dev/mapper/lipe_mgs
- nid: 10.0.1.149@tcp
-lustres: # Lustre file systems
- - fsname: lipe0 # Name of Lustre
- mgs_id: lipe_mgs # MGS ID
- mdts: # MDTs
- - is_mgs: false # Whether this MDT is also MGS
- index: 0 # Index of this MDT
- instances:
- - host_id: server17-el7-vm1 # Hostid on which MDT is hosted
- device: /dev/mapper/lipe0_mdt0 # Device name of MDT
- nid: 10.0.1.148@tcp # NID of this MDS
- - host_id: server17-el7-vm2
- device: /dev/mapper/lipe0_mdt0
- nid: 10.0.1.149@tcp
- backfstype: ldiskfs
- - is_mgs: false
- index: 1
- instances:
- - host_id: server17-el7-vm1
- device: lipe0_mdt1/mdt1
- nid: 10.0.1.148@tcp
- zpool_create: "zpool create -f lipe0_mdt1 /dev/mapper/lipe0_mdt1"
- - host_id: server17-el7-vm2
- device: lipe0_mdt1/mdt1
- zpool_create: "zpool create -f lipe0_mdt1 /dev/mapper/lipe0_mdt1"
- nid: 10.0.1.149@tcp
- backfstype: zfs
- osts: # OSTs
- - index: 0 # Index of this OST
- instances:
- - host_id: server17-el7-vm3 # Hostid on which OST is hosted
- device: /dev/mapper/lipe0_ost0 # Device name of OST
- nid: 10.0.1.251@tcp
- - host_id: server17-el7-vm4
- device: /dev/mapper/lipe0_ost0
- nid: 10.0.1.252@tcp
- backfstype: ldiskfs
- - index: 1
- instances:
- - host_id: server17-el7-vm3
- device: lipe0_ost1/ost1
- nid: 10.0.1.251@tcp
- zpool_create: "zpool create -f lipe0_ost1 /dev/mapper/lipe0_ost1"
- - host_id: server17-el7-vm4
- device: lipe0_ost1/ost1
- nid: 10.0.1.252@tcp
- zpool_create: "zpool create -f lipe0_ost1 /dev/mapper/lipe0_ost1"
- backfstype: zfs
- - index: 2
- instances:
- - host_id: server17-el7-vm3
- device: /dev/mapper/lipe0_ost2
- nid: 10.0.1.251@tcp
- - host_id: server17-el7-vm4
- device: /dev/mapper/lipe0_ost2
- nid: 10.0.1.252@tcp
- backfstype: ldiskfs
- - index: 3
- instances:
- - host_id: server17-el7-vm3
- device: /dev/mapper/lipe0_ost3
- nid: 10.0.1.251@tcp
- - host_id: server17-el7-vm4
- device: /dev/mapper/lipe0_ost3
- nid: 10.0.1.252@tcp
- backfstype: ldiskfs
- clients:
- - host_id: server17-el7-vm9 # Hostid on which client is hosted
- mnt: /mnt/lustre_lipe0 # Mount point of Lustre client
- - fsname: lipe1
- mgs_id: lipe_mgs
- mdts:
- - is_mgs: false
- index: 0
- instances:
- - host_id: server17-el7-vm5
- device: /dev/mapper/lipe1_mdt0
- nid: 10.0.1.253@tcp
- - host_id: server17-el7-vm6
- device: /dev/mapper/lipe1_mdt0
- nid: 10.0.1.254@tcp
- backfstype: ldiskfs
- - is_mgs: false
- index: 1
- instances:
- - host_id: server17-el7-vm5
- device: lipe1_mdt1/mdt1
- nid: 10.0.1.253@tcp
- zpool_create: "zpool create -f lipe1_mdt1 /dev/mapper/lipe1_mdt1"
- - host_id: server17-el7-vm6
- device: lipe1_mdt1/mdt1
- nid: 10.0.1.254@tcp
- zpool_create: "zpool create -f lipe1_mdt1 /dev/mapper/lipe1_mdt1"
- backfstype: zfs
- osts:
- - index: 0
- instances:
- - host_id: server17-el7-vm7
- device: /dev/mapper/lipe1_ost0
- nid: 10.0.1.255@tcp
- - host_id: server17-el7-vm8
- device: /dev/mapper/lipe1_ost0
- nid: 10.0.2.197@tcp
- backfstype: ldiskfs
- - index: 1
- instances:
- - host_id: server17-el7-vm7
- device: /dev/mapper/lipe1_ost1
- nid: 10.0.1.255@tcp
- - host_id: server17-el7-vm8
- device: /dev/mapper/lipe1_ost1
- nid: 10.0.2.197@tcp
- backfstype: ldiskfs
- - index: 2
- instances:
- - host_id: server17-el7-vm7
- device: /dev/mapper/lipe1_ost2
- nid: 10.0.1.255@tcp
- - host_id: server17-el7-vm8
- device: /dev/mapper/lipe1_ost2
- nid: 10.0.2.197@tcp
- backfstype: ldiskfs
- - index: 3
- instances:
- - host_id: server17-el7-vm7
- device: /dev/mapper/lipe1_ost3
- nid: 10.0.1.255@tcp
- - host_id: server17-el7-vm8
- device: /dev/mapper/lipe1_ost3
- nid: 10.0.2.197@tcp
- backfstype: ldiskfs
- clients:
- - host_id: server17-el7-vm9
- mnt: /mnt/lustre_lipe1
- qos:
- esmon_server_hostname: server17 # Hostname of ESMON server
- esmon_collect_interval: 5 # Collect interval of esmon in seconds
- enabled: true # Whether QoS management is enabled
- interval: 60 # QoS interval in seconds; must be larger than esmon_collect_interval
- mbps_threshold: 70 # mbps_threshold * interval is the throughput limit in MB
- throttled_oss_rpc_rate: 10 # Default RPCs per second on each OSS partition
- iops_threshold: 100 # iops_threshold * interval is the metadata operation limit
- throttled_mds_rpc_rate: 10 # Default RPCs per second on each MDS partition
- users:
- - uid: 0
- mbps_threshold: 1000000 # Overwrites global mbps_threshold for this user
- throttled_oss_rpc_rate: 20 # Overwrites global throttled_oss_rpc_rate for this user
- throttled_mds_rpc_rate: 20 # Overwrites global throttled_mds_rpc_rate for this user
- - uid: 100
- mbps_threshold: 80
- throttled_oss_rpc_rate: 1
-
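The qos comments above define the limits by simple multiplication over the QoS window; with this example's values the numbers work out as follows (a worked illustration, not code from the tree):

    # Worked example of the QoS limits described in the comments above.
    interval = 60            # QoS window in seconds
    mbps_threshold = 70      # throughput threshold, MB/s
    iops_threshold = 100     # metadata operation threshold, ops/s

    throughput_limit_mb = mbps_threshold * interval  # 4200 MB per window
    metadata_op_limit = iops_threshold * interval    # 6000 ops per window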
+++ /dev/null
-#
-# Configuration file of ESPerfMon from DDN
-#
-# This file is automatically generated by the esmon_config command. To update
-# this file, please run the esmon_config command.
-#
-# Configuration Guide:
-#
-# 1. agents
-# This list includes the information of the ES PERFMON agents.
-# Default value: [{'host_id': 'localhost'}]
-#
-# 1.1 enable_disk
-# This option determines whether to collect disk metrics from this agent.
-# Default value: False
-#
-# 1.2 host_id
-# This option is the ID of the host. The ID of a host is a unique value to
-# identify the host.
-#
-# 1.3 ime
-# This option determines whether to enable IME metrics collection on this
-# ES PERFMON agent.
-# Default value: False
-#
-# 1.4 infiniband
-# This option determines whether to enable Infiniband metrics collection on this
-# ES PERFMON agent.
-# Default value: False
-#
-# 1.5 lustre_mds
-# This option determines whether to enable Lustre MDS metrics collection on
-# this ES PERFMON agent.
-# Default value: True
-#
-# 1.6 lustre_oss
-# This option determines whether to enable Lustre OSS metrics collection on this
-# ES PERFMON agent.
-# Default value: True
-#
-# 1.7 sfas
-# This list includes the information of SFAs on this ES PERFMON agent.
-# Default value: []
-#
-# 1.7.1 controller0_host
-# This option is the hostname/IP of the controller 0 of this SFA.
-# Default value: controller0_host
-#
-# 1.7.2 controller1_host
-# This option is the hostname/IP of the controller 1 of this SFA.
-# Default value: controller1_host
-#
-# 1.7.3 name
-# This option is the unique name of this controller. This value will be used as
-# the value of "fqdn" tag for metrics of this SFA. Thus, two SFAs shouldn't have
-# the same name.
-#
-# 2. agents_reinstall
-# This option determines whether to reinstall ESMON agents or not.
-# Default value: True
-#
-# 3. collect_interval
-# This option determines the interval in seconds at which datapoints are collected.
-# Default value: 60
-#
-# 4. continuous_query_interval
-# This option determines the interval of continuous queries. ES PERFMON uses
-# continuous queries of Influxdb to aggregate data. To calculate the interval
-# seconds of continuous queries, please multiply this number by the value of
-# the "collect_interval" option. If this number is "1", the interval of
-# continuous queries would be "collect_interval" seconds. Usually, in order to
-# downsample the data and reduce performance impact, this value should be
-# larger than "1".
-# Default value: 4
-#
-# 5. iso_path
-# This option is the path of ES PERFMON ISO.
-# Default value: /root/esmon.iso
-#
-# 6. lustre_default_version
-# This option determines the default Lustre version to use, if the Lustre
-# RPMs installed on the ES PERFMON agent are not of a supported version.
-# Default value: es3
-#
-# 7. lustre_exp_mdt
-# This option determines whether ES PERFMON agents collect exp_md_stats_*
-# metrics from Lustre MDT. If there are too many Lustre clients on the system,
-# this option should be disabled to avoid performance issues.
-# Default value: False
-#
-# 8. lustre_exp_ost
-# This option determines whether ES PERFMON agents collect exp_ost_stats_[read|
-# write] metrics from Lustre OST or not. If there are too many Lustre clients on
-# the system, this option should be disabled to avoid performance issues.
-# Default value: False
-#
-# 9. server
-# This group of options includes the information about the ES PERFMON server.
-# Default value: {'host_id': 'localhost'}
-#
-# 9.1 drop_database
-# This option determines whether to drop existing ES PERFMON database of
-# Influxdb.
-# Important: This option should ONLY be set to "True" if the data/metadata in
-# ES PERFMON database of Influxdb is not needed any more.
-# Default value: False
-#
-# 9.2 erase_influxdb
-# This option determines whether to erase all data and metadata of Influxdb.
-# Important: This option should ONLY be set to "True" if the data/metadata of
-# Influxdb is not needed any more. When Influxdb is totally
-# corrupted, enable this option to erase and recover. Please also
-# double-check that the influxdb_path option is properly configured before
-# enabling this option.
-# Default value: False
-#
-# 9.3 host_id
-# This option is the ID of the host. The ID of a host is a unique value to
-# identify the host.
-#
-# 9.4 influxdb_path
-# This option is the Influxdb directory path on the ES PERFMON server node.
-# Important: Please do not put any other files/directories under this directory of
-# the ES PERFMON server node, because, with the "erase_influxdb" option
-# enabled, all of the files/directories under that directory will be
-# removed.
-# Default value: /esmon/influxdb
-#
-# 9.5 reinstall
-# This option determines whether to reinstall the ES PERFMON server.
-# Default value: True
-#
-# 10. ssh_hosts
-# This list includes the information about how to log in to the hosts using
-# SSH connections.
-# Default value: [{'local_host': True, 'host_id': 'localhost', 'hostname': 'localhost'}]
-#
-# 10.1 host_id
-# This option is the ID of the host. The ID of a host is a unique value to
-# identify the host.
-#
-# 10.2 hostname
-# This option is the hostname or IP of the host. The "ssh" command will use this
-# hostname/IP to log in to the host. If the host is the ES PERFMON server, this
-# hostname/IP will be used as the server host in the write_tsdb plugin of
-# ES PERFMON agent.
-#
-# 10.3 ssh_identity_file
-# This option is the SSH key file used when logging in to
-# the host. If the default SSH identity file works, this option can be set to
-# "None".
-# Default value: None
-#
-# 10.4 local_host
-# This option determines whether this host is the local host.
-# Default value: False
-#
-agents:
- - enable_disk: true
- host_id: server17-el7-vm1
- ime: false
- infiniband: false
- lustre_mds: true
- lustre_oss: true
- sfas: []
- - enable_disk: true
- host_id: server17-el7-vm2
- - enable_disk: true
- host_id: server17-el7-vm3
- - enable_disk: true
- host_id: server17-el7-vm4
- - enable_disk: true
- host_id: server17-el7-vm5
- - enable_disk: true
- host_id: server17-el7-vm6
- - enable_disk: true
- host_id: server17-el7-vm7
- - enable_disk: true
- host_id: server17-el7-vm8
- - enable_disk: true
- host_id: server17-el7-vm9
- sfas:
- - controller0_host: 10.0.0.6
- controller1_host: 10.0.0.5
- name: OST
-agents_reinstall: true
-collect_interval: 5
-continuous_query_interval: 4
-iso_path: /root/esmon.iso
-lustre_default_version: es3
-lustre_exp_mdt: false
-lustre_exp_ost: false
-server:
- drop_database: false
- erase_influxdb: false
- host_id: server17
- influxdb_path: /esmon/influxdb
- reinstall: true
-ssh_hosts:
- - host_id: server17-el7-vm1
- hostname: server17-el7-vm1
- local_host: false
- ssh_identity_file: /root/.ssh/id_dsa
- - host_id: server17-el7-vm2
- hostname: server17-el7-vm2
- ssh_identity_file: None
- - host_id: server17-el7-vm3
- hostname: server17-el7-vm3
- - host_id: server17-el7-vm4
- hostname: server17-el7-vm4
- - host_id: server17-el7-vm5
- hostname: server17-el7-vm5
- - host_id: server17-el7-vm6
- hostname: server17-el7-vm6
- - host_id: server17-el7-vm7
- hostname: server17-el7-vm7
- - host_id: server17-el7-vm8
- hostname: server17-el7-vm8
- - host_id: server17-el7-vm9
- hostname: server17-el7-vm9
- - host_id: server17
- hostname: server17
- local_host: true
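As the guide above notes, the continuous-query interval is collect_interval multiplied by continuous_query_interval; with this example's values (5 and 4), Influxdb would aggregate every 20 seconds. A worked illustration:

    # Values from the deleted config above.
    collect_interval = 5            # seconds between datapoints
    continuous_query_interval = 4   # multiplier, not seconds
    cq_seconds = collect_interval * continuous_query_interval
    assert cq_seconds == 20         # Influxdb aggregates every 20 seconds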
+++ /dev/null
-# Configuration file of Clownfish
-#
-# Configuration Guide:
-#
-#
-high_availability: false # Whether to enable automatic HA
-clownfish_port: 3002 # Port of Clownfish server
-lustre_distributions: # Distributions of Lustre
- - lustre_distribution_id: es3_2 # Distribution ID
- lustre_rpm_dir: /work/lustre_rpms/es3_2/RPMS/x86_64 # Directory for Lustre RPMs
- e2fsprogs_rpm_dir: /work/e2fsprogs_rpms/rhel7 # Directory for E2fsprogs RPMs
- - lustre_distribution_id: es4_0
- lustre_rpm_dir: /work/lustre_rpms/es4/RPMS/x86_64/
- e2fsprogs_rpm_dir: /work/e2fsprogs_rpms/rhel7
-ssh_hosts: # Array of hosts
- - host_id: ime02 # ID of this SSH host
- hostname: ime02 # The host name
- ssh_identity_file: /root/.ssh/id_dsa # The SSH key to connect to the host
- lustre_distribution_id: es4_0 # Lustre Distribution ID
-lazy_prepare: true # Whether to lazily prepare the servers
-lustres: # Lustre file systems
- - fsname: ime02 # Name of Lustre
- mdts: # MDTs
- - is_mgs: true # Whether this MDT is also MGS
- index: 0 # Index of this MDT
- instances:
- - host_id: ime02 # Hostid on which MDT is hosted
- device: /dev/sdv # Device name of MDT
- nid: 10.0.0.39@tcp # NID of this MDS
- backfstype: ldiskfs # Backfs type
- - is_mgs: false
- index: 1
- instances:
- - host_id: ime02
- device: /dev/sdw
- nid: 10.0.0.39@tcp
- backfstype: ldiskfs
- osts: # OSTs
- - index: 0 # Index of this OST
- instances:
- - host_id: ime02 # Hostid on which OST is hosted
- device: /dev/sdx # Device name of OST
- nid: 10.0.0.39@tcp
- backfstype: ldiskfs
- - index: 1
- instances:
- - host_id: ime02
- device: /dev/sdy
- nid: 10.0.0.39@tcp
- backfstype: ldiskfs
- clients:
- - host_id: ime02 # Hostid on which client is hosted
- mnt: /mnt/lustre_ime02 # Mount point of Lustre client
- qos:
- esmon_server_hostname: server17 # Hostname of ESMON server
- esmon_collect_interval: 5 # Collect interval of esmon in seconds
- enabled: true # Whether QoS management is enabled
- interval: 60 # QoS interval in seconds; must be larger than esmon_collect_interval
- mbps_threshold: 70 # mbps_threshold * interval is the throughput limit in MB
- throttled_oss_rpc_rate: 10 # Default RPCs per second on each OSS partition
- iops_threshold: 100 # iops_threshold * interval is the metadata operation limit
- throttled_mds_rpc_rate: 10 # Default RPCs per second on each MDS partition
- users:
- - uid: 0
- mbps_threshold: 1000000 # Overwrites global mbps_threshold for this user
- throttled_oss_rpc_rate: 20 # Overwrites global throttled_oss_rpc_rate for this user
- throttled_mds_rpc_rate: 20 # Overwrites global throttled_mds_rpc_rate for this user
- - uid: 100
- mbps_threshold: 80
- throttled_oss_rpc_rate: 1
+++ /dev/null
-#
-# Configuration file of ESPerfMon from DDN
-#
-# This file is automatically generated by the esmon_config command. To update
-# this file, please run the esmon_config command.
-#
-# Configuration Guide:
-#
-# 1. agents
-# This list includes the information of the ES PERFMON agents.
-# Default value: [{'host_id': 'localhost'}]
-#
-# 1.1 enable_disk
-# This option determines whether to collect disk metrics from this agent.
-# Default value: False
-#
-# 1.2 host_id
-# This option is the ID of the host. The ID of a host is a unique value to
-# identify the host.
-#
-# 1.3 ime
-# This option determines whether to enable IME metrics collection on this
-# ES PERFMON agent.
-# Default value: False
-#
-# 1.4 infiniband
-# This option determines whether to enable Infiniband metrics collection on this
-# ES PERFMON agent.
-# Default value: False
-#
-# 1.5 lustre_mds
-# This option determines whether to enable Lustre MDS metrics collection on
-# this ES PERFMON agent.
-# Default value: True
-#
-# 1.6 lustre_oss
-# This option determines whether to enable Lustre OSS metrics collection on this
-# ES PERFMON agent.
-# Default value: True
-#
-# 1.7 sfas
-# This list includes the information of SFAs on this ES PERFMON agent.
-# Default value: []
-#
-# 1.7.1 controller0_host
-# This option is the hostname/IP of the controller 0 of this SFA.
-# Default value: controller0_host
-#
-# 1.7.2 controller1_host
-# This option is the hostname/IP of the controller 1 of this SFA.
-# Default value: controller1_host
-#
-# 1.7.3 name
-# This option is the unique name of this controller. This value will be used as
-# the value of "fqdn" tag for metrics of this SFA. Thus, two SFAs shouldn't have
-# the same name.
-#
-# 2. agents_reinstall
-# This option determines whether to reinstall ESMON agents or not.
-# Default value: True
-#
-# 3. collect_interval
-# This option determines the interval in seconds at which datapoints are collected.
-# Default value: 60
-#
-# 4. continuous_query_interval
-# This option determines the interval of continuous queries. ES PERFMON uses
-# continuous queries of Influxdb to aggregate data. To calculate the interval
-# seconds of continuous queries, please multiply this number by the value of
-# the "collect_interval" option. If this number is "1", the interval of
-# continuous queries would be "collect_interval" seconds. Usually, in order to
-# downsample the data and reduce performance impact, this value should be
-# larger than "1".
-# Default value: 4
-#
-# 5. iso_path
-# This option is the path of ES PERFMON ISO.
-# Default value: /root/esmon.iso
-#
-# 6. lustre_default_version
-# This option determines the default Lustre version to use, if the Lustre
-# RPMs installed on the ES PERFMON agent are not of a supported version.
-# Default value: es3
-#
-# 7. lustre_exp_mdt
-# This option determines whether ES PERFMON agents collect exp_md_stats_*
-# metrics from Lustre MDT. If there are too many Lustre clients on the system,
-# this option should be disabled to avoid performance issues.
-# Default value: False
-#
-# 8. lustre_exp_ost
-# This option determines whether ES PERFMON agents collect exp_ost_stats_[read|
-# write] metrics from Lustre OST or not. If there are too many Lustre clients on
-# the system, this option should be disabled to avoid performance issues.
-# Default value: False
-#
-# 9. server
-# This group of options includes the information about the ES PERFMON server.
-# Default value: {'host_id': 'localhost'}
-#
-# 9.1 drop_database
-# This option determines whether to drop existing ES PERFMON database of
-# Influxdb.
-# Important: This option should ONLY be set to "True" if the data/metadata in
-# ES PERFMON database of Influxdb is not needed any more.
-# Default value: False
-#
-# 9.2 erase_influxdb
-# This option determines whether to erase all data and metadata of Influxdb.
-# Important: This option should ONLY be set to "True" if the data/metadata of
-# Influxdb is not needed any more. When Influxdb is totally
-# corrupted, enable this option to erase and recover. Please also
-# double-check that the influxdb_path option is properly configured before
-# enabling this option.
-# Default value: False
-#
-# 9.3 host_id
-# This option is the ID of the host. The ID of a host is a unique value to
-# identify the host.
-#
-# 9.4 influxdb_path
-# This option is the Influxdb directory path on the ES PERFMON server node.
-# Important: Please do not put any other files/directories under this directory of
-# the ES PERFMON server node, because, with the "erase_influxdb" option
-# enabled, all of the files/directories under that directory will be
-# removed.
-# Default value: /esmon/influxdb
-#
-# 9.5 reinstall
-# This option determines whether to reinstall the ES PERFMON server.
-# Default value: True
-#
-# 10. ssh_hosts
-# This list includes the information about how to log in to the hosts using
-# SSH connections.
-# Default value: [{'local_host': True, 'host_id': 'localhost', 'hostname': 'localhost'}]
-#
-# 10.1 host_id
-# This option is the ID of the host. The ID of a host is a unique value to
-# identify the host.
-#
-# 10.2 hostname
-# This option is the hostname or IP of the host. The "ssh" command will use this
-# hostname/IP to log in to the host. If the host is the ES PERFMON server, this
-# hostname/IP will be used as the server host in the write_tsdb plugin of
-# ES PERFMON agent.
-#
-# 10.3 ssh_identity_file
-# This option is the SSH key file used when logging in to
-# the host. If the default SSH identity file works, this option can be set to
-# "None".
-# Default value: None
-#
-# 10.4 local_host
-# This option determines whether this host is the local host.
-# Default value: False
-#
-# 11. jobid_var
-# This option determines the job ID var used by the file system, which can
-# be read with the command "lctl get_param jobid_var".
-# Default value: unknown
-#
-agents:
- - enable_disk: false
- host_id: ime02
- ime: false
- infiniband: false
- lustre_mds: true
- lustre_oss: true
- sfas: []
-agents_reinstall: true
-collect_interval: 5
-continuous_query_interval: 4
-iso_path: /root/esmon.iso
-jobid_var: procname_uid
-lustre_default_version: es3
-lustre_exp_mdt: false
-lustre_exp_ost: false
-server:
- drop_database: true
- erase_influxdb: true
- host_id: server17
- influxdb_path: /esmon/influxdb
- reinstall: true
-ssh_hosts:
- - host_id: ime02
- hostname: ime02
- local_host: false
- ssh_identity_file: /root/.ssh/id_dsa
- - host_id: server17
- hostname: server17
- local_host: true
- ssh_identity_file: None
+++ /dev/null
-# Configuration file for testing the Exascaler monitoring system
-#
-# Configuration Guide:
-#
-#
-high_availability: false # Whether to enable automatic HA
-clownfish_port: 3002 # Port of Clownfish server
-lustre_distributions: # Distributions of Lustre
- - lustre_distribution_id: tmp
- lustre_rpm_dir: /work/lustre_rpms/tmp/x86_64
- e2fsprogs_rpm_dir: /work/e2fsprogs_rpms/rhel7
-ssh_hosts: # Array of hosts
- - host_id: server17-el7-vm1 # ID of this SSH host
- hostname: server17-el7-vm1 # The host name
- ssh_identity_file: /root/.ssh/id_dsa # The SSH key to connect to the host
- lustre_distribution_id: tmp # Lustre Distribution ID
-lazy_prepare: true # Whether to lazily prepare the servers
-lustres: # Lustre file systems
- - fsname: test # Name of Lustre
- mdts: # MDTs
- - is_mgs: true # Whether this MDT is also MGS
- index: 0 # Index of this MDT
- instances:
- - host_id: server17-el7-vm1 # Hostid on which MDT is hosted
- device: /dev/sda # Device name of MDT
- nid: 10.0.1.148@tcp # NID of this MDS
- backfstype: ldiskfs # Backfs type
- osts: # OSTs
- - index: 0 # Index of this OST
- instances:
- - host_id: server17-el7-vm1 # Hostid on which OST is hosted
- device: /dev/sdb # Device name of OST
- nid: 10.0.1.148@tcp
- backfstype: ldiskfs
- - index: 1
- instances:
- - host_id: server17-el7-vm1
- device: /dev/sdc
- nid: 10.0.1.148@tcp
- backfstype: ldiskfs
- clients:
- - host_id: server17-el7-vm1 # Hostid on which client is hosted
- mnt: /mnt/lustre_test # Mount point of Lustre client
+++ /dev/null
-../clownfish/seperate_mgs/clownfish.conf
\ No newline at end of file
LORIS (Lustre Online Reliability Improvement System) backs up MDTs for
disaster recovery purposes.
-%package clownfish
-Summary: Lustre Management System
-Requires: lipe-pylustre = %{version}-%{release}
-Requires: rsync
-Provides: lipe-clownfish = %{version}-%{release}
-Group: Applications/System
-
-%description clownfish
-Clownfish manages Lustre clusters for HA purposes.
-
%package server
Summary: LiPE Server Package
Requires: lustre
python2 -m py_compile pylustre/*.py
%if %{with server}
-python2 -m py_compile pyclownfish/*.py
python2 -m py_compile pylipe/*.py
python2 -m py_compile pyloris/*.py
%endif
-find pyclownfish pylustre pylipe pyloris -maxdepth 1 -type f -a -name "*.python_checked" -o -name "*.py" | xargs rm -f
+find pylustre pylipe pyloris -maxdepth 1 -type f \( -name "*.python_checked" -o -name "*.py" \) | xargs rm -f
%install
rm -rf $RPM_BUILD_ROOT
loris_crontab \
loris_test \
src/ext4_inode2path \
- src/lcreatemany \
src/ldumpstripe \
src/lfill \
src/lipe_scan \
install -m 0755 scripts/*.sh $RPM_BUILD_ROOT%{ddntoolsdir}/
%endif
-cp -a pyclownfish $RPM_BUILD_ROOT%{python_sitelib}
cp -a pylipe $RPM_BUILD_ROOT%{python_sitelib}
cp -a pyloris $RPM_BUILD_ROOT%{python_sitelib}
mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}
cp -a \
- example_configs/clownfish/seperate_mgs/clownfish.conf \
example_configs/lipe/lipe_install.conf \
example_configs/lipe/lipe_launch.json \
example_configs/loris/loris.conf \
%{_bindir}/loris_test
%config(noreplace) %{_sysconfdir}/loris.conf
-%files clownfish
-%{python2_sitelib}/pyclownfish
-%{_bindir}/lcreatemany
-%config(noreplace) %{_sysconfdir}/clownfish.conf
-
%files server
%defattr(-,root,root)
%{_bindir}/ext4_inode2path
%{_bindir}/lipe_delete
%{_bindir}/lipe_purge
%{_bindir}/lipe-func.sh
-%{_bindir}/lipe_convert_expr
%{python2_sitelib}/pylipe
%config(noreplace) %{_sysconfdir}/lipe_install.conf
%config(noreplace) %{_sysconfdir}/lipe_launch.json
from pylustre import install_common
from pylustre import cmd_general
from pylipe import lipe_common
-from pyclownfish import clownfish_common
STRING_LIBCIRCLE_GIT_URL = "libcircle_git_url"
STRING_LIBCIRCLE_GIT_NAME = "libcircle.git"
existing_rpm_fnames = retval.cr_stdout.split()
dependent_rpms = lipe_common.LIPE_DEPENDENT_RPMS[:]
- for rpm_name in clownfish_common.CLOWNFISH_DEPENDENT_RPMS:
- if rpm_name not in dependent_rpms:
- dependent_rpms.append(rpm_name)
for rpm_name in install_common.LIPE_INSTALL_DEPENDENT_RPMS:
if rpm_name not in dependent_rpms:
dependent_rpms.append(rpm_name)
log.cl_info("building RPMs of MpiFileUtils")
mpi_devel_rpm = "mvapich2-2.2-devel"
# mvapich2 and libarchive-devel are needed by MpiFileUtils
- # python2-zmq is needed by Clownfish
# npm is needed to install uglifyjs
- build_dependent_rpms = [mpi_devel_rpm, "libarchive-devel", "python2-zmq",
+ build_dependent_rpms = [mpi_devel_rpm, "libarchive-devel",
"npm", "libattr-devel", "bzip2-devel"]
for build_dependent_rpm in build_dependent_rpms:
+++ /dev/null
-"""
-Python library for LiPE
-"""
-__all__ = ["clownfish",
- "clownfish_common"]
+++ /dev/null
-syntax = "proto2";
-package clownfish;
-message ClownfishMessage {
- enum ClownfishProtocolVersion {
- CPV_ZERO = 0;
- }
-
- enum ClownfishMessageType {
- CMT_COMMAND_CHILDREN_REQUEST = 0;
- CMT_COMMAND_CHILDREN_REPLY = 1;
- CMT_COMMAND_DICT_REQUEST = 2;
- CMT_COMMAND_DICT_REPLY = 3;
- CMT_COMMAND_REQUEST = 4;
- CMT_COMMAND_PARTWAY_QUERY = 5;
- CMT_COMMAND_REPLY = 6;
- CMT_CONNECT_REQUEST = 7;
- CMT_CONNECT_REPLY = 8;
- CMT_PING_REQUEST = 9;
- CMT_PING_REPLY = 10;
- CMT_PWD_REQUEST = 11;
- CMT_PWD_REPLY = 12;
- CMT_GENERAL = 13;
- }
-
- enum ClownfishErrno {
- CE_NO_ERROR = 0;
- /* UUID does not exist on server */
- CE_NO_UUID = 1;
- /* Request type is not supported */
- CE_NO_TYPE = 2;
- }
-
- message ClownfishCommandPartwayQuery {
- required bool ccpq_abort = 1;
- }
-
- message ClownfishCommandRequest {
- required string ccrt_cmd_line = 1;
- }
-
- message ClownfishCommandFinalReply {
- required int32 ccfr_exit_status = 1;
- required int32 ccfr_quit = 2;
- }
-
- message ClownfishLogRecord {
- required bool clr_is_stdout = 1;
- required bool clr_is_stderr = 2;
- /* Name of the log */
- required string clr_name = 3;
- /* level of the log: ERROR/WARNING/INFO/DEBUG */
- required int32 clr_levelno = 4;
- required string clr_pathname = 5;
- required int32 clr_lineno = 6;
- required string clr_funcname = 7;
- required float clr_created_time = 8;
- required string clr_msg = 9;
- }
-
- message ClownfishCommandReply {
- required bool ccry_is_final = 1;
- optional ClownfishCommandFinalReply ccry_final = 2;
- repeated ClownfishLogRecord ccry_logs = 3;
- }
-
- message ClownfishCommandChildrenRequest {
- }
-
- message ClownfishCommandChildrenReply {
- repeated string cccry_children = 1;
- }
-
- message ClownfishCommandDictRequest {
- }
-
- message ClownfishCommandItem {
- required string cci_command = 1;
- required bool cci_need_child = 2;
- repeated string cci_arguments = 3;
- }
-
- message ClownfishCommandDictReply {
- repeated ClownfishCommandItem ccdry_items = 1;
- }
-
- message ClownfishConnectRequest {
- required int64 ccrt_client_hash = 1;
- }
-
- message ClownfishConnectReply {
- required int64 ccry_client_hash = 1;
- }
-
- message ClownfishDisconnectRequest {
- }
-
- message ClownfishDisconnectReply {
- }
-
- message ClownfishPingRequest {
- }
-
- message ClownfishPingReply {
- }
-
- message ClownfishPwdRequest {
- }
-
- message ClownfishPwdReply {
- required string cpry_pwd = 1;
- }
-
- required ClownfishProtocolVersion cm_protocol_version = 1;
- /* The UUID in connect request is ignored */
- required int64 cm_client_uuid = 2;
- required ClownfishMessageType cm_type = 3;
- required ClownfishErrno cm_errno = 4;
- optional ClownfishCommandChildrenRequest cm_command_children_request = 5;
- optional ClownfishCommandChildrenReply cm_command_children_reply = 6;
- optional ClownfishCommandDictRequest cm_command_dict_request = 7;
- optional ClownfishCommandDictReply cm_command_dict_reply = 8;
- optional ClownfishCommandRequest cm_command_request = 9;
- optional ClownfishCommandReply cm_command_reply = 10;
- optional ClownfishCommandPartwayQuery cm_command_partway_query = 11;
- optional ClownfishConnectRequest cm_connect_request = 12;
- optional ClownfishConnectReply cm_connect_reply = 13;
- optional ClownfishDisconnectRequest cm_disconnect_request = 14;
- optional ClownfishDisconnectReply cm_disconnect_reply = 15;
- optional ClownfishPingRequest cm_ping_request = 16;
- optional ClownfishPingReply cm_ping_reply = 17;
- optional ClownfishPwdRequest cm_pwd_request = 18;
- optional ClownfishPwdReply cm_pwd_reply = 19;
-}
-
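The removed .gitignore entry for pyclownfish/clownfish_pb2.py indicates this .proto was compiled to Python with protoc. A minimal sketch of building one of the messages above from that generated module (the import path is assumed from the ignored file, not confirmed elsewhere in the tree):

    from pyclownfish import clownfish_pb2

    # proto2: all four required top-level fields must be set before serializing.
    msg = clownfish_pb2.ClownfishMessage()
    msg.cm_protocol_version = clownfish_pb2.ClownfishMessage.CPV_ZERO
    msg.cm_client_uuid = 0  # required even though a connect request ignores it
    msg.cm_type = clownfish_pb2.ClownfishMessage.CMT_PING_REQUEST
    msg.cm_errno = clownfish_pb2.ClownfishMessage.CE_NO_ERROR
    wire = msg.SerializeToString()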
+++ /dev/null
-# Copyright (c) 2018 DataDirect Networks, Inc.
-# All Rights Reserved.
-# Author: lixi@ddn.com
-"""
-Library for clownfish
-Clownfish is a management system for Lustre
-"""
-# Local libs
-from pylustre import utils
-from pylustre import parallel
-from pylustre import lustre
-from pylustre import cstr
-
-
-class ClownfishInstance(object):
- """
- This instance saves the global clownfish information
- """
- # pylint: disable=too-few-public-methods,too-many-instance-attributes
- # pylint: disable=too-many-arguments,too-many-public-methods
- def __init__(self, workspace, lazy_prepare, hosts, mgs_dict, lustres,
- high_availability):
- self.ci_lazy_prepare = lazy_prepare
- # Keys are the host IDs, not the hostnames
- self.ci_hosts = hosts
- # Keys are the MGS IDs, values are instances of LustreService
- self.ci_mgs_dict = mgs_dict
- # Keys are the fsnames, values are instances of LustreFilesystem
- self.ci_lustres = lustres
- self.ci_workspace = workspace
- self.ci_high_availability = high_availability
-
- def ci_mount_lustres(self, log):
- """
- Mount all Lustre file systems, including MGS if necessary
- """
- for lustrefs in self.ci_lustres.values():
- if log.cl_abort:
- log.cl_stderr("aborting mounting file systems")
- return -1
- ret = lustrefs.lf_mount(log)
- if ret:
- log.cl_stderr("failed to mount file system [%s]",
- lustrefs.lf_fsname)
- return -1
- return 0
-
- def ci_umount_lustres(self, log):
- """
- Umount all Lustre file systems, not including MGS
- """
- for lustrefs in self.ci_lustres.values():
- ret = lustrefs.lf_umount(log)
- if ret:
- log.cl_stderr("failed to umount file system [%s]",
- lustrefs.lf_fsname)
- return -1
- return 0
-
- def ci_mount_mgs(self, log):
- """
- Mount all MGS
- """
- for mgs in self.ci_mgs_dict.values():
- ret = mgs.ls_mount(log)
- if ret:
- log.cl_stderr("failed to mount MGS [%s]",
- mgs.ls_service_name)
- return -1
- return 0
-
- def ci_umount_mgs(self, log):
- """
- Umount all MGS
- """
- for mgs in self.ci_mgs_dict.values():
- ret = mgs.ls_umount(log)
- if ret:
- log.cl_stderr("failed to umount MGS [%s]",
- mgs.ls_service_name)
- return -1
- return 0
-
- def ci_umount_all(self, log):
- """
- Umount all file systems and MGS
- """
- ret = self.ci_umount_lustres(log)
- if ret:
- log.cl_stderr("failed to umount all Lustre file systems")
- return -1
-
- ret = self.ci_umount_mgs(log)
- if ret:
- log.cl_stderr("failed to umount all MGS")
- return -1
-
- return 0
-
- def ci_umount_all_nolock(self, log):
- """
- Umount all file systems and MGS
- Locks should be held
- """
- for lustrefs in self.ci_lustres.values():
- ret = lustrefs.lf_umount_nolock(log)
- if ret:
- log.cl_stderr("failed to umount file system [%s]",
- lustrefs.lf_fsname)
- return -1
-
- for mgs in self.ci_mgs_dict.values():
- ret = mgs.ls_umount_nolock(log)
- if ret:
- log.cl_stderr("failed to umount MGS [%s]",
- mgs.ls_service_name)
- return -1
- return 0
-
- def ci_mount_all(self, log):
- """
- Mount all file systems and MGS
- """
- ret = self.ci_mount_mgs(log)
- if ret:
- log.cl_stderr("failed to mount all MGS")
- return -1
-
- ret = self.ci_mount_lustres(log)
- if ret:
- log.cl_stderr("failed to mount all Lustre file systems")
- return -1
-
- return 0
-
- def ci_format_all_nolock(self, log):
- """
- Format all file systems and MGS
- Locks should be held
- """
- ret = self.ci_umount_all_nolock(log)
- if ret:
- log.cl_stderr("failed to umount all")
- return ret
-
- for mgs in self.ci_mgs_dict.values():
- ret = mgs.ls_format_nolock(log)
- if ret:
- log.cl_stderr("failed to umount and format MGS [%s]",
- mgs.ls_service_name)
- return -1
-
- for lustrefs in self.ci_lustres.values():
- ret = lustrefs.lf_format_nolock(log)
- if ret:
- log.cl_stderr("failed to umount and format Lustre file system "
- "[%s]",
- lustrefs.lf_fsname)
- return -1
- return 0
-
- def ci_format_all(self, log):
- """
- Format all file systems and MGS
- """
-
- lock_handles = []
- for mgs in self.ci_mgs_dict.values():
- mgs_lock_handle = mgs.ls_lock.rwl_writer_acquire(log)
- if mgs_lock_handle is None:
- log.cl_stderr("aborting formating all file systems and MGS")
- for lock_handle in reversed(lock_handles):
- lock_handle.rwh_release()
- return -1
- lock_handles.append(mgs_lock_handle)
-
- for lustrefs in self.ci_lustres.values():
- fs_lock_handle = lustrefs.lf_lock.rwl_writer_acquire(log)
- if fs_lock_handle is None:
- log.cl_stderr("aborting formating all file systems and MGS")
- for lock_handle in reversed(lock_handles):
- lock_handle.rwh_release()
- return -1
- lock_handles.append(fs_lock_handle)
-
- ret = self.ci_format_all_nolock(log)
-
- for lock_handle in reversed(lock_handles):
- lock_handle.rwh_release()
-
- return ret
-
- def ci_prepare_all_nolock(self, log, workspace):
- """
- Prepare all hosts
- Locks should be held
- """
- ret = self.ci_umount_all_nolock(log)
- if ret:
- log.cl_stderr("failed to umount all")
- return ret
-
- args_array = []
- thread_ids = []
- for host in self.ci_hosts.values():
- args = (host, self.ci_lazy_prepare)
- args_array.append(args)
- thread_id = "prepare_%s" % host.sh_host_id
- thread_ids.append(thread_id)
-
- parallel_execute = parallel.ParallelExecute(log, workspace,
- "host_prepare",
- lustre.host_lustre_prepare,
- args_array,
- thread_ids=thread_ids,
- parallelism=8)
- return parallel_execute.pe_run()
-
- def ci_prepare_all(self, log, workspace):
- """
- Prepare all hosts
- """
- lock_handles = []
- for mgs in self.ci_mgs_dict.values():
- mgs_lock_handle = mgs.ls_lock.rwl_writer_acquire(log)
- if mgs_lock_handle is None:
- log.cl_stderr("aborting preparing all hosts")
- for lock_handle in reversed(lock_handles):
- lock_handle.rwh_release()
- return -1
- lock_handles.append(mgs_lock_handle)
-
- for lustrefs in self.ci_lustres.values():
- fs_lock_handle = lustrefs.lf_lock.rwl_writer_acquire(log)
- if fs_lock_handle is None:
- log.cl_stderr("aborting preparing all hosts")
- for lock_handle in reversed(lock_handles):
- lock_handle.rwh_release()
- return -1
- lock_handles.append(fs_lock_handle)
-
- ret = self.ci_prepare_all_nolock(log, workspace)
-
- for lock_handle in reversed(lock_handles):
- lock_handle.rwh_release()
-
- return ret
-
-
-def init_instance(log, workspace, config, config_fpath):
- """
- Parse the config and init the instance
- """
- # pylint: disable=too-many-locals,too-many-return-statements
- # pylint: disable=too-many-branches,too-many-statements
- lazy_prepare = utils.config_value(config, cstr.CSTR_LAZY_PREPARE)
- if lazy_prepare is None:
- lazy_prepare = False
- log.cl_info("no [%s] is configured, using default value false",
- cstr.CSTR_LAZY_PREPARE)
-
- if lazy_prepare:
- lazy_prepare_string = "enabled"
- else:
- lazy_prepare_string = "disabled"
-
- log.cl_info("lazy prepare is %s", lazy_prepare_string)
-
- high_availability = utils.config_value(config,
- cstr.CSTR_HIGH_AVAILABILITY)
- if high_availability is None:
- high_availability = False
- log.cl_info("no [%s] is configured, using default value false",
- cstr.CSTR_HIGH_AVAILABILITY)
-
- if high_availability:
- high_availability_string = "enabled"
- else:
- high_availability_string = "disabled"
- log.cl_info("high availability is %s", high_availability_string)
-
- dist_configs = utils.config_value(config, cstr.CSTR_LUSTRE_DISTRIBUTIONS)
- if dist_configs is None:
- log.cl_error("can NOT find [%s] in the config file, "
- "please correct file [%s]",
- cstr.CSTR_LUSTRE_DISTRIBUTIONS, config_fpath)
- return None
-
- # Keys are the distribution IDs, values are LustreRPMs
- lustre_distributions = {}
- for dist_config in dist_configs:
- lustre_distribution_id = utils.config_value(dist_config,
- cstr.CSTR_LUSTRE_DISTRIBUTION_ID)
- if lustre_distribution_id is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_LUSTRE_DISTRIBUTION_ID, config_fpath)
- return None
-
- if lustre_distribution_id in lustre_distributions:
- log.cl_error("multiple distributions with ID [%s] is "
- "configured, please correct file [%s]",
- lustre_distribution_id, config_fpath)
- return None
-
- lustre_rpm_dir = utils.config_value(dist_config,
- cstr.CSTR_LUSTRE_RPM_DIR)
- if lustre_rpm_dir is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_LUSTRE_RPM_DIR, config_fpath)
- return None
-
- lustre_rpm_dir = lustre_rpm_dir.rstrip("/")
-
- e2fsprogs_rpm_dir = utils.config_value(dist_config,
- cstr.CSTR_E2FSPROGS_RPM_DIR)
- if e2fsprogs_rpm_dir is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_E2FSPROGS_RPM_DIR, config_fpath)
- return None
-
- e2fsprogs_rpm_dir = e2fsprogs_rpm_dir.rstrip("/")
-
- lustre_rpms = lustre.LustreRPMs(lustre_distribution_id,
- lustre_rpm_dir, e2fsprogs_rpm_dir)
- ret = lustre_rpms.lr_prepare(log)
- if ret:
- log.cl_error("failed to prepare Lustre RPMs")
- return None
-
- lustre_distributions[lustre_distribution_id] = lustre_rpms
-
- ssh_host_configs = utils.config_value(config, cstr.CSTR_SSH_HOSTS)
- if ssh_host_configs is None:
- log.cl_error("can NOT find [%s] in the config file, "
- "please correct file [%s]",
- cstr.CSTR_SSH_HOSTS, config_fpath)
- return None
-
- hosts = {}
- for host_config in ssh_host_configs:
- host_id = utils.config_value(host_config,
- cstr.CSTR_HOST_ID)
- if host_id is None:
- log.cl_error("can NOT find [%s] in the config of a "
- "SSH host, please correct file [%s]",
- cstr.CSTR_HOST_ID, config_fpath)
- return None
-
- if host_id in hosts:
- log.cl_error("multiple SSH hosts with the same ID [%s], please "
- "correct file [%s]", host_id, config_fpath)
- return None
-
- lustre_distribution_id = utils.config_value(host_config,
- cstr.CSTR_LUSTRE_DISTRIBUTION_ID)
- if lustre_distribution_id is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_LUSTRE_DISTRIBUTION_ID, config_fpath)
- return None
-
- if lustre_distribution_id not in lustre_distributions:
- log.cl_error("no Lustre distributions with ID [%s] is "
- "configured, please correct file [%s]",
- lustre_distribution_id, config_fpath)
- return None
-
- lustre_distribution = lustre_distributions[lustre_distribution_id]
-
- hostname = utils.config_value(host_config, cstr.CSTR_HOSTNAME)
- if hostname is None:
- log.cl_error("can NOT find [%s] in the config of SSH host "
- "with ID [%s], please correct file [%s]",
- cstr.CSTR_HOSTNAME, host_id, config_fpath)
- return None
-
- ssh_identity_file = utils.config_value(host_config, cstr.CSTR_SSH_IDENTITY_FILE)
-
- host = lustre.LustreServerHost(hostname,
- lustre_rpms=lustre_distribution,
- identity_file=ssh_identity_file,
- host_id=host_id)
- hosts[host_id] = host
-
- lustre_configs = utils.config_value(config, cstr.CSTR_LUSTRES)
- if lustre_configs is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_LUSTRES, config_fpath)
- return None
-
- mgs_configs = utils.config_value(config, cstr.CSTR_MGS_LIST)
- if mgs_configs is None:
- log.cl_debug("no [%s] is configured", cstr.CSTR_MGS_LIST)
- mgs_configs = []
-
- server_hosts = {}
- mgs_dict = {}
- for mgs_config in mgs_configs:
- # Parse MGS configs
- mgs_id = utils.config_value(mgs_config, cstr.CSTR_MGS_ID)
- if mgs_id is None:
- log.cl_error("no [%s] is configured for a MGS, please correct "
- "file [%s]",
- cstr.CSTR_MGS_ID, config_fpath)
- return None
-
- if mgs_id in mgs_dict:
- log.cl_error("multiple configurations for MGS [%s], please "
- "correct file [%s]",
- mgs_id, config_fpath)
- return None
-
- backfstype = utils.config_value(mgs_config, cstr.CSTR_BACKFSTYPE)
- if backfstype is None:
- log.cl_debug("no [%s] is configured for MGS [%s], using [%s] as "
- "default value", cstr.CSTR_BACKFSTYPE, mgs_id,
- lustre.BACKFSTYPE_LDISKFS)
- backfstype = lustre.BACKFSTYPE_LDISKFS
-
- mgs = lustre.LustreMGS(log, mgs_id, backfstype)
- mgs_dict[mgs_id] = mgs
-
- instance_configs = utils.config_value(mgs_config, cstr.CSTR_INSTANCES)
- if instance_configs is None:
- log.cl_error("no [%s] is configured for MGS [%s], please correct "
- "file [%s]",
- cstr.CSTR_INSTANCES, mgs_id, config_fpath)
- return None
-
- for instance_config in instance_configs:
- host_id = utils.config_value(instance_config, cstr.CSTR_HOST_ID)
- if host_id is None:
- log.cl_error("no [%s] is configured for instance of MGS "
- "[%s], please correct file [%s]",
- cstr.CSTR_HOST_ID, mgs_id, config_fpath)
- return None
-
- if host_id not in hosts:
- log.cl_error("no host with [%s] is configured in hosts, "
- "please correct file [%s]",
- host_id, config_fpath)
- return None
-
- device = utils.config_value(instance_config, cstr.CSTR_DEVICE)
- if device is None:
- log.cl_error("no [%s] is configured for instance of "
- "MGS [%s], please correct file [%s]",
- cstr.CSTR_DEVICE, mgs_id, config_fpath)
- return None
-
- if backfstype == lustre.BACKFSTYPE_ZFS:
- if device.startswith("/"):
- log.cl_error("device [%s] with absolute path is "
- "configured for instance of MGS [%s] "
- "with ZFS type, please correct file [%s]",
- device, mgs_id, config_fpath)
- return None
- else:
- if not device.startswith("/"):
- log.cl_error("device [%s] with absolute path should be "
- "configured for instance of MGS [%s] with "
- "ldiskfs type, please correct file [%s]",
- cstr.CSTR_DEVICE, mgs_id, config_fpath)
- return None
-
- nid = utils.config_value(instance_config, cstr.CSTR_NID)
- if nid is None:
- log.cl_error("no [%s] is configured for instance of "
- "MGS [%s], please correct file [%s]",
- cstr.CSTR_NID, mgs_id, config_fpath)
- return None
-
- zpool_create = None
- if backfstype == lustre.BACKFSTYPE_ZFS:
- zpool_create = utils.config_value(instance_config,
- cstr.CSTR_ZPOOL_CREATE)
- if zpool_create is None:
- log.cl_error("no [%s] is configured for an instance of "
- "MGS [%s], please correct file [%s]",
- cstr.CSTR_ZPOOL_CREATE, mgs_id, config_fpath)
- return None
-
- lustre_host = hosts[host_id]
- if host_id not in server_hosts:
- server_hosts[host_id] = lustre_host
-
- mnt = "/mnt/mgs_%s" % (mgs_id)
- lustre.LustreMGSInstance(log, mgs, lustre_host, device, mnt,
- nid, add_to_host=True)
-
- lustres = {}
- for lustre_config in lustre_configs:
- # Parse general configs of Lustre file system
- fsname = utils.config_value(lustre_config, cstr.CSTR_FSNAME)
- if fsname is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_FSNAME, config_fpath)
- return None
-
- if fsname in lustres:
- log.cl_error("file system [%s] is configured for multiple times, "
- "please correct file [%s]",
- fsname, config_fpath)
- return None
-
- lustre_fs = lustre.LustreFilesystem(fsname)
- lustres[fsname] = lustre_fs
-
- mgs_configured = False
-
- # Parse MGS config
- mgs_id = utils.config_value(lustre_config, cstr.CSTR_MGS_ID)
- if mgs_id is not None:
- log.cl_debug("[%s] is configured for file system [%s]",
- cstr.CSTR_MGS_ID, fsname)
-
- if mgs_id not in mgs_dict:
- log.cl_error("no MGS with ID [%s] is configured, please "
- "correct file [%s]",
- mgs_id, config_fpath)
- return None
-
- mgs = mgs_dict[mgs_id]
-
- ret = mgs.lmgs_add_fs(log, lustre_fs)
- if ret:
- log.cl_error("failed to add file system [%s] to MGS [%s], "
- "please correct file [%s]",
- fsname, mgs_id, config_fpath)
- return None
-
- mgs_configured = True
-
- # Parse MDT configs
- mdt_configs = utils.config_value(lustre_config, cstr.CSTR_MDTS)
- if mdt_configs is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_MDTS, config_fpath)
- return None
-
- for mdt_config in mdt_configs:
- mdt_index = utils.config_value(mdt_config, cstr.CSTR_INDEX)
- if mdt_index is None:
- log.cl_error("no [%s] is configured for a MDT of file system "
- "[%s], please correct file [%s]",
- cstr.CSTR_INDEX, fsname, config_fpath)
- return None
-
- is_mgs = utils.config_value(mdt_config, cstr.CSTR_IS_MGS)
- if is_mgs is None:
- log.cl_error("no [%s] is configured for MDT with index [%s] "
- "of file system [%s], using default value [False]",
- cstr.CSTR_IS_MGS, mdt_index, fsname)
- is_mgs = False
-
- if is_mgs:
- if mgs_configured:
- log.cl_error("multiple MGS are configured for file "
- "system [%s], please correct file [%s]",
- fsname, config_fpath)
- return None
- mgs_configured = True
-
- backfstype = utils.config_value(mdt_config, cstr.CSTR_BACKFSTYPE)
- if backfstype is None:
- log.cl_debug("no [%s] is configured for MDT with index [%s] "
- "of file system [%s], using [%s] as the default "
- "value", cstr.CSTR_BACKFSTYPE, mdt_index, fsname,
- lustre.BACKFSTYPE_LDISKFS)
- backfstype = lustre.BACKFSTYPE_LDISKFS
-
- mdt = lustre.LustreMDT(log, lustre_fs, mdt_index, backfstype,
- is_mgs=is_mgs)
-
- instance_configs = utils.config_value(mdt_config, cstr.CSTR_INSTANCES)
- if instance_configs is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_INSTANCES, config_fpath)
- return None
-
- for instance_config in instance_configs:
- host_id = utils.config_value(instance_config, cstr.CSTR_HOST_ID)
- if host_id is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_HOST_ID, config_fpath)
- return None
-
- if host_id not in hosts:
- log.cl_error("no host with [%s] is configured in hosts, "
- "please correct file [%s]",
- host_id, config_fpath)
- return None
-
- device = utils.config_value(instance_config, cstr.CSTR_DEVICE)
- if device is None:
- log.cl_error("no [%s] is configured for an instance of "
- "MDT with index [%s] of file system [%s], "
- "please correct file [%s]",
- cstr.CSTR_DEVICE, mdt_index, fsname,
- config_fpath)
- return None
-
- if backfstype == lustre.BACKFSTYPE_ZFS:
- if device.startswith("/"):
- log.cl_error("device [%s] with absolute path is "
- "configured for an instance of MDT "
- "with index [%s] of file system [%s] "
- "with ZFS type, please correct file [%s]",
- device, mdt_index, fsname,
- config_fpath)
- return None
- else:
- if not device.startswith("/"):
- log.cl_error("device [%s] with absolute path is "
- "configured for an instance of MDT "
- "with index [%s] of file system [%s] "
- "with ldiskfs type, please correct file "
- "[%s]",
- cstr.CSTR_DEVICE, mdt_index, fsname,
- config_fpath)
- return None
-
- nid = utils.config_value(instance_config, cstr.CSTR_NID)
- if nid is None:
- log.cl_error("no [%s] is configured for an instance of "
- "MDT with index [%s] of file system [%s], "
- "please correct file [%s]",
- cstr.CSTR_NID, mdt_index, fsname,
- config_fpath)
- return None
-
- zpool_create = None
- if backfstype == lustre.BACKFSTYPE_ZFS:
- zpool_create = utils.config_value(instance_config,
- cstr.CSTR_ZPOOL_CREATE)
- if zpool_create is None:
- log.cl_error("no [%s] is configured for an instance of "
- "MDT with index [%s] of file system [%s], "
- "please correct file [%s]",
- cstr.CSTR_ZPOOL_CREATE, mdt_index, fsname,
- config_fpath)
- return None
-
- lustre_host = hosts[host_id]
- if host_id not in server_hosts:
- server_hosts[host_id] = lustre_host
-
- mnt = "/mnt/%s_mdt_%s" % (fsname, mdt_index)
- lustre.LustreMDTInstance(log, mdt, lustre_host, device, mnt,
- nid, add_to_host=True,
- zpool_create=zpool_create)
-
- if not mgs_configured:
- log.cl_error("None MGS is configured, please correct file [%s]",
- config_fpath)
- return None
-
- # Parse OST configs
- ost_configs = utils.config_value(lustre_config, cstr.CSTR_OSTS)
- if ost_configs is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_OSTS, config_fpath)
- return None
-
- for ost_config in ost_configs:
- ost_index = utils.config_value(ost_config, cstr.CSTR_INDEX)
- if ost_index is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_INDEX, config_fpath)
- return None
-
- backfstype = utils.config_value(ost_config, cstr.CSTR_BACKFSTYPE)
- if backfstype is None:
- log.cl_debug("no [%s] is configured for OST with index [%s] "
- "of file system [%s], using [%s] as default",
- cstr.CSTR_BACKFSTYPE, ost_index, fsname,
- lustre.BACKFSTYPE_LDISKFS)
- backfstype = lustre.BACKFSTYPE_LDISKFS
-
- ost = lustre.LustreOST(log, lustre_fs, ost_index, backfstype)
-
- instance_configs = utils.config_value(ost_config, cstr.CSTR_INSTANCES)
- if instance_configs is None:
- log.cl_error("no [%s] is configured for OST with index [%s] "
- "of file system [%s], please correct file [%s]",
- cstr.CSTR_INSTANCES, ost_index, fsname,
- config_fpath)
- return None
-
- for instance_config in instance_configs:
- host_id = utils.config_value(instance_config, cstr.CSTR_HOST_ID)
- if host_id is None:
- log.cl_error("no [%s] is configured for an instance of "
- "OST with index [%s] of file system [%s], "
- "please correct file [%s]",
- cstr.CSTR_HOST_ID, ost_index, fsname,
- config_fpath)
- return None
-
- if host_id not in hosts:
- log.cl_error("no host with ID [%s] is configured in hosts, "
- "please correct file [%s]",
- host_id, config_fpath)
- return None
-
- device = utils.config_value(instance_config, cstr.CSTR_DEVICE)
- if device is None:
- log.cl_error("no [%s] is configured for an instance of "
- "OST with index [%s] of file system [%s], "
- "please correct file [%s]",
- cstr.CSTR_DEVICE, ost_index, fsname,
- config_fpath)
- return None
-
- if backfstype == lustre.BACKFSTYPE_ZFS:
- if device.startswith("/"):
- log.cl_error("device [%s] with absolute path is "
- "configured for an instance of OST "
- "with index [%s] of file system [%s] "
- "with ZFS type, please correct file [%s]",
- device, ost_index, fsname,
- config_fpath)
- return None
- else:
- if not device.startswith("/"):
- log.cl_error("device [%s] with none-absolute path is "
- "configured for an instance of OST "
- "with index [%s] of file system [%s] "
- "with ldiskfs type, please correct file "
- "[%s]",
- cstr.CSTR_DEVICE, ost_index, fsname,
- config_fpath)
- return None
-
- nid = utils.config_value(instance_config, cstr.CSTR_NID)
- if nid is None:
- log.cl_error("no [%s] is configured for an instance of "
- "OST with index [%s] of file system [%s], "
- "please correct file [%s]",
- cstr.CSTR_NID, ost_index, fsname,
- config_fpath)
- return None
-
- zpool_create = None
- if backfstype == lustre.BACKFSTYPE_ZFS:
- zpool_create = utils.config_value(instance_config, cstr.CSTR_ZPOOL_CREATE)
- if zpool_create is None:
- log.cl_error("no [%s] is configured for an instance of "
- "OST with index [%s] of file system [%s], "
- "please correct file [%s]",
- cstr.CSTR_ZPOOL_CREATE, ost_index, fsname,
- config_fpath)
- return None
-
- lustre_host = hosts[host_id]
- if host_id not in server_hosts:
- server_hosts[host_id] = lustre_host
-
- mnt = "/mnt/%s_ost_%s" % (fsname, ost_index)
- lustre.LustreOSTInstance(log, ost, lustre_host, device, mnt,
- nid, add_to_host=True,
- zpool_create=zpool_create)
-
- # Parse client configs
- client_configs = utils.config_value(lustre_config,
- cstr.CSTR_CLIENTS)
- if client_configs is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_CLIENTS, config_fpath)
- return None
-
- for client_config in client_configs:
- host_id = utils.config_value(client_config, cstr.CSTR_HOST_ID)
- if host_id is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_HOST_ID, config_fpath)
- return None
-
- if host_id not in hosts:
- log.cl_error("no host with [%s] is configured in hosts, "
- "please correct file [%s]",
- host_id, config_fpath)
- return None
-
- lustre_host = hosts[host_id]
-
- mnt = utils.config_value(client_config, cstr.CSTR_MNT)
- if mnt is None:
- log.cl_error("no [%s] is configured, please correct file [%s]",
- cstr.CSTR_MNT, config_fpath)
- return None
-
- option_string = utils.config_value(client_config, cstr.CSTR_OPTIONS)
- if option_string is None:
- options = []
- else:
- options = option_string.split(",")
-
- lustre.LustreClient(log, lustre_fs, lustre_host, mnt,
- options=options, add_to_host=True)
-
- return ClownfishInstance(workspace, lazy_prepare, hosts, mgs_dict, lustres,
- high_availability)
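The parser above applies one pattern to every key: look the value up with utils.config_value(), log which key is missing from which file, and fail by returning None. Reduced to a standalone helper, the pattern looks roughly like this (a minimal sketch; the helper name required_value is hypothetical, and a dict-style config plus the cl_error logger interface used above are assumed):

# Hypothetical helper illustrating the look-up/log/fail pattern used
# throughout the parser above; not part of the tree.
def required_value(log, config, key, config_fpath):
    """Return config[key], or log an error and return None if missing."""
    value = config.get(key)
    if value is None:
        log.cl_error("no [%s] is configured, please correct file [%s]",
                     key, config_fpath)
    return value

Each utils.config_value()/None-check pair above is an inlined instance of this shape.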
+++ /dev/null
-"""
-Common library for clownfish
-"""
-CLOWNFISH_DEPENDENT_RPMS = ["rsync",
- "libyaml",
- "PyYAML",
- "python2-filelock",
- "python-dateutil"]
LIPE_INSTALL_LOG_DIR_BASENAME = "lipe_install"
LIPE_INSTALL_LOG_DIR = VAR_LOG_PATH + "/" + LIPE_INSTALL_LOG_DIR_BASENAME
LIPE_INSTALL_MNT_DIR = LIPE_INSTALL_LOG_DIR + "/mnt"
-
-CLOWNFISH_CONFIG_FNAME = "clownfish.conf"
-CLOWNFISH_CONFIG = "/etc/" + CLOWNFISH_CONFIG_FNAME
-CLOWNFISH_LOG_DIR = "/var/log/clownfish"
CSTR_CLIENTS = "clients"
CSTR_CLIENT_NAME = "client_name"
CSTR_CONFIG_FPATH = "config_fpath"
-CSTR_CLOWNFISH_CONFIG = "clownfish_config"
CSTR_CLUSTER = "cluster"
CSTR_COMMAND_OUTPUT = "command_output"
CSTR_DEVICE = "device"
command = ("rpm -ivh %s/lipe-pylustre-*.x86_64.rpm "
"%s/lipe-1.*.x86_64.rpm "
"%s/lipe-client-1.*.x86_64.rpm "
- "%s/lipe-server-1.*.x86_64.rpm "
- "%s/lipe-clownfish-1.*.x86_64.rpm --nodeps" %
- (package_dir, package_dir, package_dir, package_dir, package_dir))
+ "%s/lipe-server-1.*.x86_64.rpm "
+ " --nodeps" %
+ (package_dir, package_dir, package_dir, package_dir))
retval = host.sh_run(log, command)
if retval.cr_exit_status:
log.cl_error("failed to run command [%s] on host [%s], "
if BUILD_SERVER
bin_PROGRAMS += \
ext4_inode2path \
- lcreatemany \
ldumpstripe \
lfill \
lipe_scan \
lfill_SOURCES = cmd.c cmd.h debug.c debug.h lfill.c ldiskfs_read_ldd.c \
ldiskfs_read_ldd.h misc.c misc.h
-lcreatemany_SOURCES = lcreatemany.c debug.c debug.h
-
ldumpstripe_SOURCES = ldumpstripe.c lustre_ea.h lustre_ea.c debug.c debug.h
laudit_SOURCES = laudit.c
+++ /dev/null
-/*
- * Copyright (c) 2018, DDN Storage Corporation.
- */
-/*
- *
- * Tool for scanning the MDT and print list of matched files.
- *
- * Author: Li Xi <lixi@ddn.com>
- */
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <sys/time.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <unistd.h>
-#include <string.h>
-#include "debug.h"
-
-static void usage(char *prog)
-{
- printf("\nUsage: %s dir file_count second_limit\n",
- prog);
- printf("file_count = -1 means no limit for file count\n");
- printf("second_limit = -1 means no limit for time\n");
- exit(1);
-}
-
-double now(void)
-{
- struct timeval tv;
-
- gettimeofday(&tv, NULL);
- return (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
-}
-
-int main(int argc, char **argv)
-{
- long i;
- const char *dir;
- int seconds;
- long file_count;
- char filename[4096];
- int bytes;
- int rc;
- double time_diff;
- double time_start;
- double time_end;
- double time_now;
- double iops;
-
- if (argc != 4) {
- LERROR("%s, missing options\n", argv[0]);
- usage(argv[0]);
- }
-
- dir = argv[1];
-
- file_count = strtol(argv[2], NULL, 0);
- if (file_count != -1 && file_count <= 0) {
- LERROR("%s, invalid file_count, %l\n", argv[0], file_count);
- usage(argv[0]);
- }
-
- seconds = strtol(argv[3], NULL, 0);
- if (seconds != -1 && seconds <= 0) {
- LERROR("%s, invalid seconds, %d\n", argv[0], seconds);
- usage(argv[0]);
- }
-
- time_start = now();
- time_end = time_start + seconds;
- i = 0;
-
- while (1) {
- if (file_count != -1 && i >= file_count)
- break;
- i++;
-
- time_now = now();
- if (seconds != -1 && time_now >= time_end)
- break;
-
- bytes = snprintf(filename, sizeof(filename) - 1, "%s/%ld",
- dir, i);
- if (bytes >= sizeof(filename) - 1) {
- LERROR("file path is too long\n");
- break;
- }
-
-#if 0
- fd = open(filename, O_CREAT|O_RDWR, 0644);
- if (fd < 0) {
- LERROR("failed to create file [%s]: %s\n", filename,
- strerror(errno));
- return -1;
- }
- close(fd);
-#endif
- rc = mknod(filename, S_IFREG | 0444, 0);
- if (rc) {
- LERROR("mknod(%s) error: %s\n",
- filename, strerror(errno));
- rc = errno;
- break;
- }
-
- rc = unlink(filename);
- if (rc) {
- LERROR("unlink(%s) error: %s\n",
- filename, strerror(errno));
- rc = errno;
- break;
- }
- }
- time_end = now();
- time_diff = time_end - time_start;
- iops = (double)i / time_diff;
-
- printf("seconds: %.2f\n", time_diff);
- printf("files: %lu\n", i);
- printf("files per second: %.2f\n", iops);
- return 0;
-}
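For reference, the deleted lcreatemany tool benchmarked metadata rates by creating and unlinking empty files until a count or time limit was reached. A rough Python equivalent of that loop, as a sketch only (argument handling and names are illustrative, not part of the tree):

# Rough stand-in for the removed lcreatemany benchmark: create and
# unlink empty files in a directory and report files per second.
import os
import stat
import sys
import time

def create_unlink_rate(directory, file_count, second_limit):
    """Create/unlink empty files until either limit (-1 = no limit) is hit."""
    start = time.time()
    deadline = start + second_limit if second_limit != -1 else None
    count = 0
    while file_count == -1 or count < file_count:
        if deadline is not None and time.time() >= deadline:
            break
        count += 1
        path = os.path.join(directory, str(count))
        os.mknod(path, stat.S_IFREG | 0o444)  # same mknod() call as the C tool
        os.unlink(path)                       # remove it immediately
    elapsed = time.time() - start
    return count, elapsed, count / elapsed if elapsed else 0.0

if __name__ == "__main__":
    files, secs, iops = create_unlink_rate(sys.argv[1], int(sys.argv[2]),
                                           int(sys.argv[3]))
    print("seconds: %.2f" % secs)
    print("files: %d" % files)
    print("files per second: %.2f" % iops)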