├── openfas ├── diamond │ ├── collectors │ │ ├── IPCollector.conf │ │ ├── netappDisk.conf │ │ ├── AmavisCollector.conf │ │ ├── ApcupsdCollector.conf │ │ ├── AuroraCollector.conf │ │ ├── BindCollector.conf │ │ ├── CephCollector.conf │ │ ├── ChronydCollector.conf │ │ ├── DRBDCollector.conf │ │ ├── DarnerCollector.conf │ │ ├── DockerCollector.conf │ │ ├── ElbCollector.conf │ │ ├── ExampleCollector.conf │ │ ├── EximCollector.conf │ │ ├── FilesCollector.conf │ │ ├── FluentdCollector.conf │ │ ├── FlumeCollector.conf │ │ ├── HAProxyCollector.conf │ │ ├── HBaseCollector.conf │ │ ├── HadoopCollector.conf │ │ ├── HttpCollector.conf │ │ ├── HttpdCollector.conf │ │ ├── IPVSCollector.conf │ │ ├── JolokiaCollector.conf │ │ ├── KSMCollector.conf │ │ ├── KVMCollector.conf │ │ ├── KafkaCollector.conf │ │ ├── MesosCollector.conf │ │ ├── MongoDBCollector.conf │ │ ├── MonitCollector.conf │ │ ├── MySQLCollector.conf │ │ ├── NetAppCollector.conf │ │ ├── NetworkCollector.conf │ │ ├── NfsCollector.conf │ │ ├── NfsdCollector.conf │ │ ├── NginxCollector.conf │ │ ├── NtpCollector.conf │ │ ├── NtpdCollector.conf │ │ ├── NumaCollector.conf │ │ ├── OneWireCollector.conf │ │ ├── OpenVPNCollector.conf │ │ ├── OssecCollector.conf │ │ ├── PgQCollector.conf │ │ ├── PhpFpmCollector.conf │ │ ├── PingCollector.conf │ │ ├── PostfixCollector.conf │ │ ├── RedisCollector.conf │ │ ├── SNMPCollector.conf │ │ ├── SNMPRawCollector.conf │ │ ├── ScribeCollector.conf │ │ ├── SlonyCollector.conf │ │ ├── SmartCollector.conf │ │ ├── SolrCollector.conf │ │ ├── SqsCollector.conf │ │ ├── SquidCollector.conf │ │ ├── TCPCollector.conf │ │ ├── TokuMXCollector.conf │ │ ├── UDPCollector.conf │ │ ├── UPSCollector.conf │ │ ├── UnboundCollector.conf │ │ ├── UptimeCollector.conf │ │ ├── UsersCollector.conf │ │ ├── VMSDomsCollector.conf │ │ ├── VMSFSCollector.conf │ │ ├── VarnishCollector.conf │ │ ├── XENCollector.conf │ │ ├── XFSCollector.conf │ │ ├── netapp_inode.conf │ │ ├── BeanstalkdCollector.conf │ │ ├── 
CelerymonCollector.conf │ │ ├── CephStatsCollector.conf │ │ ├── ConnTrackCollector.conf │ │ ├── DropwizardCollector.conf │ │ ├── DseOpsCenterCollector.conf │ │ ├── EndecaDgraphCollector.conf │ │ ├── EntropyStatCollector.conf │ │ ├── FilestatCollector.conf │ │ ├── GridEngineCollector.conf │ │ ├── HTTPJSONCollector.conf │ │ ├── IODriveSNMPCollector.conf │ │ ├── IPMISensorCollector.conf │ │ ├── IcingaStatsCollector.conf │ │ ├── InterruptCollector.conf │ │ ├── JCollectdCollector.conf │ │ ├── JbossApiCollector.conf │ │ ├── LMSensorsCollector.conf │ │ ├── LibvirtKVMCollector.conf │ │ ├── MemcachedCollector.conf │ │ ├── MemoryCgroupCollector.conf │ │ ├── MemoryDockerCollector.conf │ │ ├── MesosCGroupCollector.conf │ │ ├── MountStatsCollector.conf │ │ ├── MySQLPerfCollector.conf │ │ ├── NagiosStatsCollector.conf │ │ ├── OpenLDAPCollector.conf │ │ ├── PassengerCollector.conf │ │ ├── PgbouncerCollector.conf │ │ ├── PortStatCollector.conf │ │ ├── PostgresqlCollector.conf │ │ ├── PostqueueCollector.conf │ │ ├── PowerDNSCollector.conf │ │ ├── ProcessStatCollector.conf │ │ ├── PuppetAgentCollector.conf │ │ ├── PuppetDBCollector.conf │ │ ├── RabbitMQCollector.conf │ │ ├── ResqueWebCollector.conf │ │ ├── S3BucketCollector.conf │ │ ├── SidekiqWebCollector.conf │ │ ├── SlabInfoCollector.conf │ │ ├── SockstatCollector.conf │ │ ├── SupervisordCollector.conf │ │ ├── TwemproxyCollector.conf │ │ ├── UserScriptsCollector.conf │ │ ├── ZookeeperCollector.conf │ │ ├── CassandraJolokiaCollector.conf │ │ ├── CpuAcctCgroupCollector.conf │ │ ├── DiskTemperatureCollector.conf │ │ ├── ElasticSearchCollector.conf │ │ ├── NagiosPerfdataCollector.conf │ │ ├── NetscalerSNMPCollector.conf │ │ ├── OpenstackSwiftCollector.conf │ │ ├── ProcessResourcesCollector.conf │ │ ├── PuppetDashboardCollector.conf │ │ ├── SNMPInterfaceCollector.conf │ │ ├── ServerTechPDUCollector.conf │ │ ├── SoftInterruptCollector.conf │ │ ├── WebsiteMonitorCollector.conf │ │ ├── EventstoreProjectionsCollector.conf │ │ ├── 
NetfilterAccountingCollector.conf │ │ ├── OpenstackSwiftReconCollector.conf │ │ ├── VMStatCollector.conf │ │ ├── LoadAverageCollector.conf │ │ ├── MemoryCollector.conf │ │ ├── CPUCollector.conf │ │ ├── DiskSpaceCollector.conf │ │ └── DiskUsageCollector.conf │ └── diamond.conf ├── install-openstack.py ├── install.sh ├── README.md ├── parser.py ├── install-lma.py ├── install-iscsitarget.py ├── configure.sh └── install-disklayout.py ├── .DS_Store ├── benchmark ├── attbench │ ├── hosts-sample.ini │ ├── local │ │ ├── cosbench │ │ │ └── run.sh │ │ ├── fio │ │ │ ├── README.md │ │ │ ├── run.sh │ │ │ ├── exec-fio.sh │ │ │ └── parse-and-report-influxdb.py │ │ ├── config-sample.yaml │ │ ├── start.py │ │ └── fio.json.sample │ ├── start-fio.yaml │ ├── install-prerequisites.yaml │ ├── group_vars │ │ └── hostgroup-sample │ └── README.md └── fio │ ├── test-fillup │ ├── fillup-1.fio │ ├── fillup-2.fio │ ├── fillup-3.fio │ ├── fillup-4.fio │ ├── fillup-99.fio │ └── start-test.sh │ ├── README.md │ ├── run.sh │ ├── exec_fio.sh │ └── parse_and_report_influxdb.sh ├── README.md ├── qosctrl ├── README.md ├── src │ └── cinder │ │ ├── common │ │ ├── ioarbresv.py │ │ └── ioarbparams.py │ │ ├── scheduler │ │ └── filters │ │ │ └── ioarb_filter.py │ │ ├── brick │ │ └── local_dev │ │ │ └── ioarbcontainer.py │ │ └── volume │ │ ├── targets │ │ └── ioarbtgt.py │ │ └── drivers │ │ ├── provlvm.py │ │ └── ioarblvm.py └── INSTALL.md └── LICENSE /openfas/diamond/collectors/IPCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/netappDisk.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/install-openstack.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | 4 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/AmavisCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ApcupsdCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/AuroraCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/BindCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/CephCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ChronydCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/DRBDCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/DarnerCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | 
-------------------------------------------------------------------------------- /openfas/diamond/collectors/DockerCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ElbCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ExampleCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/EximCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/FilesCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/FluentdCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/FlumeCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/HAProxyCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- 
/openfas/diamond/collectors/HBaseCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/HadoopCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/HttpCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/HttpdCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/IPVSCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/JolokiaCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/KSMCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/KVMCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/KafkaCollector.conf: 
-------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/MesosCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/MongoDBCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/MonitCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/MySQLCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NetAppCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NetworkCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NfsCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NfsdCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | 
-------------------------------------------------------------------------------- /openfas/diamond/collectors/NginxCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NtpCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NtpdCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NumaCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/OneWireCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/OpenVPNCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/OssecCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PgQCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- 
/openfas/diamond/collectors/PhpFpmCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PingCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PostfixCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/RedisCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SNMPCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SNMPRawCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ScribeCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SlonyCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SmartCollector.conf: 
-------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SolrCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SqsCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SquidCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/TCPCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/TokuMXCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/UDPCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/UPSCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/UnboundCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | 
-------------------------------------------------------------------------------- /openfas/diamond/collectors/UptimeCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/UsersCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/VMSDomsCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/VMSFSCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/VarnishCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/XENCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/XFSCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/netapp_inode.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /.DS_Store: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/att/ioarbiter/master/.DS_Store -------------------------------------------------------------------------------- /openfas/diamond/collectors/BeanstalkdCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/CelerymonCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/CephStatsCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ConnTrackCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/DropwizardCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/DseOpsCenterCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/EndecaDgraphCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/EntropyStatCollector.conf: 
-------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/FilestatCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/GridEngineCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/HTTPJSONCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/IODriveSNMPCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/IPMISensorCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/IcingaStatsCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/InterruptCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/JCollectdCollector.conf: -------------------------------------------------------------------------------- 1 
| enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/JbossApiCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/LMSensorsCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/LibvirtKVMCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/MemcachedCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/MemoryCgroupCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/MemoryDockerCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/MesosCGroupCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/MountStatsCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | 
-------------------------------------------------------------------------------- /openfas/diamond/collectors/MySQLPerfCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NagiosStatsCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/OpenLDAPCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PassengerCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PgbouncerCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PortStatCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PostgresqlCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PostqueueCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- 
/openfas/diamond/collectors/PowerDNSCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ProcessStatCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PuppetAgentCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PuppetDBCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/RabbitMQCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ResqueWebCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/S3BucketCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SidekiqWebCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SlabInfoCollector.conf: 
-------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SockstatCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SupervisordCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/TwemproxyCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/UserScriptsCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ZookeeperCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/CassandraJolokiaCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/CpuAcctCgroupCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/DiskTemperatureCollector.conf: 
-------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ElasticSearchCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NagiosPerfdataCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NetscalerSNMPCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/OpenstackSwiftCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ProcessResourcesCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/PuppetDashboardCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SNMPInterfaceCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/ServerTechPDUCollector.conf: 
-------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/SoftInterruptCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/WebsiteMonitorCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/EventstoreProjectionsCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/NetfilterAccountingCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/OpenstackSwiftReconCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = False 2 | -------------------------------------------------------------------------------- /benchmark/attbench/hosts-sample.ini: -------------------------------------------------------------------------------- 1 | [hostgroup] 2 | yourhostname1 3 | yourhostname2 4 | 5 | [hostgroup:vars] 6 | user=yourid 7 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/VMStatCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = True 2 | path_suffix = "" 3 | ttl_multiplier = 2 4 | measure_collector_time = False 5 | byte_unit = byte, 6 | 
-------------------------------------------------------------------------------- /openfas/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo ./install-disklayout.py 4 | sudo ./install-iscsitarget.py 5 | sudo ./install-lma.py 6 | sudo ./install-openstack.py 7 | 8 | -------------------------------------------------------------------------------- /benchmark/attbench/local/cosbench/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Author: Hee Won Lee 3 | # Created on: 12/12/2017 4 | 5 | echo "COSBench: to be implemented" 6 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/LoadAverageCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = True 2 | path_suffix = "" 3 | ttl_multiplier = 2 4 | measure_collector_time = False 5 | byte_unit = byte, 6 | simple = False 7 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/MemoryCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = True 2 | path_suffix = "" 3 | ttl_multiplier = 2 4 | measure_collector_time = False 5 | byte_unit = byte, 6 | force_psutil = False 7 | method = Threaded 8 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/CPUCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = True 2 | path_suffix = "" 3 | ttl_multiplier = 2 4 | measure_collector_time = False 5 | byte_unit = byte, 6 | simple = False 7 | normalize = False 8 | percore = True 9 | -------------------------------------------------------------------------------- /benchmark/fio/test-fillup/fillup-1.fio: -------------------------------------------------------------------------------- 1 
| [global] 2 | ioengine=libaio 3 | size=10TB 4 | rw=write 5 | bs=1MB 6 | direct=1 7 | iodepth=256 8 | ramp_time=10 9 | #runtime=100 10 | invalidate=1 11 | rwmixread=0 12 | 13 | [job-sda] 14 | filename=../ssd50tb/iotest-1.bin 15 | -------------------------------------------------------------------------------- /benchmark/fio/test-fillup/fillup-2.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | ioengine=libaio 3 | size=10TB 4 | rw=write 5 | bs=1MB 6 | direct=1 7 | iodepth=256 8 | ramp_time=10 9 | #runtime=100 10 | invalidate=1 11 | rwmixread=0 12 | 13 | [job-sda] 14 | filename=../ssd50tb/iotest-2.bin 15 | -------------------------------------------------------------------------------- /benchmark/fio/test-fillup/fillup-3.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | ioengine=libaio 3 | size=10TB 4 | rw=write 5 | bs=1MB 6 | direct=1 7 | iodepth=256 8 | ramp_time=10 9 | #runtime=100 10 | invalidate=1 11 | rwmixread=0 12 | 13 | [job-sda] 14 | filename=../ssd50tb/iotest-3.bin 15 | -------------------------------------------------------------------------------- /benchmark/fio/test-fillup/fillup-4.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | ioengine=libaio 3 | size=10TB 4 | rw=write 5 | bs=1MB 6 | direct=1 7 | iodepth=256 8 | ramp_time=10 9 | #runtime=100 10 | invalidate=1 11 | rwmixread=0 12 | 13 | [job-sda] 14 | filename=../ssd50tb/iotest-4.bin 15 | -------------------------------------------------------------------------------- /benchmark/fio/test-fillup/fillup-99.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | ioengine=libaio 3 | size=1400GB 4 | rw=write 5 | bs=1MB 6 | direct=1 7 | iodepth=256 8 | ramp_time=10 9 | #runtime=100 10 | invalidate=1 11 | rwmixread=0 12 | 13 | [job-sda] 14 | filename=../ssddata/iotest-1.bin 15 | 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # IOArbiter: Storage QoS and Workload Tests 2 | 3 | * [QoS control for cloud block storage](qosctrl/) 4 | 5 | * [OpenFAS](openfas/) 6 | 7 | * IO Benchmarks: [fio](benchmark/fio/), [cosbench](benchmark/cosbench/) 8 | 9 | 10 | ##### Contact: mra@research.att.com 11 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/DiskSpaceCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = True 2 | path_suffix = "" 3 | ttl_multiplier = 2 4 | measure_collector_time = False 5 | exclude_filters = ^/export/home, 6 | byte_unit = byte, 7 | filesystems = "ext2, ext3, ext4, xfs, glusterfs, nfs, nfs4, ntfs, hfs, fat32, fat16, btrfs" 8 | -------------------------------------------------------------------------------- /openfas/README.md: -------------------------------------------------------------------------------- 1 | # OpenFAS 2 | Authors: Moo-Ryong Ra and Hee Won Lee 3 | 4 | ### Goal 5 | Make a commodity SSD array a usable iSCSI block storage backend for an Openstack cloud in no time. 
6 | 7 | ### Project Status 8 | Work In Progress 9 | 10 | -------------------------------------------------------------------------------- /openfas/diamond/collectors/DiskUsageCollector.conf: -------------------------------------------------------------------------------- 1 | enabled = True 2 | path_suffix = "" 3 | ttl_multiplier = 2 4 | measure_collector_time = False 5 | byte_unit = byte, 6 | sector_size = 512 7 | send_zero = False 8 | devices = PhysicalDrive[0-9]+$|md[0-9]+$|sd[a-z]+[0-9]*$|x?vd[a-z]+[0-9]*$|disk[0-9]+$|dm\-[0-9]+$ 9 | -------------------------------------------------------------------------------- /benchmark/fio/test-fillup/start-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo time fio fillup-1.fio 4 | sudo time fio fillup-2.fio 5 | sudo time fio fillup-3.fio 6 | sudo time fio fillup-4.fio 7 | sudo time rm ../ssd50tb/iotest-1.bin 8 | sudo time rm ../ssd50tb/iotest-2.bin 9 | sudo time rm ../ssd50tb/iotest-3.bin 10 | sudo time rm ../ssd50tb/iotest-4.bin 11 | 12 | -------------------------------------------------------------------------------- /benchmark/attbench/local/fio/README.md: -------------------------------------------------------------------------------- 1 | # FIO Tester 2 | Authors: Moo-Ryong Ra and Hee Won Lee 3 | 4 | * [run.sh](run.sh): 5 | - Main script to run a test for various blocksizes, r/w ratio, iodepth, numjobs 6 | 7 | * [exec-fio.sh](exec-fio.sh): 8 | - Used by run.sh. 9 | - Generate fio configuration, run, and trigger the report script (below). 10 | 11 | * [parse-and-report-influxdb.py](parse-and-report-influxdb.py): 12 | - Used by `exec_fio.sh`. 13 | - Parse fio output logs and report to influxdb. 
14 | 15 | -------------------------------------------------------------------------------- /benchmark/attbench/start-fio.yaml: -------------------------------------------------------------------------------- 1 | # Author: Hee Won Lee 2 | # Created on 12/5/2017 3 | --- 4 | - hosts: hostgroup 5 | remote_user: "{{ user }}" 6 | become: no 7 | gather_facts: no 8 | tasks: 9 | - name: rsync 'local' directory 10 | synchronize: 11 | src=local 12 | dest=/tmp 13 | delete=no 14 | 15 | - name: start fio 16 | shell: ./start.py -c config-sample.yaml fio 17 | args: 18 | chdir: /tmp/local/ 19 | environment: "{{ env }}" 20 | async: 2592000 # 60*60*24*30 – 1 month 21 | register: start_fio 22 | 23 | - debug: var=start_fio 24 | -------------------------------------------------------------------------------- /benchmark/fio/README.md: -------------------------------------------------------------------------------- 1 | # FIO test scripts 2 | 3 | ### Prerequisites 4 | ``` 5 | sudo apt-get install fio bc 6 | ``` 7 | 8 | ### Run 9 | - [run.sh](run.sh): a main script to run a test. blocksizes, r/w ratio, iodepth 10 | ``` 11 | nohup ./run.sh & 12 | ``` 13 | 14 | - [exec\_fio.sh](exec_fio.sh): used by run.sh. generate fio configuration, run, and trigger the report script (below). 15 | 16 | - [parse\_and\_report\_influxdb.sh](parse_and_report_influxdb.sh): used by `exec_fio.sh`. parse fio output logs and report to influxdb (needs to be preconfigured). 17 | 18 | ###### Note: current scripts are to test local block devices only (remote version will be added soon). 
19 | 20 | -------------------------------------------------------------------------------- /benchmark/fio/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | bslist="4k 8k 16k 32k 64k" 4 | readratiolist="0 10 20 30 40 50 60 70 80 90 100" 5 | iodepthlist="1 8 16 32 64" 6 | 7 | # random test 8 | for bs in $bslist 9 | do 10 | for readratio in $readratiolist 11 | do 12 | for iodepth in $iodepthlist 13 | do 14 | ./exec_fio.sh randrw $bs $readratio $iodepth 15 | done 16 | done 17 | done 18 | 19 | bslist="128k 256k 512k 1024k 2048k 4096k" 20 | 21 | # sequential test 22 | for bs in $bslist 23 | do 24 | for readratio in $readratiolist 25 | do 26 | for iodepth in $iodepthlist 27 | do 28 | ./exec_fio.sh rw $bs $readratio $iodepth 29 | done 30 | done 31 | done 32 | 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /benchmark/attbench/install-prerequisites.yaml: -------------------------------------------------------------------------------- 1 | # Author: Hee Won Lee 2 | # Created on 12/5/2017 3 | --- 4 | - hosts: hostgroup 5 | remote_user: "{{ user }}" 6 | become: yes 7 | gather_facts: no 8 | tasks: 9 | - name: "apt-get update && apt-get install fio" 10 | apt: 11 | name: fio 12 | update_cache: yes 13 | 14 | - name: "apt-get install bc" 15 | apt: 16 | name: bc 17 | update_cache: no 18 | register: debugging 19 | 20 | - name: "apt-get install python-pip" 21 | apt: 22 | name: python-pip 23 | update_cache: no 24 | 25 | - name: "pip install pyaml" 26 | command: pip install pyaml 27 | -------------------------------------------------------------------------------- /qosctrl/README.md: -------------------------------------------------------------------------------- 1 | # QoS-aware Block Storage Management in the Cloud 2 | 3 | IOArbiter aims 4 | to provide QoS-aware block storage management in the cloud environment. 
5 | The system intends to provide the following features: 6 | 7 | * Dynamic creation of backend block storage: the infrastructure defers an underlying storage 8 | implementation at volume creation time, which can significantly improve overall resource utilization. 9 | 10 | * Per-tenant IOPS allocation: a tenant can expect minimum IOPS guarantee in a per-volume basis. 11 | 12 | * Improved space efficiency: inline deduplication/compression, thin-provisioning features will be collectively supported by other open source projects. 13 | 14 | If you are interested, please follow the [installation guide](INSTALL.md). 15 | 16 | ##### *only tested with Openstack kilo. 17 | -------------------------------------------------------------------------------- /benchmark/attbench/group_vars/hostgroup-sample: -------------------------------------------------------------------------------- 1 | --- 2 | env: 3 | INFLUXDB_IP: 10.1.2.3 # (influxdb) IP or domain name 4 | INFLUXDB_PORT: 8086 # (influxdb) Port 5 | INFLUXDB_DBNAME: yourdb # (influxdb) Database name (which should be created beforehand) 6 | INFLUXDB_USER: yourid # (influxdb) User ID 7 | INFLUXDB_PASSWORD: yourpw # (influxdb) Password 8 | 9 | FIO_RUNTIME: 300 # FIO runtime (unit: sec) 10 | FIO_DIRECT: 1 # 1: Direct IO, 2: Buffered IO 11 | FIO_SIZE: 400G # io size 12 | FIO_DEVLIST: "sdc" # block list 13 | FIO_RANDBSLIST: "4k 8k 32k" # random block size list (optional) 14 | FIO_SEQBSLIST: "128k 1024k 4096k" # sequential block size list (optional) 15 | FIO_READRATIOLIST: "0 30 50 70 100" # read/write ratio: e.g., 30 means read 30% and write 70% 16 | FIO_IODEPTHLIST: "1 8 16 32 64" # io depth list 17 | FIO_NUMJOBSLIST: "1 8 16 32" # number of jobs list 18 | -------------------------------------------------------------------------------- /benchmark/attbench/local/config-sample.yaml: -------------------------------------------------------------------------------- 1 | # Configuration 2 | influxdb: 3 | enabled: true 4 | env: 5 | ip: 
10.1.2.3 # (influxdb) IP or domain name 6 | port: 8086 # (influxdb) Port 7 | dbname: yourdb # (influxdb) Database name (which should be created beforehand) 8 | user: yourid # (influxdb) User ID 9 | password: yourpw # (influxdb) Password 10 | fio: 11 | enabled: true 12 | env: 13 | runtime: 300 # FIO runtime (unit: sec) 14 | direct: 1 # 1: Direct IO, 2: Buffered IO 15 | size: 400G # io size 16 | devlist: "sdb sdc" # block list 17 | randbslist: "4k 8k 32k" # random block size list (optional) 18 | seqbslist: "128k 1024k 4096k" # sequential block size list (optional) 19 | readratiolist: "0 30 50 70 100" # read/write ratio: e.g., 30 means read 30% and write 70% 20 | iodepthlist: "1 8 16 32 64" # io depth list 21 | numjobslist: "1 8 16 32" # number of jobs list 22 | cosbench: 23 | enabled: false 24 | env: 25 | var1: TBD 26 | var2: TBD 27 | -------------------------------------------------------------------------------- /benchmark/fio/exec_fio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | histdir="results/exp-01" 4 | #devlist=`lsblk | grep disk | grep -v sdat | awk '{print $1}' | grep -v nvme` 5 | devlist="nvme0n1 nvme1n1" 6 | rw=$1 7 | bs=$2 8 | readratio=$3 9 | iodepth=$4 10 | 11 | fname=mra-$rw-$bs-$readratio-$iodepth.fio 12 | logfname="$histdir/fio-summary.log" 13 | fiolog=log-$rw-$bs-$readratio-$iodepth.txt 14 | 15 | mkdir -p $histdir 16 | 17 | echo "[global]" > $fname 18 | echo "ioengine=libaio" >> $fname 19 | echo "size=400G" >> $fname 20 | echo "rw=$rw" >> $fname 21 | echo "bs=$bs" >> $fname 22 | echo "direct=1" >> $fname 23 | echo "iodepth=$iodepth" >> $fname 24 | echo "ramp_time=5" >> $fname 25 | echo "runtime=300" >> $fname 26 | echo "invalidate=1" >> $fname 27 | echo "rwmixread=$readratio" >> $fname 28 | echo "invalidate=1" >> $fname 29 | echo "" >> $fname 30 | 31 | for i in $devlist 32 | do 33 | echo "[job-$i]" >> $fname 34 | echo "filename=/dev/$i" >> $fname 35 | echo "" >> $fname 36 | done 37 | 
38 | sudo fio --output=$fiolog $fname 39 | sudo su -c 'echo 3 > /proc/sys/vm/drop_caches' 40 | 41 | echo "rw=$rw bs=$bs readratio=$readratio iodepth=$iodepth" >> $logfname 42 | ./parse_and_report_influxdb.sh $fiolog $bs >> $logfname 43 | 44 | sudo mv *.fio $histdir 45 | sudo mv *.txt $histdir 46 | 47 | -------------------------------------------------------------------------------- /openfas/parser.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | ''' 3 | Author: Hee Won Lee 4 | Created on 1/17/2017 5 | ''' 6 | 7 | import json, sys 8 | import yaml 9 | 10 | import subprocess 11 | 12 | def runBash(cmd): 13 | """Run a subprocess 14 | 15 | Args: 16 | cmd (str): command 17 | 18 | Returns: 19 | result (str): stdout + stderr 20 | 21 | """ 22 | proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, executable='/bin/bash') 23 | (stdout, stderr) = proc.communicate() 24 | return stdout + stderr 25 | 26 | ### Unit test ### 27 | if __name__ == '__main__': 28 | config = {} 29 | 30 | # User Input 31 | with open('userinput.conf') as data_file: 32 | data = json.load(data_file) 33 | config['userreq'] = data 34 | 35 | # Block Devices 36 | content = runBash("lsblk |grep disk |awk '{print $1, $2, $4}'") 37 | content = [line.split() for line in content.split('\n') if line.strip() != ''] 38 | blkdev = [] 39 | for x in content: 40 | dev = {} 41 | dev['name']= x[0] 42 | dev['maj:min'] = x[1] 43 | dev['size'] = x[2] 44 | blkdev.append(dev) 45 | config['blkdev'] = blkdev 46 | 47 | # Dump to yaml 48 | print yaml.safe_dump(config, default_flow_style=False) 49 | 50 | with open('config.yaml', 'w') as outfile: 51 | yaml.safe_dump(config, outfile, default_flow_style=False) 52 | 53 | -------------------------------------------------------------------------------- /benchmark/attbench/local/fio/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 
Author: Moo-Ryong Ra 3 | # Modified on: 12/7/2017 by Hee Won Lee 4 | 5 | #set -x 6 | set -e 7 | 8 | # Default variables 9 | FIO_RANDBSLIST=${FIO_RANDBSLIST:-""} # e.g., "4k 8k 32k" 10 | FIO_SEQBSLIST=${FIO_SEQBSLIST:-""} # e.g., "128k 1024k 4096k" 11 | FIO_READRATIOLIST=${FIO_READRATIOLIST:-"0"} # e.g., "0 30 50 70 100" 12 | FIO_IODEPTHLIST=${FIO_IODEPTHLIST:-"1"} # e.g., "1 8 16 32 64" 13 | FIO_NUMJOBSLIST=${FIO_NUMJOBSLIST:-"1"} # e.g., "1 8 16 32" 14 | 15 | # Prepare for result dirs 16 | n=0 17 | while ! mkdir ../res-$n 18 | do 19 | n=$((n+1)) 20 | done 21 | export res_dir=../res-$n 22 | echo "mkdir: create directory ‘$res_dir’" 23 | mkdir $res_dir/job 24 | mkdir $res_dir/out 25 | 26 | 27 | # random test 28 | if [ -n "$FIO_RANDBSLIST" ]; then 29 | for bs in $FIO_RANDBSLIST; do 30 | for readratio in $FIO_READRATIOLIST; do 31 | for iodepth in $FIO_IODEPTHLIST; do 32 | for numjobs in $FIO_NUMJOBSLIST; do 33 | ./exec-fio.sh randrw $bs $readratio $iodepth $numjobs 34 | done 35 | done 36 | done 37 | done 38 | fi 39 | 40 | # sequential test 41 | if [ -n "$FIO_SEQBSLIST" ]; then 42 | for bs in $FIO_SEQBSLIST; do 43 | for readratio in $FIO_READRATIOLIST; do 44 | for iodepth in $FIO_IODEPTHLIST; do 45 | for numjobs in $FIO_NUMJOBSLIST; do 46 | ./exec-fio.sh rw $bs $readratio $iodepth $numjobs 47 | done 48 | done 49 | done 50 | done 51 | fi 52 | -------------------------------------------------------------------------------- /openfas/install-lma.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | ''' 3 | Author: Hee Won Lee 4 | Created on 2/15/2017 5 | ''' 6 | import yaml 7 | import os, sys 8 | import subprocess 9 | 10 | # global variables. 
11 | user_config_file = 'config.yaml' 12 | 13 | def run_bash(cmd): 14 | """Run a subprocess 15 | 16 | Args: 17 | cmd (str): command 18 | 19 | Returns: 20 | result (str): stdout + stderr 21 | 22 | """ 23 | print cmd 24 | proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, executable='/bin/bash') 25 | (stdout, stderr) = proc.communicate() 26 | return stdout + stderr 27 | 28 | def install_diamond(): 29 | run_bash("pip install diamond") 30 | run_bash("mkdir -p /etc/diamond") 31 | run_bash("mkdir -p /var/log/diamond") 32 | run_bash("cp diamond/diamond.conf /etc/diamond/") 33 | run_bash("cp -r diamond/collectors /etc/diamond/") 34 | run_bash("diamond") 35 | 36 | def install_telegraf(): 37 | run_bash("wget https://dl.influxdata.com/telegraf/releases/telegraf_1.2.1_amd64.deb") 38 | run_bash("dpkg -i telegraf_1.2.1_amd64.deb") 39 | 40 | def parse_input(cfgfile): 41 | try: 42 | with open(cfgfile) as stream: 43 | # load config file 44 | config = yaml.load(stream) 45 | monitor = config['userreq']['monitor'] 46 | 47 | # install 48 | if monitor == 'DIAMOND': 49 | install_diamond() 50 | elif monitor == 'TELEGRAF': 51 | install_telegraf() 52 | 53 | except IOError as e: 54 | print e 55 | 56 | 57 | if __name__ == "__main__": 58 | parse_input(user_config_file) 59 | 60 | -------------------------------------------------------------------------------- /openfas/install-iscsitarget.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import os 4 | import sys 5 | import yaml 6 | 7 | # global variables. 8 | user_config_file = 'config.yaml' 9 | 10 | 11 | def configure_target(target): 12 | print "[TBD] * make some configuration changes if necessary." 
13 | 14 | 15 | def install_packages(target): 16 | # 17 | # Options: STGT|LIO|SCST|IET|SKIP 18 | # Cinder: http://docs.openstack.org/kilo/config-reference/content/section_volume-misc.html 19 | # Comparison: http://scst.sourceforge.net/comparison.html 20 | # 21 | print "* install packages for " + target 22 | 23 | if target == 'STGT': 24 | os.system('sudo apt install tgt') 25 | elif target == 'LIO': 26 | pass 27 | elif target == 'SCST': 28 | # home: http://scst.sourceforge.net/ 29 | # ubuntu: https://launchpad.net/~ast/+archive/ubuntu/scst2 30 | print '! not yet supported' 31 | pass 32 | elif target == 'IET': 33 | os.system('sudo apt install iscsitarget') 34 | pass 35 | elif target == 'SKIP': 36 | print '* skipping iscsi target installation' 37 | else: 38 | print '! unknown configuration for iscsi target: [%s]' % target 39 | 40 | 41 | def parse_input(cfgfile): 42 | try: 43 | with open(cfgfile) as stream: 44 | # load config file 45 | config = yaml.load(stream) 46 | target = config['userreq']['target'] 47 | 48 | # install & configure 49 | install_packages(target) 50 | configure_target(target) 51 | 52 | except IOError as e: 53 | print e 54 | 55 | 56 | if __name__ == "__main__": 57 | parse_input(user_config_file) 58 | -------------------------------------------------------------------------------- /benchmark/attbench/local/fio/exec-fio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Author: Moo-Ryong Ra 3 | # Modified on: 12/7/2017 by Hee Won Lee 4 | 5 | #set -x 6 | set -e 7 | 8 | rw=$1 9 | bs=$2 10 | readratio=$3 11 | iodepth=$4 12 | numjobs=$5 13 | 14 | # Default variables 15 | FIO_DEVLIST=${FIO_DEVLIST:-"sdf sdg"} 16 | FIO_DIRECT=${FIO_DIRECT:-"1"} 17 | FIO_SIZE=${FIO_SIZE:-"400G"} 18 | FIO_RUNTIME=${FIO_RUNTIME:-"60"} 19 | 20 | # Prepare for result files 21 | jobfile="$res_dir/job/$rw-$bs-$readratio-$iodepth-$numjobs.fio" 22 | outfile="$res_dir/out/$rw-$bs-$readratio-$iodepth-$numjobs.json" 23 | 
logfile="$res_dir/fio-summary.log" 24 | 25 | # Create a fio job file 26 | echo "[global]" > $jobfile 27 | echo "ioengine=libaio" >> $jobfile 28 | echo "direct=$FIO_DIRECT" >> $jobfile 29 | echo "size=$FIO_SIZE" >> $jobfile 30 | echo "ramp_time=5" >> $jobfile 31 | echo "runtime=$FIO_RUNTIME" >> $jobfile 32 | echo "invalidate=1" >> $jobfile 33 | echo "rw=$rw" >> $jobfile 34 | echo "bs=$bs" >> $jobfile 35 | echo "rwmixread=$readratio" >> $jobfile 36 | echo "iodepth=$iodepth" >> $jobfile 37 | echo "" >> $jobfile 38 | 39 | for i in $FIO_DEVLIST; do 40 | for j in $(seq 1 $numjobs); do 41 | echo "[$i]" >> $jobfile 42 | echo "filename=/dev/$i" >> $jobfile 43 | echo "" >> $jobfile 44 | done 45 | done 46 | 47 | # Run fio 48 | sudo fio --output-format=json --output=$outfile $jobfile 49 | 50 | # Log current setup 51 | printf "\nFio completed: " 52 | echo "rw=$rw bs=$bs readratio=$readratio iodepth=$iodepth numjobs=$numjobs" #| tee -a $logfile 53 | 54 | # Drop caches 55 | if [ $FIO_DIRECT == '1' ]; then 56 | echo "Drop caches!" 
57 | sudo su -c 'echo 3 > /proc/sys/vm/drop_caches' 58 | fi 59 | 60 | # Translate bs into a number 61 | # e.g., 4k or 4K -> 4, 256b or 256B -> 0.256 62 | str=$2 63 | i=$((${#str}-1)) 64 | unit="${str:$i:1}" 65 | bs=$(echo $2 | sed -e "s/[KkBb]$//") 66 | if [ "$unit" = "B" ] || [ "$unit" = "b" ]; then 67 | parsed=$(echo "scale=3; $bs/1000" | bc) 68 | bs=`echo "0"$parsed` 69 | fi 70 | 71 | echo "Parse fio output and send it to InfluxDB server:" 72 | ./parse-and-report-influxdb.py $outfile $rw $bs $readratio $iodepth $numjobs | tee -a $logfile 73 | echo '' >> $logfile 74 | -------------------------------------------------------------------------------- /benchmark/attbench/README.md: -------------------------------------------------------------------------------- 1 | # ATTBench 2 | Authors: Hee Won Lee and Moo-Ryong Ra 3 | Created on: 12/1/2017 4 | 5 | ### Prerequisites 6 | Install *InfluxDB* (and *Grafana*) in a monitoring server, and *Telegraf* in host machines where you want to collect metrics. 7 | For details, refer to 8 | 9 | ## Local test 10 | 11 | ### Dependencies 12 | Install fio, bc and python's yaml module in a host (or container) where you run ATTBench. 13 | ``` 14 | sudo apt-get install fio bc 15 | sudo apt-get install python-pip 16 | sudo pip install pyaml 17 | ``` 18 | 19 | ### Configure 20 | 1. Go to directory `local`. 21 | 22 | 2. Create your own config file: 23 | ``` 24 | cp config-sample.yaml yourconfig.yaml 25 | ``` 26 | 27 | 3. Edit yourconfig.yaml for your environment. 28 | For details, refer to [config-sample.yaml](local/config-sample.yaml). 29 | 30 | ### Run 31 | ``` 32 | ./start.py -c yourconfig.yaml 33 | 34 | # Example: 35 | ./start.py fio 36 | ``` 37 | * Note: ATTBench currently supports Fio and plans to support COSBench. 38 | 39 | 40 | ## Distributed test 41 | You can concurrently run ATTBench on mutiple hosts. 42 | 43 | ### Dependencies 44 | You require *Ansible*. 
45 | ``` 46 | sudo apt install ansible 47 | ``` 48 | 49 | ### Configure 50 | 1. Set up an Ansible inventory: 51 | - [option 1] Edit /etc/ansible/hosts. 52 | - [option 2] Create your own inventory file (e.g., `yourhosts.ini`) as follows. 53 | For details, refer to [hosts-sample.ini](hosts-sample.ini). 54 | 55 | 2. Configure InfluxDB and Fio variables in `group_vars/hostgroup`. 56 | ``` 57 | cd group_vars; cp hostgroup-sample hostgroup 58 | ``` 59 | For details, refer to [group_vars/hostgroup-sample](group_vars/hostgroup-sample). 60 | 61 | ### Install 62 | To install dependencies (fio, bc, pyaml) in your group of hosts, run: 63 | ``` 64 | # For /etc/ansible/hosts: 65 | ansible-playbook install-prerequisites.yaml 66 | 67 | # For your own inventory file (e.g., yourhosts.ini) 68 | ansible-playbook -i yourhosts.ini install-prerequisites.yaml 69 | ``` 70 | 71 | ### Run 72 | ``` 73 | # For /etc/ansible/hosts: 74 | ansible-playbook start-fio.yaml 75 | 76 | # For your own inventory file (e.g., yourhosts.ini) 77 | ansible-playbook -i yourhosts.ini start-fio.yaml 78 | ``` 79 | -------------------------------------------------------------------------------- /qosctrl/src/cinder/common/ioarbresv.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2015 AT&T Labs Research 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
14 | # 15 | # Author: Moo-Ryong Ra, mra@research.att.com 16 | 17 | """Tracking IOArbiter reservation""" 18 | 19 | import ConfigParser 20 | 21 | from oslo_log import log as logging 22 | 23 | DEFAULT_RESV_DIR = '/var/lib/cinder/ioarb-resv/' 24 | 25 | LOG = logging.getLogger(__name__) 26 | 27 | # This function should be in the common library. 28 | # But, in order not to touch openstack distribution, 29 | # I will keep this function locally. 30 | def _read_info(fpath): 31 | """Read .ini format file.""" 32 | config = ConfigParser.ConfigParser() 33 | config.read(fpath) 34 | return config 35 | 36 | def get_resv_filepath(blkdev): 37 | return DEFAULT_RESV_DIR + 'resv-' + blkdev.split('/')[-1] 38 | 39 | def get_resv_info(path): 40 | """Get current reservation information of the deployed volumes.""" 41 | 42 | config = _read_info(path) 43 | 44 | data = {} 45 | sections = config.sections() 46 | for sec in sections: 47 | data[sec] = config.items(sec) 48 | 49 | return data 50 | 51 | def add_resv_info(fpath, key, data): 52 | """New reservation info.""" 53 | 54 | config = _read_info(fpath) 55 | 56 | if not config.has_section(key): 57 | config.add_section(key) 58 | 59 | for opt in data: 60 | config.set(key, opt, data[opt]) 61 | 62 | # save it 63 | with open(fpath, 'wb') as configfile: 64 | config.write(configfile) 65 | 66 | 67 | def delete_resv_info(fpath, key): 68 | """Delete reservation info.""" 69 | 70 | config = _read_info(fpath) 71 | 72 | if not config.remove_section(key): 73 | LOG.debug('[MRA] section does not exist: %s' % key) 74 | 75 | # save it to the designated location. 
76 | with open(fpath, 'wb') as configfile: 77 | config.write(configfile) 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /openfas/configure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright (c) 2017 AT&T Labs Research 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | # 17 | # Author: Moo-Ryong Ra (mra@research.att.com) 18 | # History: 2017-01-13 created. 19 | # 20 | # Abstract: This script will run on any Linux-based commodity hardware. 21 | # It will collect necessary configuration details from users 22 | # and pass it to yaml parser. 23 | # 24 | 25 | # Dependency check 26 | sudo apt-get install -y python-pip 27 | sudo apt-get install -y python-yaml 28 | 29 | # --------- Questionaire ---------- 30 | yamlparser="./parser.py" 31 | outfile="userinput.conf" 32 | 33 | declare -a qs 34 | declare -a ans 35 | declare -a var 36 | declare -a inputs 37 | 38 | i=0 39 | qs[$i]='* What level of redundancy do you need?' 40 | ans[$i]='[JBOD|RAID5|RAID6|SKIP]' 41 | var[$i]='redundancy' 42 | inputs[$i]='RAID6' # default 43 | 44 | i=$((i=i+1)) 45 | qs[$i]='* Choose an iSCSI target software.' 46 | ans[$i]='[STGT|LIO|SCST|IET|SKIP]' 47 | var[$i]='target' 48 | inputs[$i]='STGT' # default 49 | 50 | i=$((i=i+1)) 51 | qs[$i]='* Do you want to integrate this machine with Openstack (Cinder service)?' 
52 | ans[$i]='[Y|N]' 53 | var[$i]='cinder' 54 | inputs[$i]='Y' # default 55 | 56 | i=$((i=i+1)) 57 | qs[$i]='* Do you need a data collector for your monitoring stack?' 58 | ans[$i]='[NOPE|TELEGRAF|DIAMOND]' 59 | var[$i]='monitor' 60 | inputs[$i]='TELEGRAF' # default 61 | 62 | args='{' 63 | 64 | for idx in `seq 0 $i`; 65 | do 66 | echo "${qs[$idx]} ${ans[$idx]}, Default: [${inputs[$idx]}]" 67 | while read -r -p "> " line 68 | do 69 | line=`echo $line | awk '{print toupper($0)}'` 70 | if [[ $line == '' ]]; then 71 | line=${inputs[$idx]} 72 | fi 73 | 74 | if [[ ${ans[$idx]} == *"$line"* ]]; then 75 | break 76 | fi 77 | echo "! wrong input. please choose among the valid inputs: ${ans[$idx]}" 78 | done 79 | #echo "Okay, ${var[$idx]}=$line" 80 | if [ "$args" = "{" ] 81 | then 82 | args="$args \"${var[$idx]}\": \"$line\"" 83 | else 84 | args="$args, \"${var[$idx]}\": \"$line\"" 85 | fi 86 | done 87 | args="$args }" 88 | 89 | echo $args > $outfile 90 | 91 | $yamlparser 92 | 93 | 94 | -------------------------------------------------------------------------------- /qosctrl/INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation Guide 2 | 3 | This code is tested under OpenStack Kilo version and assume that there exists a valid Cinder installation. 4 | You need to install IOArbiter components onto both a controller node where a cinder scheduler instance is running and 5 | a storage node where you want to deploy IOArbiter-capable cinder service. 6 | 7 | 8 | ### OpenStack Controller nodes 9 | 10 | * Copy source files in src/cinder/ directory to $[cinder-root]/ 11 | 12 | typical $cinder-root = /usr/lib/python2.7/dist-packages/cinder/ 13 | 14 | * Install IOArbiter scheduler filter: add an entry below to cinder-2015.1.1.egg-info/entry_points.txt under [cinder.scheduler.filters] section. 
15 | 16 | IOArbiterFilter = cinder.scheduler.filters.ioarb_filter:IOArbiterFilter 17 | 18 | 19 | ### OpenStack Storage nodes 20 | 21 | * Copy source files in src/cinder/ directory to $[cinder-root]/ 22 | 23 | typical $cinder-root = /usr/lib/python2.7/dist-packages/cinder/ 24 | 25 | * Install mdadm package. 26 | 27 | sudo apt-get install mdadm 28 | 29 | * Install and configure docker. 30 | 31 | Current version is tested under docker version 1.8.2 32 | [TBD - docker installation & image configuration.] 33 | 34 | * Allowing IOArbiter source to use privileged operations: add two lines below to /etc/cinder/rootwrap.d/volume.filters 35 | 36 | mdadm: CommandFilter, mdadm, root 37 | docker: CommandFilter, docker, root 38 | 39 | * Create two directories. Set directory permissions as cinder:cinder. 40 | 41 | /var/lib/cinder/ioarb-container/ 42 | /var/lib/cinder/ioarb-resv/ 43 | 44 | * Give permission to the cinder user to run docker commands. 45 | 46 | sudo usermod -aG docker cinder 47 | 48 | ### Volume Type Configuration (Controller node) 49 | 50 | * Create volume types with ioarb_sttype = “ioarbiter”. 51 | 52 | cinder type-create $vtype 53 | cinder type-key $vtype set ioarb_sttype="ioarbiter" 54 | 55 | * Set qos-specs fields. 56 | 57 | cinder qos-create $vtype ioarb_sttype="ioarb_manual" raidconf=raid6 ndisk=4 miniops=100 maxiops=100 iosize=4096 medium=hdd 58 | 59 | * Available options
60 | 61 | > sttype = “[ioarb-demo-platinum|ioarb-demo-gold|ioarb-demo-silver|ioarb-demo-bronze|manual]”
62 | > raidconf = [jbod|raid0|raid1|raid5|raid6]
63 | > maxiops
64 | > miniops
65 | > iosize = 4096, etc.
66 | > medium = “ssd|hdd|any”
67 | > ndisk
68 | 69 | 70 | ### Test 71 | 72 | * Create a volume with the volume type you created for IOArbiter service. 73 | If you have a Dashboard installed on your cluster, it might be easier to just use the web interface. 74 | -------------------------------------------------------------------------------- /openfas/install-disklayout.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | ''' 3 | Author: Hee Won Lee 4 | Created on 1/25/2017 5 | ''' 6 | 7 | # sudo python -c 'import os,sys; os.open("/dev/sda", os.O_EXCL)' 8 | # lsblk | grep disk | awk '{print $4}' | sort -u 9 | # mdadm --create --verbose /dev/md0 --level=5 --raid-devices=3 /dev/sdb1 /dev/sdc1 /dev/sdd1 --spare-devices=1 /dev/sde1 10 | 11 | import yaml 12 | import os, sys 13 | import subprocess 14 | 15 | def run_bash(cmd): 16 | """Run a subprocess 17 | 18 | Args: 19 | cmd (str): command 20 | 21 | Returns: 22 | result (str): stdout + stderr 23 | 24 | """ 25 | proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, executable='/bin/bash') 26 | (stdout, stderr) = proc.communicate() 27 | return stdout + stderr 28 | 29 | def get_avail_devs(blkdev_list): 30 | dev_avail = [] 31 | for blkdev in blkdev_list: 32 | try: 33 | os.open('/dev/'+blkdev['name'], os.O_EXCL) 34 | dev_avail.append(('/dev/' + blkdev['name'], 35 | blkdev['size'])) 36 | except OSError as e: 37 | pass # This means that when a device is in use, exclude it. 38 | 39 | return dev_avail 40 | 41 | def get_devs_by_size(dev_avail): 42 | # get device sizes that are unique. 
43 | sizes = set() 44 | for item in dev_avail: 45 | sizes.add(item[1]) 46 | 47 | diskarray = {} 48 | for sz in sizes: 49 | dev_str = "" 50 | for item in dev_avail: 51 | if item[1] == sz: 52 | dev_str = dev_str + item[0] + " " 53 | diskarray[sz]=dev_str.rstrip() 54 | return diskarray 55 | 56 | 57 | 58 | if __name__ == "__main__": 59 | try: 60 | with open('config.yaml') as stream: 61 | # Load config file 62 | config = yaml.load(stream) 63 | blkdev_list = config['blkdev'] 64 | 65 | # Get available devices; i.e., exclude devices in use 66 | dev_avail = get_avail_devs(blkdev_list) 67 | 68 | # Get a dictionary: key -> size, value -> concatenation of devices 69 | diskarray = get_devs_by_size(dev_avail) 70 | 71 | # Create software raid 72 | redundancy = config['userreq']['redundancy'] 73 | if redundancy == "RAID5" or redundancy == "RAID6": 74 | if redundancy == "RAID5": 75 | level = 5 76 | elif redundancy == "RAID6": 77 | level = 6 78 | md_idx = 0 79 | for key in diskarray: 80 | cmd = "mdadm --create --force --verbose /dev/md" + str(md_idx) 81 | cmd = cmd + " --level=" + str(level) 82 | cmd = cmd + " --raid-devices=" + str(len(diskarray[key].split())) 83 | cmd = cmd + " " + diskarray[key] 84 | print cmd 85 | # run_bash(cmd) 86 | md_idx = md_idx + 1 87 | 88 | # For the case that config.yaml does not exist. 89 | except IOError as e: 90 | print e 91 | -------------------------------------------------------------------------------- /benchmark/fio/parse_and_report_influxdb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -z $1 || ! -s $1 ]]; then 4 | echo "! 
no such file exists: $1"
  # exit non-zero so callers can detect the missing-input failure
  # (previously a bare `exit`, which reported success)
  exit 1
fi

str=$2
i=$((${#str}-1))
unit="${str:$i:1}"
# strip a trailing K/k/B/b so bs is purely numeric (bytes converted below)
bs=`echo $2 | sed -e "s/[KkBb]$//"`
if [ "$unit" = "B" ] || [ "$unit" = "b" ]
then
  # plain bytes -> KB, re-adding the leading zero that bc drops (".xxx")
  parsed=`echo "scale=3; $bs/1000" | bc`
  bs=`echo "0"$parsed`
fi

# parse iops: sum the per-job "iops=" fields from the fio text output
iotype="read"
readiops=`grep iops "$1" | grep $iotype | awk -F, '{print $3}' | awk -F= '{print $2}'`
if [ -z "$readiops" ]
then
  readiops=0
else
  newiops=0
  for riops in $readiops
  do
    newiops=`echo $newiops + $riops | bc`
  done
  readiops=$newiops
fi

iotype="write"
writeiops=`grep iops "$1" | grep $iotype | awk -F, '{print $3}' | awk -F= '{print $2}'`
if [ -z "$writeiops" ]
then
  writeiops=0
else
  newiops=0
  for wiops in $writeiops
  do
    newiops=`echo $newiops + $wiops | bc`
  done
  writeiops=$newiops
fi

totaliops=$((readiops+writeiops))


# parse latency: average the per-job "lat" lines, normalizing msec -> usec
latunit=`grep lat "$1" | grep -v "%" | grep -v percentile | grep -v slat | grep -v clat | awk -F\( '{print $2}' | awk -F\) '{print $1}'`
arrlatunit=($latunit)
latnums=`grep lat "$1" | grep -v "%" | grep -v percentile | grep -v slat | grep -v clat | awk -F, '{print $3}' | awk -F= '{print $2}'`
cnts=`grep lat "$1" | grep -v "%" | grep -v percentile | grep -v slat | grep -v clat | awk -F, '{print $3}' | awk -F= '{print $2}' | wc -l`

newlat=0
itr=0
for lat in $latnums
do
  unit=${arrlatunit[$itr]}
  if [ $unit = 'usec' ]; then
    newlat=`echo $newlat + $lat | bc`
  else
    newlat=`echo "$newlat + $lat * 1000" | bc`
  fi
  itr=$((itr+1))
done

avglat=`echo "scale=3; $newlat / $cnts" | bc`

echo r_iops: $readiops, w_iops: $writeiops, t_iops: $totaliops, avglat: $avglat

# report to mra-mon
hostname=`hostname` 78 | mramon="135.197.227.51" 79 | 80 | ## graphite 81 | #echo "servers.$hostname.fio.read_iops $readiops `date +%s`" | nc -q0 $mramon 2003 82 | #echo "servers.$hostname.fio.write_iops $writeiops `date +%s`" | nc -q0 $mramon 2003 83 | #echo "servers.$hostname.fio.iops $totaliops `date +%s`" | nc -q0 $mramon 2003 84 | #echo "servers.$hostname.fio.avglat $avglat `date +%s`" | nc -q0 $mramon 2003 85 | #echo "servers.$hostname.fio.bs $bs `date +%s`" | nc -q0 $mramon 2003 86 | 87 | # influx 88 | dbname="dss7k" 89 | #echo "curl -i -XPOST \"http://$mramon:8086/write?db=$dbname\" --data-binary \"fio,host=$hostname readiops=$readiops,writeiops=$writeiops,totaliops=$totaliops,avglat=$avglat,bs=$bs\"" >> mra-influxdb.log 90 | curl -i -XPOST "http://$mramon:8086/write?db=$dbname" --data-binary "fio,host=$hostname readiops=$readiops,writeiops=$writeiops,totaliops=$totaliops,avglat=$avglat,bs=$bs" 91 | 92 | -------------------------------------------------------------------------------- /benchmark/attbench/local/fio/parse-and-report-influxdb.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # Author: Hee Won Lee 3 | # Created on: 12/6/2017 4 | 5 | import sys, os, socket, subprocess, json 6 | from pprint import pprint 7 | 8 | filename = sys.argv[1] 9 | rw = sys.argv[2] 10 | bs = sys.argv[3] 11 | readratio = sys.argv[4] 12 | iodepth = sys.argv[5] 13 | numjobs = sys.argv[6] 14 | 15 | # Default variables 16 | ip = os.getenv('INFLUXDB_IP', '10.1.2.3') 17 | port = os.getenv('INFLUXDB_PORT', '8086') 18 | dbname = os.getenv('INFLUXDB_DBNAME', 'telegraf') 19 | user = os.getenv('INFLUXDB_USER', 'influx') 20 | password = os.getenv('INFLUXDB_PASSWORD', 'influx_pw') 21 | 22 | def run_bash(cmd): 23 | proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, executable='/bin/bash') 24 | (stdout, stderr) = proc.communicate() 25 | return stdout + stderr 26 | 27 | # Load a json file 28 | 
# Parse the fio JSON output and aggregate per-job stats.
# NOTE(review): assumes the file was produced with fio's JSON output format
# and contains at least one entry in 'jobs' -- confirm with the caller.
with open(filename) as data_file:
    fio_output = json.load(data_file)

#pprint(fio_output) # for debugging

# Calculate total iops/lat/bw
# IOPS and bandwidth are summed across jobs; latency is the arithmetic
# mean of the per-job mean latencies.
total_read_iops = 0
total_write_iops = 0
avg_read_lat = 0
avg_write_lat = 0
total_read_bw = 0
total_write_bw = 0
for job in fio_output['jobs']:
    total_read_iops = total_read_iops + job['read']['iops']
    total_write_iops = total_write_iops + job['write']['iops']
    total_read_bw = total_read_bw + job['read']['bw']
    total_write_bw = total_write_bw + job['write']['bw']
    avg_read_lat = avg_read_lat + job['read']['lat']['mean']
    avg_write_lat = avg_write_lat + job['write']['lat']['mean']
avg_read_lat = avg_read_lat / len(fio_output['jobs'])
avg_write_lat = avg_write_lat / len(fio_output['jobs'])

total_iops = total_read_iops + total_write_iops
total_bw = total_read_bw + total_write_bw
#total_lat = (avg_read_lat + avg_write_lat) / 2
# note: total_lat seems not meaningful; for read only (avg_write_lat = 0),
#       total_lat = avg_read_lat / 2, which is incorrect!

# Add a query for total iops/bw/lat
# (InfluxDB line protocol: measurement,tag=v,... field=v,field=v ...)
query = ''
e = ('fio,host=' + socket.gethostname() + ',rw=' + rw +
     ' bs=' + str(bs) + ',readratio=' + str(readratio) +
     ',iodepth=' + str(iodepth) + ',numjobs=' + str(numjobs) +
     ',total_iops=' + str(total_iops) +
     ',total_bw=' + str(total_bw) +
     ',avg_read_lat=' + str(avg_read_lat) +
     ',avg_write_lat=' + str(avg_write_lat))
query = query + e + '\n'

# Add queries per fio job
for job in fio_output['jobs']:
    e = ('fio,host=' + socket.gethostname() + ',job=' + job['jobname'] + ',rw=' + rw +
         ' bs=' + str(bs) + ',readratio=' + str(readratio) +
         ',iodepth=' + str(iodepth) + ',numjobs=' + str(numjobs) +
         ',sys_cpu=' + str(job['sys_cpu']) +
         ',usr_cpu=' + str(job['usr_cpu']) +
         ',read_bw=' + str(job['read']['bw']) +
         ',read_iops=' + str(job['read']['iops']) +
         ',read_lat_mean=' + str(job['read']['lat']['mean']) +
         ',read_lat_stddev=' + str(job['read']['lat']['stddev']) +
         ',read_clat_percentile_95=' + str(job['read']['clat']['percentile']['95.000000']) +
         ',read_clat_percentile_99=' + str(job['read']['clat']['percentile']['99.000000']) +
         ',write_bw=' + str(job['write']['bw']) +
         ',write_iops=' + str(job['write']['iops']) +
         ',write_lat_mean=' + str(job['write']['lat']['mean']) +
         ',write_lat_stddev=' + str(job['write']['lat']['stddev']) +
         ',write_clat_percentile_95=' + str(job['write']['clat']['percentile']['95.000000']) +
         ',write_clat_percentile_99=' + str(job['write']['clat']['percentile']['99.000000']))
    query = query + e + '\n'

# Send data to InfluxDB
# Credentials and endpoint come from the INFLUXDB_* environment variables
# read near the top of this script.
cmd = ("curl -i -XPOST 'http://" + ip + ":" + port + "/write?db=" + dbname +
       "&u=" + user + "&p=" + password + "'"
       " --data-binary " + "'" + query.rstrip() + "'")
print cmd # required for logging

if os.environ.get('INFLUXDB_ENABLED') == 'false':
    print("InfluxDB is not enabled, so this data is not 
trasmitted.") 96 | else: 97 | #run_bash(cmd) 98 | print run_bash(cmd) # for debugging 99 | 100 | -------------------------------------------------------------------------------- /benchmark/attbench/local/start.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # Author: Hee Won Lee 3 | # Created on 12/8/2017 4 | 5 | supported_benchmark_tool = ['fio', 'cosbench'] 6 | 7 | import os, sys, subprocess, copy, argparse, yaml 8 | 9 | def run_bash(cmd): 10 | # Refer to http://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running 11 | process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) 12 | 13 | # Poll process for new output until finished 14 | while True: 15 | nextline = process.stdout.readline() 16 | if nextline == '' and process.poll() is not None: 17 | break 18 | sys.stdout.write(nextline) 19 | sys.stdout.flush() 20 | 21 | output = process.communicate()[0] 22 | exitCode = process.returncode 23 | 24 | if (exitCode == 0): 25 | return output 26 | else: 27 | raise exitCode 28 | 29 | def arg_handler(): 30 | parser = argparse.ArgumentParser(description='This program runs various benchmark \ 31 | tools with a single config file.') 32 | parser.add_argument("benchmark_tool", help="fio, cosbench") 33 | parser.add_argument("-c", "--config", default="config.yaml", help="config file (default: %(default)s)") 34 | args = parser.parse_args() 35 | main(args) 36 | 37 | 38 | def load_config(config_file): 39 | # Read config.yaml 40 | with open(config_file) as stream: 41 | config = yaml.load(stream) 42 | 43 | # Preclude items that are `enabled = false` 44 | myconf = copy.deepcopy(config) 45 | for k1, v1 in config.iteritems(): 46 | for k2, v2 in v1.iteritems(): 47 | if k2 == 'enabled' and v2 == False: 48 | var = k1.upper() + '_' + k2.upper() 49 | os.environ[var] = 'false' 50 | myconf.pop(k1) 51 | # For debugging 52 | #print conf_disabled 53 | #print 
myconf 54 | 55 | # Add to environment variables 56 | myenv = {} # for debugging 57 | for k1, v1 in myconf.iteritems(): 58 | for k2, v2 in v1.iteritems(): 59 | if k2 == 'env': 60 | for k3, v3 in v2.iteritems(): 61 | var = k1.upper() + '_' + k3.upper() 62 | if var not in os.environ: # if defined in ansible beforehand, skip this. 63 | myenv[var] = str(v3) # for debugging 64 | os.environ[var] = str(v3) 65 | 66 | # For debugging 67 | #print myenv 68 | #print os.getenv('INFLUXDB_IP', '') 69 | #print os.environ.get('INFLUXDB_IP') 70 | #print os.environ 71 | 72 | def fio_eta(): 73 | cnt = 0 74 | randbslist = os.environ.get('FIO_RANDBSLIST') 75 | seqbslist = os.environ.get('FIO_SEQBSLIST') 76 | readratiolist = os.environ.get('FIO_READRATIOLIST') 77 | iodepthlist = os.environ.get('FIO_IODEPTHLIST') 78 | numjobslist = os.environ.get('FIO_NUMJOBSLIST') 79 | runtime = os.environ.get('FIO_RUNTIME') 80 | 81 | factor1 = len(readratiolist.split()) * len(iodepthlist.split()) * \ 82 | len(numjobslist.split()) 83 | if randbslist is not None: 84 | cnt = cnt + len(randbslist.split()) * factor1 85 | if seqbslist is not None: 86 | cnt = cnt + len(seqbslist.split()) * factor1 87 | 88 | eta = int(runtime) * cnt 89 | eta_unit = 'sec' 90 | 91 | if eta >= 86400: 92 | eta = eta / 86400. 93 | eta_unit = 'day' 94 | elif eta >= 3600: 95 | eta = eta / 3600. 96 | eta_unit = 'hr' 97 | elif eta >= 60: 98 | eta = eta / 60. 99 | eta_unit = 'min' 100 | 101 | return eta, eta_unit, int(runtime), cnt 102 | 103 | def main(args): 104 | # Generate env variables 105 | load_config(args.config) 106 | 107 | # ETA 108 | if args.benchmark_tool == 'fio': 109 | print("ETA: %.1f+ %s (each runtime: %d sec, count: %d)" % fio_eta()) 110 | 111 | # Run 112 | if args.benchmark_tool in supported_benchmark_tool: 113 | cmd = ('cd ' + args.benchmark_tool + '; ./run.sh') 114 | run_bash(cmd) 115 | else: 116 | print("`%s` is not supported." 
% args.benchmark_tool) 117 | print("Supported benchmark tools: %s" % ', '.join(map(str, supported_benchmark_tool))) 118 | 119 | if __name__ == "__main__": 120 | arg_handler() 121 | -------------------------------------------------------------------------------- /qosctrl/src/cinder/common/ioarbparams.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2015 AT&T Labs Research 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | # 15 | # Author: Moo-Ryong Ra, mra@research.att.com 16 | 17 | """IOArbiter QoS parameter mapping 18 | 19 | These information will be applied both for 20 | cinder-volume and cinder-scheduler. 21 | """ 22 | 23 | from oslo_log import log as logging 24 | 25 | # IOArbiter specific keys. 26 | STTYPE = 'ioarb_sttype' 27 | STTYPE_MANUAL = 'ioarb-manual' 28 | 29 | RTYPE_SIZE = 'size' 30 | RTYPE_IOPS4K = 'iops-4k' 31 | RTYPE_IOPS4K_R = 'iops-4k-r' 32 | RTYPE_IOPS4K_W = 'iops-4k-w' 33 | 34 | # Constants for software RAID configuration. 
('storage_class' field) 35 | RAID_MAPPING = { 36 | 'ioarb-platinum': 'raid0', 37 | 'ioarb-gold': 'raid6', 38 | 'ioarb-silver': 'raid5', 39 | 'ioarb-bronze': 'jbod' } 40 | NDISK_MAPPING = { 41 | 'ioarb-platinum': 3, 42 | 'ioarb-gold': 5, 43 | 'ioarb-silver': 4, 44 | 'ioarb-bronze': 1 } 45 | MAX_IOPS_MAPPING = { 46 | 'ioarb-platinum': 30000, 47 | 'ioarb-gold': 1000, 48 | 'ioarb-silver': 500, 49 | 'ioarb-bronze': 100 } 50 | MIN_IOPS_MAPPING = { 51 | 'ioarb-platinum': 30000, 52 | 'ioarb-gold': 1000, 53 | 'ioarb-silver': 500, 54 | 'ioarb-bronze': 100 } 55 | IO_SIZE_MAPPING = { 56 | 'ioarb-platinum': 4096, 57 | 'ioarb-gold': 4096, 58 | 'ioarb-silver': 4096, 59 | 'ioarb-bronze': 4096 } 60 | MEDIUM_MAPPING = { 61 | 'ioarb-platinum': 'ssd', 62 | 'ioarb-gold': 'any', 63 | 'ioarb-silver': 'any', 64 | 'ioarb-bronze': 'any' } 65 | 66 | LOG = logging.getLogger(__name__) 67 | 68 | def translate_qosspec(qosspec): 69 | """ Requirement mapping """ 70 | stspec = {} 71 | sttype = qosspec[STTYPE] 72 | if sttype in RAID_MAPPING: 73 | stspec = { 74 | 'raidconf': RAID_MAPPING[sttype], 75 | 'maxiops': MAX_IOPS_MAPPING[sttype], 76 | 'miniops': MIN_IOPS_MAPPING[sttype], 77 | 'iosize': IO_SIZE_MAPPING[sttype], 78 | 'medium': MEDIUM_MAPPING[sttype], 79 | 'ndisk': NDISK_MAPPING[sttype] } 80 | elif sttype == STTYPE_MANUAL: 81 | # u'max_iops': u'100', u'medium': u'hdd', u'blocksize': u'4096', 82 | # u'raidconf': u'jbod', u'min_iops': u'100' 83 | stspec = { 84 | 'raidconf': qosspec['raidconf'], 85 | 'maxiops': qosspec['maxiops'], 86 | 'miniops': qosspec['miniops'], 87 | 'iosize': qosspec['iosize'], 88 | 'medium': qosspec['medium'], 89 | 'ndisk': qosspec['ndisk'] } 90 | else: 91 | LOG.debug('[MRA] unknown sttype: %s' % (sttype)) 92 | stspec = { 93 | 'raidconf': 'jbod', 94 | 'maxiops': 0, 95 | 'miniops': 0, 96 | 'iosize': 4096, 97 | 'medium': 'hdd', 98 | 'ndisk': 1 } 99 | 100 | return stspec 101 | 102 | def get_perf_dict(ndisk, medium, iotype): 103 | """Basic IOPS budgetting""" 104 | 105 | unit = 
200 106 | if medium == 'hdd': 107 | unit = 200 108 | elif medium == 'ssd': 109 | unit = 70000 110 | elif medium == 'nvme': 111 | unit = 700000 112 | 113 | r = { 114 | 'jbod': unit, 115 | 'raid0': ndisk * unit, 116 | 'raid1': ndisk * unit, 117 | 'raid5': ndisk * unit, 118 | 'raid6': ndisk * unit, } 119 | w = { 120 | 'jbod': unit, 121 | 'raid0': ndisk * unit, 122 | 'raid1': unit, 123 | 'raid5': (ndisk-1) * unit, 124 | 'raid6': (ndisk-2) * unit, } 125 | rw = { 126 | 'jbod': min(r['jbod'], w['jbod']), 127 | 'raid0': min(r['raid0'], w['raid0']), 128 | 'raid1': min(r['raid1'], w['raid1']), 129 | 'raid5': min(r['raid5'], w['raid5']), 130 | 'raid6': min(r['raid6'], w['raid6']), } 131 | 132 | if iotype == 'r': 133 | return r 134 | elif iotype == 'w': 135 | return w 136 | else: 137 | return rw 138 | 139 | def calculate_total_budget(devs, stspec): 140 | """Calculate total budget. 141 | devs: ioarb_resource from the ioarblvm driver impl. 142 | stspec: from translate_qosspec() above. 143 | """ 144 | budget = {} 145 | 146 | # Capacity calculation. (in GB) 147 | mindisk = float(min(dev['size'] for dev in devs)) 148 | totdisk = float(sum(dev['size'] for dev in devs)) 149 | ndisk = len(devs) 150 | 151 | budget[RTYPE_SIZE] = { 152 | 'jbod': totdisk, 153 | 'raid0': totdisk, 154 | 'raid1': totdisk / 2.0, 155 | 'raid5': mindisk * (ndisk - 1) if ndisk > 2 else 0, # minimum=3 156 | 'raid6': mindisk * (ndisk - 2) if ndisk > 3 else 0, # minimum=4 157 | } 158 | 159 | # IOPS budget calculation. (based on 4KB randrw) 160 | # - calculation is based on the following link. 161 | # - https://en.wikipedia.org/wiki/Standard_RAID_levels 162 | # - might be replaced with profiled data. 
163 | budget[RTYPE_IOPS4K_R] = get_perf_dict(ndisk, stspec['medium'], 'r') 164 | budget[RTYPE_IOPS4K_W] = get_perf_dict(ndisk, stspec['medium'], 'w') 165 | budget[RTYPE_IOPS4K] = get_perf_dict(ndisk, stspec['medium'], 'rw') 166 | 167 | return budget 168 | 169 | 170 | 171 | 172 | -------------------------------------------------------------------------------- /openfas/diamond/diamond.conf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Diamond Configuration File 3 | ################################################################################ 4 | 5 | ################################################################################ 6 | ### Options for the server 7 | [server] 8 | 9 | # Handlers for published metrics. 10 | handlers = diamond.handler.graphite.GraphiteHandler, diamond.handler.archive.ArchiveHandler 11 | 12 | # User diamond will run as 13 | # Leave empty to use the current user 14 | user = 15 | 16 | # Group diamond will run as 17 | # Leave empty to use the current group 18 | group = 19 | 20 | # Pid file 21 | pid_file = /var/run/diamond.pid 22 | 23 | # Directory to load collector modules from 24 | #collectors_path = /usr/share/diamond/collectors/ 25 | collectors_path = /usr/local/share/diamond/collectors 26 | 27 | # Directory to load collector configs from 28 | collectors_config_path = /etc/diamond/collectors/ 29 | 30 | # Number of seconds between each collector load 31 | # collectors_load_delay = 1.0 32 | 33 | # Directory to load handler configs from 34 | handlers_config_path = /etc/diamond/handlers/ 35 | 36 | # Directory to load handler modules from 37 | #handlers_path = /usr/share/diamond/handlers/ 38 | handlers_path = /usr/local/share/diamond/handlers/ 39 | 40 | # Maximum number of metrics waiting to be processed by handlers. 41 | # When metric queue is full, new metrics are dropped. 
42 | metric_queue_size = 16384 43 | 44 | 45 | ################################################################################ 46 | ### Options for handlers 47 | [handlers] 48 | 49 | # daemon logging handler(s) 50 | keys = rotated_file 51 | 52 | ### Defaults options for all Handlers 53 | [[default]] 54 | 55 | [[ArchiveHandler]] 56 | 57 | # File to write archive log files 58 | log_file = /var/log/diamond/archive.log 59 | 60 | # Number of days to keep archive log files 61 | days = 7 62 | 63 | [[GraphiteHandler]] 64 | ### Options for GraphiteHandler 65 | 66 | # Graphite server host 67 | host = 127.0.0.1 68 | 69 | # Port to send metrics to 70 | port = 2003 71 | 72 | # Socket timeout (seconds) 73 | timeout = 15 74 | 75 | # Batch size for metrics 76 | batch = 1 77 | 78 | [[GraphitePickleHandler]] 79 | ### Options for GraphitePickleHandler 80 | 81 | # Graphite server host 82 | host = 127.0.0.1 83 | 84 | # Port to send metrics to 85 | port = 2004 86 | 87 | # Socket timeout (seconds) 88 | timeout = 15 89 | 90 | # Batch size for pickled metrics 91 | batch = 256 92 | 93 | [[MySQLHandler]] 94 | ### Options for MySQLHandler 95 | 96 | # MySQL Connection Info 97 | hostname = 127.0.0.1 98 | port = 3306 99 | username = root 100 | password = 101 | database = diamond 102 | table = metrics 103 | # INT UNSIGNED NOT NULL 104 | col_time = timestamp 105 | # VARCHAR(255) NOT NULL 106 | col_metric = metric 107 | # VARCHAR(255) NOT NULL 108 | col_value = value 109 | 110 | [[StatsdHandler]] 111 | host = 127.0.0.1 112 | port = 8125 113 | 114 | [[TSDBHandler]] 115 | host = 127.0.0.1 116 | port = 4242 117 | timeout = 15 118 | 119 | [[LibratoHandler]] 120 | user = user@example.com 121 | apikey = abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01 122 | 123 | [[HostedGraphiteHandler]] 124 | apikey = abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01 125 | timeout = 15 126 | batch = 1 127 | 128 | [[SignalfxHandler]] 129 | auth_token = abcdefghijklmnopqrstuvwxyz 130 | 131 
| # And any other config settings from GraphiteHandler are valid here 132 | 133 | [[HttpPostHandler]] 134 | 135 | ### Url to post the metrics 136 | url = http://localhost:8888/ 137 | ### Metrics batch size 138 | batch = 100 139 | 140 | 141 | ################################################################################ 142 | ### Options for collectors 143 | [collectors] 144 | 145 | [[default]] 146 | ### Default options for all Collectors 147 | 148 | # Uncomment and set to hardcode a hostname for the collector path 149 | # Keep in mind, periods are separators in graphite 150 | # hostname = my_custom_hostname 151 | 152 | # If you prefer to just use a different way of calculating the hostname 153 | # Uncomment and set this to one of these values: 154 | 155 | # smart = Default. Tries fqdn_short. If that's localhost, uses hostname_short 156 | 157 | # fqdn_short = Default. Similar to hostname -s 158 | # fqdn = hostname output 159 | # fqdn_rev = hostname in reverse (com.example.www) 160 | 161 | # uname_short = Similar to uname -n, but only the first part 162 | # uname_rev = uname -r in reverse (com.example.www) 163 | 164 | # hostname_short = `hostname -s` 165 | # hostname = `hostname` 166 | # hostname_rev = `hostname` in reverse (com.example.www) 167 | 168 | # shell = Run the string set in hostname as a shell command and use its 169 | # output (with spaces trimmed off from both ends) as the hostname. 170 | 171 | # hostname_method = smart 172 | 173 | # Path Prefix and Suffix 174 | # you can use one or both to craft the path where you want to put metrics 175 | # such as: %(path_prefix)s.$(hostname)s.$(path_suffix)s.$(metric)s 176 | # path_prefix = servers 177 | # path_suffix = 178 | 179 | # Path Prefix for Virtual Machines 180 | # If the host supports virtual machines, collectors may report per 181 | # VM metrics. 
Following OpenStack nomenclature, the prefix for 182 | # reporting per VM metrics is "instances", and metric foo for VM 183 | # bar will be reported as: instances.bar.foo... 184 | # instance_prefix = instances 185 | 186 | # Default Poll Interval (seconds) 187 | # interval = 300 188 | 189 | ################################################################################ 190 | # Default enabled collectors 191 | ################################################################################ 192 | 193 | [[CPUCollector]] 194 | enabled = True 195 | 196 | [[DiskSpaceCollector]] 197 | enabled = True 198 | 199 | [[DiskUsageCollector]] 200 | enabled = True 201 | 202 | [[LoadAverageCollector]] 203 | enabled = True 204 | 205 | [[MemoryCollector]] 206 | enabled = True 207 | 208 | [[VMStatCollector]] 209 | enabled = True 210 | 211 | ################################################################################ 212 | ### Options for logging 213 | # for more information on file format syntax: 214 | # http://docs.python.org/library/logging.config.html#configuration-file-format 215 | 216 | [loggers] 217 | 218 | keys = root 219 | 220 | # handlers are higher in this config file, in: 221 | # [handlers] 222 | # keys = ... 
223 | 224 | [formatters] 225 | 226 | keys = default 227 | 228 | [logger_root] 229 | 230 | # to increase verbosity, set DEBUG 231 | level = INFO 232 | handlers = rotated_file 233 | propagate = 1 234 | 235 | [handler_rotated_file] 236 | 237 | class = handlers.TimedRotatingFileHandler 238 | level = DEBUG 239 | formatter = default 240 | # rotate at midnight, each day and keep 7 days 241 | args = ('/var/log/diamond/diamond.log', 'midnight', 1, 7) 242 | 243 | [formatter_default] 244 | 245 | format = [%(asctime)s] [%(threadName)s] %(message)s 246 | datefmt = 247 | 248 | ################################################################################ 249 | ### Options for config merging 250 | # [configs] 251 | # path = "/etc/diamond/configs/" 252 | # extension = ".conf" 253 | #------------------------------------------------------------------------------- 254 | # Example: 255 | # /etc/diamond/configs/net.conf 256 | # [collectors] 257 | # 258 | # [[NetworkCollector]] 259 | # enabled = True 260 | 261 | -------------------------------------------------------------------------------- /qosctrl/src/cinder/scheduler/filters/ioarb_filter.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2014 AT&T Labs Research 2 | # All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 
class IOArbiterFilter(filters.BaseHostFilter):
    """Filter hosts based on IOArbiter-provided block device information.

    A host passes when its advertised capabilities (RAID layout, disk
    count, storage capacity and 4K-IOPS budget) can accommodate the
    volume request's extra_specs / qos_specs.
    """

    def host_passes(self, host_state, filter_properties):
        """Return True if *host_state* passes the ioarbiter filter.

        :param host_state: scheduler HostState for the candidate host.
        :param filter_properties: scheduler filter properties dict.
        """
        stats = self._generate_stats(host_state, filter_properties)

        result = self._check_filter_function(stats)
        # Lazy %-style args so the message is only built when DEBUG is on.
        LOG.debug("[MRA] filtering result: %s -> %s",
                  stats['host_stats']['host'], result)

        return result

    def _check_filter_function(self, stats):
        """Check whether the volume request fits on this host.

        :param stats: dict produced by :meth:`_generate_stats`.
        :returns: a single boolean - True when the host can serve the
            request.  (The previous docstring claimed a tuple was
            returned; the method has always returned one boolean.)
        """
        filter_result = False

        host_stats = stats['host_stats']
        host_caps = stats['host_caps']
        extra_specs = stats['extra_specs']
        qos_specs = stats['qos_specs']
        volume_stats = stats['volume_stats']
        volume_type = stats['volume_type']

        LOG.debug('[MRA] =================')
        LOG.debug('[MRA] host_stats: %(dt)s', {'dt': host_stats})
        LOG.debug('[MRA] host_caps: %(dt)s', {'dt': host_caps})
        LOG.debug('[MRA] extra_specs: %(dt)s', {'dt': extra_specs})
        LOG.debug('[MRA] volume_stats: %(dt)s', {'dt': volume_stats})
        LOG.debug('[MRA] volume_type: %(dt)s', {'dt': volume_type})
        LOG.debug('[MRA] qos_specs: %(dt)s', {'dt': qos_specs})
        LOG.debug('[MRA] =================')

        # Check that the volume types match, i.e., ioarb_sttype = "ioarbiter"
        if extra_specs is None or 'volume_backend_name' not in extra_specs:
            LOG.warning(_LW("No 'volume_backend_name' key in extra_specs. "
                            "Skipping volume backend name check."))
        elif (extra_specs['volume_backend_name'] !=
                host_stats['volume_backend_name']):
            LOG.warning(_LW("Volume backend names do not match: '%(target)s' "
                            "vs '%(current)s' :: Skipping"),
                        {'target': extra_specs['volume_backend_name'],
                         'current': host_stats['volume_backend_name']})
            return False

        # Bail out early when either side does not speak ioarbiter:
        # a non-ioarbiter host passes only for non-ioarbiter requests.
        if 'ioarb_cvtype' not in host_caps:
            return 'ioarb_sttype' not in extra_specs

        if qos_specs is None:
            # Policy decision: an ioarbiter cinder-volume does not
            # handle volume creation requests without qos_specs.
            return False

        # Check cinder-volume type: 'host' or 'provisioned'
        cvtype = host_caps['ioarb_cvtype']
        stspec = ioarbiter.translate_qosspec(qos_specs)

        tot_budget = {}
        deployed = {}

        if cvtype == 'host':
            # Calculate host's total capacity.
            devs = host_caps['ioarb_resource']
            LOG.debug('[MRA] host mode: available devs - %(devs)s',
                      {'devs': devs})

            # Not enough spindles to build the requested array.
            if len(devs) < int(stspec['ndisk']):
                LOG.debug('[MRA] %s vs. %s',
                          len(devs), int(stspec['ndisk']))
                return False

            # Get a total budget for a) storage capacity, b) iops budget.
            # Make sure the deployed cinder volumes are using
            # the same translator function.
            tot_budget = ioarbiter.calculate_total_budget(devs, stspec)
        elif cvtype == 'provisioned':
            # A provisioned container already knows its total capacity.
            LOG.debug('[MRA] provisioned mode')

            # Check capacity.
            # This check may be redundant when CapacityFilter is enabled
            # (it is by default in Kilo).
            if volume_stats['size'] > host_stats['free_capacity_gb']:
                return False

            # Check RAID configuration.  ('!=' replaces the Python 2-only
            # '<>' operator, which is a SyntaxError on Python 3.)
            if (host_caps['ioarb_raidconf'] != stspec['raidconf'] or
                    host_caps['ioarb_ndisk'] != str(stspec['ndisk'])):
                LOG.debug('[MRA] redundancy params do not match.')
                LOG.debug('[MRA] raid %s vs. %s',
                          host_caps['ioarb_raidconf'], stspec['raidconf'])
                LOG.debug('[MRA] ndisk %s vs. %s',
                          host_caps['ioarb_ndisk'], stspec['ndisk'])
                return False

            # QoS budget advertised by the container.
            tot_budget[ioarbiter.RTYPE_SIZE] = {
                host_caps['ioarb_raidconf']: host_stats['total_capacity_gb']}
            tot_budget[ioarbiter.RTYPE_IOPS4K] = {
                host_caps['ioarb_raidconf']: host_caps['total_iops_4k']}

        # Get already deployed qos-aware volumes' information.
        deployed = self._calculate_deployed_capacity(cvtype, host_caps, stspec)

        # See if there is remaining room both in terms of storage
        # capacity and qos budget.
        filter_result = (
            self._check_budget(tot_budget, deployed, stspec,
                               ioarbiter.RTYPE_SIZE, volume_stats['size']) and
            self._check_budget(tot_budget, deployed, stspec,
                               ioarbiter.RTYPE_IOPS4K, stspec['miniops'])
        )

        return filter_result

    def _check_budget(self, budget, deployed, stspec, rtype, reqnum):
        """Return True when *reqnum* fits in the remaining budget.

        Capacity is (perhaps) checked again here beyond CapacityFilter,
        since CapacityFilter cannot know the exact available storage
        space after QoS requirements are applied.

        :param budget: total budget per resource type and raidconf.
        :param deployed: already-consumed amounts, same shape as budget.
        :param stspec: translated storage spec; 'raidconf' key required.
        :param rtype: resource type key (size or 4K IOPS).
        :param reqnum: requested amount for *rtype*.
        """
        if ('raidconf' not in stspec or
                rtype not in budget or
                rtype not in deployed):
            LOG.warning('[MRA] a field is missing')
            return False

        tot = float(budget[rtype][stspec['raidconf']])
        used = float(deployed[rtype][stspec['raidconf']])

        LOG.debug('[MRA] budget chk: %s, %s, %s', rtype, reqnum, tot - used)
        LOG.debug('[MRA] decision: %s', float(reqnum) < tot - used)

        return float(reqnum) < tot - used

    def _calculate_deployed_capacity(self, cvtype, hostinfo, stspec):
        """Calculate already consumed resources.

        Currently only supports size and IOPS.

        :raises ValueError: if *cvtype* is neither 'host' nor
            'provisioned'.
        """
        deployed = {}

        if cvtype == 'host':
            # In 'host' mode, no volumes are deployed yet, so every
            # supported raid configuration starts at zero.
            zeros = {'jbod': 0, 'raid0': 0, 'raid1': 0, 'raid5': 0,
                     'raid6': 0}
            deployed[ioarbiter.RTYPE_SIZE] = dict(zeros)
            deployed[ioarbiter.RTYPE_IOPS4K] = dict(zeros)
        elif cvtype == 'provisioned':
            # In 'provisioned' mode, cinder-volume process will report info.
            deployed[ioarbiter.RTYPE_SIZE] = {
                stspec['raidconf']: hostinfo['provisioned_capacity_gb']}
            deployed[ioarbiter.RTYPE_IOPS4K] = {
                stspec['raidconf']: hostinfo['provisioned_iops_4k']}
        else:
            LOG.error('[MRA] unknown cinder-volume type: %s', cvtype)
            # Bug fix: the original used a bare ``raise`` with no active
            # exception, which raises "RuntimeError: No active exception
            # to re-raise" instead of describing the problem.
            raise ValueError('unknown cinder-volume type: %s' % cvtype)

        return deployed

    def _generate_stats(self, host_state, filter_properties):
        """Generate statistics from host and volume data.

        :returns: dict with keys host_stats, host_caps, extra_specs,
            qos_specs (None when the volume type has no qos_specs_id),
            volume_stats and volume_type.
        """
        host_stats = {
            'host': host_state.host,
            'volume_backend_name': host_state.volume_backend_name,
            'vendor_name': host_state.vendor_name,
            'driver_version': host_state.driver_version,
            'storage_protocol': host_state.storage_protocol,
            'QoS_support': host_state.QoS_support,
            'total_capacity_gb': host_state.total_capacity_gb,
            'allocated_capacity_gb': host_state.allocated_capacity_gb,
            'free_capacity_gb': host_state.free_capacity_gb,
            'reserved_percentage': host_state.reserved_percentage,
            'updated': host_state.updated,
        }

        host_caps = host_state.capabilities
        volume_type = filter_properties.get('volume_type', {})
        extra_specs = volume_type.get('extra_specs', {})
        request_spec = filter_properties.get('request_spec', {})
        volume_stats = request_spec.get('volume_properties', {})

        # qos_specs are stored separately from the volume type; resolve
        # them through the admin context when an id is present.
        ctxt = context.get_admin_context()
        qos_specs_id = volume_type.get('qos_specs_id')
        if qos_specs_id is not None:
            qos_specs = qos.get_qos_specs(ctxt, qos_specs_id)['specs']
        else:
            qos_specs = None

        stats = {
            'host_stats': host_stats,
            'host_caps': host_caps,
            'extra_specs': extra_specs,
            'qos_specs': qos_specs,
            'volume_stats': volume_stats,
            'volume_type': volume_type,
        }

        return stats
-------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /qosctrl/src/cinder/brick/local_dev/ioarbcontainer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2015 AT&T Labs Research 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
14 | # 15 | # Author: Moo-Ryong Ra, mra@research.att.com 16 | 17 | """Container-related utilities and helpers.""" 18 | 19 | import os 20 | import socket 21 | import ConfigParser 22 | 23 | from oslo_concurrency import processutils 24 | from oslo_config import cfg 25 | from oslo_log import log as logging 26 | 27 | from cinder.brick.local_dev import lvm as brick_lvm 28 | from cinder.common import ioarbparams as ioarbiter 29 | from cinder.i18n import _, _LE, _LI 30 | from cinder import utils 31 | 32 | #CONF = cfg.CONF 33 | LOG = logging.getLogger(__name__) 34 | 35 | 36 | def _get_default_conf_dir(): 37 | return '/var/lib/cinder/ioarb-container/' 38 | 39 | def _get_container_image(): 40 | return 'ioarb/cinderbackend' 41 | 42 | def _get_cont_backend_name(blkdev): 43 | return 'ioarb-' + blkdev.split('/')[2] 44 | 45 | def _get_cont_vg_prefix(): 46 | return 'ioarb-cvs-' 47 | 48 | def _get_cont_vg_name(blkdev): 49 | return _get_cont_vg_prefix() + blkdev.split('/')[2] 50 | 51 | def _get_container_name(blkdev): 52 | return socket.gethostname() + '-ioarbcont-' + blkdev.split('/')[2] 53 | 54 | def _get_conf_path(blkdev): 55 | return (_get_default_conf_dir() + 'ioarb-cinder-' 56 | + blkdev.split('/')[2] + '.conf') 57 | 58 | # This function should be in the common library. 59 | # But, in order not to touch openstack distribution, 60 | # I will keep this function locally. 
# This function should be in the common library.
# But, in order not to touch openstack distribution,
# I will keep this function locally.
def _read_cinder_conf(path='/etc/cinder/cinder.conf'):
    """Parse the host's cinder.conf.

    :param path: location of the configuration file.
    :returns: a populated ConfigParser instance.
    :raises IOError: if the file does not exist.
    """
    config = ConfigParser.ConfigParser()
    if not os.path.exists(path):
        LOG.error('[MRA] cannot find a cinder.conf file.')
        # Bug fix: the original used a bare ``raise`` with no active
        # exception, which surfaces as "RuntimeError: No active
        # exception to re-raise" instead of the real problem.
        raise IOError('cinder configuration file not found: %s' % path)
    config.read(path)
    return config

def get_cmdprefix_for_exec_in_cont(config):
    """Return the argv prefix to run a command inside the container."""
    return ['docker', 'exec', '-t', config['container_name']]

def create_cinder_conf_for_container(blkdev, stspec, config_info):
    """Automatically create cinder.conf for container.

    :param blkdev: backing block device path, e.g. '/dev/md0'.
    :param stspec: translated storage/QoS spec dict, or None.
    :param config_info: partially filled config dict, or None; any
        missing key is filled with a default derived from *blkdev*.
    :returns: the completed config dict.
    """
    info = {}
    if config_info is not None:
        info = config_info

    # Default values if not in config_info.
    if 'container_name' not in info:
        info['container_name'] = _get_container_name(blkdev)
    if 'backend_name' not in info:
        info['backend_name'] = _get_cont_backend_name(blkdev)
    if 'config_path' not in info:
        info['config_path'] = _get_conf_path(blkdev)
    if 'blkdev' not in info:
        info['blkdev'] = blkdev
    if 'container_image' not in info:
        info['container_image'] = _get_container_image()
    # Bug fix: the original tested the misspelled key 'vg_gname', so a
    # caller-supplied 'vg_name' was always overwritten.
    if 'vg_name' not in info:
        info['vg_name'] = _get_cont_vg_name(blkdev)

    # Read a local cinder.conf file as the template.
    default_section = 'DEFAULT'
    config = _read_cinder_conf()

    # Replace the backend information: drop the host's backends and
    # register a single LVM backend bound to this device's VG.
    old_backends = config.get(default_section, 'enabled_backends').split(',')
    for backend in old_backends:
        config.remove_section(backend)

    backend = info['backend_name']
    config.add_section(backend)
    config.set(backend, 'volume_driver',
               'cinder.volume.drivers.provlvm.LVMVolumeDriver')
    config.set(backend, 'iscsi_protocol', 'iscsi')
    config.set(backend, 'iscsi_helper', 'tgtadm')
    config.set(backend, 'volume_group', _get_cont_vg_name(blkdev))
    config.set(backend, 'volume_clear_size', '50')
    config.set(default_section, 'enabled_backends', backend)
    config.set(default_section, 'periodic_interval', '10')
    #config.set(default_section, 'iscsi_write_cache', 'off')

    if stspec is not None:
        config.set(backend, 'ioarb_raidconf', stspec['raidconf'])
        config.set(backend, 'ioarb_ndisk', stspec['ndisk'])
        perfmat = ioarbiter.get_perf_dict(int(stspec['ndisk']),
                                          stspec['medium'], 'rw')
        config.set(backend, 'ioarb_total_iops_4k', perfmat[stspec['raidconf']])

    # Save it to the designated location.
    # [MRA] Todo: file creation should be done by rootwrapper.
    with open(info['config_path'], 'wb') as configfile:
        config.write(configfile)

    return info


def check_container_is_running(config, root_helper):
    """Check if the container is already running.

    On success, ``config['container_id']`` is set to the stripped id of
    the running container; otherwise *config* is returned unchanged.
    """
    LOG.debug('[MRA] entered check_container_is_running()')

    cmd = ['docker', 'ps', '-f', ('name=%s' % config['container_name']), '-q']
    try:
        (out, _err) = utils.execute(*cmd, root_helper=root_helper,
                                    run_as_root=True)
    except processutils.ProcessExecutionError as err:
        LOG.exception(_LE('Error checking running docker instances'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise

    # A running container prints its short id (12 hex chars); anything
    # shorter is treated as "not found".
    if out is not None and len(out) > 10:
        LOG.debug('[MRA] existed. container-id: %(contid)s',
                  {'contid': out})
        config['container_id'] = out.strip()
    else:
        LOG.debug('[MRA] container does not exist: %(out)s',
                  {'out': out})

    return config


def create_container_instance(config, root_helper):
    """Create a docker instance that runs tgt and cinder-volume in them.

    Bind-mounts the generated cinder.conf, the ioarbiter helper modules
    and the reservation file (``config['resv_info']``) into the
    container, then records the new container id in *config*.
    """
    LOG.debug('[MRA] entered create_container_instance()')

    # Files from the host that the container must share.
    cinder_root = '/usr/lib/python2.7/dist-packages/cinder'
    resv = '%s/common/ioarbresv.py' % cinder_root
    params = '%s/common/ioarbparams.py' % cinder_root

    cmd = ['docker', 'run', '--name', config['container_name'], '-it',
           '-p', '3260', '-d', '--privileged',
           '-v', '%s:%s' % (config['config_path'], '/etc/cinder/cinder.conf'),
           '-v', '%s:%s' % (resv, resv),
           '-v', '%s:%s' % (params, params),
           '-v', '%s:%s' % (config['resv_info'], config['resv_info']),
           '-v', '/etc/hosts:/etc/hosts-hostmachine', config['container_image']]
    try:
        (out, _err) = utils.execute(*cmd, root_helper=root_helper,
                                    run_as_root=True)
    except processutils.ProcessExecutionError as err:
        LOG.exception(_LE('Error running docker instance'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise

    # docker run prints the full container id; keep the short form.
    config['container_id'] = out[0:12]
    LOG.debug('[MRA] created. container-id: %(out)s',
              {'out': config['container_id']})

    return config

def configure_container_instance(config, root_helper):
    """Configure container instance.

    Discovers the host port mapped to the container's iSCSI port 3260,
    rewrites the container's tgt configuration accordingly, and sets the
    container's hostname.
    """
    # Get the host port mapped to 3260/tcp.
    cmd = ['docker', 'inspect',
           "--format='{{(index (index .NetworkSettings.Ports \"3260/tcp\") 0).HostPort}}'",
           config['container_name']]
    try:
        (out, _err) = utils.execute(*cmd, root_helper=root_helper,
                                    run_as_root=True)
    except processutils.ProcessExecutionError as err:
        LOG.exception(_LE('Error inspecting container port mapping'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise

    LOG.debug('[MRA] host port: %(out)s', {'out': out})
    if out is not None:
        hostport = out.split()[0]
    else:
        # Bug fix: the original used a bare ``raise`` with no active
        # exception here, which raises an unrelated RuntimeError.
        raise RuntimeError('could not determine mapped host port for %s'
                           % config['container_name'])

    # update container's /etc/hosts and re-map tgt port
    # with a corresponding host port.
    cmd = ['docker', 'exec', '-t', config['container_name'],
           'ioarbiter-conf.sh', hostport]
    try:
        (out, _err) = utils.execute(*cmd, root_helper=root_helper,
                                    run_as_root=True)
    except processutils.ProcessExecutionError as err:
        LOG.exception(_LE('Error configuring container instance'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise

    # Change hostname of the container.
    cmd = ['docker', 'exec', '-t', config['container_name'],
           'hostname', config['container_name']]
    try:
        (out, _err) = utils.execute(*cmd, root_helper=root_helper,
                                    run_as_root=True)
    except processutils.ProcessExecutionError as err:
        LOG.exception(_LE('Error changing container hostname.'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise

    return config


def restart_processes_in_container(container_name, svclist, root_helper):
    """Restart each service in *svclist* inside the container."""
    if not svclist:
        return

    for svc in svclist:
        cmd = ['docker', 'exec', '-t', container_name,
               'service', svc, 'restart']
        try:
            utils.execute(*cmd, root_helper=root_helper,
                          run_as_root=True)
        except processutils.ProcessExecutionError as err:
            LOG.exception(_LE('Error restarting services in container.'))
            LOG.error(_LE('Cmd :%s'), err.cmd)
            LOG.error(_LE('StdOut :%s'), err.stdout)
            LOG.error(_LE('StdErr :%s'), err.stderr)
            raise


def remove_cont_cinder_volume(root_helper, arrdev):
    """Stop and remove a cinder-volume container.

    Stops the container backed by *arrdev*, removes it, and deletes its
    reservation file.  Missing containers are tolerated (best-effort).
    """
    # Stop docker instance.
    cont_name = _get_container_name(arrdev)
    cmd = ['docker', 'stop', cont_name]
    try:
        utils.execute(*cmd, root_helper=root_helper,
                      run_as_root=True)
    except processutils.ProcessExecutionError as err:
        # Already gone: nothing more to clean up.
        if "no such id" in err.stderr:
            LOG.debug('[MRA] container does not exists.')
            LOG.debug('[MRA] cmd: %s', err.cmd)
            return
        LOG.exception(_LE('Error stopping container.'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise

    # Remove docker instance.
    cmd = ['docker', 'rm', cont_name]
    try:
        utils.execute(*cmd, root_helper=root_helper,
                      run_as_root=True)
    except processutils.ProcessExecutionError as err:
        LOG.exception(_LE('Error removing container.'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise

    # Remove reservation info.
    resv_fpath = '/var/lib/cinder/ioarb-resv/resv-' + arrdev.split('/')[-1]
    cmd = ['rm', '-f', resv_fpath]
    try:
        utils.execute(*cmd, root_helper=root_helper,
                      run_as_root=True)
    except processutils.ProcessExecutionError as err:
        LOG.exception(_LE('Error removing resv info.'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise

# unit test code.
if __name__ == '__main__':
    print("* started.")
    config = create_cinder_conf_for_container('/dev/md0', None, None)
    info = create_container_instance(config, None)
    svclist = ['tgt', 'cinder-volume']
    restart_processes_in_container(info['container_name'], svclist, None)
    print("* ended.")
"99.500000" : 1832, 46 | "99.900000" : 2008, 47 | "99.950000" : 2160, 48 | "99.990000" : 9664, 49 | "0.00" : 0, 50 | "0.00" : 0, 51 | "0.00" : 0 52 | } 53 | }, 54 | "lat" : { 55 | "min" : 188, 56 | "max" : 10858, 57 | "mean" : 775.01, 58 | "stddev" : 213.92 59 | }, 60 | "bw_min" : 55104, 61 | "bw_max" : 57328, 62 | "bw_agg" : 49.84, 63 | "bw_mean" : 56331.20, 64 | "bw_dev" : 634.99 65 | }, 66 | "write" : { 67 | "io_bytes" : 1313728, 68 | "bw" : 131359, 69 | "iops" : 32837.82, 70 | "runtime" : 10001, 71 | "total_ios" : 328411, 72 | "short_ios" : 0, 73 | "drop_ios" : 0, 74 | "slat" : { 75 | "min" : 4, 76 | "max" : 1245, 77 | "mean" : 8.01, 78 | "stddev" : 3.74 79 | }, 80 | "clat" : { 81 | "min" : 51, 82 | "max" : 9827, 83 | "mean" : 631.38, 84 | "stddev" : 86.92, 85 | "percentile" : { 86 | "1.000000" : 474, 87 | "5.000000" : 540, 88 | "10.000000" : 572, 89 | "20.000000" : 604, 90 | "30.000000" : 620, 91 | "40.000000" : 628, 92 | "50.000000" : 636, 93 | "60.000000" : 644, 94 | "70.000000" : 652, 95 | "80.000000" : 668, 96 | "90.000000" : 676, 97 | "95.000000" : 684, 98 | "99.000000" : 700, 99 | "99.500000" : 708, 100 | "99.900000" : 1304, 101 | "99.950000" : 1672, 102 | "99.990000" : 2064, 103 | "0.00" : 0, 104 | "0.00" : 0, 105 | "0.00" : 0 106 | } 107 | }, 108 | "lat" : { 109 | "min" : 60, 110 | "max" : 9836, 111 | "mean" : 639.56, 112 | "stddev" : 86.81 113 | }, 114 | "bw_min" : 0, 115 | "bw_max" : 132232, 116 | "bw_agg" : 47.48, 117 | "bw_mean" : 125109.71, 118 | "bw_dev" : 28675.93 119 | }, 120 | "trim" : { 121 | "io_bytes" : 0, 122 | "bw" : 0, 123 | "iops" : 0.00, 124 | "runtime" : 0, 125 | "total_ios" : 0, 126 | "short_ios" : 0, 127 | "drop_ios" : 0, 128 | "slat" : { 129 | "min" : 0, 130 | "max" : 0, 131 | "mean" : 0.00, 132 | "stddev" : 0.00 133 | }, 134 | "clat" : { 135 | "min" : 0, 136 | "max" : 0, 137 | "mean" : 0.00, 138 | "stddev" : 0.00, 139 | "percentile" : { 140 | "1.000000" : 0, 141 | "5.000000" : 0, 142 | "10.000000" : 0, 143 | "20.000000" : 0, 144 | 
"30.000000" : 0, 145 | "40.000000" : 0, 146 | "50.000000" : 0, 147 | "60.000000" : 0, 148 | "70.000000" : 0, 149 | "80.000000" : 0, 150 | "90.000000" : 0, 151 | "95.000000" : 0, 152 | "99.000000" : 0, 153 | "99.500000" : 0, 154 | "99.900000" : 0, 155 | "99.950000" : 0, 156 | "99.990000" : 0, 157 | "0.00" : 0, 158 | "0.00" : 0, 159 | "0.00" : 0 160 | } 161 | }, 162 | "lat" : { 163 | "min" : 0, 164 | "max" : 0, 165 | "mean" : 0.00, 166 | "stddev" : 0.00 167 | }, 168 | "bw_min" : 0, 169 | "bw_max" : 0, 170 | "bw_agg" : 0.00, 171 | "bw_mean" : 0.00, 172 | "bw_dev" : 0.00 173 | }, 174 | "usr_cpu" : 23.12, 175 | "sys_cpu" : 72.28, 176 | "ctx" : 451818, 177 | "majf" : 0, 178 | "minf" : 4477, 179 | "iodepth_level" : { 180 | "1" : 0.10, 181 | "2" : 0.10, 182 | "4" : 0.10, 183 | "8" : 0.10, 184 | "16" : 0.10, 185 | "32" : 150.08, 186 | ">=64" : 0.00 187 | }, 188 | "latency_us" : { 189 | "2" : 0.00, 190 | "4" : 0.00, 191 | "10" : 0.00, 192 | "20" : 0.00, 193 | "50" : 0.00, 194 | "100" : 0.01, 195 | "250" : 0.01, 196 | "500" : 1.33, 197 | "750" : 90.82, 198 | "1000" : 5.54 199 | }, 200 | "latency_ms" : { 201 | "2" : 2.27, 202 | "4" : 0.03, 203 | "10" : 0.01, 204 | "20" : 0.01, 205 | "50" : 0.00, 206 | "100" : 0.00, 207 | "250" : 0.00, 208 | "500" : 0.00, 209 | "750" : 0.00, 210 | "1000" : 0.00, 211 | "2000" : 0.00, 212 | ">=2000" : 0.00 213 | }, 214 | "latency_depth" : 32, 215 | "latency_target" : 0, 216 | "latency_percentile" : 100.00, 217 | "latency_window" : 0 218 | }, 219 | { 220 | "jobname" : "job-sdc", 221 | "groupid" : 0, 222 | "error" : 0, 223 | "eta" : 0, 224 | "elapsed" : 16, 225 | "read" : { 226 | "io_bytes" : 566976, 227 | "bw" : 56691, 228 | "iops" : 14171.28, 229 | "runtime" : 10001, 230 | "total_ios" : 141727, 231 | "short_ios" : 0, 232 | "drop_ios" : 0, 233 | "slat" : { 234 | "min" : 4, 235 | "max" : 217, 236 | "mean" : 7.26, 237 | "stddev" : 2.80 238 | }, 239 | "clat" : { 240 | "min" : 216, 241 | "max" : 9993, 242 | "mean" : 767.08, 243 | "stddev" : 220.43, 
244 | "percentile" : { 245 | "1.000000" : 564, 246 | "5.000000" : 636, 247 | "10.000000" : 660, 248 | "20.000000" : 684, 249 | "30.000000" : 700, 250 | "40.000000" : 708, 251 | "50.000000" : 724, 252 | "60.000000" : 732, 253 | "70.000000" : 740, 254 | "80.000000" : 756, 255 | "90.000000" : 836, 256 | "95.000000" : 1224, 257 | "99.000000" : 1720, 258 | "99.500000" : 1864, 259 | "99.900000" : 2096, 260 | "99.950000" : 2416, 261 | "99.990000" : 3408, 262 | "0.00" : 0, 263 | "0.00" : 0, 264 | "0.00" : 0 265 | } 266 | }, 267 | "lat" : { 268 | "min" : 222, 269 | "max" : 10000, 270 | "mean" : 774.55, 271 | "stddev" : 220.42 272 | }, 273 | "bw_min" : 0, 274 | "bw_max" : 57808, 275 | "bw_agg" : 47.77, 276 | "bw_mean" : 53994.67, 277 | "bw_dev" : 12393.26 278 | }, 279 | "write" : { 280 | "io_bytes" : 1321380, 281 | "bw" : 132124, 282 | "iops" : 33029.80, 283 | "runtime" : 10001, 284 | "total_ios" : 330331, 285 | "short_ios" : 0, 286 | "drop_ios" : 0, 287 | "slat" : { 288 | "min" : 3, 289 | "max" : 234, 290 | "mean" : 7.28, 291 | "stddev" : 2.81 292 | }, 293 | "clat" : { 294 | "min" : 57, 295 | "max" : 9905, 296 | "mean" : 626.06, 297 | "stddev" : 87.74, 298 | "percentile" : { 299 | "1.000000" : 462, 300 | "5.000000" : 532, 301 | "10.000000" : 564, 302 | "20.000000" : 596, 303 | "30.000000" : 612, 304 | "40.000000" : 628, 305 | "50.000000" : 636, 306 | "60.000000" : 644, 307 | "70.000000" : 652, 308 | "80.000000" : 660, 309 | "90.000000" : 676, 310 | "95.000000" : 684, 311 | "99.000000" : 700, 312 | "99.500000" : 708, 313 | "99.900000" : 988, 314 | "99.950000" : 1448, 315 | "99.990000" : 2096, 316 | "0.00" : 0, 317 | "0.00" : 0, 318 | "0.00" : 0 319 | } 320 | }, 321 | "lat" : { 322 | "min" : 62, 323 | "max" : 9913, 324 | "mean" : 633.55, 325 | "stddev" : 87.66 326 | }, 327 | "bw_min" : 129328, 328 | "bw_max" : 133400, 329 | "bw_agg" : 50.15, 330 | "bw_mean" : 132128.80, 331 | "bw_dev" : 870.46 332 | }, 333 | "trim" : { 334 | "io_bytes" : 0, 335 | "bw" : 0, 336 | "iops" : 
0.00, 337 | "runtime" : 0, 338 | "total_ios" : 0, 339 | "short_ios" : 0, 340 | "drop_ios" : 0, 341 | "slat" : { 342 | "min" : 0, 343 | "max" : 0, 344 | "mean" : 0.00, 345 | "stddev" : 0.00 346 | }, 347 | "clat" : { 348 | "min" : 0, 349 | "max" : 0, 350 | "mean" : 0.00, 351 | "stddev" : 0.00, 352 | "percentile" : { 353 | "1.000000" : 0, 354 | "5.000000" : 0, 355 | "10.000000" : 0, 356 | "20.000000" : 0, 357 | "30.000000" : 0, 358 | "40.000000" : 0, 359 | "50.000000" : 0, 360 | "60.000000" : 0, 361 | "70.000000" : 0, 362 | "80.000000" : 0, 363 | "90.000000" : 0, 364 | "95.000000" : 0, 365 | "99.000000" : 0, 366 | "99.500000" : 0, 367 | "99.900000" : 0, 368 | "99.950000" : 0, 369 | "99.990000" : 0, 370 | "0.00" : 0, 371 | "0.00" : 0, 372 | "0.00" : 0 373 | } 374 | }, 375 | "lat" : { 376 | "min" : 0, 377 | "max" : 0, 378 | "mean" : 0.00, 379 | "stddev" : 0.00 380 | }, 381 | "bw_min" : 0, 382 | "bw_max" : 0, 383 | "bw_agg" : 0.00, 384 | "bw_mean" : 0.00, 385 | "bw_dev" : 0.00 386 | }, 387 | "usr_cpu" : 24.16, 388 | "sys_cpu" : 67.00, 389 | "ctx" : 536409, 390 | "majf" : 0, 391 | "minf" : 2988, 392 | "iodepth_level" : { 393 | "1" : 0.10, 394 | "2" : 0.10, 395 | "4" : 0.10, 396 | "8" : 0.10, 397 | "16" : 0.10, 398 | "32" : 150.07, 399 | ">=64" : 0.00 400 | }, 401 | "latency_us" : { 402 | "2" : 0.00, 403 | "4" : 0.00, 404 | "10" : 0.00, 405 | "20" : 0.00, 406 | "50" : 0.00, 407 | "100" : 0.01, 408 | "250" : 0.03, 409 | "500" : 1.80, 410 | "750" : 91.27, 411 | "1000" : 4.45 412 | }, 413 | "latency_ms" : { 414 | "2" : 2.40, 415 | "4" : 0.05, 416 | "10" : 0.01, 417 | "20" : 0.00, 418 | "50" : 0.00, 419 | "100" : 0.00, 420 | "250" : 0.00, 421 | "500" : 0.00, 422 | "750" : 0.00, 423 | "1000" : 0.00, 424 | "2000" : 0.00, 425 | ">=2000" : 0.00 426 | }, 427 | "latency_depth" : 32, 428 | "latency_target" : 0, 429 | "latency_percentile" : 100.00, 430 | "latency_window" : 0 431 | } 432 | ], 433 | "disk_util" : [ 434 | { 435 | "name" : "sdb", 436 | "read_ios" : 210187, 437 | 
"write_ios" : 489245, 438 | "read_merges" : 0, 439 | "write_merges" : 0, 440 | "read_ticks" : 160284, 441 | "write_ticks" : 307772, 442 | "in_queue" : 468844, 443 | "util" : 99.33 444 | }, 445 | { 446 | "name" : "sdc", 447 | "read_ios" : 211452, 448 | "write_ios" : 492120, 449 | "read_merges" : 0, 450 | "write_merges" : 0, 451 | "read_ticks" : 161044, 452 | "write_ticks" : 307492, 453 | "in_queue" : 469172, 454 | "util" : 99.43 455 | } 456 | ] 457 | } 458 | -------------------------------------------------------------------------------- /qosctrl/src/cinder/volume/targets/ioarbtgt.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 2 | # not use this file except in compliance with the License. You may obtain 3 | # a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 12 | # 13 | # Author: Moo-Ryong Ra, mra@research.att.com 14 | # Disclaimer: This source file is a modified version of the tgt.py 15 | # in OpenStack Kilo sources. 16 | 17 | import os 18 | import re 19 | import time 20 | 21 | from oslo_concurrency import processutils as putils 22 | from oslo_log import log as logging 23 | 24 | from cinder import exception 25 | from cinder.openstack.common import fileutils 26 | from cinder.i18n import _LI, _LW, _LE 27 | from cinder import utils 28 | from cinder.volume.targets import iscsi 29 | 30 | LOG = logging.getLogger(__name__) 31 | 32 | 33 | class TgtAdm(iscsi.ISCSITarget): 34 | """Target object for block storage devices. 
35 | 36 | Base class for target object, where target 37 | is data transport mechanism (target) specific calls. 38 | This includes things like create targets, attach, detach 39 | etc. 40 | """ 41 | 42 | VOLUME_CONF = """ 43 | 44 | backing-store %s 45 | driver %s 46 | write-cache %s 47 | 48 | """ 49 | VOLUME_CONF_WITH_CHAP_AUTH = """ 50 | 51 | backing-store %s 52 | driver %s 53 | %s 54 | write-cache %s 55 | 56 | """ 57 | 58 | def __init__(self, *args, **kwargs): 59 | super(TgtAdm, self).__init__(*args, **kwargs) 60 | 61 | def _get_target(self, iqn): 62 | (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True) 63 | lines = out.split('\n') 64 | for line in lines: 65 | if iqn in line: 66 | parsed = line.split() 67 | tid = parsed[1] 68 | return tid[:-1] 69 | 70 | return None 71 | 72 | def _verify_backing_lun(self, iqn, tid): 73 | backing_lun = True 74 | capture = False 75 | target_info = [] 76 | 77 | (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True) 78 | lines = out.split('\n') 79 | 80 | for line in lines: 81 | if iqn in line and "Target %s" % tid in line: 82 | capture = True 83 | if capture: 84 | target_info.append(line) 85 | if iqn not in line and 'Target ' in line: 86 | capture = False 87 | 88 | if ' LUN: 1' not in target_info: 89 | backing_lun = False 90 | 91 | return backing_lun 92 | 93 | def _recreate_backing_lun(self, iqn, tid, name, path): 94 | LOG.warning(_LW('Attempting recreate of backing lun...')) 95 | 96 | # Since we think the most common case of this is a dev busy 97 | # (create vol from snapshot) we're going to add a sleep here 98 | # this will hopefully give things enough time to stabilize 99 | # how long should we wait?? 
I have no idea, let's go big 100 | # and error on the side of caution 101 | time.sleep(10) 102 | 103 | (out, err) = (None, None) 104 | try: 105 | (out, err) = utils.execute('tgtadm', '--lld', 'iscsi', 106 | '--op', 'new', '--mode', 107 | 'logicalunit', '--tid', 108 | tid, '--lun', '1', '-b', 109 | path, run_as_root=True) 110 | except putils.ProcessExecutionError as e: 111 | LOG.error(_LE("Failed recovery attempt to create " 112 | "iscsi backing lun for Volume " 113 | "ID:%(vol_id)s: %(e)s"), 114 | {'vol_id': name, 'e': e}) 115 | finally: 116 | LOG.debug('StdOut from recreate backing lun: %s', out) 117 | LOG.debug('StdErr from recreate backing lun: %s', err) 118 | 119 | def _get_iscsi_target(self, context, vol_id): 120 | return 0 121 | 122 | def _get_target_and_lun(self, context, volume): 123 | lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1 124 | iscsi_target = 0 # NOTE(jdg): Not used by tgtadm 125 | return iscsi_target, lun 126 | 127 | def _get_target_chap_auth(self, context, iscsi_name): 128 | """Get the current chap auth username and password.""" 129 | volumes_dir = self.volumes_dir 130 | vol_id = iscsi_name.split(':')[1] 131 | volume_path = os.path.join(volumes_dir, vol_id) 132 | 133 | try: 134 | with open(volume_path, 'r') as f: 135 | volume_conf = f.read() 136 | except IOError as e_fnf: 137 | LOG.debug('Failed to open config for Volume %(vol_id)s: %(e)s', 138 | {'vol_id': vol_id, 'e': e_fnf}) 139 | # tgt is linux specific 140 | if e_fnf.errno == 2: 141 | return None 142 | else: 143 | raise 144 | except Exception as e_vol: 145 | LOG.error(_LE('Failed to open config for %(vol_id)s: %(e)s'), 146 | {'vol_id': vol_id, 'e': e_vol}) 147 | raise 148 | 149 | m = re.search('incominguser (\w+) (\w+)', volume_conf) 150 | if m: 151 | return (m.group(1), m.group(2)) 152 | LOG.debug('Failed to find CHAP auth from config for %s', vol_id) 153 | return None 154 | 155 | @utils.retry(putils.ProcessExecutionError) 156 | def _do_tgt_update(self, name): 157 | 
(out, err) = utils.execute('tgt-admin', '--update', name, 158 | run_as_root=True) 159 | LOG.debug("StdOut from tgt-admin --update: %s", out) 160 | LOG.debug("StdErr from tgt-admin --update: %s", err) 161 | 162 | def create_iscsi_target(self, name, tid, lun, path, 163 | chap_auth=None, **kwargs): 164 | 165 | # Note(jdg) tid and lun aren't used by TgtAdm but remain for 166 | # compatibility 167 | 168 | # NOTE(jdg): Remove this when we get to the bottom of bug: #1398078 169 | # for now, since we intermittently hit target already exists we're 170 | # adding some debug info to try and pinpoint what's going on 171 | (out, err) = utils.execute('tgtadm', 172 | '--lld', 173 | 'iscsi', 174 | '--op', 175 | 'show', 176 | '--mode', 177 | 'target', 178 | run_as_root=True) 179 | LOG.debug("Targets prior to update: %s", out) 180 | fileutils.ensure_tree(self.volumes_dir) 181 | 182 | vol_id = name.split(':')[1] 183 | write_cache = self.configuration.get('iscsi_write_cache', 'on') 184 | driver = self.iscsi_protocol 185 | 186 | if chap_auth is None: 187 | volume_conf = self.VOLUME_CONF % (name, path, driver, write_cache) 188 | else: 189 | chap_str = 'incominguser %s %s' % chap_auth 190 | volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (name, path, 191 | driver, chap_str, 192 | write_cache) 193 | LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id) 194 | volumes_dir = self.volumes_dir 195 | volume_path = os.path.join(volumes_dir, vol_id) 196 | 197 | if os.path.exists(volume_path): 198 | LOG.warning(_LW('Persistence file already exists for volume, ' 199 | 'found file at: %s'), volume_path) 200 | f = open(volume_path, 'w+') 201 | f.write(volume_conf) 202 | f.close() 203 | LOG.debug(('Created volume path %(vp)s,\n' 204 | 'content: %(vc)s'), 205 | {'vp': volume_path, 'vc': volume_conf}) 206 | 207 | old_persist_file = None 208 | old_name = kwargs.get('old_name', None) 209 | if old_name is not None: 210 | LOG.debug('Detected old persistence file for volume ' 211 | '%{vol}s at 
%{old_name}s', 212 | {'vol': vol_id, 'old_name': old_name}) 213 | old_persist_file = os.path.join(volumes_dir, old_name) 214 | 215 | try: 216 | # With the persistent tgts we create them 217 | # by creating the entry in the persist file 218 | # and then doing an update to get the target 219 | # created. 220 | 221 | self._do_tgt_update(name) 222 | except putils.ProcessExecutionError as e: 223 | if "target already exists" in e.stderr: 224 | # Adding the additional Warning message below for a clear 225 | # ER marker (Ref bug: #1398078). 226 | LOG.warning(_LW('Could not create target because ' 227 | 'it already exists for volume: %s'), vol_id) 228 | LOG.debug('Exception was: %s', e) 229 | 230 | else: 231 | LOG.error(_LE("Failed to create iscsi target for Volume " 232 | "ID: %(vol_id)s: %(e)s"), 233 | {'vol_id': vol_id, 'e': e}) 234 | 235 | # Don't forget to remove the persistent file we created 236 | os.unlink(volume_path) 237 | raise exception.ISCSITargetCreateFailed(volume_id=vol_id) 238 | 239 | # Grab targets list for debug 240 | # Consider adding a check for lun 0 and 1 for tgtadm 241 | # before considering this as valid 242 | (out, err) = utils.execute('tgtadm', 243 | '--lld', 244 | 'iscsi', 245 | '--op', 246 | 'show', 247 | '--mode', 248 | 'target', 249 | run_as_root=True) 250 | LOG.debug("Targets after update: %s", out) 251 | 252 | iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) 253 | tid = self._get_target(iqn) 254 | if tid is None: 255 | LOG.error(_LE("Failed to create iscsi target for Volume " 256 | "ID: %(vol_id)s. 
Please ensure your tgtd config " 257 | "file contains 'include %(volumes_dir)s/*'"), { 258 | 'vol_id': vol_id, 259 | 'volumes_dir': volumes_dir, }) 260 | raise exception.NotFound() 261 | 262 | # NOTE(jdg): Sometimes we have some issues with the backing lun 263 | # not being created, believe this is due to a device busy 264 | # or something related, so we're going to add some code 265 | # here that verifies the backing lun (lun 1) was created 266 | # and we'll try and recreate it if it's not there 267 | if not self._verify_backing_lun(iqn, tid): 268 | try: 269 | self._recreate_backing_lun(iqn, tid, name, path) 270 | except putils.ProcessExecutionError: 271 | os.unlink(volume_path) 272 | raise exception.ISCSITargetCreateFailed(volume_id=vol_id) 273 | 274 | # Finally check once more and if no go, fail and punt 275 | if not self._verify_backing_lun(iqn, tid): 276 | os.unlink(volume_path) 277 | raise exception.ISCSITargetCreateFailed(volume_id=vol_id) 278 | 279 | if old_persist_file is not None and os.path.exists(old_persist_file): 280 | os.unlink(old_persist_file) 281 | 282 | return tid 283 | 284 | def initialize_connection(self, volume, connector): 285 | iscsi_properties = self._get_iscsi_properties(volume, 286 | connector.get( 287 | 'multipath')) 288 | return { 289 | 'driver_volume_type': self.iscsi_protocol, 290 | 'data': iscsi_properties 291 | } 292 | 293 | def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): 294 | LOG.info(_LI('Removing iscsi_target for Volume ID: %s'), vol_id) 295 | vol_uuid_file = vol_name 296 | volume_path = os.path.join(self.volumes_dir, vol_uuid_file) 297 | if not os.path.exists(volume_path): 298 | LOG.warning(_LW('Volume path %s does not exist, ' 299 | 'nothing to remove.'), volume_path) 300 | return 301 | 302 | if os.path.isfile(volume_path): 303 | iqn = '%s%s' % (self.iscsi_target_prefix, 304 | vol_uuid_file) 305 | else: 306 | raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) 307 | try: 308 | # NOTE(vish): --force 
is a workaround for bug: 309 | # https://bugs.launchpad.net/cinder/+bug/1159948 310 | utils.execute('tgt-admin', 311 | '--force', 312 | '--delete', 313 | iqn, 314 | run_as_root=True) 315 | except putils.ProcessExecutionError as e: 316 | non_fatal_errors = ("can't find the target", 317 | "access control rule does not exist") 318 | 319 | if any(error in e.stderr for error in non_fatal_errors): 320 | LOG.warning(_LW("Failed target removal because target or " 321 | "ACL's couldn't be found for iqn: %s."), iqn) 322 | else: 323 | LOG.error(_LE("Failed to remove iscsi target for Volume " 324 | "ID: %(vol_id)s: %(e)s"), 325 | {'vol_id': vol_id, 'e': e}) 326 | raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) 327 | # NOTE(jdg): There's a bug in some versions of tgt that 328 | # will sometimes fail silently when using the force flag 329 | # https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343 330 | # For now work-around by checking if the target was deleted, 331 | # if it wasn't, try again without the force. 
332 | 333 | # This will NOT do any good for the case of mutliple sessions 334 | # which the force was aded for but it will however address 335 | # the cases pointed out in bug: 336 | # https://bugs.launchpad.net/cinder/+bug/1304122 337 | if self._get_target(iqn): 338 | try: 339 | LOG.warning(_LW('Silent failure of target removal ' 340 | 'detected, retry....')) 341 | utils.execute('tgt-admin', 342 | '--delete', 343 | iqn, 344 | run_as_root=True) 345 | except putils.ProcessExecutionError as e: 346 | LOG.error(_LE("Failed to remove iscsi target for Volume " 347 | "ID: %(vol_id)s: %(e)s"), 348 | {'vol_id': vol_id, 'e': e}) 349 | raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) 350 | 351 | # NOTE(jdg): This *should* be there still but incase 352 | # it's not we don't care, so just ignore it if was 353 | # somehow deleted between entry of this method 354 | # and here 355 | if os.path.exists(volume_path): 356 | os.unlink(volume_path) 357 | else: 358 | LOG.debug('Volume path %s not found at end, ' 359 | 'of remove_iscsi_target.', volume_path) 360 | -------------------------------------------------------------------------------- /qosctrl/src/cinder/volume/drivers/provlvm.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 2 | # not use this file except in compliance with the License. You may obtain 3 | # a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 
12 | # 13 | # Author: Moo-Ryong Ra, mra@research.att.com 14 | # Disclaimer: This source file is a modified version of the lvm driver 15 | # in OpenStack Kilo sources. 16 | 17 | """ 18 | Driver for Linux servers running LVM. 19 | 20 | """ 21 | 22 | import math 23 | import os 24 | import socket 25 | 26 | from oslo_concurrency import processutils 27 | from oslo_config import cfg 28 | from oslo_log import log as logging 29 | from oslo_utils import importutils 30 | from oslo_utils import units 31 | 32 | from cinder import context 33 | from cinder.brick import exception as brick_exception 34 | from cinder.brick.local_dev import lvm as lvm 35 | from cinder import exception 36 | from cinder.i18n import _, _LE, _LI, _LW 37 | from cinder.image import image_utils 38 | from cinder.openstack.common import fileutils 39 | from cinder import utils 40 | from cinder.volume import driver 41 | from cinder.volume import utils as volutils 42 | 43 | from cinder.volume import qos_specs 44 | from cinder.volume import volume_types 45 | from cinder.common import ioarbparams as ioarbiter 46 | from cinder.common import ioarbresv as ioarbresv 47 | 48 | LOG = logging.getLogger(__name__) 49 | 50 | # FIXME(jdg): We'll put the lvm_ prefix back on these when we 51 | # move over to using this as the real LVM driver, for now we'll 52 | # rename them so that the config generation utility doesn't barf 53 | # on duplicate entries. 54 | volume_opts = [ 55 | cfg.StrOpt('volume_group', 56 | default='cinder-volumes', 57 | help='Name for the VG that will contain exported volumes'), 58 | cfg.IntOpt('lvm_mirrors', 59 | default=0, 60 | help='If >0, create LVs with multiple mirrors. 
Note that ' 61 | 'this requires lvm_mirrors + 2 PVs with available space'), 62 | cfg.StrOpt('lvm_type', 63 | default='default', 64 | choices=['default', 'thin'], 65 | help='Type of LVM volumes to deploy'), 66 | cfg.StrOpt('lvm_conf_file', 67 | default='/etc/cinder/lvm.conf', 68 | help='LVM conf file to use for the LVM driver in Cinder; ' 69 | 'this setting is ignored if the specified file does ' 70 | 'not exist (You can also specify \'None\' to not use ' 71 | 'a conf file even if one exists).'), 72 | # [MRA] custom configuration parameters. 73 | cfg.StrOpt('ioarb_raidconf', 74 | default='unknown', 75 | choices=['unknown', 'jbod', 'raid0', 'raid1', 'raid5', 'raid6'], 76 | help='Software RAID configuration'), 77 | cfg.StrOpt('ioarb_ndisk', 78 | default='1', 79 | help='How many disks are used to contruct the RAID.'), 80 | cfg.StrOpt('ioarb_total_iops_4k', 81 | default='200', 82 | help='Total IOPS that can be used for IOPS reservation.'), 83 | ] 84 | 85 | CONF = cfg.CONF 86 | CONF.register_opts(volume_opts) 87 | 88 | 89 | class LVMVolumeDriver(driver.VolumeDriver): 90 | """Executes commands relating to Volumes.""" 91 | 92 | VERSION = '3.0.0' 93 | 94 | def __init__(self, vg_obj=None, *args, **kwargs): 95 | # Parent sets db, host, _execute and base config 96 | super(LVMVolumeDriver, self).__init__(*args, **kwargs) 97 | 98 | self.configuration.append_config_values(volume_opts) 99 | self.hostname = socket.gethostname() 100 | self.vg = vg_obj 101 | self.backend_name =\ 102 | self.configuration.safe_get('volume_backend_name') or 'LVM' 103 | 104 | # Target Driver is what handles data-transport 105 | # Transport specific code should NOT be in 106 | # the driver (control path), this way 107 | # different target drivers can be added (iscsi, FC etc) 108 | target_driver = \ 109 | self.target_mapping[self.configuration.safe_get('iscsi_helper')] 110 | 111 | LOG.debug('Attempting to initialize LVM driver with the ' 112 | 'following target_driver: %s', 113 | target_driver) 114 | 115 | 
self.target_driver = importutils.import_object( 116 | target_driver, 117 | configuration=self.configuration, 118 | db=self.db, 119 | executor=self._execute) 120 | self.protocol = self.target_driver.protocol 121 | 122 | def _sizestr(self, size_in_g): 123 | return '%sg' % size_in_g 124 | 125 | def _volume_not_present(self, volume_name): 126 | return self.vg.get_volume(volume_name) is None 127 | 128 | def _delete_volume(self, volume, is_snapshot=False): 129 | """Deletes a logical volume.""" 130 | if self.configuration.volume_clear != 'none' and \ 131 | self.configuration.lvm_type != 'thin': 132 | self._clear_volume(volume, is_snapshot) 133 | 134 | name = volume['name'] 135 | if is_snapshot: 136 | name = self._escape_snapshot(volume['name']) 137 | self.vg.delete(name) 138 | 139 | def _clear_volume(self, volume, is_snapshot=False): 140 | # zero out old volumes to prevent data leaking between users 141 | # TODO(ja): reclaiming space should be done lazy and low priority 142 | if is_snapshot: 143 | # if the volume to be cleared is a snapshot of another volume 144 | # we need to clear out the volume using the -cow instead of the 145 | # directly volume path. We need to skip this if we are using 146 | # thin provisioned LVs. 147 | # bug# lp1191812 148 | dev_path = self.local_path(volume) + "-cow" 149 | else: 150 | dev_path = self.local_path(volume) 151 | 152 | # TODO(jdg): Maybe we could optimize this for snaps by looking at 153 | # the cow table and only overwriting what's necessary? 
154 | # for now we're still skipping on snaps due to hang issue 155 | if not os.path.exists(dev_path): 156 | msg = (_LE('Volume device file path %s does not exist.') 157 | % dev_path) 158 | LOG.error(msg) 159 | raise exception.VolumeBackendAPIException(data=msg) 160 | 161 | size_in_g = volume.get('volume_size') or volume.get('size') 162 | if size_in_g is None: 163 | msg = (_LE("Size for volume: %s not found, " 164 | "cannot secure delete.") % volume['id']) 165 | LOG.error(msg) 166 | raise exception.InvalidParameterValue(msg) 167 | 168 | # clear_volume expects sizes in MiB, we store integer GiB 169 | # be sure to convert before passing in 170 | vol_sz_in_meg = size_in_g * units.Ki 171 | 172 | volutils.clear_volume( 173 | vol_sz_in_meg, dev_path, 174 | volume_clear=self.configuration.volume_clear, 175 | volume_clear_size=self.configuration.volume_clear_size) 176 | 177 | def _escape_snapshot(self, snapshot_name): 178 | # Linux LVM reserves name that starts with snapshot, so that 179 | # such volume name can't be created. Mangle it. 180 | if not snapshot_name.startswith('snapshot'): 181 | return snapshot_name 182 | return '_' + snapshot_name 183 | 184 | def _create_volume(self, name, size, lvm_type, mirror_count, vg=None): 185 | vg_ref = self.vg 186 | if vg is not None: 187 | vg_ref = vg 188 | 189 | vg_ref.create_volume(name, size, lvm_type, mirror_count) 190 | 191 | def _update_volume_stats(self): 192 | """Retrieve stats info from volume group.""" 193 | 194 | LOG.debug(("Updating volume stats")) 195 | if self.vg is None: 196 | LOG.warning(_LW('Unable to update stats on non-initialized ' 197 | 'Volume Group: %s'), 198 | self.configuration.volume_group) 199 | return 200 | 201 | self.vg.update_volume_group_info() 202 | data = {} 203 | 204 | # Note(zhiteng): These information are driver/backend specific, 205 | # each driver may define these values in its own config options 206 | # or fetch from driver specific configuration file. 
207 | data["volume_backend_name"] = self.backend_name 208 | data["vendor_name"] = 'Open Source' 209 | data["driver_version"] = self.VERSION 210 | data["storage_protocol"] = self.protocol 211 | data["pools"] = [] 212 | 213 | total_capacity = 0 214 | free_capacity = 0 215 | 216 | if self.configuration.lvm_mirrors > 0: 217 | total_capacity =\ 218 | self.vg.vg_mirror_size(self.configuration.lvm_mirrors) 219 | free_capacity =\ 220 | self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors) 221 | provisioned_capacity = round( 222 | float(total_capacity) - float(free_capacity), 2) 223 | elif self.configuration.lvm_type == 'thin': 224 | total_capacity = self.vg.vg_thin_pool_size 225 | free_capacity = self.vg.vg_thin_pool_free_space 226 | provisioned_capacity = self.vg.vg_provisioned_capacity 227 | else: 228 | total_capacity = self.vg.vg_size 229 | free_capacity = self.vg.vg_free_space 230 | provisioned_capacity = round( 231 | float(total_capacity) - float(free_capacity), 2) 232 | 233 | location_info = \ 234 | ('LVMVolumeDriver:%(hostname)s:%(vg)s' 235 | ':%(lvm_type)s:%(lvm_mirrors)s' % 236 | {'hostname': self.hostname, 237 | 'vg': self.configuration.volume_group, 238 | 'lvm_type': self.configuration.lvm_type, 239 | 'lvm_mirrors': self.configuration.lvm_mirrors}) 240 | 241 | thin_enabled = self.configuration.lvm_type == 'thin' 242 | 243 | # Calculate the total volumes used by the VG group. 244 | # This includes volumes and snapshots. 245 | total_volumes = len(self.vg.get_volumes()) 246 | 247 | # Skip enabled_pools setting, treat the whole backend as one pool 248 | # XXX FIXME if multipool support is added to LVM driver. 
249 | single_pool = {} 250 | single_pool.update(dict( 251 | pool_name=data["volume_backend_name"], 252 | total_capacity_gb=total_capacity, 253 | free_capacity_gb=free_capacity, 254 | reserved_percentage=self.configuration.reserved_percentage, 255 | location_info=location_info, 256 | QoS_support=False, 257 | provisioned_capacity_gb=provisioned_capacity, 258 | max_over_subscription_ratio=( 259 | self.configuration.max_over_subscription_ratio), 260 | thin_provisioning_support=thin_enabled, 261 | thick_provisioning_support=not thin_enabled, 262 | total_volumes=total_volumes, 263 | filter_function=self.get_filter_function(), 264 | goodness_function=self.get_goodness_function(), 265 | # [MRA] announce that this cinder-volume is capable. 266 | ioarb_sttype='ioarbiter', 267 | ioarb_cvtype='provisioned', 268 | ioarb_raidconf=self.configuration.ioarb_raidconf, 269 | ioarb_ndisk=self.configuration.ioarb_ndisk, 270 | total_iops_4k=self.configuration.ioarb_total_iops_4k, 271 | provisioned_iops_4k=self.get_provisioned_iops_4k() 272 | )) 273 | data["pools"].append(single_pool) 274 | 275 | self._stats = data 276 | 277 | def get_provisioned_iops_4k(self): 278 | """Calculating a provisioned IOPS.""" 279 | 280 | resv_fpath = ioarbresv.get_resv_filepath( 281 | '/dev/' + self.vg.vg_name.split('-')[-1]) 282 | data = ioarbresv.get_resv_info(resv_fpath) 283 | 284 | #LOG.debug('[MRA] resv-info: %(resv)s' % {'resv': data}) 285 | 286 | iops = 0 287 | for volid in data: 288 | for k, v in data[volid]: 289 | if k == 'miniops': 290 | iops = iops + int(v) 291 | 292 | return iops 293 | 294 | def check_for_setup_error(self): 295 | """Verify that requirements are in place to use LVM driver.""" 296 | if self.vg is None: 297 | root_helper = utils.get_root_helper() 298 | 299 | lvm_conf_file = self.configuration.lvm_conf_file 300 | if lvm_conf_file.lower() == 'none': 301 | lvm_conf_file = None 302 | 303 | try: 304 | self.vg = lvm.LVM(self.configuration.volume_group, 305 | root_helper, 306 | 
def add_resv_info(self, volume):
    """Record an admission-control reservation entry for *volume*.

    Looks up the volume's QoS spec (when a volume type is attached) and
    appends the translated storage spec to the per-VG reservation map.

    :param volume: volume dict; 'volume_type_id' and 'id' are read.
    :raises: exception.VolumeBackendAPIException if the QoS spec lacks
             the mandatory ioarbiter storage-type field.
    """
    ctxt = context.get_admin_context()
    type_id = volume['volume_type_id']
    if type_id is not None:
        voltype, qosspec = self._retrieve_qos_info(ctxt, type_id)

        # Reject malformed requests early: the storage-type key must be
        # present for the spec translation below to make sense.
        if ioarbiter.STTYPE not in qosspec:
            msg = ('[MRA] invalid qos spec. [%(stype)s] field is missing.'
                   % {'stype': ioarbiter.STTYPE})
            LOG.error(msg)
            # BUG FIX: a bare ``raise`` with no active exception causes a
            # confusing RuntimeError; raise an explicit driver error that
            # the volume manager knows how to handle.
            raise exception.VolumeBackendAPIException(data=msg)
    else:
        # No volume type attached; fall back to an "unknown" spec.
        qosspec = {ioarbiter.STTYPE: 'ioarb-unknown'}

    stspec = ioarbiter.translate_qosspec(qosspec)
    resv_fpath = ioarbresv.get_resv_filepath(
        '/dev/' + self.vg.vg_name.split('-')[-1])
    ioarbresv.add_resv_info(resv_fpath, volume['id'], stspec)
def delete_volume(self, volume):
    """Delete a logical volume and drop its reservation entry.

    :param volume: volume dict; 'name' and 'id' are read.
    :returns: True when the LV is already gone (nothing to do).
    :raises: exception.VolumeIsBusy if the LV still has snapshots.
    """
    # NOTE(jdg): We don't need to explicitly call
    # remove export here because we already did it
    # in the manager before we got here.

    if self._volume_not_present(volume['name']):
        # If the volume isn't present, then don't attempt to delete
        return True

    if self.vg.lv_has_snapshot(volume['name']):
        # FIX: corrected 'Unabled' typo in the operator-facing message.
        LOG.error(_LE('Unable to delete due to existing snapshot '
                      'for volume: %s') % volume['name'])
        raise exception.VolumeIsBusy(volume_name=volume['name'])

    self._delete_volume(volume)

    # [MRA] remove it from the reservation map so the freed IOPS can be
    # granted to future volumes.
    resv_fpath = ioarbresv.get_resv_filepath(
        '/dev/' + self.vg.vg_name.split('-')[-1])
    ioarbresv.delete_resv_info(resv_fpath, volume['id'])

    LOG.info(_LI('Successfully deleted volume: %s'), volume['id'])
def create_cloned_volume(self, volume, src_vref):
    """Create a new volume that is a block-level clone of ``src_vref``.

    Takes a temporary snapshot of the source, dd-copies it into the
    freshly created LV, and always cleans the snapshot up afterwards.
    """
    mirror_count = (self.configuration.lvm_mirrors
                    if self.configuration.lvm_mirrors else 0)
    LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])

    src_size = src_vref['size']
    snap_id = 'tmp-snap-%s' % volume['id']
    temp_snapshot = {
        'volume_name': src_vref['name'],
        'size': src_size,
        'volume_size': src_size,
        'name': 'clone-snap-%s' % volume['id'],
        'id': snap_id,
    }

    self.create_snapshot(temp_snapshot)

    # copy_volume expects sizes in MiB, we store integer GiB
    # be sure to convert before passing in
    try:
        self._create_volume(volume['name'],
                            self._sizestr(volume['size']),
                            self.configuration.lvm_type,
                            mirror_count)

        self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
        volutils.copy_volume(
            self.local_path(temp_snapshot),
            self.local_path(volume),
            src_size * units.Ki,
            self.configuration.volume_dd_blocksize,
            execute=self._execute)
    finally:
        # Guarantee the temporary snapshot never outlives the clone op.
        self.delete_snapshot(temp_snapshot)
def manage_existing(self, volume, existing_ref):
    """Manages an existing LV.

    Renames the LV to match the expected name for the volume.
    Error checking done by manage_existing_get_size is not repeated.
    """
    source_lv = existing_ref['source-name']
    self.vg.get_volume(source_lv)

    # Attempt to rename the LV to match the OpenStack internal name.
    try:
        self.vg.rename_volume(source_lv, volume['name'])
    except processutils.ProcessExecutionError as exc:
        msg = (_("Failed to rename logical volume %(name)s, "
                 "error message was: %(err_msg)s")
               % {'name': source_lv,
                  'err_msg': exc.stderr})
        raise exception.VolumeBackendAPIException(data=msg)
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
    """Optimize the migration if the destination is on the same server.

    If the specified host is another back-end on the same server, and
    the volume is not attached, we can do the migration locally without
    going through iSCSI.

    :returns: (True, model_update) on a completed local migration, or
              (False, None) when this optimization does not apply.
    :raises: exception.VolumeBackendAPIException if source and
             destination are the same volume group.
    """
    false_ret = (False, None)
    if volume['status'] != 'available':
        return false_ret
    if 'location_info' not in host['capabilities']:
        return false_ret
    info = host['capabilities']['location_info']
    try:
        (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
            info.split(':')
        lvm_mirrors = int(lvm_mirrors)
    except ValueError:
        # Malformed location_info: not one of ours.
        return false_ret
    if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
        return false_ret

    if dest_vg != self.vg.vg_name:
        vg_list = volutils.get_all_volume_groups()
        try:
            # FIX: use the builtin next() instead of the Python-2-only
            # generator method .next().
            next(vg for vg in vg_list if vg['name'] == dest_vg)
        except StopIteration:
            message = (_LE("Destination Volume Group %s does not exist") %
                       dest_vg)
            LOG.error(message)
            return false_ret

        helper = utils.get_root_helper()

        lvm_conf_file = self.configuration.lvm_conf_file
        if lvm_conf_file.lower() == 'none':
            lvm_conf_file = None

        dest_vg_ref = lvm.LVM(dest_vg, helper,
                              lvm_type=lvm_type,
                              executor=self._execute,
                              lvm_conf=lvm_conf_file)

        self.remove_export(ctxt, volume)
        self._create_volume(volume['name'],
                            self._sizestr(volume['size']),
                            lvm_type,
                            lvm_mirrors,
                            dest_vg_ref)
        # copy_volume expects sizes in MiB, we store integer GiB
        # be sure to convert before passing in
        size_in_mb = int(volume['size']) * units.Ki
        volutils.copy_volume(self.local_path(volume),
                             self.local_path(volume, vg=dest_vg),
                             size_in_mb,
                             self.configuration.volume_dd_blocksize,
                             execute=self._execute)
        self._delete_volume(volume)
        model_update = self.create_export(ctxt, volume, vg=dest_vg)

        return (True, model_update)
    else:
        # BUG FIX: the message was built as a (format, args) tuple, so
        # the %(id)s/%(name)s placeholders were never interpolated and a
        # tuple was passed as exception data; apply % directly.
        message = (_("Refusing to migrate volume ID: %(id)s. Please "
                     "check your configuration because source and "
                     "destination are the same Volume Group: %(name)s.")
                   % {'id': volume['id'], 'name': self.vg.vg_name})
        # LOG.error, not LOG.exception: there is no active exception to
        # attach a traceback to at this point.
        LOG.error(message)
        raise exception.VolumeBackendAPIException(data=message)
class LVMISERDriver(LVMVolumeDriver):
    """Empty class designation for LVMISER.

    Since we've decoupled the inheritance of data path in LVM we
    don't really need this class any longer. We do however want
    to keep it (at least for now) for back compat in driver naming.

    """
    def __init__(self, *args, **kwargs):
        super(LVMISERDriver, self).__init__(*args, **kwargs)

        LOG.warning(_LW('LVMISERDriver is deprecated, you should '
                        'now just use LVMVolumeDriver and specify '
                        'target_helper for the target driver you '
                        'wish to use. In order to enable iser, please '
                        'set iscsi_protocol with the value iser.'))

        LOG.debug('Attempting to initialize LVM driver with the '
                  'following target_driver: '
                  'cinder.volume.targets.iser.ISERTgtAdm')
        # Hard-wire the iSER target driver, overriding whatever the base
        # class selected from configuration; that override is the whole
        # point of this back-compat shim class.
        self.target_driver = importutils.import_object(
            'cinder.volume.targets.iser.ISERTgtAdm',
            configuration=self.configuration,
            db=self.db,
            executor=self._execute)
You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | # 15 | # Author: Moo-Ryong Ra, mra@research.att.com 16 | # Disclaimer: This source file is a modified version of the lvm driver 17 | # in OpenStack Kilo sources. 18 | 19 | """ 20 | Driver for Linux servers running LVM. 21 | 22 | """ 23 | 24 | import math 25 | import os 26 | import socket 27 | import time 28 | 29 | from oslo_concurrency import processutils 30 | from oslo_config import cfg 31 | from oslo_log import log as logging 32 | from oslo_utils import importutils 33 | from oslo_utils import units 34 | 35 | from cinder import context 36 | from cinder.brick import exception as brick_exception 37 | from cinder.brick.local_dev import ioarblvm as lvm 38 | from cinder.brick.local_dev import ioarbcontainer as contutil 39 | from cinder import exception 40 | from cinder.i18n import _, _LE, _LI, _LW 41 | from cinder.image import image_utils 42 | from cinder.openstack.common import fileutils 43 | from cinder.common import ioarbparams as ioarbiter 44 | from cinder.common import ioarbresv as ioarbresv 45 | from cinder import utils 46 | 47 | from cinder.volume import driver 48 | from cinder.volume import utils as volutils 49 | from cinder.volume import qos_specs 50 | from cinder.volume import volume_types 51 | 52 | LOG = logging.getLogger(__name__) 53 | 54 | # FIXME(jdg): We'll put the lvm_ prefix back on these when we 55 | # move over to using this as the real LVM driver, for now we'll 56 | # rename them so that the config generation utility doesn't barf 57 | # on duplicate entries. 
58 | volume_opts = [ 59 | cfg.StrOpt('volume_group', 60 | default='cinder-volumes', 61 | help='Name for the VG that will contain exported volumes'), 62 | cfg.IntOpt('lvm_mirrors', 63 | default=0, 64 | help='If >0, create LVs with multiple mirrors. Note that ' 65 | 'this requires lvm_mirrors + 2 PVs with available space'), 66 | cfg.StrOpt('lvm_type', 67 | default='default', 68 | choices=['default', 'thin'], 69 | help='Type of LVM volumes to deploy'), 70 | cfg.StrOpt('lvm_conf_file', 71 | default='/etc/cinder/lvm.conf', 72 | help='LVM conf file to use for the LVM driver in Cinder; ' 73 | 'this setting is ignored if the specified file does ' 74 | 'not exist (You can also specify \'None\' to not use ' 75 | 'a conf file even if one exists).'), 76 | cfg.IntOpt('reclaim_interval', 77 | default=300, 78 | help='How much you would wait before you reclaim block resources.'), 79 | cfg.StrOpt('physical_devices', 80 | default='auto', 81 | help='This setting contains a list of block devices that' 82 | 'ioarbiter backend will manage. 
def __init__(self, vg_obj=None, *args, **kwargs):
    """Initialize the ioarbiter LVM driver.

    :param vg_obj: optional pre-built brick LVM volume-group object;
                   when None the VG is set up later (see
                   check_for_setup_error / create_volume).
    """
    # Parent sets db, host, _execute and base config
    super(IOArbLVMVolumeDriver, self).__init__(*args, **kwargs)

    self.configuration.append_config_values(volume_opts)
    self.hostname = socket.gethostname()
    self.vg = vg_obj
    self.backend_name =\
        self.configuration.safe_get('volume_backend_name') or 'LVM'

    # Target Driver is what handles data-transport
    # Transport specific code should NOT be in
    # the driver (control path), this way
    # different target drivers can be added (iscsi, FC etc)
    target_driver = \
        self.target_mapping[self.configuration.safe_get('iscsi_helper')]

    LOG.debug('Attempting to initialize LVM driver with the '
              'following target_driver: %s',
              target_driver)

    self.target_driver = importutils.import_object(
        target_driver,
        configuration=self.configuration,
        db=self.db,
        executor=self._execute)
    self.protocol = self.target_driver.protocol

    # [MRA] ioarbiter specifics: remember the configured device list and
    # immediately filter out devices that are already in use.
    self.ref_physical_devices = self.configuration.physical_devices
    self._update_available_physical_devices()

    # Maps array device -> [lv_count, last_checked_timestamp]; consumed
    # by _reclaim_unused_storage to detect idle arrays.
    self.raidstat = {}
139 | self.configuration.physical_devices = ','.join('{1}'.format(*k) 140 | for k in enumerate(new_devlist)) 141 | return len(new_devlist) 142 | 143 | def _sizestr(self, size_in_g): 144 | return '%sg' % size_in_g 145 | 146 | def _volume_not_present(self, volume_name): 147 | return self.vg.get_volume(volume_name) is None 148 | 149 | def _delete_volume(self, volume, is_snapshot=False): 150 | """Deletes a logical volume.""" 151 | if self.configuration.volume_clear != 'none' and \ 152 | self.configuration.lvm_type != 'thin': 153 | self._clear_volume(volume, is_snapshot) 154 | 155 | name = volume['name'] 156 | if is_snapshot: 157 | name = self._escape_snapshot(volume['name']) 158 | self.vg.delete(name) 159 | 160 | def _clear_volume(self, volume, is_snapshot=False): 161 | # zero out old volumes to prevent data leaking between users 162 | # TODO(ja): reclaiming space should be done lazy and low priority 163 | if is_snapshot: 164 | # if the volume to be cleared is a snapshot of another volume 165 | # we need to clear out the volume using the -cow instead of the 166 | # directly volume path. We need to skip this if we are using 167 | # thin provisioned LVs. 168 | # bug# lp1191812 169 | dev_path = self.local_path(volume) + "-cow" 170 | else: 171 | dev_path = self.local_path(volume) 172 | 173 | # TODO(jdg): Maybe we could optimize this for snaps by looking at 174 | # the cow table and only overwriting what's necessary? 
175 | # for now we're still skipping on snaps due to hang issue 176 | if not os.path.exists(dev_path): 177 | msg = (_LE('Volume device file path %s does not exist.') 178 | % dev_path) 179 | LOG.error(msg) 180 | raise exception.VolumeBackendAPIException(data=msg) 181 | 182 | size_in_g = volume.get('volume_size') or volume.get('size') 183 | if size_in_g is None: 184 | msg = (_LE("Size for volume: %s not found, " 185 | "cannot secure delete.") % volume['id']) 186 | LOG.error(msg) 187 | raise exception.InvalidParameterValue(msg) 188 | 189 | # clear_volume expects sizes in MiB, we store integer GiB 190 | # be sure to convert before passing in 191 | vol_sz_in_meg = size_in_g * units.Ki 192 | 193 | volutils.clear_volume( 194 | vol_sz_in_meg, dev_path, 195 | volume_clear=self.configuration.volume_clear, 196 | volume_clear_size=self.configuration.volume_clear_size) 197 | 198 | def _escape_snapshot(self, snapshot_name): 199 | # Linux LVM reserves name that starts with snapshot, so that 200 | # such volume name can't be created. Mangle it. 201 | if not snapshot_name.startswith('snapshot'): 202 | return snapshot_name 203 | return '_' + snapshot_name 204 | 205 | def _create_volume(self, name, size, lvm_type, mirror_count, vg=None, cmd_prefix=None): 206 | vg_ref = self.vg 207 | if vg is not None: 208 | vg_ref = vg 209 | 210 | vg_ref.create_volume(name, size, lvm_type, mirror_count, cmd_prefix=cmd_prefix) 211 | 212 | def _update_volume_stats(self): 213 | """Retrieve stats info from volume group.""" 214 | 215 | # [MRA] piggypack periodic tasks here. 216 | root_helper = utils.get_root_helper() 217 | self._reclaim_unused_storage() 218 | ndev = self._update_available_physical_devices() 219 | if ndev == 0: 220 | LOG.debug("[MRA] nothing to update. 
ndev=0") 221 | self._stats = {} 222 | #return 223 | 224 | LOG.debug(("Updating volume stats")) 225 | 226 | # if self.vg is None: 227 | # LOG.warning(_LW('Unable to update stats on non-initialized ' 228 | # 'Volume Group: %s'), 229 | # self.configuration.volume_group) 230 | # return 231 | # 232 | # self.vg.update_volume_group_info() 233 | data = {} 234 | 235 | # Note(zhiteng): These information are driver/backend specific, 236 | # each driver may define these values in its own config options 237 | # or fetch from driver specific configuration file. 238 | data["volume_backend_name"] = self.backend_name 239 | data["vendor_name"] = 'ATT Research' 240 | data["driver_version"] = self.VERSION 241 | data["storage_protocol"] = self.protocol 242 | data["pools"] = [] 243 | 244 | total_capacity = 0 245 | free_capacity = 0 246 | 247 | # if self.configuration.lvm_mirrors > 0: 248 | # total_capacity =\ 249 | # self.vg.vg_mirror_size(self.configuration.lvm_mirrors) 250 | # free_capacity =\ 251 | # self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors) 252 | # provisioned_capacity = round( 253 | # float(total_capacity) - float(free_capacity), 2) 254 | # elif self.configuration.lvm_type == 'thin': 255 | # total_capacity = self.vg.vg_thin_pool_size 256 | # free_capacity = self.vg.vg_thin_pool_free_space 257 | # provisioned_capacity = self.vg.vg_provisioned_capacity 258 | # else: 259 | # total_capacity = self.vg.vg_size 260 | # free_capacity = self.vg.vg_free_space 261 | # provisioned_capacity = round( 262 | # float(total_capacity) - float(free_capacity), 2) 263 | 264 | if ndev == 0: 265 | devinfo = [] 266 | total_capacity = 0 267 | else: 268 | devinfo = lvm.LVM.get_blkdev_info( 269 | root_helper, self.configuration.physical_devices) 270 | total_capacity = sum(dev['size'] for dev in devinfo) 271 | 272 | free_capacity = total_capacity 273 | provisioned_capacity = round( 274 | float(total_capacity) - float(free_capacity), 2) 275 | 276 | location_info = \ 277 | 
def _create_initial_vg(self, conf):
    """Create an initial volume group for ioarbiter backend.

    We will use the first block device specified in 'physical_devices'
    field as a starting volume group for cinder-volume service.
    we might need a better mechanism later.
    """
    LOG.debug('[MRA] initial vg [%(dev)s]' % {'dev': conf.physical_devices})
    first_dev = conf.physical_devices.split(',')[0]
    return conf.volume_group, [first_dev]
def _reclaim_unused_storage(self):
    """If an empty array exists, reclaim it for future use.

    Called periodically (piggybacked on stats updates).  An array is
    torn down only after it has been observed empty on two consecutive
    checks at least ``reclaim_interval`` seconds apart, so a VG that is
    merely between volumes is not destroyed prematurely.
    """

    # get root_helper.
    root_helper = utils.get_root_helper()
    # Candidate devices: software RAID arrays plus single-disk JBODs
    # managed under the container VG prefix.
    arraydevs = lvm.LVM.get_raid_arrays(root_helper)
    jboddevs = lvm.LVM.get_jbods_devs(root_helper,
                                      contutil._get_cont_vg_prefix())
    arraydevs.extend(jboddevs)

    for arrdev in arraydevs:
        vgname = contutil._get_cont_vg_name(arrdev)
        cnt = lvm.LVM.get_lvcnt_by_vgname(root_helper, vgname)
        if cnt == 0:
            # Empty now; only reclaim if it was ALSO empty last time we
            # looked (raidstat[arrdev][0] == 0) and long enough ago.
            if (arrdev in self.raidstat and self.raidstat[arrdev][0] == 0):
                if (time.time() - self.raidstat[arrdev][1] >
                        self.configuration.reclaim_interval):
                    # reclaim it if it has been unused for more than 5 min.
                    contutil.remove_cont_cinder_volume(root_helper, arrdev)
                    lvm.LVM.remove_array(root_helper,
                                         arrdev,
                                         vgname=vgname)
                    # Forget the device so a recreated array starts fresh.
                    del self.raidstat[arrdev]
                    LOG.debug('[MRA] array [%(arr)s] has been reclaimed' % {'arr': arrdev})
                    continue

        # update stat.
        self.raidstat[arrdev] = [cnt, time.time()]
418 | def _retrieve_qos_info(self, ctxt, type_id): 419 | qosspec = {} 420 | volume_type = volume_types.get_volume_type(ctxt, type_id) 421 | qos_specs_id = volume_type.get('qos_specs_id') 422 | voltype = volume_type.get('extra_specs') 423 | 424 | if qos_specs_id is not None: 425 | qosspec = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] 426 | 427 | return voltype, qosspec 428 | 429 | 430 | def _fork_cinder_volume_service(self, blkdev, root_helper, stspec, volume): 431 | """Create (or retrieve) a container for cinder-volume service.""" 432 | 433 | LOG.debug('[MRA] entered _fork_cinder_volume_service()' 434 | ' with [%(blk)s]' % {'blk': blkdev}) 435 | 436 | # container naming. 437 | cont_name = contutil._get_container_name(blkdev) 438 | backend_name = contutil._get_cont_backend_name(blkdev) 439 | 440 | # create a cinder.conf for the container. 441 | config = contutil.create_cinder_conf_for_container(blkdev, 442 | stspec, None) 443 | 444 | # memo reservation info. 445 | resv_fpath = ioarbresv.get_resv_filepath(blkdev) 446 | ioarbresv.add_resv_info(resv_fpath, volume['id'], stspec) 447 | 448 | # create a container instance. 449 | config = contutil.check_container_is_running(config, root_helper) 450 | config['resv_info'] = resv_fpath 451 | if not 'container_id' in config: 452 | config = contutil.create_container_instance(config, root_helper) 453 | 454 | # configure container. 455 | config = contutil.configure_container_instance(config, root_helper) 456 | 457 | # restart daemons. 458 | svclist = ['tgt', 'cinder-volume'] 459 | contutil.restart_processes_in_container(config['container_name'] 460 | , svclist, root_helper) 461 | 462 | return config 463 | 464 | def create_volume(self, volume): 465 | """Creates a logical volume. 466 | [MRA] this function is extended to support dynamic 467 | RAID configuration. 468 | """ 469 | 470 | # ensure usable block devices. 
471 | ndev = self._update_available_physical_devices() 472 | if ndev == 0: 473 | LOG.error('[MRA] no available block devices.') 474 | raise 475 | 476 | # variable initialization. 477 | root_helper = utils.get_root_helper() 478 | vtype = None 479 | qosspec = None 480 | 481 | mirror_count = 0 482 | if self.configuration.lvm_mirrors: 483 | mirror_count = self.configuration.lvm_mirrors 484 | 485 | ctxt = context.get_admin_context() 486 | type_id = volume['volume_type_id'] 487 | if type_id is not None: 488 | voltype, qosspec = self._retrieve_qos_info(ctxt, type_id) 489 | 490 | # check request format. 491 | if not ioarbiter.STTYPE in qosspec.keys(): 492 | LOG.error('[MRA] invalid qos spec. [%(stype)s] field is missing.' 493 | % {'stype': ioarbiter.STTYPE}) 494 | raise 495 | else: 496 | # no volume type. 497 | qosspec = { ioarbiter.STTYPE: "ioarb-unknown" } 498 | 499 | # qosspec translation. 500 | stspec = ioarbiter.translate_qosspec(qosspec) 501 | stspec['phydevs'] = self.configuration.physical_devices 502 | LOG.debug('[MRA] setup: %(spec)s' % {'spec': stspec}) 503 | 504 | # sanity check. 505 | if len(stspec['phydevs']) == 0: 506 | LOG.error('[MRA] no avaialble physical devices.') 507 | raise 508 | 509 | # software RAID configuration. new_raiddev looks like '/dev/md[n]' 510 | new_blkdev = lvm.LVM.create_software_raid(root_helper, stspec) 511 | new_vgname = contutil._get_cont_vg_name(new_blkdev) 512 | 513 | # logical volume creation. 514 | newvg = lvm.LVM(new_vgname, root_helper, 515 | create_vg=True, 516 | physical_volumes=[ new_blkdev ]) 517 | 518 | # invoke a container & update volume metadata. 
519 | config = self._fork_cinder_volume_service( 520 | new_blkdev, root_helper, stspec, volume) 521 | cmd_prefix = contutil.get_cmdprefix_for_exec_in_cont(config) 522 | self._create_volume(volume['name'], 523 | self._sizestr(volume['size']), 524 | self.configuration.lvm_type, 525 | mirror_count, 526 | vg=newvg, 527 | cmd_prefix=cmd_prefix) 528 | 529 | # return new cinder-volume endpoint. 530 | newhost = (config['container_name'] + '@' + config['backend_name']) 531 | 532 | LOG.debug('[MRA] volume is created. newhost: [%(newhost)s]' 533 | % {'newhost': newhost}) 534 | 535 | return { 'host': newhost } 536 | 537 | 538 | def create_volume_from_snapshot(self, volume, snapshot): 539 | """Creates a volume from a snapshot.""" 540 | self._create_volume(volume['name'], 541 | self._sizestr(volume['size']), 542 | self.configuration.lvm_type, 543 | self.configuration.lvm_mirrors) 544 | 545 | # Some configurations of LVM do not automatically activate 546 | # ThinLVM snapshot LVs. 547 | self.vg.activate_lv(snapshot['name'], is_snapshot=True) 548 | 549 | # copy_volume expects sizes in MiB, we store integer GiB 550 | # be sure to convert before passing in 551 | volutils.copy_volume(self.local_path(snapshot), 552 | self.local_path(volume), 553 | snapshot['volume_size'] * units.Ki, 554 | self.configuration.volume_dd_blocksize, 555 | execute=self._execute) 556 | 557 | def delete_volume(self, volume): 558 | """Deletes a logical volume.""" 559 | 560 | # NOTE(jdg): We don't need to explicitly call 561 | # remove export here because we already did it 562 | # in the manager before we got here. 
563 | 564 | if self._volume_not_present(volume['name']): 565 | # If the volume isn't present, then don't attempt to delete 566 | return True 567 | 568 | if self.vg.lv_has_snapshot(volume['name']): 569 | LOG.error(_LE('Unabled to delete due to existing snapshot ' 570 | 'for volume: %s') % volume['name']) 571 | raise exception.VolumeIsBusy(volume_name=volume['name']) 572 | 573 | self._delete_volume(volume) 574 | LOG.info(_LI('Successfully deleted volume: %s'), volume['id']) 575 | 576 | def create_snapshot(self, snapshot): 577 | """Creates a snapshot.""" 578 | 579 | self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']), 580 | snapshot['volume_name'], 581 | self.configuration.lvm_type) 582 | 583 | def delete_snapshot(self, snapshot): 584 | """Deletes a snapshot.""" 585 | if self._volume_not_present(self._escape_snapshot(snapshot['name'])): 586 | # If the snapshot isn't present, then don't attempt to delete 587 | LOG.warning(_LW("snapshot: %s not found, " 588 | "skipping delete operations") % snapshot['name']) 589 | LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id']) 590 | return True 591 | 592 | # TODO(yamahata): zeroing out the whole snapshot triggers COW. 593 | # it's quite slow. 
594 | self._delete_volume(snapshot, is_snapshot=True) 595 | 596 | def local_path(self, volume, vg=None): 597 | if vg is None: 598 | vg = self.configuration.volume_group 599 | # NOTE(vish): stops deprecation warning 600 | escaped_group = vg.replace('-', '--') 601 | escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') 602 | return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) 603 | 604 | def copy_image_to_volume(self, context, volume, image_service, image_id): 605 | """Fetch the image from image_service and write it to the volume.""" 606 | image_utils.fetch_to_raw(context, 607 | image_service, 608 | image_id, 609 | self.local_path(volume), 610 | self.configuration.volume_dd_blocksize, 611 | size=volume['size']) 612 | 613 | def copy_volume_to_image(self, context, volume, image_service, image_meta): 614 | """Copy the volume to the specified image.""" 615 | image_utils.upload_volume(context, 616 | image_service, 617 | image_meta, 618 | self.local_path(volume)) 619 | 620 | def create_cloned_volume(self, volume, src_vref): 621 | """Creates a clone of the specified volume.""" 622 | 623 | mirror_count = 0 624 | if self.configuration.lvm_mirrors: 625 | mirror_count = self.configuration.lvm_mirrors 626 | LOG.info(_LI('Creating clone of volume: %s') % src_vref['id']) 627 | volume_name = src_vref['name'] 628 | temp_id = 'tmp-snap-%s' % volume['id'] 629 | temp_snapshot = {'volume_name': volume_name, 630 | 'size': src_vref['size'], 631 | 'volume_size': src_vref['size'], 632 | 'name': 'clone-snap-%s' % volume['id'], 633 | 'id': temp_id} 634 | 635 | self.create_snapshot(temp_snapshot) 636 | 637 | # copy_volume expects sizes in MiB, we store integer GiB 638 | # be sure to convert before passing in 639 | try: 640 | self._create_volume(volume['name'], 641 | self._sizestr(volume['size']), 642 | self.configuration.lvm_type, 643 | mirror_count) 644 | 645 | self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True) 646 | volutils.copy_volume( 647 | 
self.local_path(temp_snapshot), 648 | self.local_path(volume), 649 | src_vref['size'] * units.Ki, 650 | self.configuration.volume_dd_blocksize, 651 | execute=self._execute) 652 | finally: 653 | self.delete_snapshot(temp_snapshot) 654 | 655 | def clone_image(self, context, volume, 656 | image_location, image_meta, 657 | image_service): 658 | return None, False 659 | 660 | def backup_volume(self, context, backup, backup_service): 661 | """Create a new backup from an existing volume.""" 662 | volume = self.db.volume_get(context, backup['volume_id']) 663 | volume_path = self.local_path(volume) 664 | with utils.temporary_chown(volume_path): 665 | with fileutils.file_open(volume_path) as volume_file: 666 | backup_service.backup(backup, volume_file) 667 | 668 | def restore_backup(self, context, backup, volume, backup_service): 669 | """Restore an existing backup to a new or existing volume.""" 670 | volume_path = self.local_path(volume) 671 | with utils.temporary_chown(volume_path): 672 | with fileutils.file_open(volume_path, 'wb') as volume_file: 673 | backup_service.restore(backup, volume['id'], volume_file) 674 | 675 | def get_volume_stats(self, refresh=False): 676 | """Get volume status. 677 | 678 | If 'refresh' is True, run update the stats first. 679 | """ 680 | 681 | if refresh: 682 | self._update_volume_stats() 683 | 684 | return self._stats 685 | 686 | def extend_volume(self, volume, new_size): 687 | """Extend an existing volume's size.""" 688 | self.vg.extend_volume(volume['name'], 689 | self._sizestr(new_size)) 690 | 691 | def manage_existing(self, volume, existing_ref): 692 | """Manages an existing LV. 693 | 694 | Renames the LV to match the expected name for the volume. 695 | Error checking done by manage_existing_get_size is not repeated. 696 | """ 697 | lv_name = existing_ref['source-name'] 698 | self.vg.get_volume(lv_name) 699 | 700 | # Attempt to rename the LV to match the OpenStack internal name. 
701 | try: 702 | self.vg.rename_volume(lv_name, volume['name']) 703 | except processutils.ProcessExecutionError as exc: 704 | exception_message = (_("Failed to rename logical volume %(name)s, " 705 | "error message was: %(err_msg)s") 706 | % {'name': lv_name, 707 | 'err_msg': exc.stderr}) 708 | raise exception.VolumeBackendAPIException( 709 | data=exception_message) 710 | 711 | def manage_existing_get_size(self, volume, existing_ref): 712 | """Return size of an existing LV for manage_existing. 713 | 714 | existing_ref is a dictionary of the form: 715 | {'source-name': } 716 | """ 717 | 718 | # Check that the reference is valid 719 | if 'source-name' not in existing_ref: 720 | reason = _('Reference must contain source-name element.') 721 | raise exception.ManageExistingInvalidReference( 722 | existing_ref=existing_ref, reason=reason) 723 | lv_name = existing_ref['source-name'] 724 | lv = self.vg.get_volume(lv_name) 725 | 726 | # Raise an exception if we didn't find a suitable LV. 727 | if not lv: 728 | kwargs = {'existing_ref': lv_name, 729 | 'reason': 'Specified logical volume does not exist.'} 730 | raise exception.ManageExistingInvalidReference(**kwargs) 731 | 732 | # LV size is returned in gigabytes. Attempt to parse size as a float 733 | # and round up to the next integer. 734 | try: 735 | lv_size = int(math.ceil(float(lv['size']))) 736 | except ValueError: 737 | exception_message = (_("Failed to manage existing volume " 738 | "%(name)s, because reported size %(size)s " 739 | "was not a floating-point number.") 740 | % {'name': lv_name, 741 | 'size': lv['size']}) 742 | raise exception.VolumeBackendAPIException( 743 | data=exception_message) 744 | return lv_size 745 | 746 | def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0): 747 | """Optimize the migration if the destination is on the same server. 
748 | 749 | If the specified host is another back-end on the same server, and 750 | the volume is not attached, we can do the migration locally without 751 | going through iSCSI. 752 | """ 753 | 754 | false_ret = (False, None) 755 | if volume['status'] != 'available': 756 | return false_ret 757 | if 'location_info' not in host['capabilities']: 758 | return false_ret 759 | info = host['capabilities']['location_info'] 760 | try: 761 | (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\ 762 | info.split(':') 763 | lvm_mirrors = int(lvm_mirrors) 764 | except ValueError: 765 | return false_ret 766 | if (dest_type != 'IOArbLVMVolumeDriver' or dest_hostname != self.hostname): 767 | return false_ret 768 | 769 | if dest_vg != self.vg.vg_name: 770 | vg_list = volutils.get_all_volume_groups() 771 | try: 772 | (vg for vg in vg_list if vg['name'] == dest_vg).next() 773 | except StopIteration: 774 | message = (_LE("Destination Volume Group %s does not exist") % 775 | dest_vg) 776 | LOG.error(message) 777 | return false_ret 778 | 779 | helper = utils.get_root_helper() 780 | 781 | lvm_conf_file = self.configuration.lvm_conf_file 782 | if lvm_conf_file.lower() == 'none': 783 | lvm_conf_file = None 784 | 785 | dest_vg_ref = lvm.LVM(dest_vg, helper, 786 | lvm_type=lvm_type, 787 | executor=self._execute, 788 | lvm_conf=lvm_conf_file) 789 | 790 | self.remove_export(ctxt, volume) 791 | self._create_volume(volume['name'], 792 | self._sizestr(volume['size']), 793 | lvm_type, 794 | lvm_mirrors, 795 | dest_vg_ref) 796 | 797 | volutils.copy_volume(self.local_path(volume), 798 | self.local_path(volume, vg=dest_vg), 799 | volume['size'], 800 | self.configuration.volume_dd_blocksize, 801 | execute=self._execute) 802 | self._delete_volume(volume) 803 | model_update = self.create_export(ctxt, volume, vg=dest_vg) 804 | 805 | return (True, model_update) 806 | else: 807 | message = (_("Refusing to migrate volume ID: %(id)s. 
Please " 808 | "check your configuration because source and " 809 | "destination are the same Volume Group: %(name)s."), 810 | {'id': volume['id'], 'name': self.vg.vg_name}) 811 | LOG.exception(message) 812 | raise exception.VolumeBackendAPIException(data=message) 813 | 814 | def get_pool(self, volume): 815 | return self.backend_name 816 | 817 | # ####### Interface methods for DataPath (Target Driver) ######## 818 | 819 | def ensure_export(self, context, volume): 820 | volume_path = "/dev/%s/%s" % (self.configuration.volume_group, 821 | volume['name']) 822 | 823 | model_update = \ 824 | self.target_driver.ensure_export(context, volume, volume_path) 825 | return model_update 826 | 827 | def create_export(self, context, volume, vg=None): 828 | if vg is None: 829 | vg = self.configuration.volume_group 830 | 831 | volume_path = "/dev/%s/%s" % (vg, volume['name']) 832 | 833 | export_info = self.target_driver.create_export( 834 | context, 835 | volume, 836 | volume_path) 837 | return {'provider_location': export_info['location'], 838 | 'provider_auth': export_info['auth'], } 839 | 840 | def remove_export(self, context, volume): 841 | self.target_driver.remove_export(context, volume) 842 | 843 | def initialize_connection(self, volume, connector): 844 | return self.target_driver.initialize_connection(volume, connector) 845 | 846 | def validate_connector(self, connector): 847 | return self.target_driver.validate_connector(connector) 848 | 849 | def terminate_connection(self, volume, connector, **kwargs): 850 | return self.target_driver.terminate_connection(volume, connector, 851 | **kwargs) 852 | 853 | 854 | class IOArbLVMISCSIDriver(IOArbLVMVolumeDriver): 855 | """Empty class designation for LVMISCSI. 856 | 857 | Since we've decoupled the inheritance of iSCSI and LVM we 858 | don't really need this class any longer. We do however want 859 | to keep it (at least for now) for back compat in driver naming. 
860 | 861 | """ 862 | def __init__(self, *args, **kwargs): 863 | super(IOArbLVMISCSIDriver, self).__init__(*args, **kwargs) 864 | LOG.warning(_LW('IOArbLVMISCSIDriver is deprecated, you should ' 865 | 'now just use IOArbLVMVolumeDriver and specify ' 866 | 'target_helper for the target driver you ' 867 | 'wish to use.')) 868 | 869 | 870 | class IOArbLVMISERDriver(IOArbLVMVolumeDriver): 871 | """Empty class designation for LVMISER. 872 | 873 | Since we've decoupled the inheritance of data path in LVM we 874 | don't really need this class any longer. We do however want 875 | to keep it (at least for now) for back compat in driver naming. 876 | 877 | """ 878 | def __init__(self, *args, **kwargs): 879 | super(IOArbLVMISERDriver, self).__init__(*args, **kwargs) 880 | 881 | LOG.warning(_LW('IOArbLVMISERDriver is deprecated, you should ' 882 | 'now just use IOArbLVMVolumeDriver and specify ' 883 | 'target_helper for the target driver you ' 884 | 'wish to use. In order to enable iser, please ' 885 | 'set iscsi_protocol with the value iser.')) 886 | 887 | LOG.debug('Attempting to initialize LVM driver with the ' 888 | 'following target_driver: ' 889 | 'cinder.volume.targets.iser.ISERTgtAdm') 890 | self.target_driver = importutils.import_object( 891 | 'cinder.volume.targets.iser.ISERTgtAdm', 892 | configuration=self.configuration, 893 | db=self.db, 894 | executor=self._execute) 895 | --------------------------------------------------------------------------------