├── .env ├── .github └── workflows │ └── continuous-integration.yml ├── .gitignore ├── LICENSE ├── README.md ├── Vagrantfile ├── ci.py ├── docker-compose-dev-mailserver.yml ├── docker-compose-dev.yml ├── docker-compose-log.yml ├── docker-compose-metric.yml ├── elasticsearch-init ├── Dockerfile ├── README.md ├── build.yml ├── elasticsearch-templates │ └── logs.template ├── upload.sh └── wait-for.sh ├── elasticsearch ├── Dockerfile ├── build.yml ├── docker-entrypoint.sh ├── elasticsearch.yml ├── jvm.options └── log4j2.properties ├── grafana-init ├── Dockerfile ├── README.md ├── build.yml ├── dashboards.d │ ├── 07-nodes.json │ ├── 08-openstack.json │ ├── 09-openstack_vm.json │ └── 10-openstack_hypervisor.json └── grafana.py └── grafana ├── Dockerfile ├── README.md ├── build.yml ├── drilldown.js ├── grafana.ini.j2 ├── start.sh └── template.py /.env: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # This file prepares the environment for a Monasca server integrated with an 3 | # OpenStack Keystone service or a standalone Keystone service. 
4 | # 5 | # Related docker-compose files: 6 | # - docker-compose-dev.yml (standalone Keystone service with own MySQL 7 | # - docker-compose-metric.yml (metric pipeline) 8 | # - docker-compose-log.yml (as extension: log pipeline) 9 | # 10 | # Before you start: 11 | # - make sure that the OpenStack Keystone service is up and running 12 | # - provide the necessary configuration in this file and replace the 13 | # placeholders "<...>" with the correct settings (see below) 14 | # 15 | # If you use the OpenStack Keystone service: 16 | # * configure the IPv4 address (MON_KEYSTONE_URL) for the OpenStack Keystone host 17 | # * make sure that Kibana authorization is enabled (MON_MONASCA_PLUGIN_ENABLED) 18 | # * set the path to mount Kibana to the OpenStack Horizon proxy (MON_BASE_PATH) 19 | # * adapt the Grafana credentials for grafana-admin and grafana-user 20 | # (see MON_GRAFANA_*) 21 | # * adapt the user credentials for agent and admin to your 22 | # OpenStack Keystone installation (MON_AGENT_*) 23 | # * set the path for the data directories (MON_DOCKER_VOL_ROOT) 24 | # * set the path for the backup directories (MON_BACKUP_DIR) 25 | # * configure data retention for the Elasticsearch and InfluxDB databases 26 | # * configure the Notification Engine plugins 27 | # 28 | # If you use the standalone Keystone service: 29 | # * configure the IPv4 addresses (MON_KEYSTONE_URL and MON_KEYSTONE_IP_ADDR) 30 | # of the Keystone host 31 | # * configure the Monasca Endpoint URL in Keystone (MON_MONASCA_API_URL) 32 | # * leave the default values for the rest of the configuration parameters 33 | ################################################################################ 34 | 35 | ################################################################################ 36 | # Set the IPv4 address of the OpenStack Keystone host and the Monasca endpoint URL 37 | ################################################################################ 38 | MON_KEYSTONE_URL=http://192.168.188.110:5000 39 
| MON_KEYSTONE_IP_ADDR=192.168.188.110 40 | 41 | # Set the Monasca Endpoint URL in Keystone 42 | MON_MONASCA_API_URL=http://192.168.188.110:8070/v2.0 43 | 44 | ################################################################################ 45 | # Specify the URL of the OpenStack Horizon host 46 | # The URL is needed for setting the Monasca data source in Grafana 47 | ################################################################################ 48 | HORIZON_URL=http://192.168.188.110:80 49 | HORIZON_PATH=/ 50 | 51 | ################################################################################ 52 | # Horizon <-> Grafana integration 53 | ################################################################################ 54 | MON_GRAFANA_IP=192.168.188.110 55 | MON_GRAFANA_PORT=3000 56 | 57 | ################################################################################ 58 | # Enable Kibana authorization via OpenStack Horizon 59 | ################################################################################ 60 | MON_MONASCA_PLUGIN_ENABLED=True 61 | 62 | ################################################################################ 63 | # Set the path to mount Kibana to the OpenStack Horizon proxy 64 | ################################################################################ 65 | MON_BASE_PATH=/monitoring/logs_proxy 66 | 67 | ################################################################################ 68 | # Define Grafana administrator settings 69 | ################################################################################ 70 | MON_GRAFANA_ADMIN_USER=grafana-admin 71 | MON_GRAFANA_ADMIN_PASSWORD=admin 72 | 73 | ################################################################################ 74 | # Set the OpenStack Keystone credentials 75 | ################################################################################ 76 | # Credentials of the user used for authenticating the agents against Keystone 77 | MON_AGENT_USERNAME=monasca-agent 78 
| MON_AGENT_PASSWORD=password 79 | MON_AGENT_PROJECT_NAME=mini-mon 80 | 81 | # Credentials of the OpenStack admin 82 | MON_KEYSTONE_ADMIN_USER=admin 83 | MON_KEYSTONE_ADMIN_PASSWORD=secretadmin 84 | 85 | ################################################################################ 86 | # Set the path for the data directories of Elasticsearch, InfluxDB, MySQL, 87 | # Kafka, and Grafana 88 | ################################################################################ 89 | MON_DOCKER_VOL_ROOT=/opt/monasca-containers 90 | 91 | ################################################################################ 92 | # Set the path for the backup directories of Elasticsearch and InfluxDB 93 | ################################################################################ 94 | MON_BACKUP_DIR=/mount/backup 95 | 96 | ################################################################################ 97 | # Configure data retention 98 | ################################################################################ 99 | # Retention period for Elasticsearch database 100 | # Delete job is executed every day at 12 a.m. 
UTC 101 | MON_ELASTICSEARCH_DATA_RETENTION_DAYS=2 102 | 103 | # Retention period for InfluxDB database 104 | MON_INFLUXDB_RETENTION=2d 105 | 106 | # Interval in hours to check the max amount of transactional-logs and snapshots 107 | MON_ZK_PURGE_INTERVAL=12 108 | 109 | ################################################################################ 110 | # Configure Elasticsearch heap size 111 | # - For a 2GB heap size use: -Xms2g -Xmx2g 112 | # - For a 512MB heap size use: -Xms512m -Xmx512m 113 | # - https://www.elastic.co/guide/en/elasticsearch/reference/7.3/heap-size.html 114 | ################################################################################ 115 | MON_ELASTICSEARCH_HEAP_SIZE=-Xms1g -Xmx1g 116 | 117 | ################################################################################ 118 | # Enable the Notification Engine plugins 119 | # - Available plugins: email, webhook, pagerduty, hipchat, and slack 120 | # - Specify the names of the plugins to be enabled as comma-separated list 121 | # for the NF_PLUGINS parameter 122 | # - Specify the plugin-specific configuration parameters 123 | ################################################################################ 124 | NF_PLUGINS=webhook,email 125 | 126 | # Configure the Email plugin 127 | # The host name or IP address of the SMTP mail server. 128 | NF_EMAIL_SERVER=mailhog 129 | # The port number of the SMTP mail server. Default port number: 25. 130 | NF_EMAIL_PORT=1025 131 | # Optional. The name of a user to be used for authentication against the 132 | # SMTP mail system. 133 | NF_EMAIL_USER=notification 134 | # Password of the user specified in NF_EMAIL_USER. 135 | NF_EMAIL_PASSWORD=mail-password 136 | # Email address from which to send the emails. Example: name@example.com 137 | NF_EMAIL_FROM_ADDR=alarm@notification-cmm.com 138 | # Grafana URL. 139 | # The URL of Grafana. It will be included in the mail message. 
140 | NF_EMAIL_GRAFANA_URL=http://192.168.188.110/grafana 141 | 142 | # Configure the WebHook plugin 143 | # Timeout period in seconds the notification engine tries to call a WebHook 144 | # when an alarm is triggered. Default: 5 145 | NF_WEBHOOK_TIMEOUT=5 146 | 147 | # Configure the PagerDuty plugin 148 | # Timeout period in seconds the notification engine tries to call PagerDuty 149 | # when an alarm is triggered. Default: 5 150 | NF_PAGERDUTY_TIMEOUT=5 151 | 152 | # Configure the Slack plugin 153 | # Timeout period in seconds the notification engine tries to call Slack 154 | # when an alarm is triggered. Default: 5 155 | NF_SLACK_TIMEOUT=5 156 | # Path to the SSL certificates. By default, the system certificates are used. 157 | NF_SLACK_CERTS= 158 | # If set to false, the SSL certificates are verified. 159 | NF_SLACK_INSECURE= 160 | # Optional. IP address and port of the HTTP(S) proxy server to be used for sending 161 | # notifications. Example: https://12.12.12.20:2222 162 | NF_SLACK_PROXY= 163 | 164 | ################################################################################ 165 | # Image versions 166 | ################################################################################ 167 | 168 | # Metric pipeline 169 | INFLUXDB_VERSION=1.8-alpine 170 | INFLUXDB_INIT_VERSION=2.1.0 171 | 172 | MYSQL_VERSION=5.7 173 | MYSQL_INIT_VERSION=2.0.14-1 174 | 175 | MEMCACHED_VERSION=1.5-alpine 176 | CADVISOR_VERSION=v0.33.0 177 | ZOOKEEPER_VERSION=3.4.14 178 | 179 | MON_KAFKA_VERSION=2.12-2.0.1-0.0.2 180 | MON_KAFKA_INIT_VERSION=2.0.0 181 | MON_GRAFANA_VERSION=7.4.3-master 182 | MON_GRAFANA_INIT_VERSION=master 183 | 184 | MON_API_VERSION=master 185 | MON_PERSISTER_VERSION=master 186 | MON_THRESH_VERSION=master 187 | MON_NOTIFICATION_VERSION=master 188 | MON_STATSD_VERSION=master 189 | MON_AGENT_FORWARDER_VERSION=master 190 | MON_AGENT_COLLECTOR_VERSION=master 191 | 192 | # Log pipeline 193 | MON_LOG_METRICS_VERSION=2.1.1 194 | MON_LOG_PERSISTER_VERSION=2.1.3 195 | 
MON_LOG_TRANSFORMER_VERSION=2.1.1 196 | MON_ELASTICSEARCH_VERSION=7.3.0-master 197 | MON_ELASTICSEARCH_INIT_VERSION=master 198 | MON_ELASTICSEARCH_CURATOR_VERSION=5.8.3-0.0.1 199 | MON_LOG_API_VERSION=master 200 | MON_KIBANA_VERSION=2.0.14-1 201 | MON_LOG_AGENT_VERSION=2.0.14-1 202 | MON_LOGSPOUT_VERSION=2.1.3 203 | -------------------------------------------------------------------------------- /.github/workflows/continuous-integration.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | metrics-pipeline: 11 | runs-on: ubuntu-18.04 12 | steps: 13 | - run: echo "Event ${{ github.event_name }}" 14 | - run: echo "Runner ${{ runner.os }}" 15 | 16 | - name: Install dependencies 17 | run: | 18 | python -m pip install --upgrade pip 19 | pip install git+https://github.com/monasca/dbuild.git 20 | 21 | - name: Check out repository code 22 | uses: actions/checkout@v2 23 | with: 24 | fetch-depth: 0 25 | 26 | - name: Set ENV variables 27 | run: | 28 | echo "CI_EVENT_TYPE=${{ github.event_name }}" >> $GITHUB_ENV 29 | if [ "$GITHUB_EVENT_NAME" = "pull_request" ]; then 30 | echo "CI_BRANCH=${{ github.head_ref }}" >> $GITHUB_ENV 31 | echo "CI_COMMIT_RANGE=${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}" >> $GITHUB_ENV; 32 | else 33 | echo "CI_BRANCH=${CI_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = "/" } ; { print $3 }')}" >> $GITHUB_ENV 34 | echo "CI_COMMIT_RANGE=${{ github.event.before }}..${{ github.sha }}" >> $GITHUB_ENV; 35 | fi 36 | 37 | - name: Get IP Address from VM eth0 38 | run: | 39 | echo "if_ipaddr=$(ifconfig eth0 | awk '{ print $2}' | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}")" >> $GITHUB_ENV 40 | 41 | - name: Set IP Address in .env 42 | run: | 43 | sed -i -e "s/\([0-9]\{1,3\}\.\)\{3\}[0-9]\{1,3\}/$if_ipaddr/g" ".env" 44 | 45 | - name: Show .env 46 | run: 
| 47 | cat .env 48 | 49 | - name: Execute ci.py script 50 | env: 51 | DOCKER_HUB_USERNAME: chaconpiza 52 | DOCKER_HUB_PASSWORD: ${{ secrets.DOCKER_HUB_PASSWORD }} 53 | run: | 54 | python ci.py --pipeline metrics --print-logs --verbose 55 | 56 | - run: echo "This job's status is ${{ job.status }}" 57 | 58 | logs-pipeline: 59 | runs-on: ubuntu-18.04 60 | steps: 61 | - run: echo "Event ${{ github.event_name }}" 62 | - run: echo "Runner ${{ runner.os }}" 63 | 64 | - name: Install dependencies 65 | run: | 66 | python -m pip install --upgrade pip 67 | pip install git+https://github.com/monasca/dbuild.git 68 | 69 | - name: Check out repository code 70 | uses: actions/checkout@v2 71 | with: 72 | fetch-depth: 0 73 | 74 | - name: Set ENV variables 75 | run: | 76 | echo "CI_EVENT_TYPE=${{ github.event_name }}" >> $GITHUB_ENV 77 | if [ "$GITHUB_EVENT_NAME" = "pull_request" ]; then 78 | echo "CI_BRANCH=${{ github.head_ref }}" >> $GITHUB_ENV 79 | echo "CI_COMMIT_RANGE=${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}" >> $GITHUB_ENV; 80 | else 81 | echo "CI_BRANCH=${CI_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = "/" } ; { print $3 }')}" >> $GITHUB_ENV 82 | echo "CI_COMMIT_RANGE=${{ github.event.before }}..${{ github.sha }}" >> $GITHUB_ENV; 83 | fi 84 | 85 | - name: Get IP Address from VM eth0 86 | run: | 87 | echo "if_ipaddr=$(ifconfig eth0 | awk '{ print $2}' | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}")" >> $GITHUB_ENV 88 | 89 | - name: Set IP Address in .env 90 | run: | 91 | sed -i -e "s/\([0-9]\{1,3\}\.\)\{3\}[0-9]\{1,3\}/$if_ipaddr/g" ".env" 92 | 93 | - name: Show .env 94 | run: | 95 | cat .env 96 | 97 | - name: Execute ci.py script 98 | env: 99 | DOCKER_HUB_USERNAME: chaconpiza 100 | DOCKER_HUB_PASSWORD: ${{ secrets.DOCKER_HUB_PASSWORD }} 101 | run: | 102 | python ci.py --pipeline logs --print-logs --verbose 103 | 104 | - run: echo "This job's status is ${{ job.status }}" 105 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | */host_vars/localhost 2 | *.pyc 3 | api/truststore.jks 4 | jenkins_jobs.ini 5 | ci/jobs/jjb 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Docker/docker-compose files for Monasca 2 | 3 | This repository contains resources for building and deploying a full Monasca 4 | stack in Docker and environment. 5 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # This file can be used to simulate Travis VM locally using Vagrant 2 | # It requires the vagrant-docker-compose plugin that can be installed with: 3 | # $ vagrant plugin install vagrant-docker-compose 4 | 5 | # -*- mode: ruby -*- 6 | # vi: set ft=ruby : 7 | 8 | Vagrant.configure(2) do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. 
You can search for 14 | # boxes at https://atlas.hashicorp.com/search. 15 | # config.vm.box = "ubuntu/bionic64" #10GB 16 | config.vm.box = "bento/ubuntu-18.04" 17 | config.vm.hostname = "monasca" 18 | # access a port on your host machine (via localhost) and have all data forwarded to a port on the guest machine. 19 | config.vm.network "forwarded_port", guest: 9092, host: 9092 20 | # Create a private network, which allows host-only access to the machine 21 | # using a specific IP. 22 | config.vm.network "private_network", ip: "192.168.188.110" 23 | 24 | config.vm.provider "virtualbox" do |vb| 25 | vb.name = 'Monasca' 26 | vb.memory = 8200 27 | vb.cpus = 2 28 | #vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 29 | #vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"] 30 | end 31 | 32 | # set up Docker in the new VM: 33 | config.vm.provision :docker 34 | config.vm.provision :docker_compose 35 | 36 | config.vm.provision "shell", privileged: false, inline: <<-SHELL 37 | # sudo apt-get update 38 | # sudo apt-get -y upgrade 39 | sudo apt install -y python-pip 40 | sudo pip install pip --upgrade 41 | sudo pip install git+https://github.com/monasca/dbuild.git 42 | sudo pip install "six>=1.13.0" 43 | sudo apt-get -y install git 44 | # Change to the branch you want to test 45 | git clone https://github.com/monasca/monasca-docker.git -b master 46 | cd monasca-docker 47 | export CI_EVENT_TYPE="pull_request" 48 | # Choose the pipeline you want to test (metrics XOR logs): 49 | python ci.py --pipeline metrics --verbose 50 | # python ci.py --pipeline logs --verbose 51 | SHELL 52 | 53 | end 54 | -------------------------------------------------------------------------------- /ci.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. 
You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import argparse 16 | import datetime 17 | import gzip 18 | import json 19 | import logging 20 | import os 21 | import re 22 | import shutil 23 | import signal 24 | import six 25 | import subprocess 26 | import sys 27 | import time 28 | import yaml 29 | 30 | 31 | parser = argparse.ArgumentParser(description='CI command') 32 | parser.add_argument('-p', '--pipeline', dest='pipeline', default=None, required=True, 33 | help='Select the pipeline [metrics|logs]') 34 | parser.add_argument('-nv', '--non-voting', dest='non_voting', action='store_true', 35 | help='Set the check as non-voting') 36 | parser.add_argument('-pl', '--print-logs', dest='printlogs', action='store_true', 37 | help='Print containers logs') 38 | parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', 39 | help='Increment verbosity') 40 | parser.add_argument('--CI_BRANCH', dest='ci_branch', default=None, required=False, 41 | help='') 42 | parser.add_argument('--CI_EVENT_TYPE', dest='ci_event_type', default=None, required=False, 43 | help='') 44 | parser.add_argument('--CI_COMMIT_RANGE', dest='ci_commit_range', default=None, required=False, 45 | help='') 46 | args = parser.parse_args() 47 | 48 | pipeline = args.pipeline 49 | non_voting = args.non_voting 50 | printlogs = args.printlogs 51 | verbose = args.verbose 52 | ci_branch = args.ci_branch if args.ci_branch else os.environ.get('CI_BRANCH', None) 53 | ci_event_type = args.ci_event_type if args.ci_event_type else os.environ.get('CI_EVENT_TYPE', None) 54 | ci_commit_range = 
args.ci_commit_range if args.ci_commit_range else os.environ.get('CI_COMMIT_RANGE', None) 55 | 56 | logging.basicConfig(format = '%(asctime)s %(levelname)5.5s %(message)s') 57 | LOG=logging.getLogger(__name__) 58 | verbose = args.verbose 59 | LOG.setLevel(logging.DEBUG) if verbose else LOG.setLevel(logging.INFO) 60 | LOG.debug(args) 61 | 62 | #TAG_REGEX = re.compile(r'^!(\w+)(?:\s+([\w-]+))?$') 63 | TAG_REGEX = re.compile(r'^!(build|push|readme)(?:\s([\w-]+))$') 64 | 65 | METRIC_PIPELINE_MARKER = 'metrics' 66 | LOG_PIPELINE_MARKER = 'logs' 67 | 68 | TEMPEST_TIMEOUT = 20 # minutes 69 | BUILD_TIMEOUT = 20 # minutes 70 | INITJOBS_ATTEMPS = 5 71 | 72 | METRIC_PIPELINE_MODULE_TO_COMPOSE_SERVICES = { 73 | 'monasca-agent-forwarder': 'agent-forwarder', 74 | 'zookeeper': 'zookeeper', 75 | 'influxdb': 'influxdb', 76 | 'kafka': 'kafka', 77 | 'kafka-init': 'kafka-init', 78 | 'monasca-thresh': 'thresh', 79 | 'monasca-persister-python': 'monasca-persister', 80 | 'mysql-init': 'mysql-init', 81 | 'monasca-api-python': 'monasca', 82 | 'influxdb-init': 'influxdb-init', 83 | 'monasca-agent-collector': 'agent-collector', 84 | 'grafana': 'grafana', 85 | 'monasca-notification': 'monasca-notification', 86 | 'grafana-init': 'grafana-init', 87 | 'monasca-statsd': 'monasca-statsd' 88 | } 89 | LOGS_PIPELINE_MODULE_TO_COMPOSE_SERVICES = { 90 | 'monasca-log-metrics': 'log-metrics', 91 | 'monasca-log-persister': 'log-persister', 92 | 'monasca-log-transformer': 'log-transformer', 93 | 'elasticsearch': 'elasticsearch', 94 | 'elasticsearch-curator': 'elasticsearch-curator', 95 | 'elasticsearch-init': 'elasticsearch-init', 96 | 'kafka-init': 'kafka-log-init', 97 | 'kibana': 'kibana', 98 | 'monasca-log-api': 'log-api', 99 | 'monasca-log-agent': 'log-agent', 100 | 'logspout': 'logspout', 101 | } 102 | 103 | METRIC_PIPELINE_INIT_JOBS = ('influxdb-init', 'kafka-init', 'mysql-init', 'grafana-init') 104 | LOG_PIPELINE_INIT_JOBS = ('elasticsearch-init', 'kafka-log-init') 105 | INIT_JOBS = { 106 | 
METRIC_PIPELINE_SERVICES = METRIC_PIPELINE_MODULE_TO_COMPOSE_SERVICES.values()
"""Explicit list of services for docker compose
to launch for metrics pipeline"""
LOG_PIPELINE_SERVICES = LOGS_PIPELINE_MODULE_TO_COMPOSE_SERVICES.values()
"""Explicit list of services for docker compose
to launch for logs pipeline"""

PIPELINE_TO_YAML_COMPOSE = {
    METRIC_PIPELINE_MARKER: 'docker-compose-metric.yml',
    LOG_PIPELINE_MARKER: 'docker-compose-log.yml'
}

CI_COMPOSE_FILE = 'ci-compose.yml'

# Timestamped per-run directory tree for build and runtime logs.
LOG_DIR = 'monasca-logs/' + \
    datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
BUILD_LOG_DIR = LOG_DIR + '/build/'
RUN_LOG_DIR = LOG_DIR + '/run/'
LOG_DIRS = [LOG_DIR, BUILD_LOG_DIR, RUN_LOG_DIR]


class SubprocessException(Exception):
    """A spawned command exited with a non-zero status."""
    pass


class FileReadException(Exception):
    """Reading or parsing an input file failed."""
    pass


class FileWriteException(Exception):
    """Writing an output file failed."""
    pass


class InitJobFailedException(Exception):
    """One or more init containers did not finish with exit code 0."""
    pass


class TempestTestFailedException(Exception):
    """The tempest test container failed or timed out."""
    pass


class SmokeTestFailedException(Exception):
    """The smoke test container exited with a failure."""
    pass


class BuildFailedException(Exception):
    """dbuild failed or timed out."""
    pass


def print_logs():
    """Echo every collected log file into the CI console output."""
    for log_dir in LOG_DIRS:
        for file_name in os.listdir(log_dir):
            file_path = log_dir + file_name
            if not os.path.isfile(file_path):
                continue
            with open(file_path, 'r') as f:
                log_contents = f.read()
            LOG.info("#" * 100)
            LOG.info("###### Container Logs from {0}".format(file_name))
            LOG.info("#" * 100)
            LOG.info(log_contents)


def set_log_dir():
    """Create the log directory tree; best-effort only, errors are logged."""
    try:
        LOG.debug('Working directory: {0}'.format(os.getcwd()))
        if not os.path.exists(LOG_DIR):
            LOG.debug('Creating LOG_DIR: {0}'.format(LOG_DIR))
            os.makedirs(LOG_DIR)
        if not os.path.exists(BUILD_LOG_DIR):
            LOG.debug('Creating BUILD_LOG_DIR: {0}'.format(BUILD_LOG_DIR))
            os.makedirs(BUILD_LOG_DIR)
        if not os.path.exists(RUN_LOG_DIR):
            LOG.debug('Creating RUN_LOG_DIR: {0}'.format(RUN_LOG_DIR))
            os.makedirs(RUN_LOG_DIR)
    except Exception as e:
        # Deliberately non-fatal: a missing log dir must not kill the run.
        LOG.error('Unexpected error {0}'.format(e))


def _git_stdout(cmd):
    """Run a git command and return its decoded stdout.

    Raises SubprocessException when git exits non-zero.
    """
    LOG.debug('Execute: {0}'.format(' '.join(cmd)))
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    stdout, _ = p.communicate()

    if six.PY3:
        stdout = stdout.decode('utf-8')

    if p.returncode != 0:
        raise SubprocessException('git returned non-zero exit code')

    return stdout


def get_changed_files():
    """Return the files touched by the commit range under test ([] if unset)."""
    if not ci_commit_range:
        return []
    stdout = _git_stdout(['git', 'diff', '--name-only', ci_commit_range])
    return [line.strip() for line in stdout.splitlines()]


def get_message_tags():
    """Extract !build/!push/!readme directives from the tip commit message."""
    if not ci_commit_range:
        return []
    stdout = _git_stdout(['git', 'log', '--pretty=%B', '-1', ci_commit_range])

    tags = []
    for line in stdout.splitlines():
        m = TAG_REGEX.match(line.strip())
        if m:
            tags.append(m.groups())

    return tags


def get_dirty_modules(dirty_files):
    """Map changed files to buildable modules.

    A module is a top-level directory containing both a Dockerfile and a
    build.yml; top-level files are ignored.
    """
    dirty = set()
    for f in dirty_files:
        if os.path.sep in f:
            mod, _ = f.split(os.path.sep, 1)

            if not os.path.exists(os.path.join(mod, 'Dockerfile')):
                continue

            if not os.path.exists(os.path.join(mod, 'build.yml')):
                continue

            dirty.add(mod)

    # if len(dirty) > 5:
    #     LOG.error('Max number of changed modules exceded.',
    #               'Please break up the patch set until a maximum of 5 modules are changed.')
    #     sys.exit(1)
    return list(dirty)
def get_dirty_for_module(files, module=None):
    """Return the paths from *files* belonging to *module*.

    With module=None, return only top-level files (no path separator).
    Paths inside a module are returned relative to the module directory.
    """
    ret = []
    for f in files:
        if os.path.sep in f:
            mod, rel_path = f.split(os.path.sep, 1)
            if mod == module:
                ret.append(rel_path)
        else:
            # top-level file, no module
            if module is None:
                ret.append(f)

    return ret


def run_build(modules):
    """Build *modules* with dbuild, streaming its output.

    Raises BuildFailedException on a non-zero exit or after BUILD_TIMEOUT
    minutes.
    """
    log_dir = BUILD_LOG_DIR
    build_args = ['dbuild', '-sd', '--build-log-dir', log_dir, 'build', 'all', '+', ':ci-cd'] + modules
    LOG.debug('Executing build command: {0}\n'.format(' '.join(build_args)))

    p = subprocess.Popen(build_args, stdout=subprocess.PIPE, universal_newlines=True)

    def kill(signal, frame):
        p.kill()
        LOG.warning('Finished by Ctrl-c!')
        sys.exit(2)

    signal.signal(signal.SIGINT, kill)

    start_time = datetime.datetime.now()
    # Deadline is fixed at start; hoisted out of the polling loop.
    end_time = start_time + datetime.timedelta(minutes=BUILD_TIMEOUT)
    while True:
        # NOTE(review): readline() blocks, so the timeout is only checked
        # after each line of output arrives.
        output = p.stdout.readline()
        print(" " + output.strip())
        return_code = p.poll()
        if return_code is not None:
            LOG.debug('Return code: {0}'.format(return_code))
            if return_code != 0:
                LOG.error('BUILD FAILED !!!')
                raise BuildFailedException('Build failed')
            LOG.info('Build succeeded')
            # Process has finished, read rest of the output
            for output in p.stdout.readlines():
                LOG.debug(output.strip())
            break
        if datetime.datetime.now() >= end_time:
            LOG.error('BUILD TIMEOUT AFTER {0} MIN !!!'.format(BUILD_TIMEOUT))
            p.kill()
            raise BuildFailedException('Build timeout')


def run_push(modules, pipeline):
    """Push built module images to Docker Hub (master branch, metrics only)."""
    if ci_branch != 'master':
        LOG.warning('Push images to Docker Hub is only allowed from master branch')
        return

    if pipeline == 'logs':
        LOG.info('Images are already pushed by metrics-pipeline, skipping!')
        return

    username = os.environ.get('DOCKER_HUB_USERNAME', None)
    password = os.environ.get('DOCKER_HUB_PASSWORD', None)

    if not password:
        LOG.info('Not DOCKER_HUB_PASSWORD, skipping!')
        LOG.info('Not pushing: {0}'.format(modules))
        return

    if username and password:
        LOG.info('Logging into docker registry...')
        login = subprocess.Popen([
            'docker', 'login',
            '-u', username,
            '--password-stdin'
        ], stdin=subprocess.PIPE)
        # BUG FIX: stdin is a byte pipe on Python 3; passing the raw str
        # raised TypeError and broke the registry login.
        login.communicate(password.encode('utf-8') if six.PY3 else password)
        if login.returncode != 0:
            LOG.error('Docker registry login failed, cannot push!')
            sys.exit(1)

    log_dir = BUILD_LOG_DIR
    push_args = ['dbuild', '-sd', '--build-log-dir', log_dir, 'build', 'push', 'all'] + modules
    LOG.debug('Executing push command: {0}\n'.format(' '.join(push_args)))

    p = subprocess.Popen(push_args)

    def kill(signal, frame):
        p.kill()
        LOG.warning('Finished by Ctrl-c!')
        sys.exit(2)

    signal.signal(signal.SIGINT, kill)
    if p.wait() != 0:
        LOG.error('PUSH FAILED !!!')
        sys.exit(p.returncode)


def run_readme(modules):
    """Refresh the Docker Hub README for *modules* (master branch only)."""
    if ci_branch != 'master':
        LOG.warning('Update readme to Docker Hub is only allowed from master branch')
        return

    log_dir = BUILD_LOG_DIR
    readme_args = ['dbuild', '-sd', '--build-log-dir', log_dir, 'readme'] + modules
    LOG.debug('Executing readme command: {0}\n'.format(' '.join(readme_args)))

    p = subprocess.Popen(readme_args)

    def kill(signal, frame):
        p.kill()
        LOG.warning('Finished by Ctrl-c!')
        sys.exit(2)

    signal.signal(signal.SIGINT, kill)
    if p.wait() != 0:
        LOG.error('README FAILED !!!')
        sys.exit(p.returncode)
def update_docker_compose(modules, pipeline):
    """Write ci-compose.yml, retagging changed modules' images to :ci-cd."""
    compose_dict = load_yml(PIPELINE_TO_YAML_COMPOSE['metrics'])
    services_to_changes = METRIC_PIPELINE_MODULE_TO_COMPOSE_SERVICES.copy()

    if pipeline == 'logs':
        LOG.info('\'logs\' pipeline is enabled, including in CI run')
        log_compose = load_yml(PIPELINE_TO_YAML_COMPOSE['logs'])
        compose_dict['services'].update(log_compose['services'])
        services_to_changes.update(
            LOGS_PIPELINE_MODULE_TO_COMPOSE_SERVICES.copy()
        )

    if modules:
        compose_services = compose_dict['services']
        for module in modules:
            # Not all modules are included in docker compose
            if module not in services_to_changes:
                continue
            service_name = services_to_changes[module]
            # A module may map to several comma-separated services.
            services_to_update = service_name.split(',')
            for service in services_to_update:
                image = compose_services[service]['image']
                image = image.split(':')[0]
                image += ":ci-cd"
                compose_services[service]['image'] = image

    # Update compose version
    compose_dict['version'] = '2'

    LOG.debug("Displaying {0}\n\n{1}".format(CI_COMPOSE_FILE, yaml.dump(compose_dict, default_flow_style=False)))

    try:
        with open(CI_COMPOSE_FILE, 'w') as docker_compose:
            yaml.dump(compose_dict, docker_compose, default_flow_style=False)
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit/KeyboardInterrupt.
        raise FileWriteException(
            'Error writing CI dictionary to {0}'.format(CI_COMPOSE_FILE)
        )


def load_yml(yml_path):
    """Parse *yml_path* as YAML; raise FileReadException on any error."""
    try:
        with open(yml_path) as compose_file:
            compose_dict = yaml.safe_load(compose_file)
            return compose_dict
    except Exception:
        # Was a bare "except:"; narrowed so Ctrl-C still propagates.
        raise FileReadException('Failed to read {0}'.format(yml_path))


def handle_pull_request(files, modules, tags, pipeline):
    """Handle a PR/cron event: build, deploy and test the selected pipeline."""
    modules_to_build = modules[:]

    for tag, arg in tags:
        if tag in ('build', 'push'):
            if arg is None:
                # arg-less doesn't make sense for PRs since any changes to a
                # module already result in a rebuild
                continue

            modules_to_build.append(arg)

    # note(kornicameister) check if module belong to the pipeline
    # if not, there's no point of building that as it will be build
    # for the given pipeline
    pipeline_modules = pick_modules_for_pipeline(modules_to_build, pipeline)

    if pipeline_modules:
        run_build(pipeline_modules)
    else:
        LOG.info('No modules to build.')

    update_docker_compose(pipeline_modules, pipeline)
    run_docker_keystone()
    run_docker_compose(pipeline)
    wait_for_init_jobs(pipeline)
    LOG.info('Waiting for containers to be ready 1 min...')
    time.sleep(60)
    output_docker_ps()

    cool_test_mapper = {
        'smoke': {
            METRIC_PIPELINE_MARKER: run_smoke_tests_metrics,
            LOG_PIPELINE_MARKER: lambda: LOG.info('No smoke tests for logs')
        },
        'tempest': {
            METRIC_PIPELINE_MARKER: run_tempest_tests_metrics,
            LOG_PIPELINE_MARKER: lambda: LOG.info('No tempest tests for logs')
        }
    }

    cool_test_mapper['smoke'][pipeline]()
    cool_test_mapper['tempest'][pipeline]()


def pick_modules_for_pipeline(modules, pipeline):
    """Filter *modules* to those relevant for *pipeline*.

    Mutates and returns the list; modules in neither pipeline are dropped
    unless they are build-only extras (e.g. 'storm').
    """
    if not modules:
        return []

    modules_for_pipeline = {
        LOG_PIPELINE_MARKER: LOGS_PIPELINE_MODULE_TO_COMPOSE_SERVICES.keys(),
        METRIC_PIPELINE_MARKER: METRIC_PIPELINE_MODULE_TO_COMPOSE_SERVICES.keys()
    }

    pipeline_modules = modules_for_pipeline[pipeline]

    # some of the modules are not used in pipelines, but should be
    # taken into consideration during the build
    other_modules = [
        'storm'
    ]
    LOG.info('Modules to build: {0}'.format(modules))
    LOG.info('Modules to pull: {0}'.format(pipeline_modules))

    # iterate over copy of all modules that are planned for the build
    # if one of them does not belong to active pipeline
    # remove from current run
    for m in modules[::]:
        if m not in pipeline_modules:
            if m in other_modules:
                LOG.info('Module {0} is not part of either pipeline, but it will be build anyway'.format(m))
                continue
            LOG.info('Module {0} does not belong to {1}, skipping'.format(m, pipeline))
            modules.remove(m)

    return modules
def get_current_init_status(docker_id):
    """Return True when the init container *docker_id* exited with status 0."""
    init_status = ['docker', 'inspect', '-f', '{{ .State.ExitCode }}:{{ .State.Status }}', docker_id]

    p = subprocess.Popen(init_status, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    def kill(signal, frame):
        p.kill()
        LOG.warning('Finished by Ctrl-c!')
        sys.exit(2)

    signal.signal(signal.SIGINT, kill)

    output, err = p.communicate()

    if six.PY3:
        output = output.decode('utf-8')

    # communicate() already waited for the process; returncode is final.
    if p.returncode != 0:
        LOG.info('getting current status failed')
        return False
    status_output = output.rstrip()

    exit_code, status = status_output.split(":", 1)
    LOG.debug('Status from init-container {0}, exit_code {1}, status {2}'.format(docker_id, exit_code, status))
    return exit_code == "0" and status == "exited"


def output_docker_logs():
    """Save 'docker logs' output for every container into RUN_LOG_DIR."""
    LOG.info("Saving container logs at {0}".format(LOG_DIR))
    docker_names = ['docker', 'ps', '-a', '--format', '"{{.Names}}"']

    LOG.debug('Executing: {0}'.format(' '.join(docker_names)))
    p = subprocess.Popen(docker_names, stdout=subprocess.PIPE)

    def kill(signal, frame):
        p.kill()
        LOG.warning('Finished by Ctrl-c!')
        sys.exit(2)

    signal.signal(signal.SIGINT, kill)

    output, err = p.communicate()

    if six.PY3:
        output = output.decode('utf-8')

    names = output.replace('"', '').split('\n')

    for name in names:
        if not name:
            continue

        docker_logs = ['docker', 'logs', '-t', name]
        log_name = RUN_LOG_DIR + 'docker_log_' + name + '.log'

        LOG.debug('Executing: {0}'.format(' '.join(docker_logs)))
        with open(log_name, 'w') as out:
            p = subprocess.Popen(docker_logs, stdout=out,
                                 stderr=subprocess.STDOUT)
            signal.signal(signal.SIGINT, kill)
            if p.wait() != 0:
                LOG.error('Error running docker log for {0}'.format(name))


def addtab(s):
    """Indent every line of *s* by two spaces (keeps line endings)."""
    white = " " * 2
    return white + white.join(s.splitlines(1))


def output_docker_ps():
    """Log the output of 'docker ps -a' for debugging."""
    docker_ps = ['docker', 'ps', '-a']

    LOG.debug('Executing: {0}'.format(' '.join(docker_ps)))
    p = subprocess.Popen(docker_ps, stdout=subprocess.PIPE)

    def kill(signal, frame):
        p.kill()
        LOG.warning('Finished by Ctrl-c!')
        sys.exit(2)

    signal.signal(signal.SIGINT, kill)

    output, err = p.communicate()

    if six.PY3:
        output = output.decode('utf-8')
    LOG.info("Displaying all docker containers\n" + addtab(output))


def output_compose_details(pipeline):
    """Log which compose services are about to start for *pipeline*."""
    if pipeline == 'metrics':
        services = METRIC_PIPELINE_SERVICES
    else:
        services = LOG_PIPELINE_SERVICES
    if six.PY3:
        # dict views cannot be joined directly on Python 3.
        services = list(services)
    LOG.info('All services that are about to start: {0}'.format(', '.join(services)))


def get_docker_id(init_job):
    """Return the container id of compose service *init_job* ('' on failure)."""
    docker_id = ['docker-compose',
                 '-f', CI_COMPOSE_FILE,
                 'ps',
                 '-q', init_job]

    p = subprocess.Popen(docker_id, stdout=subprocess.PIPE)

    def kill(signal, frame):
        p.kill()
        LOG.warning('Finished by Ctrl-c!')
        sys.exit(2)

    signal.signal(signal.SIGINT, kill)

    output, err = p.communicate()

    if six.PY3:
        output = output.decode('utf-8')

    # communicate() already waited; use the recorded returncode.
    if p.returncode != 0:
        LOG.error('error getting docker id')
        return ""
    return output.rstrip()
def wait_for_init_jobs(pipeline):
    """Poll the pipeline's init containers until all exit 0.

    Polls every 20 seconds, up to INITJOBS_ATTEMPS times; raises
    InitJobFailedException when any job has not finished successfully.
    """
    LOG.info('Waiting 20 sec for init jobs to finish...')
    init_status_dict = {job: False for job in INIT_JOBS[pipeline]}
    docker_id_dict = {job: "" for job in INIT_JOBS[pipeline]}

    amount_succeeded = 0
    for attempt in range(INITJOBS_ATTEMPS):
        time.sleep(20)
        amount_succeeded = 0
        for init_job, status in init_status_dict.items():
            if docker_id_dict[init_job] == "":
                # Container ids may not exist yet on early attempts.
                docker_id_dict[init_job] = get_docker_id(init_job)
            if status:
                amount_succeeded += 1
            else:
                updated_status = get_current_init_status(docker_id_dict[init_job])
                init_status_dict[init_job] = updated_status
                if updated_status:
                    amount_succeeded += 1
        if amount_succeeded == len(docker_id_dict):
            LOG.info("All init-jobs finished successfully !!!")
            break
        else:
            LOG.info("Not all init jobs have finished yet, waiting another 20 sec. " +
                     "Try " + str(attempt + 1) + " of {0}...".format(INITJOBS_ATTEMPS))
    if amount_succeeded != len(docker_id_dict):
        LOG.error("INIT-JOBS FAILED !!!")
        raise InitJobFailedException("Not all init-containers finished with exit code 0")


def handle_push(files, modules, tags, pipeline):
    """Handle a push event: push images and update READMEs for dirty modules."""
    modules_to_push = []
    modules_to_readme = []

    force_push = False
    force_readme = False

    for tag, arg in tags:
        if tag in ('build', 'push'):
            if arg is None:
                force_push = True
            else:
                modules_to_push.append(arg)
        elif tag == 'readme':
            if arg is None:
                force_readme = True
            else:
                modules_to_readme.append(arg)

    for module in modules:
        dirty = get_dirty_for_module(files, module)
        if force_push or 'build.yml' in dirty:
            modules_to_push.append(module)

        if force_readme or 'README.md' in dirty:
            modules_to_readme.append(module)

    if modules_to_push:
        run_push(modules_to_push, pipeline)
    else:
        LOG.info('No modules to push.')

    if modules_to_readme:
        run_readme(modules_to_readme)
    else:
        LOG.info('No READMEs to update.')


def run_docker_keystone():
    """Start the standalone Keystone dev stack; exit on compose failure."""
    LOG.info('Running docker compose for Keystone')

    username = os.environ.get('DOCKER_HUB_USERNAME', None)
    password = os.environ.get('DOCKER_HUB_PASSWORD', None)

    if username and password:
        LOG.info('Logging into docker registry...')
        login = subprocess.Popen([
            'docker', 'login',
            '-u', username,
            '--password-stdin'
        ], stdin=subprocess.PIPE)
        # BUG FIX: stdin is a byte pipe on Python 3; encode the password.
        login.communicate(password.encode('utf-8') if six.PY3 else password)
        if login.returncode != 0:
            LOG.error('Docker registry login failed!')
            sys.exit(1)

    docker_compose_dev_command = ['docker-compose',
                                  '-f', 'docker-compose-dev.yml',
                                  'up', '-d']

    LOG.debug('Executing: {0}'.format(' '.join(docker_compose_dev_command)))
    with open(RUN_LOG_DIR + 'docker_compose_dev.log', 'w') as out:
        p = subprocess.Popen(docker_compose_dev_command, stdout=out)

    def kill(signal, frame):
        p.kill()
        LOG.warning('Finished by Ctrl-c!')
        sys.exit(2)

    signal.signal(signal.SIGINT, kill)
    if p.wait() != 0:
        LOG.error('DOCKER COMPOSE FAILED !!!')
        sys.exit(p.returncode)

    # print out running images for debugging purposes
    LOG.info('docker compose dev succeeded')
    output_docker_ps()
def run_docker_compose(pipeline):
    """Start the pipeline's services with docker-compose; exit on failure."""
    LOG.info('Running docker compose')
    output_compose_details(pipeline)

    username = os.environ.get('DOCKER_HUB_USERNAME', None)
    password = os.environ.get('DOCKER_HUB_PASSWORD', None)

    if username and password:
        LOG.info('Logging into docker registry...')
        login = subprocess.Popen([
            'docker', 'login',
            '-u', username,
            '--password-stdin'
        ], stdin=subprocess.PIPE)
        # BUG FIX: stdin is a byte pipe on Python 3; encode the password.
        login.communicate(password.encode('utf-8') if six.PY3 else password)
        if login.returncode != 0:
            LOG.error('Docker registry login failed!')
            sys.exit(1)

    if pipeline == 'metrics':
        services = METRIC_PIPELINE_SERVICES
    else:
        services = LOG_PIPELINE_SERVICES

    if six.PY3:
        # dict views cannot be concatenated to a list on Python 3.
        services = list(services)
    docker_compose_command = ['docker-compose',
                              '-f', CI_COMPOSE_FILE,
                              'up', '-d'] + services

    LOG.debug('Executing: {0}'.format(' '.join(docker_compose_command)))
    with open(RUN_LOG_DIR + 'docker_compose.log', 'w') as out:
        p = subprocess.Popen(docker_compose_command, stdout=out)

    def kill(signal, frame):
        p.kill()
        LOG.warning('Finished by Ctrl-c!')
        sys.exit(2)

    signal.signal(signal.SIGINT, kill)
    if p.wait() != 0:
        LOG.error('DOCKER COMPOSE FAILED !!!')
        sys.exit(p.returncode)

    # print out running images for debugging purposes
    LOG.info('docker compose succeeded')
    output_docker_ps()


def run_smoke_tests_metrics():
    """Run the metrics smoke-test container; raise SmokeTestFailedException on failure."""
    LOG.info('Running Smoke-tests')
    # TODO: branch as variable... use TRAVIS_PULL_REQUEST_BRANCH ?
    smoke_tests_run = ['docker', 'run',
                       '-e', 'OS_AUTH_URL=http://keystone:35357/v3',
                       '-e', 'MONASCA_URL=http://monasca:8070',
                       '-e', 'METRIC_NAME_TO_CHECK=monasca.thread_count',
                       '--net', 'monasca-docker_default',
                       '-p', '0.0.0.0:8080:8080',
                       '--name', 'monasca-docker-smoke',
                       'fest/smoke-tests:pike-latest']

    LOG.debug('Executing: {0}'.format(' '.join(smoke_tests_run)))
    p = subprocess.Popen(smoke_tests_run)

    def kill(signal, frame):
        p.kill()
        LOG.warning('Finished by Ctrl-c!')
        sys.exit(2)

    signal.signal(signal.SIGINT, kill)
    if p.wait() != 0:
        LOG.error('SMOKE-TEST FAILED !!!')
        raise SmokeTestFailedException("Smoke Tests Failed")
succeeded') 839 | # Process has finished, read rest of the output 840 | for output in p.stdout.readlines(): 841 | LOG.debug(output.strip()) 842 | break 843 | end_time = start_time + datetime.timedelta(minutes=TEMPEST_TIMEOUT) 844 | if datetime.datetime.now() >= end_time: 845 | LOG.error('TEMPEST-TEST TIMEOUT AFTER {0} MIN !!!'.format(TEMPEST_TIMEOUT)) 846 | p.kill() 847 | raise TempestTestFailedException("Tempest Tests failed by timeout") 848 | 849 | 850 | def handle_other(files, modules, tags, pipeline): 851 | LOG.error('Unsupported event type: {0}, nothing to do.'.format(ci_event_type)) 852 | exit(2) 853 | 854 | def print_env(): 855 | 856 | env_vars_used = ['pipeline={0}'.format(pipeline), 857 | 'non_voting={0}'.format(non_voting), 858 | 'printlogs={0}'.format(printlogs), 859 | 'verbose={0}'.format(verbose), 860 | 'CI_EVENT_TYPE="{0}"'.format(ci_event_type), 861 | 'CI_BRANCH="{0}"'.format(ci_branch), 862 | 'CI_COMMIT_RANGE="{0}"'.format(ci_commit_range) 863 | ] 864 | 865 | LOG.info('Variables used in CI:\n {0}'.format('\n '.join(env_vars_used))) 866 | 867 | def main(): 868 | try: 869 | LOG.info("DOCKER_HUB_USERNAME: {0}".format(os.environ.get('DOCKER_HUB_USERNAME', None))) 870 | LOG.info("DOCKER_HUB_PASSWORD: {0}".format(os.environ.get('DOCKER_HUB_PASSWORD', None))) 871 | print_env() 872 | 873 | if not pipeline or pipeline not in ('logs', 'metrics'): 874 | LOG.error('UNKNOWN PIPELINE: {0} !!! 
Choose (metrics|logs)'.format(pipeline)) 875 | exit(2) 876 | 877 | set_log_dir() 878 | files = get_changed_files() 879 | LOG.info('Changed files: {0}'.format(files)) 880 | modules = get_dirty_modules(files) 881 | LOG.info('Dirty modules: {0}'.format(modules)) 882 | tags = get_message_tags() 883 | LOG.info('Message tags: {0}'.format(tags)) 884 | 885 | if tags: 886 | LOG.debug('Tags detected:') 887 | for tag in tags: 888 | LOG.debug(' '.format(tag)) 889 | else: 890 | LOG.info('No tags detected.') 891 | 892 | func = { 893 | 'cron': handle_pull_request, 894 | 'pull_request': handle_pull_request, 895 | 'push': handle_push 896 | }.get(ci_event_type, handle_other) 897 | 898 | func(files, modules, tags, pipeline) 899 | except (FileReadException, FileWriteException, SubprocessException) as ex: 900 | LOG.error("FAILED !!! RCA: {0}".format(ex)) 901 | exit(1) 902 | except (InitJobFailedException, SmokeTestFailedException, 903 | TempestTestFailedException) as ex: 904 | if non_voting: 905 | LOG.warn('{0} is non voting, skipping failure'.format(pipeline)) 906 | else: 907 | LOG.error("FAILED !!! RCA: {0}".format(ex)) 908 | exit(1) 909 | except Exception as ex: 910 | LOG.error("UNKNOWN EXCEPTION !!! 
RCA: {0}".format(ex)) 911 | exit(1) 912 | finally: 913 | output_docker_ps() 914 | output_docker_logs() 915 | if printlogs: 916 | print_logs() 917 | 918 | 919 | if __name__ == '__main__': 920 | main() 921 | -------------------------------------------------------------------------------- /docker-compose-dev-mailserver.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | mailhog: 4 | image: mailhog/mailhog:v1.0.1 5 | ports: 6 | - "1025:1025" 7 | - "8025:8025" 8 | 9 | networks: 10 | default: 11 | external: 12 | name: monasca-docker_default 13 | -------------------------------------------------------------------------------- /docker-compose-dev.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | 4 | mysql_keystone: 5 | hostname: dev-host 6 | image: mysql:5.7 7 | restart: unless-stopped 8 | environment: 9 | MYSQL_ROOT_PASSWORD: secretmysql 10 | LOGSTASH_FIELDS: "service=mysql_keystone" 11 | networks: 12 | - default 13 | 14 | mysql-init_keystone: 15 | hostname: dev-host 16 | image: chaconpiza/mysql-init:${MYSQL_INIT_VERSION} 17 | environment: 18 | MYSQL_INIT_DISABLE_REMOTE_ROOT: "false" 19 | MYSQL_INIT_RANDOM_PASSWORD: "false" 20 | KEYSTONE_DB_ENABLED: 'true' 21 | CREATE_MON_USERS: "false" 22 | GRAFANA_DB_ENABLED: "false" 23 | MYSQL_INIT_HOST: mysql_keystone 24 | LOGSTASH_FIELDS: "service=mysql_keystone-init" 25 | networks: 26 | - default 27 | depends_on: 28 | - mysql_keystone 29 | 30 | keystone: 31 | hostname: dev-host 32 | image: monasca/keystone:1.2.0 33 | restart: unless-stopped 34 | environment: 35 | KEYSTONE_HOST: keystone 36 | KEYSTONE_PASSWORD: secretadmin 37 | KEYSTONE_DATABASE_BACKEND: mysql 38 | KEYSTONE_MYSQL_HOST: mysql_keystone 39 | KEYSTONE_MYSQL_USER: keystone 40 | KEYSTONE_MYSQL_PASSWORD: keystone 41 | KEYSTONE_MYSQL_DATABASE: keystone 42 | KEYSTONE_MONASCA_ENDPOINT: ${MON_MONASCA_API_URL} 43 | LOGSTASH_FIELDS: 
--------------------------------------------------------------------------------
/docker-compose-log.yml:
--------------------------------------------------------------------------------
version: '3'
services:

  # --- log pipeline: Kafka topics -> transformer -> persister/metrics ---
  log-metrics:
    hostname: docker-host
    image: chaconpiza/log-metrics:${MON_LOG_METRICS_VERSION}
    restart: unless-stopped
    environment:
      LOGSTASH_FIELDS: "service=monasca-log-metrics"
    depends_on:
      - kafka
      - zookeeper
      - log-transformer

  log-persister:
    hostname: docker-host
    image: chaconpiza/log-persister:${MON_LOG_PERSISTER_VERSION}
    restart: unless-stopped
    environment:
      LOGSTASH_FIELDS: "service=monasca-log-persister"
    depends_on:
      - kafka
      - zookeeper
      - elasticsearch
      - log-transformer

  log-transformer:
    hostname: docker-host
    image: chaconpiza/log-transformer:${MON_LOG_TRANSFORMER_VERSION}
    restart: unless-stopped
    environment:
      LOGSTASH_FIELDS: "service=monasca-log-transformer"
    depends_on:
      - kafka
      - zookeeper
      - log-api

  # --- storage and retention ---
  elasticsearch:
    hostname: elasticsearch
    image: monasca/elasticsearch:${MON_ELASTICSEARCH_VERSION}
    restart: unless-stopped
    environment:
      ES_JAVA_OPTS: ${MON_ELASTICSEARCH_HEAP_SIZE}
      discovery.type: "single-node"
      LOGSTASH_FIELDS: "service=elasticsearch"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ${MON_DOCKER_VOL_ROOT}/elasticsearch/data:/usr/share/elasticsearch/data:Z
      - ${MON_BACKUP_DIR}/elasticsearch_backup:/usr/share/elasticsearch/backup:Z

  elasticsearch-init:
    hostname: docker-host
    image: monasca/elasticsearch-init:${MON_ELASTICSEARCH_INIT_VERSION}
    environment:
      LOGSTASH_FIELDS: "service=elasticsearch-init"
    depends_on:
      - elasticsearch

  elasticsearch-curator:
    hostname: docker-host
    image: chaconpiza/elasticsearch-curator:${MON_ELASTICSEARCH_CURATOR_VERSION}
    restart: unless-stopped
    environment:
      CURATOR_EXCLUDED_INDEX_NAME: .kibana
      CURATOR_DELETE_BY_AGE: ${MON_ELASTICSEARCH_DATA_RETENTION_DAYS}
      LOGSTASH_FIELDS: "service=elasticsearch-curator"
    depends_on:
      - elasticsearch

  kafka-log-init:
    hostname: docker-host
    image: chaconpiza/kafka-init:${MON_KAFKA_INIT_VERSION}
    environment:
      KAFKA_TOPIC_CONFIG: segment.ms=900000 # 15m
      KAFKA_CREATE_TOPICS: "\
        log:4:1,\
        log-transformed:4:1"
      LOGSTASH_FIELDS: "service=kafka-log-init"
    depends_on:
      - kafka

  # --- UI and API ---
  kibana:
    hostname: docker-host
    image: chaconpiza/kibana:${MON_KIBANA_VERSION}
    restart: unless-stopped
    environment:
      KEYSTONE_URI: ${MON_KEYSTONE_URL}
      MONASCA_PLUGIN_ENABLED: ${MON_MONASCA_PLUGIN_ENABLED}
      BASE_PATH: ${MON_BASE_PATH}
      LOGSTASH_FIELDS: "service=kibana"
    depends_on:
      - elasticsearch
    ports:
      - 5601:5601

  log-api:
    hostname: docker-host
    image: monasca/api:${MON_API_VERSION}
    restart: unless-stopped
    environment:
      KEYSTONE_IDENTITY_URI: ${MON_KEYSTONE_URL}
      KEYSTONE_AUTH_URI: ${MON_KEYSTONE_URL}
      KEYSTONE_ADMIN_USER: ${MON_KEYSTONE_ADMIN_USER}
      KEYSTONE_ADMIN_PASSWORD: ${MON_KEYSTONE_ADMIN_PASSWORD}
      DELEGATE_AUTHORIZED_ROLES: "monitoring-delegate"
      ENABLE_METRICS_API: 'false'
      ENABLE_LOGS_API: 'true'
      GUNICORN_WORKERS: '3'
      GUNICORN_TIMEOUT: '60'
      LOGSTASH_FIELDS: "service=monasca-log-api"
    depends_on:
      - influxdb
      - mysql
      - zookeeper
      - kafka
      - memcached
    ports:
      - "5607:8070"

  # --- log collection ---
  log-agent:
    hostname: docker-host
    image: chaconpiza/log-agent:${MON_LOG_AGENT_VERSION}
    restart: unless-stopped
    environment:
      OS_AUTH_URL: ${MON_KEYSTONE_URL}/v3
      OS_USERNAME: ${MON_AGENT_USERNAME}
      OS_PASSWORD: ${MON_AGENT_PASSWORD}
      OS_PROJECT_NAME: ${MON_AGENT_PROJECT_NAME}
      LOGSPOUT: ignore
      LOGSTASH_FIELDS: "service=monasca-log-agent"
    depends_on:
      - log-api

  logspout:
    hostname: docker-host
    image: chaconpiza/logspout:${MON_LOGSPOUT_VERSION}
    restart: unless-stopped
    privileged: true
    environment:
      RETRY_SEND: "true"
      LOGSPOUT: ignore
      LOGSTASH_FIELDS: "service=logspout"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
    depends_on:
      - log-agent
--------------------------------------------------------------------------------
/docker-compose-metric.yml:
--------------------------------------------------------------------------------
version: '3'
services:

  memcached:
    hostname: docker-host
    image: memcached:${MEMCACHED_VERSION}
    restart: unless-stopped
    environment:
      LOGSTASH_FIELDS: "service=memcached"

  influxdb:
    hostname: docker-host
    image: influxdb:${INFLUXDB_VERSION}
    restart: unless-stopped
    environment:
      INFLUXDB_DATA_INDEX_VERSION: "tsi1"
      INFLUXDB_DATA_QUERY_LOG_ENABLED: "false"
      INFLUXDB_HTTP_LOG_ENABLED: "false"
      INFLUXDB_REPORTING_DISABLED: "true"
      LOGSTASH_FIELDS: "service=influxdb"
    volumes:
      - "${MON_DOCKER_VOL_ROOT}/influxdb:/var/lib/influxdb:Z"
      - "${MON_BACKUP_DIR}/influxdb_backup:/influxdb_backup:Z"

  influxdb-init:
    hostname: docker-host
    image: chaconpiza/influxdb-init:${INFLUXDB_INIT_VERSION}
    environment:
      INFLUXDB_DEFAULT_RETENTION: ${MON_INFLUXDB_RETENTION}
      LOGSTASH_FIELDS: "service=influxdb-init"
    depends_on:
      - influxdb

  # cadvisor will allow host metrics to be collected, but requires significant
  # access to the host system
  # if this is not desired, the following can be commented out, and the CADVISOR
  # environment variable should be set to "false" in the `agent-collector`
  # block - however no metrics will be collected
  cadvisor:
    hostname: docker-host
    image: google/cadvisor:${CADVISOR_VERSION}
    restart: unless-stopped
    environment:
      LOGSTASH_FIELDS: "service=cadvisor"
    privileged: true
    volumes:
      - "/:/rootfs:ro"
      - "/var/run:/var/run:rw"
      - "/sys:/sys:ro"
      - "/var/lib/docker:/var/lib/docker:ro"

  agent-forwarder:
    hostname: docker-host
    image: monasca/agent-forwarder:${MON_AGENT_FORWARDER_VERSION}
    restart: unless-stopped
    environment:
      NON_LOCAL_TRAFFIC: "true"
      OS_AUTH_URL: ${MON_KEYSTONE_URL}/v3
      OS_USERNAME: ${MON_AGENT_USERNAME}
      OS_PASSWORD: ${MON_AGENT_PASSWORD}
      OS_PROJECT_NAME: ${MON_AGENT_PROJECT_NAME}
      LOGSTASH_FIELDS: "service=monasca-agent-forwarder"
    depends_on:
      - monasca

  agent-collector:
    hostname: docker-host
    image: monasca/agent-collector:${MON_AGENT_COLLECTOR_VERSION}
    restart: unless-stopped
    environment:
      AGENT_HOSTNAME: "docker-host"
      FORWARDER_URL: "http://agent-forwarder:17123"
      CADVISOR: "true"
      CADVISOR_URL: "http://cadvisor:8080/"
      KEYSTONE_DEFAULTS_ENABLED: "false"
      DOCKER: "true"
      LOGSTASH_FIELDS: "service=monasca-agent-collector"
    privileged: true
    volumes:
      - "/:/host:ro"
      - "/var/run/docker.sock:/var/run/docker.sock:rw"
    depends_on:
      - agent-forwarder

  zookeeper:
    hostname: docker-host
    image: zookeeper:${ZOOKEEPER_VERSION}
    restart: unless-stopped
    environment:
      ZOO_AUTOPURGE_PURGEINTERVAL: ${MON_ZK_PURGE_INTERVAL}
      LOGSTASH_FIELDS: "service=zookeeper"

  kafka:
    hostname: docker-host
    image: chaconpiza/kafka:${MON_KAFKA_VERSION}
    restart: unless-stopped
    environment:
      LOGSTASH_FIELDS: "service=kafka"
    volumes:
      - "${MON_DOCKER_VOL_ROOT}/kafka:/data:Z"
    depends_on:
      - zookeeper

  kafka-init:
    hostname: docker-host
    image: chaconpiza/kafka-init:${MON_KAFKA_INIT_VERSION}
    environment:
      ZOOKEEPER_CONNECTION_STRING: "zookeeper:2181"
      KAFKA_DELETE_TOPIC_ENABLE: "true"
      KAFKA_TOPIC_CONFIG: segment.ms=900000 # 15m
      KAFKA_CREATE_TOPICS: "\
        metrics:16:1,\
        alarm-state-transitions:12:1,\
        alarm-notifications:12:1,\
        retry-notifications:3:1,\
        events:12:1,\
        60-seconds-notifications:3:1"
      LOGSTASH_FIELDS: "service=kafka-init"
    depends_on:
      - zookeeper
      - kafka

  mysql:
    hostname: docker-host
    image: mysql:${MYSQL_VERSION}
    restart: unless-stopped
    environment:
      MYSQL_ROOT_PASSWORD: secretmysql
      LOGSTASH_FIELDS: "service=mysql"
    volumes:
      - "${MON_DOCKER_VOL_ROOT}/mysql:/var/lib/mysql:Z"
      - "${MON_BACKUP_DIR}/mysql_backup:/mysql_backup:Z"

  mysql-init:
    hostname: docker-host
    image: chaconpiza/mysql-init:${MYSQL_INIT_VERSION}
    environment:
      MYSQL_INIT_DISABLE_REMOTE_ROOT: "false"
      MYSQL_INIT_RANDOM_PASSWORD: "false"
      KEYSTONE_DB_ENABLED: 'false'
      CREATE_MON_USERS: "true"
      GRAFANA_DB_ENABLED: "true"
      MYSQL_INIT_HOST: "mysql"
      LOGSTASH_FIELDS: "service=mysql-init"
    depends_on:
      - mysql

  monasca:
    hostname: docker-host
    image: monasca/api:${MON_API_VERSION}
    restart: unless-stopped
    environment:
      KEYSTONE_IDENTITY_URI:
${MON_KEYSTONE_URL} 154 | KEYSTONE_AUTH_URI: ${MON_KEYSTONE_URL} 155 | KEYSTONE_ADMIN_USER: ${MON_KEYSTONE_ADMIN_USER} 156 | KEYSTONE_ADMIN_PASSWORD: ${MON_KEYSTONE_ADMIN_PASSWORD} 157 | DELEGATE_AUTHORIZED_ROLES: "monitoring-delegate" 158 | ENABLE_METRICS_API: 'true' 159 | ENABLE_LOGS_API: 'false' 160 | GUNICORN_WORKERS: '8' 161 | GUNICORN_TIMEOUT: '60' 162 | LOGSTASH_FIELDS: "service=monasca-api" 163 | depends_on: 164 | - influxdb 165 | - mysql 166 | - zookeeper 167 | - kafka 168 | - memcached 169 | ports: 170 | - "8070:8070" 171 | 172 | monasca-persister: 173 | hostname: docker-host 174 | image: monasca/persister:${MON_PERSISTER_VERSION} 175 | restart: unless-stopped 176 | environment: 177 | LOG_LEVEL: "INFO" 178 | KAFKA_LEGACY_CLIENT_ENABLED: "false" 179 | LOGSTASH_FIELDS: "service=monasca-persister" 180 | depends_on: 181 | - monasca 182 | - influxdb 183 | - zookeeper 184 | - kafka 185 | 186 | thresh: 187 | hostname: docker-host 188 | image: monasca/thresh:${MON_THRESH_VERSION} 189 | restart: unless-stopped 190 | environment: 191 | NO_STORM_CLUSTER: "true" 192 | WORKER_MAX_HEAP_MB: "768" 193 | LOG_LEVEL: "INFO" 194 | LOGSTASH_FIELDS: "service=monasca-thresh" 195 | depends_on: 196 | - zookeeper 197 | - kafka 198 | 199 | monasca-notification: 200 | hostname: docker-host 201 | image: monasca/notification:${MON_NOTIFICATION_VERSION} 202 | restart: unless-stopped 203 | environment: 204 | NF_PLUGINS: ${NF_PLUGINS} 205 | NF_EMAIL_SERVER: ${NF_EMAIL_SERVER} 206 | NF_EMAIL_PORT: ${NF_EMAIL_PORT} 207 | NF_EMAIL_USER: ${NF_EMAIL_USER} 208 | NF_EMAIL_PASSWORD: ${NF_EMAIL_PASSWORD} 209 | NF_EMAIL_FROM_ADDR: ${NF_EMAIL_FROM_ADDR} 210 | NF_EMAIL_GRAFANA_URL: ${NF_EMAIL_GRAFANA_URL} 211 | NF_WEBHOOK_TIMEOUT: ${NF_WEBHOOK_TIMEOUT} 212 | NF_PAGERDUTY_TIMEOUT: ${NF_PAGERDUTY_TIMEOUT} 213 | NF_SLACK_TIMEOUT: ${NF_SLACK_TIMEOUT} 214 | NF_SLACK_CERTS: ${NF_SLACK_CERTS} 215 | NF_SLACK_INSECURE: ${NF_SLACK_INSECURE} 216 | NF_SLACK_PROXY: ${NF_SLACK_PROXY} 217 | 
MONASCASTATSD_LOG_LEVEL: "CRITICAL" 218 | LOGSTASH_FIELDS: "service=monasca-notification" 219 | depends_on: 220 | - monasca 221 | - zookeeper 222 | - kafka 223 | - mysql 224 | 225 | monasca-statsd: 226 | image: monasca/statsd:${MON_STATSD_VERSION} 227 | restart: unless-stopped 228 | environment: 229 | FORWARDER_URL: http://agent-forwarder:17123 230 | LOG_LEVEL: WARN 231 | LOGSTASH_FIELDS: "service=monasca-statsd" 232 | 233 | grafana: 234 | hostname: docker-host 235 | image: monasca/grafana:${MON_GRAFANA_VERSION} 236 | restart: unless-stopped 237 | environment: 238 | GF_AUTH_BASIC_ENABLED: "true" 239 | GF_USERS_ALLOW_SIGN_UP: "true" 240 | GF_USERS_ALLOW_ORG_CREATE: "true" 241 | GF_AUTH_KEYSTONE_ENABLED: "true" 242 | GF_AUTH_KEYSTONE_AUTH_URL: ${MON_KEYSTONE_URL} 243 | GRAFANA_ADMIN_USERNAME: ${MON_GRAFANA_ADMIN_USER} 244 | GRAFANA_ADMIN_PASSWORD: ${MON_GRAFANA_ADMIN_PASSWORD} 245 | GF_SECURITY_ADMIN_USER: ${MON_GRAFANA_ADMIN_USER} 246 | GF_SECURITY_ADMIN_PASSWORD: ${MON_GRAFANA_ADMIN_PASSWORD} 247 | GF_AUTH_KEYSTONE_VERIFY_SSL_CERT: "false" 248 | GF_AUTH_KEYSTONE_DEFAULT_DOMAIN: "Default" 249 | LOGSTASH_FIELDS: "service=grafana" 250 | volumes: 251 | - "${MON_DOCKER_VOL_ROOT}/grafana/data:/var/lib/grafana/data:Z" 252 | ports: 253 | - "3000:3000" 254 | depends_on: 255 | - monasca 256 | 257 | grafana-init: 258 | hostname: docker-host 259 | image: monasca/grafana-init:${MON_GRAFANA_INIT_VERSION} 260 | environment: 261 | GRAFANA_URL: "${HORIZON_URL}/grafana" 262 | GRAFANA_ADMIN_USERNAME: ${MON_GRAFANA_ADMIN_USER} 263 | GRAFANA_ADMIN_PASSWORD: ${MON_GRAFANA_ADMIN_PASSWORD} 264 | DATASOURCE_URL: "${HORIZON_URL}${HORIZON_PATH}/monitoring/proxy" 265 | DATASOURCE_ACCESS_MODE: "direct" 266 | DATASOURCE_AUTH: "Horizon" 267 | LOGSTASH_FIELDS: "service=grafana-init" 268 | depends_on: 269 | - grafana 270 | -------------------------------------------------------------------------------- /elasticsearch-init/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM appropriate/curl 2 | 3 | ENV ELASTICSEARCH_URI=elasticsearch:9200 \ 4 | ELASTICSEARCH_TIMEOUT=60 5 | 6 | RUN mkdir elasticsearch-templates 7 | COPY elasticsearch-templates elasticsearch-templates 8 | 9 | COPY wait-for.sh upload.sh / 10 | RUN chmod +x /wait-for.sh /upload.sh 11 | 12 | ENTRYPOINT /wait-for.sh ${ELASTICSEARCH_URI} && /upload.sh 13 | -------------------------------------------------------------------------------- /elasticsearch-init/README.md: -------------------------------------------------------------------------------- 1 | elasticsearch-init 2 | ================== 3 | 4 | A container for loading the templates to ElasticSearch. 5 | 6 | Tags 7 | ---- 8 | 9 | **elasticsearch-init** uses simple [SemVer][3] tags as follows: 10 | 11 | * `master` 12 | 13 | Usage 14 | ----- 15 | 16 | **elasticsearch-init** leverages the [Docker volumes][1]. Each template 17 | that the image is supposed to upload to [ElasticSearch][2] is represented as a single file 18 | mounted to the ```/templates/``` directory inside the container. 19 | Another point to keep in mind is that template uploading also requires a **template name**. 20 | That name is equal to the filename that holds the template. 
For instance: 21 | 22 | ```docker run -v ./tpls/logs.json:/templates/logs -l elasticsearch monasca/elasticsearch-init``` 23 | 24 | means that: 25 | 26 | * content of ```/template/logs``` will be uploaded to [ElasticSearch][2] 27 | * template name will be ```logs``` 28 | 29 | Configuration 30 | ------------- 31 | 32 | A number of environment variables can be passed to the container: 33 | 34 | | Variable | Default | Description | 35 | |---------------------------|--------------|-----------------------------------| 36 | | `ELASTICSEARCH_URI` | `elasticsearch:9200` | URI to connect to ES | 37 | | `ELASTICSEARCH_TIMEOUT` | `60` | How long to wait for ElasticSearch connection | 38 | 39 | [1]: https://docs.docker.com/engine/tutorials/dockervolumes/ 40 | [2]: https://hub.docker.com/_/elasticsearch/ 41 | [3]: http://semver.org/ 42 | -------------------------------------------------------------------------------- /elasticsearch-init/build.yml: -------------------------------------------------------------------------------- 1 | repository: monasca/elasticsearch-init 2 | variants: 3 | - tag: master 4 | 5 | -------------------------------------------------------------------------------- /elasticsearch-init/elasticsearch-templates/logs.template: -------------------------------------------------------------------------------- 1 | { 2 | "order" : 0, 3 | "template" : "*", 4 | "settings" : { 5 | "index.refresh_interval" : "5s", 6 | "index" : { 7 | "analysis": { 8 | "analyzer" : { 9 | "no_token" : { 10 | "type" : "custom", 11 | "tokenizer" : "keyword", 12 | "filter" : "lowercase" 13 | } 14 | } 15 | } 16 | } 17 | }, 18 | "mappings" : { 19 | "_default_" : { 20 | "dynamic_templates" : [ { 21 | "message_field" : { 22 | "mapping" : { 23 | "index" : "analyzed", 24 | "omit_norms" : true, 25 | "type" : "string" 26 | }, 27 | "match_mapping_type" : "string", 28 | "match" : "message" 29 | } 30 | }, { 31 | "string_fields" : { 32 | "mapping" : { 33 | "analyzer" : "no_token", 34 | "omit_norms" : 
true, 35 | "type" : "string" 36 | }, 37 | "match_mapping_type" : "string", 38 | "match" : "*" 39 | } 40 | } ], 41 | "properties" : { 42 | "geoip" : { 43 | "dynamic" : true, 44 | "properties" : { 45 | "location" : { 46 | "type" : "geo_point" 47 | } 48 | }, 49 | "type" : "object" 50 | }, 51 | "@version" : { 52 | "index" : "not_analyzed", 53 | "type" : "string" 54 | } 55 | }, 56 | "_all" : { 57 | "enabled" : true, 58 | "omit_norms" : true 59 | } 60 | } 61 | }, 62 | "aliases" : { } 63 | } 64 | -------------------------------------------------------------------------------- /elasticsearch-init/upload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # shellcheck shell=dash 3 | 4 | TPL_DIR=/elasticsearch-templates 5 | 6 | _get_tpl_name_from_file() { 7 | local tpl=$1 8 | local tpl_name 9 | tpl_name=$(basename "$tpl") 10 | echo "$tpl_name" 11 | } 12 | 13 | 14 | if [ ! -d $TPL_DIR ]; then 15 | echo "ERROR: directory $TPL_DIR not found" 16 | exit 1 17 | fi 18 | 19 | TPLS=$(ls $TPL_DIR) 20 | if [ -z "$TPLS" ]; then 21 | echo "ERROR: no templates found" 22 | exit 2 23 | fi 24 | 25 | for template in $TPLS; do 26 | echo "Handling template file $template" 27 | tpl_name=$(_get_tpl_name_from_file "$template") 28 | 29 | curl -XPUT --retry 2 --retry-delay 2 "$ELASTICSEARCH_URI"/_template/"${tpl_name}" -d @$TPL_DIR/"$template" 30 | returnCode=$? 31 | if test "$returnCode" != "0"; then 32 | echo "ERROR: curl to elasticsearch API failed with: $returnCode" 33 | exit 3 34 | fi 35 | done 36 | -------------------------------------------------------------------------------- /elasticsearch-init/wait-for.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | : "${SLEEP_LENGTH:=2}" 4 | 5 | wait_for() { 6 | echo "Waiting for $1 to listen on $2..." 7 | while ! 
nc -z "$1" "$2"; do echo sleeping; sleep "$SLEEP_LENGTH"; done 8 | } 9 | 10 | for var in "$@" 11 | do 12 | host=${var%:*} 13 | port=${var#*:} 14 | wait_for "$host" "$port" 15 | done 16 | -------------------------------------------------------------------------------- /elasticsearch/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.elastic.co/elasticsearch/elasticsearch-oss:7.3.0 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/ 4 | COPY elasticsearch.yml /usr/share/elasticsearch/config/ 5 | COPY jvm.options /usr/share/elasticsearch/config/ 6 | COPY log4j2.properties /usr/share/elasticsearch/config/ 7 | 8 | USER root 9 | RUN chown elasticsearch:root /usr/share/elasticsearch/config/elasticsearch.yml 10 | RUN chown elasticsearch:root /usr/share/elasticsearch/config/jvm.options 11 | RUN chown elasticsearch:root /usr/share/elasticsearch/config/log4j2.properties 12 | RUN chmod 777 /usr/local/bin/docker-entrypoint.sh 13 | -------------------------------------------------------------------------------- /elasticsearch/build.yml: -------------------------------------------------------------------------------- 1 | repository: monasca/elasticsearch 2 | variants: 3 | - tag: 7.3.0-master 4 | -------------------------------------------------------------------------------- /elasticsearch/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Files created by Elasticsearch should always be group writable too 5 | umask 0002 6 | 7 | run_as_other_user_if_needed() { 8 | if [[ "$(id -u)" == "0" ]]; then 9 | # If running as root, drop to specified UID and run command 10 | exec chroot --userspec=1000 / "${@}" 11 | else 12 | # Either we are running in Openshift with random uid and are a member of the root group 13 | # or with a custom --user 14 | exec "${@}" 15 | fi 16 | } 17 | 18 | # Allow user specify custom CMD, maybe bin/elasticsearch itself 19 
| # for example to directly specify `-E` style parameters for elasticsearch on k8s 20 | # or simply to run /bin/bash to check the image 21 | if [[ "$1" != "eswrapper" ]]; then 22 | if [[ "$(id -u)" == "0" && $(basename "$1") == "elasticsearch" ]]; then 23 | # centos:7 chroot doesn't have the `--skip-chdir` option and 24 | # changes our CWD. 25 | # Rewrite CMD args to replace $1 with `elasticsearch` explicitly, 26 | # so that we are backwards compatible with the docs 27 | # from the previous Elasticsearch versions<6 28 | # and configuration option D: 29 | # https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html#_d_override_the_image_8217_s_default_ulink_url_https_docs_docker_com_engine_reference_run_cmd_default_command_or_options_cmd_ulink 30 | # Without this, user could specify `elasticsearch -E x.y=z` but 31 | # `bin/elasticsearch -E x.y=z` would not work. 32 | set -- "elasticsearch" "${@:2}" 33 | # Use chroot to switch to UID 1000 34 | exec chroot --userspec=1000 / "$@" 35 | else 36 | # User probably wants to run something else, like /bin/bash, with another uid forced (Openshift?) 37 | exec "$@" 38 | fi 39 | fi 40 | 41 | # Parse Docker env vars to customize Elasticsearch 42 | # 43 | # e.g. Setting the env var cluster.name=testcluster 44 | # 45 | # will cause Elasticsearch to be invoked with -Ecluster.name=testcluster 46 | # 47 | # see https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html#_setting_default_settings 48 | 49 | declare -a es_opts 50 | 51 | while IFS='=' read -r envvar_key envvar_value 52 | do 53 | # Elasticsearch settings need to have at least two dot separated lowercase 54 | # words, e.g. `cluster.name`, except for `processors` which we handle 55 | # specially 56 | if [[ "$envvar_key" =~ ^[a-z0-9_]+\.[a-z0-9_]+ || "$envvar_key" == "processors" ]]; then 57 | # shellcheck disable=SC2236 58 | if [[ ! 
-z $envvar_value ]]; then 59 | es_opt="-E${envvar_key}=${envvar_value}" 60 | es_opts+=("${es_opt}") 61 | fi 62 | fi 63 | done < <(env) 64 | 65 | # The virtual file /proc/self/cgroup should list the current cgroup 66 | # membership. For each hierarchy, you can follow the cgroup path from 67 | # this file to the cgroup filesystem (usually /sys/fs/cgroup/) and 68 | # introspect the statistics for the cgroup for the given 69 | # hierarchy. Alas, Docker breaks this by mounting the container 70 | # statistics at the root while leaving the cgroup paths as the actual 71 | # paths. Therefore, Elasticsearch provides a mechanism to override 72 | # reading the cgroup path from /proc/self/cgroup and instead uses the 73 | # cgroup path defined the JVM system property 74 | # es.cgroups.hierarchy.override. Therefore, we set this value here so 75 | # that cgroup statistics are available for the container this process 76 | # will run in. 77 | export ES_JAVA_OPTS="-Des.cgroups.hierarchy.override=/ $ES_JAVA_OPTS" 78 | 79 | if [[ -f bin/elasticsearch-users ]]; then 80 | # Check for the ELASTIC_PASSWORD environment variable to set the 81 | # bootstrap password for Security. 82 | # 83 | # This is only required for the first node in a cluster with Security 84 | # enabled, but we have no way of knowing which node we are yet. We'll just 85 | # honor the variable if it's present. 86 | if [[ -n "$ELASTIC_PASSWORD" ]]; then 87 | [[ -f /usr/share/elasticsearch/config/elasticsearch.keystore ]] || (run_as_other_user_if_needed elasticsearch-keystore create) 88 | if ! 
(run_as_other_user_if_needed elasticsearch-keystore list | grep -q '^bootstrap.password$'); then 89 | (run_as_other_user_if_needed echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password') 90 | fi 91 | fi 92 | fi 93 | 94 | if [[ "$(id -u)" == "0" ]]; then 95 | chown -R 1000:1000 /usr/share/elasticsearch/{data,logs,backup} 96 | fi 97 | 98 | run_as_other_user_if_needed /usr/share/elasticsearch/bin/elasticsearch "${es_opts[@]}" 99 | -------------------------------------------------------------------------------- /elasticsearch/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | network.host: ${HOSTNAME} 2 | network.bind_host: 0.0.0.0 3 | cluster.name: monasca 4 | node.name: elasticsearch_node_1 5 | 6 | # Directory inside container for storing backups 7 | path.repo: ["/usr/share/elasticsearch/backup"] 8 | 9 | #For details see: 10 | #https://www.elastic.co/guide/en/elasticsearch/reference/7.3/setup.html 11 | bootstrap.memory_lock: true 12 | -------------------------------------------------------------------------------- /elasticsearch/jvm.options: -------------------------------------------------------------------------------- 1 | ## JVM configuration 2 | 3 | ################################################################ 4 | ## IMPORTANT: JVM heap size 5 | ################################################################ 6 | ## 7 | ## You should always set the min and max JVM heap 8 | ## size to the same value. 
For example, to set 9 | ## the heap to 4 GB, set: 10 | ## 11 | ## -Xms4g 12 | ## -Xmx4g 13 | ## 14 | ## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html 15 | ## for more information 16 | ## 17 | ################################################################ 18 | 19 | # Xms represents the initial size of total heap space 20 | # Xmx represents the maximum size of total heap space 21 | 22 | # Commented to use it from docker env vars 23 | #-Xms1g 24 | #-Xmx1g 25 | 26 | ################################################################ 27 | ## Expert settings 28 | ################################################################ 29 | ## 30 | ## All settings below this section are considered 31 | ## expert settings. Don't tamper with them unless 32 | ## you understand what you are doing 33 | ## 34 | ################################################################ 35 | 36 | ## GC configuration 37 | -XX:+UseConcMarkSweepGC 38 | -XX:CMSInitiatingOccupancyFraction=75 39 | -XX:+UseCMSInitiatingOccupancyOnly 40 | 41 | ## G1GC Configuration 42 | # NOTE: G1GC is only supported on JDK version 10 or later. 43 | # To use G1GC uncomment the lines below. 
44 | # 10-:-XX:-UseConcMarkSweepGC 45 | # 10-:-XX:-UseCMSInitiatingOccupancyOnly 46 | # 10-:-XX:+UseG1GC 47 | # 10-:-XX:InitiatingHeapOccupancyPercent=75 48 | 49 | ## DNS cache policy 50 | # cache ttl in seconds for positive DNS lookups noting that this overrides the 51 | # JDK security property networkaddress.cache.ttl; set to -1 to cache forever 52 | -Des.networkaddress.cache.ttl=60 53 | # cache ttl in seconds for negative DNS lookups noting that this overrides the 54 | # JDK security property networkaddress.cache.negative ttl; set to -1 to cache 55 | # forever 56 | -Des.networkaddress.cache.negative.ttl=10 57 | 58 | ## optimizations 59 | 60 | # pre-touch memory pages used by the JVM during initialization 61 | -XX:+AlwaysPreTouch 62 | 63 | ## basic 64 | 65 | # explicitly set the stack size 66 | -Xss1m 67 | 68 | # set to headless, just in case 69 | -Djava.awt.headless=true 70 | 71 | # ensure UTF-8 encoding by default (e.g. filenames) 72 | -Dfile.encoding=UTF-8 73 | 74 | # use our provided JNA always versus the system one 75 | -Djna.nosys=true 76 | 77 | # turn off a JDK optimization that throws away stack traces for common 78 | # exceptions because stack traces are important for debugging 79 | -XX:-OmitStackTraceInFastThrow 80 | 81 | # flags to configure Netty 82 | -Dio.netty.noUnsafe=true 83 | -Dio.netty.noKeySetOptimization=true 84 | -Dio.netty.recycler.maxCapacityPerThread=0 85 | 86 | # log4j 2 87 | -Dlog4j.shutdownHookEnabled=false 88 | -Dlog4j2.disable.jmx=true 89 | 90 | -Djava.io.tmpdir=${ES_TMPDIR} 91 | 92 | ## heap dumps 93 | 94 | # generate a heap dump when an allocation from the Java heap fails 95 | # heap dumps are created in the working directory of the JVM 96 | -XX:+HeapDumpOnOutOfMemoryError 97 | 98 | # specify an alternative path for heap dumps; ensure the directory exists and 99 | # has sufficient space 100 | -XX:HeapDumpPath=data 101 | 102 | # specify an alternative path for JVM fatal error logs 103 | -XX:ErrorFile=logs/hs_err_pid%p.log 104 | 105 | 
## JDK 8 GC logging 106 | 107 | 8:-XX:+PrintGCDetails 108 | 8:-XX:+PrintGCDateStamps 109 | 8:-XX:+PrintTenuringDistribution 110 | 8:-XX:+PrintGCApplicationStoppedTime 111 | 8:-Xloggc:logs/gc.log 112 | 8:-XX:+UseGCLogFileRotation 113 | 8:-XX:NumberOfGCLogFiles=32 114 | 8:-XX:GCLogFileSize=64m 115 | 116 | # JDK 9+ GC logging 117 | 9-:-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m 118 | # due to internationalization enhancements in JDK 9 Elasticsearch need to set the provider to COMPAT otherwise 119 | # time/date parsing will break in an incompatible way for some date patterns and locals 120 | 9-:-Djava.locale.providers=COMPAT 121 | -------------------------------------------------------------------------------- /elasticsearch/log4j2.properties: -------------------------------------------------------------------------------- 1 | status = error 2 | 3 | # log action execution errors for easier debugging 4 | logger.action.name = org.elasticsearch.action 5 | logger.action.level = debug 6 | 7 | appender.rolling.type = Console 8 | appender.rolling.name = rolling 9 | appender.rolling.layout.type = PatternLayout 10 | appender.rolling.layout.pattern = %d{asctime} %-5p [%-25c{1.}] [%node_name]%marker %.-10000m%n 11 | 12 | rootLogger.level = info 13 | rootLogger.appenderRef.rolling.ref = rolling 14 | 15 | appender.deprecation_rolling.type = Console 16 | appender.deprecation_rolling.name = deprecation_rolling 17 | appender.deprecation_rolling.layout.type = ESJsonLayout 18 | appender.deprecation_rolling.layout.type_name = deprecation 19 | 20 | logger.deprecation.name = org.elasticsearch.deprecation 21 | logger.deprecation.level = warn 22 | logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling 23 | logger.deprecation.additivity = false 24 | 25 | appender.index_search_slowlog_rolling.type = Console 26 | appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling 27 | 
appender.index_search_slowlog_rolling.layout.type = ESJsonLayout 28 | appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog 29 | 30 | logger.index_search_slowlog_rolling.name = index.search.slowlog 31 | logger.index_search_slowlog_rolling.level = trace 32 | logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling 33 | logger.index_search_slowlog_rolling.additivity = false 34 | 35 | appender.index_indexing_slowlog_rolling.type = Console 36 | appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling 37 | appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout 38 | appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog 39 | 40 | logger.index_indexing_slowlog.name = index.indexing.slowlog.index 41 | logger.index_indexing_slowlog.level = trace 42 | logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling 43 | logger.index_indexing_slowlog.additivity = false 44 | -------------------------------------------------------------------------------- /grafana-init/Dockerfile: -------------------------------------------------------------------------------- 1 | from alpine:3.13 2 | 3 | env LOG_LEVEL=INFO \ 4 | GRAFANA_URL=http://grafana:3000 \ 5 | DATASOURCE_TYPE=monasca \ 6 | DATASOURCE_URL=http://monasca:8070/ \ 7 | DATASOURCE_ACCESS_MODE=proxy \ 8 | DATASOURCE_AUTH=Keystone \ 9 | DASHBOARDS_DIR=/dashboards.d 10 | 11 | run apk add --no-cache python3 py3-requests 12 | copy grafana.py /grafana.py 13 | copy dashboards.d/ /dashboards.d/ 14 | cmd ["python3", "/grafana.py"] 15 | -------------------------------------------------------------------------------- /grafana-init/README.md: -------------------------------------------------------------------------------- 1 | grafana-init Dockerfile 2 | ======================= 3 | 4 | This image runs a script to initialize a Grafana instance with a set of 
default 5 | resources, like a datasource and JSON dashboards. For more information on the 6 | Monasca project, see [the wiki][1]. 7 | 8 | Sources: [grafana-init][2] · [Dockerfile][3] · [monasca-docker][4] 9 | 10 | Tags 11 | ---- 12 | 13 | Images in this repository are tagged as follows: 14 | 15 | * `master`: refers to the latest stable point release 16 | 17 | Usage 18 | ----- 19 | 20 | This image requires a running Grafana instance. The [`monasca/grafana`][5] image 21 | is recommended for use with Monasca, but any recent Grafana server should work 22 | (albeit with some minor modifications). 23 | 24 | To use with defaults, run: 25 | 26 | docker run --link grafana monasca/grafana-init:latest 27 | 28 | The image will connect to a Grafana server at the default address 29 | (`http://grafana:3000/`), create a Monasca datasource, and import several 30 | default dashboards. 31 | 32 | Note that this script will not run successfully on a Grafana instance that has 33 | already been initialized. 34 | 35 | Configuration 36 | ------------- 37 | 38 | | Variable | Default | Description | 39 | |--------------------|------------------------|---------------------------------| 40 | | `LOG_LEVEL` | `INFO` | Logging level, e.g. 
`DEBUG` | 41 | | `GRAFANA_URL` | `http://grafana:3000` | Location of Grafana server | 42 | | `DATASOURCE_TYPE` | `monasca` | Type of the Grafana datasource | 43 | | `GRAFANA_ADMIN_USERNAME` | `admin` | Grafana admin username | 44 | | `GRAFANA_ADMIN_PASSWORD` | `password` | Grafana admin password | 45 | | `DATASOURCE_URL` | `http://monasca:8070/` | URL of the datasource | 46 | | `DATASOURCE_ACCESS_MODE` | `proxy` | Grafana access mode string | 47 | | `DATASOURCE_AUTH` | `Keystone` | Grafana authentication option (`Keystone`, `Horizon`, `Token`) | 48 | | `DATASOURCE_AUTH_TOKEN` | `` | Keystone token for authentication (for use when `DATASOURCE_AUTH` is set to `Token`) | 49 | | `DASHBOARDS_DIR` | `/dashboards.d` | Directory to scan for .json dashboards | 50 | 51 | Note that the only datasource type supported at the moment is `monasca`. Other 52 | datasources should be simple to implement as needed by adding logic to 53 | `create_datasource_payload()` in [`grafana.py`][6]. 54 | 55 | ### Grafana users 56 | 57 | `GRAFANA_ADMIN_USERNAME` and `GRAFANA_ADMIN_PASSWORD` 58 | 59 | The Grafana admin user is needed for uploading dashboards. 60 | 61 | 62 | ### Custom Dashboards 63 | 64 | This image comes with a default set of Monasca-specific dashboards, but these 65 | can be overridden. To do so, mount a directory containing `.json` files to 66 | `/dashboards.d` (path configurable via `DASHBOARDS_DIR`). 67 | 68 | The directory will be scanned and each `.json` file will be imported. Files are 69 | sorted before importing, so filenames can be prefixed, e.g. 
`01-first.json`, 70 | `99-last.json`, to help ensure proper ordering in the Grafana UI, 71 | 72 | 73 | [1]: https://wiki.openstack.org/wiki/Monasca 74 | [2]: https://github.com/monasca/monasca-docker/blob/master/grafana-init/ 75 | [3]: https://github.com/monasca/monasca-docker/blob/master/grafana-init/Dockerfile 76 | [4]: https://github.com/monasca/monasca-docker/ 77 | [5]: https://hub.docker.com/r/monasca/grafana/ 78 | [6]: https://github.com/monasca/monasca-docker/blob/master/grafana-init/grafana.py 79 | -------------------------------------------------------------------------------- /grafana-init/build.yml: -------------------------------------------------------------------------------- 1 | repository: monasca/grafana-init 2 | variants: 3 | - tag: master 4 | -------------------------------------------------------------------------------- /grafana-init/dashboards.d/07-nodes.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "type": "dashboard" 12 | } 13 | ] 14 | }, 15 | "editable": true, 16 | "gnetId": null, 17 | "graphTooltip": 0, 18 | "id": 5, 19 | "iteration": 1617966596131, 20 | "links": [], 21 | "panels": [ 22 | { 23 | "datasource": null, 24 | "description": "", 25 | "fieldConfig": { 26 | "defaults": { 27 | "color": { 28 | "mode": "thresholds" 29 | }, 30 | "custom": {}, 31 | "mappings": [], 32 | "thresholds": { 33 | "mode": "absolute", 34 | "steps": [ 35 | { 36 | "color": "red", 37 | "value": null 38 | }, 39 | { 40 | "color": "#EAB839", 41 | "value": 22 42 | }, 43 | { 44 | "color": "green", 45 | "value": 23 46 | } 47 | ] 48 | }, 49 | "unit": "short" 50 | }, 51 | "overrides": [] 52 | }, 53 | "gridPos": { 54 | "h": 4, 55 | "w": 3, 56 | "x": 0, 57 | "y": 0 58 | }, 59 | "id": 20, 60 | "options": { 61 | 
"colorMode": "value", 62 | "graphMode": "none", 63 | "justifyMode": "auto", 64 | "orientation": "auto", 65 | "reduceOptions": { 66 | "calcs": [ 67 | "lastNotNull" 68 | ], 69 | "fields": "", 70 | "values": false 71 | }, 72 | "text": {}, 73 | "textMode": "auto" 74 | }, 75 | "pluginVersion": "7.4.2", 76 | "targets": [ 77 | { 78 | "aggregator": "avg", 79 | "dimensions": [], 80 | "error": "", 81 | "group": false, 82 | "metric": "container.running_count", 83 | "period": "$average", 84 | "refId": "A" 85 | } 86 | ], 87 | "title": "Amount of containers", 88 | "type": "stat" 89 | }, 90 | { 91 | "datasource": null, 92 | "fieldConfig": { 93 | "defaults": { 94 | "color": { 95 | "mode": "thresholds" 96 | }, 97 | "custom": {}, 98 | "mappings": [], 99 | "thresholds": { 100 | "mode": "absolute", 101 | "steps": [ 102 | { 103 | "color": "green", 104 | "value": null 105 | }, 106 | { 107 | "color": "red", 108 | "value": 90 109 | } 110 | ] 111 | }, 112 | "unit": "percent" 113 | }, 114 | "overrides": [] 115 | }, 116 | "gridPos": { 117 | "h": 4, 118 | "w": 3, 119 | "x": 3, 120 | "y": 0 121 | }, 122 | "id": 26, 123 | "options": { 124 | "reduceOptions": { 125 | "calcs": [ 126 | "lastNotNull" 127 | ], 128 | "fields": "", 129 | "values": false 130 | }, 131 | "showThresholdLabels": false, 132 | "showThresholdMarkers": true, 133 | "text": {} 134 | }, 135 | "pluginVersion": "7.4.2", 136 | "targets": [ 137 | { 138 | "aggregator": "avg", 139 | "alias": "", 140 | "dimensions": [ 141 | { 142 | "key": "hostname", 143 | "value": "docker-host" 144 | } 145 | ], 146 | "error": "", 147 | "group": false, 148 | "metric": "mem.used_perc", 149 | "period": "$average", 150 | "refId": "A" 151 | } 152 | ], 153 | "title": "MEM", 154 | "type": "gauge" 155 | }, 156 | { 157 | "datasource": null, 158 | "fieldConfig": { 159 | "defaults": { 160 | "color": { 161 | "mode": "thresholds" 162 | }, 163 | "custom": {}, 164 | "mappings": [], 165 | "thresholds": { 166 | "mode": "absolute", 167 | "steps": [ 168 | { 169 | "color": 
"green", 170 | "value": null 171 | }, 172 | { 173 | "color": "red", 174 | "value": 90 175 | } 176 | ] 177 | }, 178 | "unit": "percent" 179 | }, 180 | "overrides": [] 181 | }, 182 | "gridPos": { 183 | "h": 4, 184 | "w": 3, 185 | "x": 6, 186 | "y": 0 187 | }, 188 | "id": 28, 189 | "options": { 190 | "reduceOptions": { 191 | "calcs": [ 192 | "lastNotNull" 193 | ], 194 | "fields": "", 195 | "values": false 196 | }, 197 | "showThresholdLabels": false, 198 | "showThresholdMarkers": true, 199 | "text": {} 200 | }, 201 | "pluginVersion": "7.4.2", 202 | "targets": [ 203 | { 204 | "aggregator": "avg", 205 | "dimensions": [ 206 | { 207 | "key": "hostname", 208 | "value": "docker-host" 209 | }, 210 | { 211 | "key": "device", 212 | "value": "overlay" 213 | } 214 | ], 215 | "error": "", 216 | "metric": "fs.usage_perc", 217 | "period": "$average", 218 | "refId": "A" 219 | } 220 | ], 221 | "title": "Overlay space used", 222 | "type": "gauge" 223 | }, 224 | { 225 | "collapsed": false, 226 | "datasource": null, 227 | "gridPos": { 228 | "h": 1, 229 | "w": 24, 230 | "x": 0, 231 | "y": 4 232 | }, 233 | "id": 9, 234 | "panels": [], 235 | "repeat": null, 236 | "title": "CMM Containers", 237 | "type": "row" 238 | }, 239 | { 240 | "aliasColors": {}, 241 | "bars": false, 242 | "dashLength": 10, 243 | "dashes": false, 244 | "datasource": null, 245 | "decimals": null, 246 | "fieldConfig": { 247 | "defaults": { 248 | "color": {}, 249 | "custom": {}, 250 | "thresholds": { 251 | "mode": "absolute", 252 | "steps": [] 253 | }, 254 | "unit": "percent" 255 | }, 256 | "overrides": [] 257 | }, 258 | "fill": 1, 259 | "fillGradient": 0, 260 | "gridPos": { 261 | "h": 8, 262 | "w": 24, 263 | "x": 0, 264 | "y": 5 265 | }, 266 | "hiddenSeries": false, 267 | "id": 16, 268 | "legend": { 269 | "alignAsTable": true, 270 | "avg": false, 271 | "current": true, 272 | "hideEmpty": false, 273 | "hideZero": false, 274 | "max": true, 275 | "min": false, 276 | "rightSide": true, 277 | "show": true, 278 | "sideWidth": 
380, 279 | "sort": "current", 280 | "sortDesc": true, 281 | "total": false, 282 | "values": true 283 | }, 284 | "lines": true, 285 | "linewidth": 1, 286 | "nullPointMode": "null", 287 | "options": { 288 | "alertThreshold": true 289 | }, 290 | "percentage": false, 291 | "pluginVersion": "7.4.2", 292 | "pointradius": 0.5, 293 | "points": true, 294 | "renderer": "flot", 295 | "seriesOverrides": [], 296 | "spaceLength": 10, 297 | "stack": false, 298 | "steppedLine": false, 299 | "targets": [ 300 | { 301 | "aggregator": "avg", 302 | "alias": "@image", 303 | "dimensions": [ 304 | { 305 | "key": "image", 306 | "value": "$all" 307 | } 308 | ], 309 | "error": "", 310 | "group": true, 311 | "metric": "container.cpu.utilization_perc", 312 | "period": "$average", 313 | "refId": "A" 314 | } 315 | ], 316 | "thresholds": [], 317 | "timeFrom": null, 318 | "timeRegions": [], 319 | "timeShift": null, 320 | "title": "Container utilization CPU (%)", 321 | "tooltip": { 322 | "shared": true, 323 | "sort": 1, 324 | "value_type": "individual" 325 | }, 326 | "type": "graph", 327 | "xaxis": { 328 | "buckets": null, 329 | "mode": "time", 330 | "name": null, 331 | "show": true, 332 | "values": [] 333 | }, 334 | "yaxes": [ 335 | { 336 | "$$hashKey": "object:875", 337 | "format": "percent", 338 | "label": null, 339 | "logBase": 1, 340 | "max": null, 341 | "min": null, 342 | "show": true 343 | }, 344 | { 345 | "$$hashKey": "object:876", 346 | "format": "short", 347 | "label": null, 348 | "logBase": 1, 349 | "max": null, 350 | "min": null, 351 | "show": true 352 | } 353 | ], 354 | "yaxis": { 355 | "align": false, 356 | "alignLevel": null 357 | } 358 | }, 359 | { 360 | "aliasColors": {}, 361 | "bars": false, 362 | "dashLength": 10, 363 | "dashes": false, 364 | "datasource": null, 365 | "fieldConfig": { 366 | "defaults": { 367 | "custom": {}, 368 | "unit": "percent" 369 | }, 370 | "overrides": [] 371 | }, 372 | "fill": 1, 373 | "fillGradient": 0, 374 | "gridPos": { 375 | "h": 8, 376 | "w": 24, 377 
| "x": 0, 378 | "y": 13 379 | }, 380 | "hiddenSeries": false, 381 | "id": 18, 382 | "legend": { 383 | "alignAsTable": true, 384 | "avg": false, 385 | "current": true, 386 | "max": true, 387 | "min": false, 388 | "rightSide": true, 389 | "show": true, 390 | "sideWidth": 380, 391 | "sort": "current", 392 | "sortDesc": true, 393 | "total": false, 394 | "values": true 395 | }, 396 | "lines": true, 397 | "linewidth": 1, 398 | "nullPointMode": "null", 399 | "options": { 400 | "alertThreshold": true 401 | }, 402 | "percentage": false, 403 | "pluginVersion": "7.4.2", 404 | "pointradius": 0.5, 405 | "points": true, 406 | "renderer": "flot", 407 | "seriesOverrides": [], 408 | "spaceLength": 10, 409 | "stack": false, 410 | "steppedLine": false, 411 | "targets": [ 412 | { 413 | "aggregator": "avg", 414 | "alias": "@image", 415 | "dimensions": [ 416 | { 417 | "key": "image", 418 | "value": "$all" 419 | } 420 | ], 421 | "error": "", 422 | "group": true, 423 | "metric": "container.mem.used_perc", 424 | "period": "$average", 425 | "refId": "A" 426 | } 427 | ], 428 | "thresholds": [], 429 | "timeFrom": null, 430 | "timeRegions": [], 431 | "timeShift": null, 432 | "title": "Container utilization MEM (%)", 433 | "tooltip": { 434 | "shared": true, 435 | "sort": 0, 436 | "value_type": "individual" 437 | }, 438 | "type": "graph", 439 | "xaxis": { 440 | "buckets": null, 441 | "mode": "time", 442 | "name": null, 443 | "show": true, 444 | "values": [] 445 | }, 446 | "yaxes": [ 447 | { 448 | "$$hashKey": "object:1309", 449 | "format": "percent", 450 | "label": null, 451 | "logBase": 1, 452 | "max": null, 453 | "min": null, 454 | "show": true 455 | }, 456 | { 457 | "$$hashKey": "object:1310", 458 | "format": "short", 459 | "label": null, 460 | "logBase": 1, 461 | "max": null, 462 | "min": null, 463 | "show": true 464 | } 465 | ], 466 | "yaxis": { 467 | "align": false, 468 | "alignLevel": null 469 | } 470 | }, 471 | { 472 | "aliasColors": {}, 473 | "bars": false, 474 | "dashLength": 10, 475 | 
"dashes": false, 476 | "datasource": null, 477 | "fieldConfig": { 478 | "defaults": { 479 | "custom": {}, 480 | "unit": "binBps" 481 | }, 482 | "overrides": [] 483 | }, 484 | "fill": 1, 485 | "fillGradient": 0, 486 | "gridPos": { 487 | "h": 8, 488 | "w": 24, 489 | "x": 0, 490 | "y": 21 491 | }, 492 | "hiddenSeries": false, 493 | "id": 22, 494 | "legend": { 495 | "alignAsTable": true, 496 | "avg": false, 497 | "current": true, 498 | "max": true, 499 | "min": false, 500 | "rightSide": true, 501 | "show": true, 502 | "sideWidth": 380, 503 | "sort": "current", 504 | "sortDesc": true, 505 | "total": false, 506 | "values": true 507 | }, 508 | "lines": true, 509 | "linewidth": 1, 510 | "nullPointMode": "null", 511 | "options": { 512 | "alertThreshold": true 513 | }, 514 | "percentage": false, 515 | "pluginVersion": "7.4.2", 516 | "pointradius": 0.5, 517 | "points": true, 518 | "renderer": "flot", 519 | "seriesOverrides": [], 520 | "spaceLength": 10, 521 | "stack": false, 522 | "steppedLine": false, 523 | "targets": [ 524 | { 525 | "aggregator": "avg", 526 | "alias": "@image", 527 | "dimensions": [ 528 | { 529 | "key": "image", 530 | "value": "$all" 531 | }, 532 | { 533 | "key": "hostname", 534 | "value": "docker-host" 535 | } 536 | ], 537 | "error": "", 538 | "group": true, 539 | "metric": "container.io.write_bytes_sec", 540 | "period": "$average", 541 | "refId": "A" 542 | } 543 | ], 544 | "thresholds": [], 545 | "timeFrom": null, 546 | "timeRegions": [], 547 | "timeShift": null, 548 | "title": "Container io writes", 549 | "tooltip": { 550 | "shared": true, 551 | "sort": 0, 552 | "value_type": "individual" 553 | }, 554 | "type": "graph", 555 | "xaxis": { 556 | "buckets": null, 557 | "mode": "time", 558 | "name": null, 559 | "show": true, 560 | "values": [] 561 | }, 562 | "yaxes": [ 563 | { 564 | "$$hashKey": "object:3709", 565 | "format": "binBps", 566 | "label": null, 567 | "logBase": 1, 568 | "max": null, 569 | "min": null, 570 | "show": true 571 | }, 572 | { 573 | 
"$$hashKey": "object:3710", 574 | "format": "short", 575 | "label": null, 576 | "logBase": 1, 577 | "max": null, 578 | "min": null, 579 | "show": true 580 | } 581 | ], 582 | "yaxis": { 583 | "align": false, 584 | "alignLevel": null 585 | } 586 | }, 587 | { 588 | "aliasColors": {}, 589 | "bars": false, 590 | "dashLength": 10, 591 | "dashes": false, 592 | "datasource": null, 593 | "fieldConfig": { 594 | "defaults": { 595 | "custom": {}, 596 | "unit": "binBps" 597 | }, 598 | "overrides": [] 599 | }, 600 | "fill": 1, 601 | "fillGradient": 0, 602 | "gridPos": { 603 | "h": 8, 604 | "w": 24, 605 | "x": 0, 606 | "y": 29 607 | }, 608 | "hiddenSeries": false, 609 | "id": 24, 610 | "legend": { 611 | "alignAsTable": true, 612 | "avg": false, 613 | "current": true, 614 | "max": true, 615 | "min": false, 616 | "rightSide": true, 617 | "show": true, 618 | "sideWidth": 380, 619 | "sort": "current", 620 | "sortDesc": true, 621 | "total": false, 622 | "values": true 623 | }, 624 | "lines": true, 625 | "linewidth": 1, 626 | "nullPointMode": "null", 627 | "options": { 628 | "alertThreshold": true 629 | }, 630 | "percentage": false, 631 | "pluginVersion": "7.4.2", 632 | "pointradius": 0.5, 633 | "points": true, 634 | "renderer": "flot", 635 | "seriesOverrides": [], 636 | "spaceLength": 10, 637 | "stack": false, 638 | "steppedLine": false, 639 | "targets": [ 640 | { 641 | "aggregator": "avg", 642 | "alias": "@image", 643 | "dimensions": [ 644 | { 645 | "key": "image", 646 | "value": "$all" 647 | }, 648 | { 649 | "key": "hostname", 650 | "value": "docker-host" 651 | } 652 | ], 653 | "error": "", 654 | "group": true, 655 | "metric": "container.io.read_bytes_sec", 656 | "period": "$average", 657 | "refId": "A" 658 | } 659 | ], 660 | "thresholds": [], 661 | "timeFrom": null, 662 | "timeRegions": [], 663 | "timeShift": null, 664 | "title": "Container io reads", 665 | "tooltip": { 666 | "shared": true, 667 | "sort": 0, 668 | "value_type": "individual" 669 | }, 670 | "type": "graph", 671 | 
"xaxis": { 672 | "buckets": null, 673 | "mode": "time", 674 | "name": null, 675 | "show": true, 676 | "values": [] 677 | }, 678 | "yaxes": [ 679 | { 680 | "$$hashKey": "object:3026", 681 | "format": "binBps", 682 | "label": null, 683 | "logBase": 1, 684 | "max": null, 685 | "min": null, 686 | "show": true 687 | }, 688 | { 689 | "$$hashKey": "object:3027", 690 | "format": "short", 691 | "label": null, 692 | "logBase": 1, 693 | "max": null, 694 | "min": null, 695 | "show": true 696 | } 697 | ], 698 | "yaxis": { 699 | "align": false, 700 | "alignLevel": null 701 | } 702 | }, 703 | { 704 | "collapsed": false, 705 | "datasource": null, 706 | "gridPos": { 707 | "h": 1, 708 | "w": 24, 709 | "x": 0, 710 | "y": 37 711 | }, 712 | "id": 10, 713 | "panels": [], 714 | "repeat": null, 715 | "title": "CMM Host", 716 | "type": "row" 717 | }, 718 | { 719 | "aliasColors": {}, 720 | "bars": false, 721 | "dashLength": 10, 722 | "dashes": false, 723 | "datasource": null, 724 | "editable": true, 725 | "error": false, 726 | "fieldConfig": { 727 | "defaults": { 728 | "custom": {} 729 | }, 730 | "overrides": [] 731 | }, 732 | "fill": 1, 733 | "fillGradient": 0, 734 | "gridPos": { 735 | "h": 8, 736 | "w": 12, 737 | "x": 0, 738 | "y": 38 739 | }, 740 | "hiddenSeries": false, 741 | "id": 1, 742 | "legend": { 743 | "alignAsTable": false, 744 | "avg": false, 745 | "current": true, 746 | "max": true, 747 | "min": false, 748 | "rightSide": false, 749 | "show": true, 750 | "total": false, 751 | "values": true 752 | }, 753 | "lines": true, 754 | "linewidth": 1, 755 | "links": [], 756 | "nullPointMode": "connected", 757 | "options": { 758 | "alertThreshold": true 759 | }, 760 | "percentage": false, 761 | "pluginVersion": "7.4.2", 762 | "pointradius": 0.5, 763 | "points": true, 764 | "renderer": "flot", 765 | "seriesOverrides": [], 766 | "spaceLength": 10, 767 | "stack": false, 768 | "steppedLine": false, 769 | "targets": [ 770 | { 771 | "aggregator": "avg", 772 | "alias": "CMM-Server", 773 | 
"dimensions": [ 774 | { 775 | "key": "hostname", 776 | "value": "docker-host" 777 | } 778 | ], 779 | "error": "", 780 | "group": false, 781 | "metric": "cpu.total_time_sec", 782 | "period": "$average", 783 | "refId": "A" 784 | } 785 | ], 786 | "thresholds": [], 787 | "timeFrom": null, 788 | "timeRegions": [], 789 | "timeShift": null, 790 | "title": "CPU Usage", 791 | "tooltip": { 792 | "msResolution": false, 793 | "shared": false, 794 | "sort": 0, 795 | "value_type": "individual" 796 | }, 797 | "type": "graph", 798 | "xaxis": { 799 | "buckets": null, 800 | "mode": "time", 801 | "name": null, 802 | "show": true, 803 | "values": [] 804 | }, 805 | "yaxes": [ 806 | { 807 | "$$hashKey": "object:3796", 808 | "format": "s", 809 | "label": null, 810 | "logBase": 1, 811 | "max": null, 812 | "min": null, 813 | "show": true 814 | }, 815 | { 816 | "$$hashKey": "object:3797", 817 | "format": "short", 818 | "label": null, 819 | "logBase": 1, 820 | "max": null, 821 | "min": null, 822 | "show": false 823 | } 824 | ], 825 | "yaxis": { 826 | "align": false, 827 | "alignLevel": null 828 | } 829 | }, 830 | { 831 | "aliasColors": {}, 832 | "bars": false, 833 | "dashLength": 10, 834 | "dashes": false, 835 | "datasource": null, 836 | "editable": true, 837 | "error": false, 838 | "fieldConfig": { 839 | "defaults": { 840 | "custom": {}, 841 | "unit": "bytes" 842 | }, 843 | "overrides": [] 844 | }, 845 | "fill": 1, 846 | "fillGradient": 0, 847 | "gridPos": { 848 | "h": 8, 849 | "w": 12, 850 | "x": 12, 851 | "y": 38 852 | }, 853 | "hiddenSeries": false, 854 | "id": 2, 855 | "legend": { 856 | "alignAsTable": false, 857 | "avg": false, 858 | "current": true, 859 | "max": true, 860 | "min": false, 861 | "rightSide": false, 862 | "show": true, 863 | "total": false, 864 | "values": true 865 | }, 866 | "lines": true, 867 | "linewidth": 1, 868 | "links": [], 869 | "nullPointMode": "connected", 870 | "options": { 871 | "alertThreshold": true 872 | }, 873 | "percentage": false, 874 | "pluginVersion": 
"7.4.2", 875 | "pointradius": 0.5, 876 | "points": true, 877 | "renderer": "flot", 878 | "seriesOverrides": [], 879 | "spaceLength": 10, 880 | "stack": false, 881 | "steppedLine": false, 882 | "targets": [ 883 | { 884 | "aggregator": "avg", 885 | "alias": "CMM-Server", 886 | "dimensions": [ 887 | { 888 | "key": "hostname", 889 | "value": "docker-host" 890 | } 891 | ], 892 | "error": "", 893 | "group": false, 894 | "metric": "mem.used_bytes", 895 | "period": "$average", 896 | "refId": "A" 897 | } 898 | ], 899 | "thresholds": [], 900 | "timeFrom": null, 901 | "timeRegions": [], 902 | "timeShift": null, 903 | "title": "Memory Used", 904 | "tooltip": { 905 | "msResolution": false, 906 | "shared": false, 907 | "sort": 0, 908 | "value_type": "individual" 909 | }, 910 | "type": "graph", 911 | "xaxis": { 912 | "buckets": null, 913 | "mode": "time", 914 | "name": null, 915 | "show": true, 916 | "values": [] 917 | }, 918 | "yaxes": [ 919 | { 920 | "$$hashKey": "object:3853", 921 | "format": "bytes", 922 | "label": null, 923 | "logBase": 1, 924 | "max": null, 925 | "min": null, 926 | "show": true 927 | }, 928 | { 929 | "$$hashKey": "object:3854", 930 | "format": "short", 931 | "label": null, 932 | "logBase": 1, 933 | "max": null, 934 | "min": null, 935 | "show": true 936 | } 937 | ], 938 | "yaxis": { 939 | "align": false, 940 | "alignLevel": null 941 | } 942 | }, 943 | { 944 | "aliasColors": {}, 945 | "bars": false, 946 | "dashLength": 10, 947 | "dashes": false, 948 | "datasource": null, 949 | "editable": true, 950 | "error": false, 951 | "fieldConfig": { 952 | "defaults": { 953 | "custom": {}, 954 | "unit": "percent" 955 | }, 956 | "overrides": [] 957 | }, 958 | "fill": 1, 959 | "fillGradient": 0, 960 | "gridPos": { 961 | "h": 8, 962 | "w": 12, 963 | "x": 0, 964 | "y": 46 965 | }, 966 | "hiddenSeries": false, 967 | "id": 8, 968 | "legend": { 969 | "alignAsTable": false, 970 | "avg": false, 971 | "current": true, 972 | "max": true, 973 | "min": false, 974 | "rightSide": 
false, 975 | "show": true, 976 | "sideWidth": null, 977 | "sort": "current", 978 | "sortDesc": true, 979 | "total": false, 980 | "values": true 981 | }, 982 | "lines": true, 983 | "linewidth": 1, 984 | "links": [], 985 | "nullPointMode": "connected", 986 | "options": { 987 | "alertThreshold": true 988 | }, 989 | "percentage": false, 990 | "pluginVersion": "7.4.2", 991 | "pointradius": 0.5, 992 | "points": true, 993 | "renderer": "flot", 994 | "seriesOverrides": [], 995 | "spaceLength": 10, 996 | "stack": false, 997 | "steppedLine": false, 998 | "targets": [ 999 | { 1000 | "aggregator": "avg", 1001 | "alias": "CMM-Server @device", 1002 | "dimensions": [ 1003 | { 1004 | "key": "hostname", 1005 | "value": "docker-host" 1006 | }, 1007 | { 1008 | "key": "device", 1009 | "value": "overlay" 1010 | } 1011 | ], 1012 | "error": "", 1013 | "group": false, 1014 | "metric": "fs.usage_perc", 1015 | "period": "$average", 1016 | "refId": "A" 1017 | } 1018 | ], 1019 | "thresholds": [], 1020 | "timeFrom": null, 1021 | "timeRegions": [], 1022 | "timeShift": null, 1023 | "title": "Overlay Disk Space Used", 1024 | "tooltip": { 1025 | "msResolution": false, 1026 | "shared": true, 1027 | "sort": 0, 1028 | "value_type": "individual" 1029 | }, 1030 | "type": "graph", 1031 | "xaxis": { 1032 | "buckets": null, 1033 | "mode": "time", 1034 | "name": null, 1035 | "show": true, 1036 | "values": [] 1037 | }, 1038 | "yaxes": [ 1039 | { 1040 | "$$hashKey": "object:3539", 1041 | "format": "percent", 1042 | "label": null, 1043 | "logBase": 1, 1044 | "max": null, 1045 | "min": null, 1046 | "show": true 1047 | }, 1048 | { 1049 | "$$hashKey": "object:3540", 1050 | "format": "short", 1051 | "label": null, 1052 | "logBase": 1, 1053 | "max": null, 1054 | "min": null, 1055 | "show": true 1056 | } 1057 | ], 1058 | "yaxis": { 1059 | "align": false, 1060 | "alignLevel": null 1061 | } 1062 | }, 1063 | { 1064 | "aliasColors": {}, 1065 | "bars": false, 1066 | "dashLength": 10, 1067 | "dashes": false, 1068 | 
"datasource": null, 1069 | "description": "", 1070 | "editable": true, 1071 | "error": false, 1072 | "fieldConfig": { 1073 | "defaults": { 1074 | "custom": {} 1075 | }, 1076 | "overrides": [] 1077 | }, 1078 | "fill": 1, 1079 | "fillGradient": 0, 1080 | "gridPos": { 1081 | "h": 8, 1082 | "w": 12, 1083 | "x": 12, 1084 | "y": 46 1085 | }, 1086 | "hiddenSeries": false, 1087 | "id": 4, 1088 | "legend": { 1089 | "alignAsTable": false, 1090 | "avg": false, 1091 | "current": true, 1092 | "max": true, 1093 | "min": false, 1094 | "rightSide": false, 1095 | "show": true, 1096 | "total": false, 1097 | "values": true 1098 | }, 1099 | "lines": true, 1100 | "linewidth": 1, 1101 | "links": [], 1102 | "nullPointMode": "connected", 1103 | "options": { 1104 | "alertThreshold": true 1105 | }, 1106 | "percentage": false, 1107 | "pluginVersion": "7.4.2", 1108 | "pointradius": 0.5, 1109 | "points": true, 1110 | "renderer": "flot", 1111 | "seriesOverrides": [], 1112 | "spaceLength": 10, 1113 | "stack": false, 1114 | "steppedLine": false, 1115 | "targets": [ 1116 | { 1117 | "aggregator": "avg", 1118 | "alias": "CMM-Server out", 1119 | "dimensions": [ 1120 | { 1121 | "key": "hostname", 1122 | "value": "docker-host" 1123 | } 1124 | ], 1125 | "error": "", 1126 | "group": false, 1127 | "metric": "net.out_bytes_sec", 1128 | "period": "$average", 1129 | "refId": "A" 1130 | }, 1131 | { 1132 | "aggregator": "avg", 1133 | "alias": "CMM-Server in", 1134 | "dimensions": [ 1135 | { 1136 | "key": "hostname", 1137 | "value": "docker-host" 1138 | } 1139 | ], 1140 | "error": "", 1141 | "hide": false, 1142 | "metric": "net.in_bytes_sec", 1143 | "period": "$average", 1144 | "refId": "B" 1145 | } 1146 | ], 1147 | "thresholds": [], 1148 | "timeFrom": null, 1149 | "timeRegions": [], 1150 | "timeShift": null, 1151 | "title": "Network Stats", 1152 | "tooltip": { 1153 | "msResolution": false, 1154 | "shared": true, 1155 | "sort": 0, 1156 | "value_type": "individual" 1157 | }, 1158 | "type": "graph", 1159 | 
"xaxis": { 1160 | "buckets": null, 1161 | "mode": "time", 1162 | "name": null, 1163 | "show": true, 1164 | "values": [] 1165 | }, 1166 | "yaxes": [ 1167 | { 1168 | "$$hashKey": "object:3199", 1169 | "format": "Bps", 1170 | "label": null, 1171 | "logBase": 1, 1172 | "max": null, 1173 | "min": null, 1174 | "show": true 1175 | }, 1176 | { 1177 | "$$hashKey": "object:3200", 1178 | "format": "short", 1179 | "label": null, 1180 | "logBase": 1, 1181 | "max": null, 1182 | "min": null, 1183 | "show": false 1184 | } 1185 | ], 1186 | "yaxis": { 1187 | "align": false, 1188 | "alignLevel": null 1189 | } 1190 | } 1191 | ], 1192 | "refresh": "1h", 1193 | "schemaVersion": 27, 1194 | "style": "dark", 1195 | "tags": [], 1196 | "templating": { 1197 | "list": [ 1198 | { 1199 | "allValue": null, 1200 | "current": { 1201 | "selected": true, 1202 | "text": "600", 1203 | "value": "600" 1204 | }, 1205 | "description": "Average time in seconds. Choose a big average for a big time period in order to have a fast response.", 1206 | "error": null, 1207 | "hide": 0, 1208 | "includeAll": false, 1209 | "label": "Average [seconds]", 1210 | "multi": false, 1211 | "name": "average", 1212 | "options": [ 1213 | { 1214 | "selected": false, 1215 | "text": "300", 1216 | "value": "300" 1217 | }, 1218 | { 1219 | "selected": true, 1220 | "text": "600", 1221 | "value": "600" 1222 | }, 1223 | { 1224 | "selected": false, 1225 | "text": "900", 1226 | "value": "900" 1227 | }, 1228 | { 1229 | "selected": false, 1230 | "text": "1200", 1231 | "value": "1200" 1232 | }, 1233 | { 1234 | "selected": false, 1235 | "text": "1800", 1236 | "value": "1800" 1237 | }, 1238 | { 1239 | "selected": false, 1240 | "text": "3600", 1241 | "value": "3600" 1242 | }, 1243 | { 1244 | "selected": false, 1245 | "text": "43200", 1246 | "value": "43200" 1247 | }, 1248 | { 1249 | "selected": false, 1250 | "text": "86400", 1251 | "value": "86400" 1252 | }, 1253 | { 1254 | "selected": false, 1255 | "text": "604800", 1256 | "value": "604800" 
1257 | } 1258 | ], 1259 | "query": "300, 600, 900, 1200, 1800, 3600, 43200, 86400, 604800", 1260 | "queryValue": "", 1261 | "skipUrlSync": false, 1262 | "type": "custom" 1263 | } 1264 | ] 1265 | }, 1266 | "time": { 1267 | "from": "now-24h", 1268 | "to": "now" 1269 | }, 1270 | "timepicker": { 1271 | "refresh_intervals": [ 1272 | "5m", 1273 | "15m", 1274 | "30m", 1275 | "1h", 1276 | "2h", 1277 | "1d" 1278 | ], 1279 | "time_options": [ 1280 | "5m", 1281 | "15m", 1282 | "1h", 1283 | "6h", 1284 | "12h", 1285 | "24h", 1286 | "2d", 1287 | "7d", 1288 | "30d" 1289 | ] 1290 | }, 1291 | "timezone": "browser", 1292 | "title": "CMM Stats", 1293 | "uid": "monasca", 1294 | "version": 1 1295 | } 1296 | -------------------------------------------------------------------------------- /grafana-init/dashboards.d/09-openstack_vm.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "type": "dashboard" 12 | } 13 | ] 14 | }, 15 | "editable": true, 16 | "gnetId": null, 17 | "graphTooltip": 0, 18 | "id": 3, 19 | "links": [], 20 | "panels": [ 21 | { 22 | "collapsed": false, 23 | "datasource": null, 24 | "gridPos": { 25 | "h": 1, 26 | "w": 24, 27 | "x": 0, 28 | "y": 0 29 | }, 30 | "id": 18, 31 | "panels": [], 32 | "repeat": null, 33 | "title": "CPU and Memory", 34 | "type": "row" 35 | }, 36 | { 37 | "aliasColors": {}, 38 | "bars": false, 39 | "dashLength": 10, 40 | "dashes": false, 41 | "datasource": "monasca", 42 | "editable": true, 43 | "error": false, 44 | "fieldConfig": { 45 | "defaults": { 46 | "custom": {} 47 | }, 48 | "overrides": [] 49 | }, 50 | "fill": 1, 51 | "fillGradient": 0, 52 | "grid": {}, 53 | "gridPos": { 54 | "h": 8, 55 | "w": 6, 56 | "x": 0, 57 | "y": 1 58 | }, 59 | "hiddenSeries": false, 60 | "id": 1, 61 | "legend": { 62 | 
"avg": false, 63 | "current": false, 64 | "max": false, 65 | "min": false, 66 | "show": true, 67 | "total": false, 68 | "values": false 69 | }, 70 | "lines": true, 71 | "linewidth": 2, 72 | "links": [], 73 | "nullPointMode": "connected", 74 | "options": { 75 | "alertThreshold": true 76 | }, 77 | "percentage": false, 78 | "pluginVersion": "7.4.2", 79 | "pointradius": 0.5, 80 | "points": true, 81 | "renderer": "flot", 82 | "seriesOverrides": [], 83 | "spaceLength": 10, 84 | "stack": false, 85 | "steppedLine": false, 86 | "targets": [ 87 | { 88 | "aggregator": "avg", 89 | "alias": "@hostname", 90 | "dimensions": [ 91 | { 92 | "key": "hostname", 93 | "value": "$all" 94 | }, 95 | { 96 | "key": "component", 97 | "value": "vm" 98 | } 99 | ], 100 | "error": "", 101 | "errors": {}, 102 | "group": true, 103 | "metric": "cpu.utilization_perc", 104 | "period": "300", 105 | "refId": "A" 106 | } 107 | ], 108 | "thresholds": [], 109 | "timeFrom": null, 110 | "timeRegions": [], 111 | "timeShift": null, 112 | "title": "CPU Utilization", 113 | "tooltip": { 114 | "msResolution": false, 115 | "ordering": "alphabetical", 116 | "shared": true, 117 | "sort": 0, 118 | "value_type": "cumulative" 119 | }, 120 | "type": "graph", 121 | "xaxis": { 122 | "buckets": null, 123 | "mode": "time", 124 | "name": null, 125 | "show": true, 126 | "values": [] 127 | }, 128 | "yaxes": [ 129 | { 130 | "$$hashKey": "object:587", 131 | "format": "percent", 132 | "logBase": 1, 133 | "max": null, 134 | "min": 0, 135 | "show": true 136 | }, 137 | { 138 | "$$hashKey": "object:588", 139 | "format": "short", 140 | "logBase": 1, 141 | "max": null, 142 | "min": null, 143 | "show": true 144 | } 145 | ], 146 | "yaxis": { 147 | "align": false, 148 | "alignLevel": null 149 | } 150 | }, 151 | { 152 | "aliasColors": {}, 153 | "bars": false, 154 | "dashLength": 10, 155 | "dashes": false, 156 | "datasource": "monasca", 157 | "editable": true, 158 | "error": false, 159 | "fieldConfig": { 160 | "defaults": { 161 | "custom": 
{} 162 | }, 163 | "overrides": [] 164 | }, 165 | "fill": 1, 166 | "fillGradient": 0, 167 | "grid": {}, 168 | "gridPos": { 169 | "h": 8, 170 | "w": 6, 171 | "x": 6, 172 | "y": 1 173 | }, 174 | "hiddenSeries": false, 175 | "id": 2, 176 | "legend": { 177 | "avg": false, 178 | "current": false, 179 | "max": false, 180 | "min": false, 181 | "show": true, 182 | "total": false, 183 | "values": false 184 | }, 185 | "lines": true, 186 | "linewidth": 2, 187 | "links": [], 188 | "nullPointMode": "connected", 189 | "options": { 190 | "alertThreshold": true 191 | }, 192 | "percentage": false, 193 | "pluginVersion": "7.4.2", 194 | "pointradius": 0.5, 195 | "points": true, 196 | "renderer": "flot", 197 | "seriesOverrides": [], 198 | "spaceLength": 10, 199 | "stack": false, 200 | "steppedLine": false, 201 | "targets": [ 202 | { 203 | "aggregator": "avg", 204 | "alias": "@hostname", 205 | "dimensions": [ 206 | { 207 | "key": "hostname", 208 | "value": "$all" 209 | }, 210 | { 211 | "key": "component", 212 | "value": "vm" 213 | } 214 | ], 215 | "error": "", 216 | "errors": {}, 217 | "group": true, 218 | "hide": false, 219 | "metric": "mem.used_gb", 220 | "period": "300", 221 | "refId": "A" 222 | } 223 | ], 224 | "thresholds": [], 225 | "timeFrom": null, 226 | "timeRegions": [], 227 | "timeShift": null, 228 | "title": "Used Memory", 229 | "tooltip": { 230 | "msResolution": false, 231 | "ordering": "alphabetical", 232 | "shared": true, 233 | "sort": 0, 234 | "value_type": "cumulative" 235 | }, 236 | "type": "graph", 237 | "xaxis": { 238 | "buckets": null, 239 | "mode": "time", 240 | "name": null, 241 | "show": true, 242 | "values": [] 243 | }, 244 | "yaxes": [ 245 | { 246 | "$$hashKey": "object:332", 247 | "format": "decgbytes", 248 | "logBase": 1, 249 | "max": null, 250 | "min": null, 251 | "show": true 252 | }, 253 | { 254 | "$$hashKey": "object:333", 255 | "format": "short", 256 | "logBase": 1, 257 | "max": null, 258 | "min": null, 259 | "show": true 260 | } 261 | ], 262 | "yaxis": 
{ 263 | "align": false, 264 | "alignLevel": null 265 | } 266 | }, 267 | { 268 | "aliasColors": {}, 269 | "bars": false, 270 | "dashLength": 10, 271 | "dashes": false, 272 | "datasource": "monasca", 273 | "fieldConfig": { 274 | "defaults": { 275 | "custom": {} 276 | }, 277 | "overrides": [] 278 | }, 279 | "fill": 1, 280 | "fillGradient": 0, 281 | "gridPos": { 282 | "h": 8, 283 | "w": 6, 284 | "x": 12, 285 | "y": 1 286 | }, 287 | "hiddenSeries": false, 288 | "id": 17, 289 | "legend": { 290 | "avg": false, 291 | "current": false, 292 | "max": false, 293 | "min": false, 294 | "show": true, 295 | "total": false, 296 | "values": false 297 | }, 298 | "lines": true, 299 | "linewidth": 1, 300 | "links": [], 301 | "nullPointMode": "null", 302 | "options": { 303 | "alertThreshold": true 304 | }, 305 | "percentage": false, 306 | "pluginVersion": "7.4.2", 307 | "pointradius": 0.5, 308 | "points": true, 309 | "renderer": "flot", 310 | "seriesOverrides": [], 311 | "spaceLength": 10, 312 | "stack": false, 313 | "steppedLine": false, 314 | "targets": [ 315 | { 316 | "aggregator": "avg", 317 | "alias": "@hostname", 318 | "dimensions": [ 319 | { 320 | "key": "hostname", 321 | "value": "$all" 322 | }, 323 | { 324 | "key": "component", 325 | "value": "vm" 326 | } 327 | ], 328 | "error": "", 329 | "group": true, 330 | "metric": "mem.free_gb", 331 | "period": "300", 332 | "refId": "A" 333 | } 334 | ], 335 | "thresholds": [], 336 | "timeFrom": null, 337 | "timeRegions": [], 338 | "timeShift": null, 339 | "title": "Free memory", 340 | "tooltip": { 341 | "shared": true, 342 | "sort": 0, 343 | "value_type": "individual" 344 | }, 345 | "type": "graph", 346 | "xaxis": { 347 | "buckets": null, 348 | "mode": "time", 349 | "name": null, 350 | "show": true, 351 | "values": [] 352 | }, 353 | "yaxes": [ 354 | { 355 | "$$hashKey": "object:295", 356 | "format": "decgbytes", 357 | "label": null, 358 | "logBase": 1, 359 | "max": null, 360 | "min": null, 361 | "show": true 362 | }, 363 | { 364 | 
"$$hashKey": "object:296", 365 | "format": "short", 366 | "label": null, 367 | "logBase": 1, 368 | "max": null, 369 | "min": null, 370 | "show": true 371 | } 372 | ], 373 | "yaxis": { 374 | "align": false, 375 | "alignLevel": null 376 | } 377 | }, 378 | { 379 | "aliasColors": {}, 380 | "bars": true, 381 | "dashLength": 10, 382 | "dashes": false, 383 | "datasource": "monasca", 384 | "fieldConfig": { 385 | "defaults": { 386 | "custom": {} 387 | }, 388 | "overrides": [] 389 | }, 390 | "fill": 1, 391 | "fillGradient": 0, 392 | "gridPos": { 393 | "h": 8, 394 | "w": 6, 395 | "x": 18, 396 | "y": 1 397 | }, 398 | "hiddenSeries": false, 399 | "id": 16, 400 | "legend": { 401 | "avg": false, 402 | "current": false, 403 | "max": false, 404 | "min": false, 405 | "show": false, 406 | "total": false, 407 | "values": false 408 | }, 409 | "lines": false, 410 | "linewidth": 1, 411 | "links": [], 412 | "nullPointMode": "null", 413 | "options": { 414 | "alertThreshold": true 415 | }, 416 | "percentage": false, 417 | "pluginVersion": "7.4.2", 418 | "pointradius": 5, 419 | "points": false, 420 | "renderer": "flot", 421 | "seriesOverrides": [], 422 | "spaceLength": 10, 423 | "stack": false, 424 | "steppedLine": false, 425 | "targets": [ 426 | { 427 | "aggregator": "avg", 428 | "alias": "@hostname", 429 | "dimensions": [ 430 | { 431 | "key": "hostname", 432 | "value": "$all" 433 | }, 434 | { 435 | "key": "component", 436 | "value": "vm" 437 | } 438 | ], 439 | "error": "", 440 | "group": true, 441 | "metric": "mem.total_gb", 442 | "period": "300", 443 | "refId": "A" 444 | } 445 | ], 446 | "thresholds": [], 447 | "timeFrom": null, 448 | "timeRegions": [], 449 | "timeShift": null, 450 | "title": "Total memory", 451 | "tooltip": { 452 | "shared": false, 453 | "sort": 0, 454 | "value_type": "individual" 455 | }, 456 | "type": "graph", 457 | "xaxis": { 458 | "buckets": null, 459 | "mode": "series", 460 | "name": null, 461 | "show": true, 462 | "values": [ 463 | "current" 464 | ] 465 | }, 466 | 
"yaxes": [ 467 | { 468 | "$$hashKey": "object:439", 469 | "format": "decgbytes", 470 | "label": null, 471 | "logBase": 1, 472 | "max": null, 473 | "min": null, 474 | "show": true 475 | }, 476 | { 477 | "$$hashKey": "object:440", 478 | "format": "short", 479 | "label": null, 480 | "logBase": 1, 481 | "max": null, 482 | "min": null, 483 | "show": true 484 | } 485 | ], 486 | "yaxis": { 487 | "align": false, 488 | "alignLevel": null 489 | } 490 | }, 491 | { 492 | "collapsed": false, 493 | "datasource": null, 494 | "gridPos": { 495 | "h": 1, 496 | "w": 24, 497 | "x": 0, 498 | "y": 9 499 | }, 500 | "id": 19, 501 | "panels": [], 502 | "repeat": null, 503 | "title": "Network", 504 | "type": "row" 505 | }, 506 | { 507 | "aliasColors": {}, 508 | "bars": false, 509 | "dashLength": 10, 510 | "dashes": false, 511 | "datasource": "monasca", 512 | "description": "", 513 | "fieldConfig": { 514 | "defaults": { 515 | "custom": {} 516 | }, 517 | "overrides": [] 518 | }, 519 | "fill": 1, 520 | "fillGradient": 0, 521 | "gridPos": { 522 | "h": 7, 523 | "w": 6, 524 | "x": 0, 525 | "y": 10 526 | }, 527 | "hiddenSeries": false, 528 | "id": 8, 529 | "legend": { 530 | "avg": false, 531 | "current": false, 532 | "max": false, 533 | "min": false, 534 | "show": true, 535 | "total": false, 536 | "values": false 537 | }, 538 | "lines": true, 539 | "linewidth": 1, 540 | "links": [], 541 | "nullPointMode": "null", 542 | "options": { 543 | "alertThreshold": true 544 | }, 545 | "percentage": false, 546 | "pluginVersion": "7.4.2", 547 | "pointradius": 0.5, 548 | "points": true, 549 | "renderer": "flot", 550 | "seriesOverrides": [], 551 | "spaceLength": 10, 552 | "stack": false, 553 | "steppedLine": false, 554 | "targets": [ 555 | { 556 | "aggregator": "avg", 557 | "alias": "@hostname", 558 | "dimensions": [ 559 | { 560 | "key": "hostname", 561 | "value": "$all" 562 | }, 563 | { 564 | "key": "component", 565 | "value": "vm" 566 | } 567 | ], 568 | "error": "", 569 | "group": true, 570 | "metric": 
"net.in_bytes_sec", 571 | "period": "300", 572 | "refId": "A" 573 | } 574 | ], 575 | "thresholds": [], 576 | "timeFrom": null, 577 | "timeRegions": [], 578 | "timeShift": null, 579 | "title": "Network received", 580 | "tooltip": { 581 | "shared": true, 582 | "sort": 0, 583 | "value_type": "individual" 584 | }, 585 | "type": "graph", 586 | "xaxis": { 587 | "buckets": null, 588 | "mode": "time", 589 | "name": null, 590 | "show": true, 591 | "values": [] 592 | }, 593 | "yaxes": [ 594 | { 595 | "$$hashKey": "object:797", 596 | "format": "Bps", 597 | "label": null, 598 | "logBase": 1, 599 | "max": null, 600 | "min": null, 601 | "show": true 602 | }, 603 | { 604 | "$$hashKey": "object:798", 605 | "format": "short", 606 | "label": null, 607 | "logBase": 1, 608 | "max": null, 609 | "min": null, 610 | "show": true 611 | } 612 | ], 613 | "yaxis": { 614 | "align": false, 615 | "alignLevel": null 616 | } 617 | }, 618 | { 619 | "aliasColors": {}, 620 | "bars": true, 621 | "dashLength": 10, 622 | "dashes": false, 623 | "datasource": "monasca", 624 | "fieldConfig": { 625 | "defaults": { 626 | "custom": {} 627 | }, 628 | "overrides": [] 629 | }, 630 | "fill": 1, 631 | "fillGradient": 0, 632 | "gridPos": { 633 | "h": 7, 634 | "w": 6, 635 | "x": 6, 636 | "y": 10 637 | }, 638 | "hiddenSeries": false, 639 | "id": 14, 640 | "legend": { 641 | "avg": false, 642 | "current": false, 643 | "max": false, 644 | "min": false, 645 | "show": false, 646 | "total": false, 647 | "values": false 648 | }, 649 | "lines": false, 650 | "linewidth": 1, 651 | "links": [], 652 | "nullPointMode": "null", 653 | "options": { 654 | "alertThreshold": true 655 | }, 656 | "percentage": false, 657 | "pluginVersion": "7.4.2", 658 | "pointradius": 5, 659 | "points": false, 660 | "renderer": "flot", 661 | "seriesOverrides": [], 662 | "spaceLength": 10, 663 | "stack": false, 664 | "steppedLine": false, 665 | "targets": [ 666 | { 667 | "aggregator": "avg", 668 | "alias": "@hostname", 669 | "dimensions": [ 670 | { 671 | 
"key": "hostname", 672 | "value": "$all" 673 | }, 674 | { 675 | "key": "component", 676 | "value": "vm" 677 | } 678 | ], 679 | "error": "", 680 | "group": true, 681 | "metric": "net.in_bytes", 682 | "period": "300", 683 | "refId": "A" 684 | } 685 | ], 686 | "thresholds": [], 687 | "timeFrom": null, 688 | "timeRegions": [], 689 | "timeShift": null, 690 | "title": "Network received total bytes", 691 | "tooltip": { 692 | "shared": false, 693 | "sort": 0, 694 | "value_type": "individual" 695 | }, 696 | "type": "graph", 697 | "xaxis": { 698 | "buckets": null, 699 | "mode": "series", 700 | "name": null, 701 | "show": true, 702 | "values": [ 703 | "current" 704 | ] 705 | }, 706 | "yaxes": [ 707 | { 708 | "format": "decbytes", 709 | "label": "", 710 | "logBase": 1, 711 | "max": null, 712 | "min": null, 713 | "show": true 714 | }, 715 | { 716 | "format": "short", 717 | "label": "", 718 | "logBase": 1, 719 | "max": null, 720 | "min": null, 721 | "show": true 722 | } 723 | ], 724 | "yaxis": { 725 | "align": false, 726 | "alignLevel": null 727 | } 728 | }, 729 | { 730 | "aliasColors": {}, 731 | "bars": false, 732 | "dashLength": 10, 733 | "dashes": false, 734 | "datasource": "monasca", 735 | "fieldConfig": { 736 | "defaults": { 737 | "custom": {} 738 | }, 739 | "overrides": [] 740 | }, 741 | "fill": 1, 742 | "fillGradient": 0, 743 | "gridPos": { 744 | "h": 7, 745 | "w": 6, 746 | "x": 12, 747 | "y": 10 748 | }, 749 | "hiddenSeries": false, 750 | "id": 9, 751 | "legend": { 752 | "avg": false, 753 | "current": false, 754 | "max": false, 755 | "min": false, 756 | "show": true, 757 | "total": false, 758 | "values": false 759 | }, 760 | "lines": true, 761 | "linewidth": 1, 762 | "links": [], 763 | "nullPointMode": "null", 764 | "options": { 765 | "alertThreshold": true 766 | }, 767 | "percentage": false, 768 | "pluginVersion": "7.4.2", 769 | "pointradius": 0.5, 770 | "points": true, 771 | "renderer": "flot", 772 | "seriesOverrides": [], 773 | "spaceLength": 10, 774 | "stack": false, 
775 | "steppedLine": false, 776 | "targets": [ 777 | { 778 | "aggregator": "avg", 779 | "alias": "@hostname", 780 | "dimensions": [ 781 | { 782 | "key": "hostname", 783 | "value": "$all" 784 | }, 785 | { 786 | "key": "component", 787 | "value": "vm" 788 | } 789 | ], 790 | "error": "", 791 | "group": true, 792 | "metric": "net.out_bytes_sec", 793 | "period": "300", 794 | "refId": "A" 795 | } 796 | ], 797 | "thresholds": [], 798 | "timeFrom": null, 799 | "timeRegions": [], 800 | "timeShift": null, 801 | "title": "Network transmitted", 802 | "tooltip": { 803 | "shared": true, 804 | "sort": 0, 805 | "value_type": "individual" 806 | }, 807 | "type": "graph", 808 | "xaxis": { 809 | "buckets": null, 810 | "mode": "time", 811 | "name": null, 812 | "show": true, 813 | "values": [] 814 | }, 815 | "yaxes": [ 816 | { 817 | "$$hashKey": "object:860", 818 | "format": "Bps", 819 | "label": null, 820 | "logBase": 1, 821 | "max": null, 822 | "min": null, 823 | "show": true 824 | }, 825 | { 826 | "$$hashKey": "object:861", 827 | "format": "short", 828 | "label": null, 829 | "logBase": 1, 830 | "max": null, 831 | "min": null, 832 | "show": true 833 | } 834 | ], 835 | "yaxis": { 836 | "align": false, 837 | "alignLevel": null 838 | } 839 | }, 840 | { 841 | "aliasColors": {}, 842 | "bars": true, 843 | "dashLength": 10, 844 | "dashes": false, 845 | "datasource": "monasca", 846 | "fieldConfig": { 847 | "defaults": { 848 | "custom": {} 849 | }, 850 | "overrides": [] 851 | }, 852 | "fill": 1, 853 | "fillGradient": 0, 854 | "gridPos": { 855 | "h": 7, 856 | "w": 6, 857 | "x": 18, 858 | "y": 10 859 | }, 860 | "hiddenSeries": false, 861 | "id": 15, 862 | "legend": { 863 | "avg": false, 864 | "current": false, 865 | "max": false, 866 | "min": false, 867 | "show": false, 868 | "total": false, 869 | "values": false 870 | }, 871 | "lines": false, 872 | "linewidth": 1, 873 | "links": [], 874 | "nullPointMode": "null", 875 | "options": { 876 | "alertThreshold": true 877 | }, 878 | "percentage": 
false, 879 | "pluginVersion": "7.4.2", 880 | "pointradius": 5, 881 | "points": false, 882 | "renderer": "flot", 883 | "seriesOverrides": [], 884 | "spaceLength": 10, 885 | "stack": false, 886 | "steppedLine": false, 887 | "targets": [ 888 | { 889 | "aggregator": "none", 890 | "alias": "@hostname", 891 | "dimensions": [ 892 | { 893 | "key": "hostname", 894 | "value": "$all" 895 | }, 896 | { 897 | "key": "component", 898 | "value": "vm" 899 | } 900 | ], 901 | "error": "", 902 | "group": true, 903 | "metric": "net.out_bytes", 904 | "period": "300", 905 | "refId": "A" 906 | } 907 | ], 908 | "thresholds": [], 909 | "timeFrom": null, 910 | "timeRegions": [], 911 | "timeShift": null, 912 | "title": "Network transmitted total bytes", 913 | "tooltip": { 914 | "shared": false, 915 | "sort": 0, 916 | "value_type": "individual" 917 | }, 918 | "type": "graph", 919 | "xaxis": { 920 | "buckets": null, 921 | "mode": "series", 922 | "name": null, 923 | "show": true, 924 | "values": [ 925 | "current" 926 | ] 927 | }, 928 | "yaxes": [ 929 | { 930 | "format": "decbytes", 931 | "label": "", 932 | "logBase": 1, 933 | "max": null, 934 | "min": null, 935 | "show": true 936 | }, 937 | { 938 | "format": "short", 939 | "label": "", 940 | "logBase": 1, 941 | "max": null, 942 | "min": null, 943 | "show": true 944 | } 945 | ], 946 | "yaxis": { 947 | "align": false, 948 | "alignLevel": null 949 | } 950 | }, 951 | { 952 | "collapsed": false, 953 | "datasource": null, 954 | "gridPos": { 955 | "h": 1, 956 | "w": 24, 957 | "x": 0, 958 | "y": 17 959 | }, 960 | "id": 20, 961 | "panels": [], 962 | "repeat": null, 963 | "title": "Disk I/O", 964 | "type": "row" 965 | }, 966 | { 967 | "aliasColors": {}, 968 | "bars": false, 969 | "dashLength": 10, 970 | "dashes": false, 971 | "datasource": "monasca", 972 | "fieldConfig": { 973 | "defaults": { 974 | "custom": {} 975 | }, 976 | "overrides": [] 977 | }, 978 | "fill": 1, 979 | "fillGradient": 0, 980 | "gridPos": { 981 | "h": 7, 982 | "w": 12, 983 | "x": 0, 
984 | "y": 18 985 | }, 986 | "hiddenSeries": false, 987 | "id": 10, 988 | "legend": { 989 | "avg": false, 990 | "current": false, 991 | "max": false, 992 | "min": false, 993 | "show": true, 994 | "total": false, 995 | "values": false 996 | }, 997 | "lines": true, 998 | "linewidth": 1, 999 | "links": [], 1000 | "nullPointMode": "null", 1001 | "options": { 1002 | "alertThreshold": true 1003 | }, 1004 | "percentage": false, 1005 | "pluginVersion": "7.4.2", 1006 | "pointradius": 0.5, 1007 | "points": true, 1008 | "renderer": "flot", 1009 | "seriesOverrides": [], 1010 | "spaceLength": 10, 1011 | "stack": false, 1012 | "steppedLine": false, 1013 | "targets": [ 1014 | { 1015 | "aggregator": "avg", 1016 | "alias": "@hostname", 1017 | "dimensions": [ 1018 | { 1019 | "key": "hostname", 1020 | "value": "$all" 1021 | }, 1022 | { 1023 | "key": "component", 1024 | "value": "vm" 1025 | } 1026 | ], 1027 | "error": "", 1028 | "group": true, 1029 | "hide": false, 1030 | "metric": "io.write_bytes_sec", 1031 | "period": "300", 1032 | "refId": "A" 1033 | } 1034 | ], 1035 | "thresholds": [], 1036 | "timeFrom": null, 1037 | "timeRegions": [], 1038 | "timeShift": null, 1039 | "title": "Disk I/O write bytes per second", 1040 | "tooltip": { 1041 | "shared": true, 1042 | "sort": 0, 1043 | "value_type": "individual" 1044 | }, 1045 | "type": "graph", 1046 | "xaxis": { 1047 | "buckets": null, 1048 | "mode": "time", 1049 | "name": null, 1050 | "show": true, 1051 | "values": [] 1052 | }, 1053 | "yaxes": [ 1054 | { 1055 | "$$hashKey": "object:923", 1056 | "format": "Bps", 1057 | "label": null, 1058 | "logBase": 1, 1059 | "max": null, 1060 | "min": null, 1061 | "show": true 1062 | }, 1063 | { 1064 | "$$hashKey": "object:924", 1065 | "format": "short", 1066 | "label": null, 1067 | "logBase": 1, 1068 | "max": null, 1069 | "min": null, 1070 | "show": true 1071 | } 1072 | ], 1073 | "yaxis": { 1074 | "align": false, 1075 | "alignLevel": null 1076 | } 1077 | }, 1078 | { 1079 | "aliasColors": {}, 1080 | 
"bars": false, 1081 | "dashLength": 10, 1082 | "dashes": false, 1083 | "datasource": "monasca", 1084 | "fieldConfig": { 1085 | "defaults": { 1086 | "custom": {} 1087 | }, 1088 | "overrides": [] 1089 | }, 1090 | "fill": 1, 1091 | "fillGradient": 0, 1092 | "gridPos": { 1093 | "h": 7, 1094 | "w": 12, 1095 | "x": 12, 1096 | "y": 18 1097 | }, 1098 | "hiddenSeries": false, 1099 | "id": 11, 1100 | "legend": { 1101 | "avg": false, 1102 | "current": false, 1103 | "max": false, 1104 | "min": false, 1105 | "show": true, 1106 | "total": false, 1107 | "values": false 1108 | }, 1109 | "lines": true, 1110 | "linewidth": 1, 1111 | "links": [], 1112 | "nullPointMode": "null", 1113 | "options": { 1114 | "alertThreshold": true 1115 | }, 1116 | "percentage": false, 1117 | "pluginVersion": "7.4.2", 1118 | "pointradius": 0.5, 1119 | "points": true, 1120 | "renderer": "flot", 1121 | "seriesOverrides": [], 1122 | "spaceLength": 10, 1123 | "stack": false, 1124 | "steppedLine": false, 1125 | "targets": [ 1126 | { 1127 | "aggregator": "avg", 1128 | "alias": "@hostname", 1129 | "dimensions": [ 1130 | { 1131 | "key": "hostname", 1132 | "value": "$all" 1133 | }, 1134 | { 1135 | "key": "component", 1136 | "value": "vm" 1137 | } 1138 | ], 1139 | "error": "", 1140 | "group": true, 1141 | "metric": "io.read_bytes_sec", 1142 | "period": "300", 1143 | "refId": "A" 1144 | } 1145 | ], 1146 | "thresholds": [], 1147 | "timeFrom": null, 1148 | "timeRegions": [], 1149 | "timeShift": null, 1150 | "title": "Disk I/O read bytes per second", 1151 | "tooltip": { 1152 | "shared": true, 1153 | "sort": 0, 1154 | "value_type": "individual" 1155 | }, 1156 | "type": "graph", 1157 | "xaxis": { 1158 | "buckets": null, 1159 | "mode": "time", 1160 | "name": null, 1161 | "show": true, 1162 | "values": [] 1163 | }, 1164 | "yaxes": [ 1165 | { 1166 | "$$hashKey": "object:986", 1167 | "format": "Bps", 1168 | "label": null, 1169 | "logBase": 1, 1170 | "max": null, 1171 | "min": null, 1172 | "show": true 1173 | }, 1174 | { 1175 
| "$$hashKey": "object:987", 1176 | "format": "short", 1177 | "label": null, 1178 | "logBase": 1, 1179 | "max": null, 1180 | "min": null, 1181 | "show": true 1182 | } 1183 | ], 1184 | "yaxis": { 1185 | "align": false, 1186 | "alignLevel": null 1187 | } 1188 | } 1189 | ], 1190 | "refresh": "1h", 1191 | "schemaVersion": 27, 1192 | "style": "dark", 1193 | "tags": [], 1194 | "templating": { 1195 | "list": [] 1196 | }, 1197 | "time": { 1198 | "from": "now-1h", 1199 | "to": "now" 1200 | }, 1201 | "timepicker": { 1202 | "refresh_intervals": [ 1203 | "5s", 1204 | "10s", 1205 | "30s", 1206 | "1m", 1207 | "5m", 1208 | "15m", 1209 | "30m", 1210 | "1h", 1211 | "2h", 1212 | "1d" 1213 | ], 1214 | "time_options": [ 1215 | "5m", 1216 | "15m", 1217 | "1h", 1218 | "6h", 1219 | "12h", 1220 | "24h", 1221 | "2d", 1222 | "7d", 1223 | "30d" 1224 | ] 1225 | }, 1226 | "timezone": "utc", 1227 | "title": "OpenStack VMs", 1228 | "uid": "aHb8Q7Rnz", 1229 | "version": 4 1230 | } 1231 | -------------------------------------------------------------------------------- /grafana-init/grafana.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
14 | 15 | import glob 16 | import json 17 | import logging 18 | import os 19 | import sys 20 | import time 21 | import urllib 22 | 23 | from requests import Session, RequestException 24 | 25 | LOG_LEVEL = logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO')) 26 | logging.basicConfig(level=LOG_LEVEL) 27 | 28 | logger = logging.getLogger(__name__) 29 | 30 | GRAFANA_URL = os.environ.get('GRAFANA_URL', 'http://grafana:3000') 31 | GRAFANA_ADMIN_USERNAME = os.environ.get('GRAFANA_ADMIN_USERNAME', 'admin') 32 | GRAFANA_ADMIN_PASSWORD = os.environ.get('GRAFANA_ADMIN_PASSWORD', 'admin') 33 | 34 | DATASOURCE_NAME = os.environ.get('DATASOURCE_NAME', 'monasca') 35 | DATASOURCE_URL = os.environ.get('DATASOURCE_URL', 'http://monasca:8070/') 36 | DATASOURCE_ACCESS_MODE = os.environ.get('DATASOURCE_ACCESS_MODE', 'proxy') 37 | DATASOURCE_AUTH = os.environ.get('DATASOURCE_AUTH', 'Keystone').capitalize() 38 | DATASOURCE_AUTH_TOKEN = os.environ.get('DATASOURCE_AUTH_TOKEN', '') 39 | 40 | DASHBOARDS_DIR = os.environ.get('DASHBOARDS_DIR', '/dashboards.d') 41 | 42 | 43 | def retry(retries=5, delay=2.0, exc_types=(RequestException,)): 44 | def decorator(func): 45 | def f_retry(*args, **kwargs): 46 | for i in range(retries): 47 | try: 48 | return func(*args, **kwargs) 49 | except exc_types as exc: 50 | if i < retries - 1: 51 | logger.debug('Caught exception, retrying...', 52 | exc_info=True) 53 | time.sleep(delay) 54 | else: 55 | logger.exception('Failed after %d attempts', retries) 56 | if isinstance(exc, RequestException): 57 | logger.debug('Response was: %r', exc.response) 58 | 59 | raise 60 | return f_retry 61 | return decorator 62 | 63 | 64 | def create_admin_login_payload(): 65 | return { 66 | 'user': GRAFANA_ADMIN_USERNAME, 67 | 'password': GRAFANA_ADMIN_PASSWORD, 68 | 'email': '' 69 | } 70 | 71 | 72 | @retry(retries=24, delay=5.0) 73 | def login(session, user): 74 | r = session.post('{url}/login'.format(url=GRAFANA_URL), 75 | json=user, 76 | timeout=5) 77 | r.raise_for_status() 
78 | 79 | 80 | @retry(retries=12, delay=5.0) 81 | def change_user_context(admin_session, user_session, organisation): 82 | org = admin_session.get('{url}/api/orgs/name/{org_name}'.format( 83 | url=GRAFANA_URL, org_name=urllib.quote(organisation.encode('utf8')) 84 | ), timeout=5) 85 | org.raise_for_status() 86 | 87 | org_id = json.loads(org.text)['id'] 88 | logging.debug('Organisation "%s" id = %r', organisation, org_id) 89 | 90 | r = user_session.post('{url}/api/user/using/{org}'. 91 | format(url=GRAFANA_URL, org=org_id), 92 | timeout=5) 93 | r.raise_for_status() 94 | 95 | 96 | @retry(retries=12, delay=5.0) 97 | def check_initialized(session): 98 | r = session.get('{url}/api/datasources'.format(url=GRAFANA_URL), timeout=5) 99 | r.raise_for_status() 100 | 101 | logging.debug('existing datasources = %r', r.text) 102 | 103 | for datasource in r.json(): 104 | if datasource['name'] == DATASOURCE_NAME: 105 | return True 106 | 107 | return False 108 | 109 | @retry(retries=12, delay=5.0) 110 | def add_datasource(admin_session): 111 | r = admin_session.post('{url}/api/datasources'.format(url=GRAFANA_URL), 112 | json=create_datasource_payload()) 113 | logging.debug('Response: %r', r.text) 114 | r.raise_for_status() 115 | 116 | @retry(retries=12, delay=5.0) 117 | def create_dashboard(admin_session, path): 118 | r = admin_session.post('{url}/api/dashboards/db'.format(url=GRAFANA_URL), 119 | json=create_dashboard_payload(path)) 120 | logging.debug('Response: %r', r.text) 121 | r.raise_for_status() 122 | 123 | 124 | def create_datasource_payload(): 125 | payload = { 126 | 'name': DATASOURCE_NAME, 127 | 'url': DATASOURCE_URL, 128 | 'access': DATASOURCE_ACCESS_MODE, 129 | 'isDefault': True, 130 | } 131 | 132 | if DATASOURCE_AUTH not in ['Keystone', 'Horizon', 'Token']: 133 | logger.error('Unknown Keystone authentication option: %s', 134 | DATASOURCE_AUTH) 135 | sys.exit(1) 136 | 137 | keystone_auth = False 138 | if DATASOURCE_AUTH in ['Keystone']: 139 | keystone_auth = True 140 | 
141 | payload.update({ 142 | 'monasca': { 143 | 'type': 'monasca-datasource', 144 | 'jsonData': { 145 | 'authMode': DATASOURCE_AUTH, 146 | 'keystoneAuth': keystone_auth, 147 | 'token': DATASOURCE_AUTH_TOKEN, 148 | } 149 | } 150 | }.get(DATASOURCE_NAME, {})) 151 | 152 | logging.debug('payload = %r', payload) 153 | 154 | return payload 155 | 156 | 157 | def create_dashboard_payload(json_path): 158 | with open(json_path, 'r') as f: 159 | dashboard = json.load(f) 160 | dashboard['id'] = None 161 | 162 | return { 163 | 'dashboard': dashboard, 164 | 'overwrite': False 165 | } 166 | 167 | 168 | def main(): 169 | admin_session = Session() 170 | admin_user = create_admin_login_payload() 171 | login(admin_session, admin_user) 172 | 173 | logging.info('Opening a Grafana session...') 174 | 175 | if check_initialized(admin_session): 176 | logging.info('Grafana has already been initialized, skipping!') 177 | return 178 | 179 | logging.info('Attempting to add configured datasource...') 180 | add_datasource(admin_session) 181 | 182 | for path in sorted(glob.glob('{dir}/*.json'.format(dir=DASHBOARDS_DIR))): 183 | logging.info('Creating dashboard from file: {path}'.format(path=path)) 184 | create_dashboard(admin_session, path) 185 | 186 | 187 | logging.info('Ending %r session...', admin_user.get('user')) 188 | admin_session.get('{url}/logout'.format(url=GRAFANA_URL)) 189 | 190 | logging.info('Finished successfully.') 191 | 192 | 193 | if __name__ == '__main__': 194 | main() 195 | -------------------------------------------------------------------------------- /grafana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM grafana/grafana:7.4.2 2 | 3 | ARG MONASCA_DATASOURCE_REPO=https://git.openstack.org/openstack/monasca-grafana-datasource 4 | ARG MONASCA_DATASOURCE_BRANCH=master 5 | 6 | # To force a rebuild, pass --build-arg REBUILD="$(DATE)" when running 7 | # `docker build` 8 | ARG REBUILD=1 9 | 10 | USER root 11 | 12 | # Install 
build dependencies 13 | RUN apk add --no-cache --virtual .build-deps git && \ 14 | apk add --no-cache python3 py3-jinja2 && \ 15 | chown -R root:root /var/lib/grafana/plugins && \ 16 | mkdir -p /var/lib/grafana/plugins/monasca-grafana-datasource/ && \ 17 | cd /var/lib/grafana/plugins/monasca-grafana-datasource/ && \ 18 | git clone --depth 1 $MONASCA_DATASOURCE_REPO -b $MONASCA_DATASOURCE_BRANCH . && \ 19 | cd / && \ 20 | apk del .build-deps && \ 21 | rm -rf /var/cache/apk/* && \ 22 | rm /etc/grafana/grafana.ini 23 | 24 | COPY grafana.ini.j2 /etc/grafana/grafana.ini.j2 25 | COPY template.py start.sh / 26 | COPY drilldown.js /usr/share/grafana/public/dashboards/drilldown.js 27 | RUN chmod +x /template.py /start.sh /etc/grafana/grafana.ini.j2 28 | RUN chmod 777 /etc/grafana 29 | EXPOSE 3000 30 | 31 | HEALTHCHECK --interval=10s --timeout=5s \ 32 | CMD wget -q http://localhost:3000 -O - > /dev/null 33 | ENTRYPOINT ["/start.sh"] 34 | -------------------------------------------------------------------------------- /grafana/README.md: -------------------------------------------------------------------------------- 1 | monasca/grafana Dockerfile 2 | ========================== 3 | 4 | This image contains Grafana built with Keystone support and the Monasca data 5 | source. For more information on the Monasca project, see [the wiki][1]. 6 | 7 | Sources: [twc-openstack/grafana][2] · [monasca-docker][3] · [Dockerfile][4] 8 | 9 | Tags 10 | ---- 11 | 12 | The images in this repository follow a few tagging conventions: 13 | 14 | * `7.4.3-master`: `[grafana version]-[monasca-grafana-datasource version]`. 15 | 16 | Usage 17 | ----- 18 | 19 | Grafana does not require any configuration to run normally: 20 | 21 | docker run -p 3000:3000 -it monasca/grafana:latest 22 | 23 | To make use of the Monasca datasource in Monasca's [docker-compose][5] 24 | environment, log in using a valid keystone user and password (by default, 25 | `mini-mon` and `password`). 
Create a new data source with the following 26 | properties: 27 | 28 | * Type: Monasca 29 | * Url: http://monasca:8070/ 30 | * Access: proxy 31 | * Token: leave empty 32 | * Keystone auth: enabled / checked 33 | 34 | The dashboard for monasca-ui, which is opened by clicking on "Graph Metric" 35 | of the 'Alarm' tab, is available. You can use this dashboard outside of monasca-ui 36 | with the following link. 37 | `http://your-grafana-url:3000/dashboard/script/drilldown.js` 38 | Specify the metric name with the key "metric" and dimensions as additional 39 | parameters if necessary as below. 40 | `http://your-grafana-url:3000/dashboard/script/drilldown.js?metric=sample&dim1=val1` 41 | 42 | Configuration 43 | ------------- 44 | 45 | | Variable | Default | Description | 46 | |--------------------------|---------|---------------------------------| 47 | | `GRAFANA_ADMIN_USER` | `admin` | Grafana admin user name | 48 | | `GRAFANA_ADMIN_PASSWORD` | `admin` | Grafana admin user password | 49 | 50 | Grafana can be configured using [environment variables][7], though a 51 | configuration file can also be mounted into the image at 52 | `/etc/grafana/grafana.ini`. Plugins should be placed in 53 | `/var/lib/grafana/plugins`. 
54 | 55 | [1]: https://wiki.openstack.org/wiki/Monasca 56 | [2]: https://github.com/twc-openstack/grafana/tree/master-keystone 57 | [3]: https://github.com/hpcloud-mon/monasca-docker/ 58 | [4]: https://github.com/hpcloud-mon/monasca-docker/blob/master/grafana/Dockerfile 59 | [5]: https://github.com/hpcloud-mon/monasca-docker/blob/master/README.md 60 | [6]: https://github.com/hpcloud-mon/monasca-docker/tree/master/k8s 61 | [7]: http://docs.grafana.org/installation/configuration/#using-environment-variables 62 | -------------------------------------------------------------------------------- /grafana/build.yml: -------------------------------------------------------------------------------- 1 | repository: monasca/grafana 2 | variants: 3 | - tag: 7.4.3-master 4 | -------------------------------------------------------------------------------- /grafana/drilldown.js: -------------------------------------------------------------------------------- 1 | /* global _ */ 2 | 3 | /* 4 | * Complex scripted dashboard 5 | * This script generates a dashboard object that Grafana can load. 
It also takes a number of user 6 | * supplied URL parameters (in the ARGS variable) 7 | * 8 | * Return a dashboard object, or a function 9 | * 10 | * For async scripts, return a function, this function must take a single callback function as argument, 11 | * call this callback function with the dashboard object (see scripted_async.js for an example) 12 | */ 13 | 14 | 'use strict'; 15 | 16 | // accessible variables in this scope 17 | var window, document, ARGS, $, jQuery, moment, kbn; 18 | 19 | // Setup variables 20 | var dashboard, timespan; 21 | 22 | // All url parameters are available via the ARGS object 23 | var ARGS; 24 | 25 | // Set a default timespan if one isn't specified 26 | timespan = '1d'; 27 | 28 | // keys which should not be dimensions 29 | var exclusion_keys = [ 30 | 'metric', 31 | 'type', 32 | 'slug', 33 | 'fullscreen', 34 | 'edit', 35 | 'panelId', 36 | 'from', 37 | 'to' 38 | ]; 39 | 40 | // Intialize a skeleton with nothing but a rows array and service object 41 | dashboard = { 42 | rows : [], 43 | }; 44 | 45 | // Set a title 46 | dashboard.title = 'Alarm drilldown'; 47 | dashboard.time = { 48 | from: "now-" + (ARGS.from || timespan), 49 | to: "now" 50 | }; 51 | 52 | var metricName = 'metricname'; 53 | 54 | if(!_.isUndefined(ARGS.metric)) { 55 | metricName = ARGS.metric; 56 | } 57 | 58 | // Set dimensions 59 | var dimensions = []; 60 | for (var key in ARGS) { 61 | if (exclusion_keys.indexOf(key) == -1) { 62 | dimensions.push({'key': key, 'value': ARGS[key]}); 63 | } 64 | } 65 | 66 | dashboard.rows.push({ 67 | title: 'Chart', 68 | height: '300px', 69 | panels: [ 70 | { 71 | title: metricName, 72 | type: 'graph', 73 | span: 12, 74 | fill: 1, 75 | linewidth: 2, 76 | targets: [ 77 | { 78 | "metric": metricName, 79 | "aggregator": "avg", 80 | "period": 300, 81 | "dimensions": dimensions 82 | } 83 | ] 84 | } 85 | ] 86 | }); 87 | 88 | return dashboard; 89 | -------------------------------------------------------------------------------- 
/grafana/grafana.ini.j2: -------------------------------------------------------------------------------- 1 | ##################### Grafana Configuration Example ##################### 2 | # 3 | # Everything has defaults so you only need to uncomment things you want to 4 | # change 5 | 6 | # possible values : production, development 7 | ; app_mode = production 8 | 9 | # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty 10 | ; instance_name = ${HOSTNAME} 11 | 12 | #################################### Paths #################################### 13 | [paths] 14 | # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) 15 | # 16 | ;data = /var/lib/grafana 17 | # 18 | # Directory where grafana can store logs 19 | # 20 | ;logs = /var/log/grafana 21 | # 22 | # Directory where grafana will automatically scan and look for plugins 23 | # 24 | plugins = /var/lib/grafana/plugins 25 | 26 | # 27 | #################################### Server #################################### 28 | [server] 29 | # Protocol (http or https) 30 | ;protocol = http 31 | 32 | # The ip address to bind to, empty will bind to all interfaces 33 | ;http_addr = 34 | 35 | # The http port to use 36 | ;http_port = 3000 37 | 38 | # The public facing domain name used to access grafana from a browser 39 | ;domain = localhost 40 | 41 | # Redirect to correct domain if host header does not match domain 42 | # Prevents DNS rebinding attacks 43 | ;enforce_domain = false 44 | 45 | # The full public facing url 46 | root_url = %(protocol)s://%(domain)s/grafana 47 | 48 | # Log web requests 49 | ;router_logging = false 50 | 51 | # the path relative working path 52 | ;static_root_path = public 53 | 54 | # enable gzip 55 | ;enable_gzip = false 56 | 57 | # https certs & key file 58 | ;cert_file = 59 | ;cert_key = 60 | 61 | #################################### Database #################################### 62 | [database] 63 | # Either "mysql", 
"postgres" or "sqlite3", it's your choice 64 | ;type = sqlite3 65 | ;host = 127.0.0.1:3306 66 | ;name = grafana 67 | ;user = root 68 | ;password = 69 | 70 | # For "postgres" only, either "disable", "require" or "verify-full" 71 | ;ssl_mode = disable 72 | 73 | # For "sqlite3" only, path relative to data_path setting 74 | path = /var/lib/grafana/data/grafana.db 75 | 76 | #################################### Session #################################### 77 | [session] 78 | # Either "memory", "file", "redis", "mysql", "postgres", default is "file" 79 | ;provider = file 80 | 81 | # Provider config options 82 | # memory: not have any config yet 83 | # file: session dir path, is relative to grafana data_path 84 | # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana` 85 | # mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name` 86 | # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable 87 | ;provider_config = sessions 88 | 89 | # Session cookie name 90 | ;cookie_name = grafana_sess 91 | 92 | # If you use session in https only, default is false 93 | ;cookie_secure = false 94 | 95 | # Session life time, default is 86400 96 | ;session_life_time = 86400 97 | 98 | #################################### Analytics #################################### 99 | [analytics] 100 | # Server reporting, sends usage counters to stats.grafana.org every 24 hours. 101 | # No ip addresses are being tracked, only simple counters to track 102 | # running instances, dashboard and error counts. It is very helpful to us. 103 | # Change this option to false to disable reporting. 
104 | ;reporting_enabled = true 105 | 106 | # Set to false to disable all checks to https://grafana.net 107 | # for new vesions (grafana itself and plugins), check is used 108 | # in some UI views to notify that grafana or plugin update exists 109 | # This option does not cause any auto updates, nor send any information 110 | # only a GET request to http://grafana.net to get latest versions 111 | check_for_updates = true 112 | 113 | # Google Analytics universal tracking code, only enabled if you specify an id here 114 | ;google_analytics_ua_id = 115 | 116 | #################################### Security #################################### 117 | [security] 118 | # default admin user, created on startup 119 | admin_user = {{ GF_SECURITY_ADMIN_USER }} 120 | 121 | # default admin password, can be changed before first start of grafana, or in profile settings 122 | admin_password = {{ GF_SECURITY_ADMIN_PASSWORD }} 123 | 124 | # used for signing 125 | ;secret_key = SW2YcwTIb9zpOOhoPsMm 126 | 127 | # Auto-login remember days 128 | ;login_remember_days = 7 129 | ;cookie_username = grafana_user 130 | ;cookie_remember_name = grafana_remember 131 | 132 | # disable gravatar profile images 133 | ;disable_gravatar = false 134 | 135 | # data source proxy whitelist (ip_or_domain:port separated by spaces) 136 | ;data_source_proxy_whitelist = 137 | 138 | [snapshots] 139 | # snapshot sharing options 140 | ;external_enabled = true 141 | ;external_snapshot_url = https://snapshots-origin.raintank.io 142 | ;external_snapshot_name = Publish to snapshot.raintank.io 143 | 144 | #################################### Users #################################### 145 | [users] 146 | # disable user signup / registration 147 | allow_sign_up = false 148 | 149 | # Allow non admin users to create organizations 150 | ;allow_org_create = true 151 | 152 | # Set to true to automatically assign new users to the default organization (id 1) 153 | ;auto_assign_org = true 154 | 155 | # Default role new users 
will be automatically assigned (if disabled above is set to true) 156 | ;auto_assign_org_role = Viewer 157 | 158 | # Background text for the user field on the login page 159 | ;login_hint = email or username 160 | 161 | # Default UI theme ("dark" or "light") 162 | ;default_theme = dark 163 | 164 | #################################### Anonymous Auth ########################## 165 | [auth.anonymous] 166 | # enable anonymous access 167 | enabled = true 168 | 169 | # specify organization name that should be used for unauthenticated users 170 | org_name = Main Org. 171 | 172 | # specify role for unauthenticated users 173 | org_role = Viewer 174 | 175 | #################################### Github Auth ########################## 176 | [auth.github] 177 | ;enabled = false 178 | ;allow_sign_up = false 179 | ;client_id = some_id 180 | ;client_secret = some_secret 181 | ;scopes = user:email,read:org 182 | ;auth_url = https://github.com/login/oauth/authorize 183 | ;token_url = https://github.com/login/oauth/access_token 184 | ;api_url = https://api.github.com/user 185 | ;team_ids = 186 | ;allowed_organizations = 187 | 188 | #################################### Google Auth ########################## 189 | [auth.google] 190 | ;enabled = false 191 | ;allow_sign_up = false 192 | ;client_id = some_client_id 193 | ;client_secret = some_client_secret 194 | ;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email 195 | ;auth_url = https://accounts.google.com/o/oauth2/auth 196 | ;token_url = https://accounts.google.com/o/oauth2/token 197 | ;api_url = https://www.googleapis.com/oauth2/v1/userinfo 198 | ;allowed_domains = 199 | 200 | #################################### Auth Proxy ########################## 201 | [auth.proxy] 202 | ;enabled = false 203 | ;header_name = X-WEBAUTH-USER 204 | ;header_property = username 205 | ;auto_sign_up = true 206 | 207 | #################################### Basic Auth ########################## 208 | 
[auth.basic] 209 | ;enabled = true 210 | 211 | #################################### Auth LDAP ########################## 212 | [auth.ldap] 213 | ;enabled = false 214 | ;config_file = /etc/grafana/ldap.toml 215 | 216 | #################################### SMTP / Emailing ########################## 217 | [smtp] 218 | ;enabled = false 219 | ;host = localhost:25 220 | ;user = 221 | ;password = 222 | ;cert_file = 223 | ;key_file = 224 | ;skip_verify = false 225 | ;from_address = admin@grafana.localhost 226 | 227 | [emails] 228 | ;welcome_email_on_sign_up = false 229 | 230 | #################################### Logging ########################## 231 | [log] 232 | # Either "console", "file", "syslog". Default is console and file 233 | # Use space to separate multiple modes, e.g. "console file" 234 | mode = console 235 | 236 | # Either "trace", "debug", "info", "warn", "error", "critical", default is "info" 237 | level = {{ GRAFANA_LOG_LEVEL }} 238 | 239 | # For "console" mode only 240 | [log.console] 241 | level = {{ GRAFANA_LOG_LEVEL }} 242 | 243 | # log line format, valid options are text, console and json 244 | ;format = console 245 | 246 | # For "file" mode only 247 | [log.file] 248 | ;level = 249 | 250 | # log line format, valid options are text, console and json 251 | ;format = text 252 | 253 | # This enables automated log rotate(switch of following options), default is true 254 | ;log_rotate = true 255 | 256 | # Max line number of single file, default is 1000000 257 | ;max_lines = 1000000 258 | 259 | # Max size shift of single file, default is 28 means 1 << 28, 256MB 260 | ;max_size_shift = 28 261 | 262 | # Segment log daily, default is true 263 | ;daily_rotate = true 264 | 265 | # Expired days of log file(delete after max days), default is 7 266 | ;max_days = 7 267 | 268 | [log.syslog] 269 | ;level = 270 | 271 | # log line format, valid options are text, console and json 272 | ;format = text 273 | 274 | # Syslog network type and address. 
This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. 275 | ;network = 276 | ;address = 277 | 278 | # Syslog facility. user, daemon and local0 through local7 are valid. 279 | ;facility = 280 | 281 | # Syslog tag. By default, the process' argv[0] is used. 282 | ;tag = 283 | 284 | 285 | #################################### AMQP Event Publisher ########################## 286 | [event_publisher] 287 | ;enabled = false 288 | ;rabbitmq_url = amqp://localhost/ 289 | ;exchange = grafana_events 290 | 291 | ;#################################### Dashboard JSON files ########################## 292 | [dashboards.json] 293 | enabled = true 294 | path = /var/lib/grafana/public/dashboards 295 | 296 | #################################### Internal Grafana Metrics ########################## 297 | # Metrics available at HTTP API Url /api/metrics 298 | [metrics] 299 | # Disable / Enable internal metrics 300 | ;enabled = true 301 | 302 | # Publish interval 303 | ;interval_seconds = 10 304 | 305 | # Send internal metrics to Graphite 306 | ; [metrics.graphite] 307 | ; address = localhost:2003 308 | ; prefix = prod.grafana.%(instance_name)s. 309 | 310 | #################################### Internal Grafana Metrics ########################## 311 | # Url used to to import dashboards directly from Grafana.net 312 | [grafana_net] 313 | url = https://grafana.net 314 | 315 | [auth.keystone] 316 | enabled = true 317 | -------------------------------------------------------------------------------- /grafana/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # shellcheck disable=SC2153 3 | if [ -n "$GF_DATABASE_PORT" ]; then 4 | export GF_DATABASE_HOST=$GF_DATABASE_HOST":"$GF_DATABASE_PORT 5 | fi 6 | 7 | export GRAFANA_LOG_LEVEL=${GRAFANA_LOG_LEVEL:-"warn"} 8 | 9 | FILENAME=/usr/share/grafana/public/dashboards/drilldown.js 10 | if [ ! 
-f $FILENAME ]; then 11 | copy /drilldown.js /usr/share/grafana/public/dashboards/drilldown.js 12 | fi 13 | 14 | python3 /template.py /etc/grafana/grafana.ini.j2 /etc/grafana/grafana.ini 15 | exec /run.sh 16 | -------------------------------------------------------------------------------- /grafana/template.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # (C) Copyright 2017 Hewlett Packard Enterprise Development LP 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | 17 | from __future__ import print_function 18 | 19 | import os 20 | import sys 21 | 22 | from jinja2 import Template 23 | 24 | 25 | def main(): 26 | if len(sys.argv) != 3: 27 | print('Usage: {} [input] [output]'.format(sys.argv[0])) 28 | sys.exit(1) 29 | 30 | in_path = sys.argv[1] 31 | out_path = sys.argv[2] 32 | 33 | with open(in_path, 'r') as in_file, open(out_path, 'w') as out_file: 34 | t = Template(in_file.read()) 35 | out_file.write(t.render(os.environ)) 36 | 37 | if __name__ == '__main__': 38 | main() 39 | --------------------------------------------------------------------------------