├── lib
│   ├── __init__.py
│   ├── Tools.py
│   ├── Connmon.py
│   ├── Elastic.py
│   ├── WorkloadBase.py
│   └── Grafana.py
├── ansible
│   ├── browbeat
│   │   ├── group_vars
│   │   │   └── all
│   │   ├── roles
│   │   │   ├── neutron-ovsdb
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── keystone-workers
│   │   │   │   ├── templates
│   │   │   │   │   ├── keystone_ports.conf.j2
│   │   │   │   │   └── keystone_wsgi.conf.j2
│   │   │   │   ├── files
│   │   │   │   │   └── keystone_httpd
│   │   │   │   └── handlers
│   │   │   │       └── main.yml
│   │   │   ├── keystone-token
│   │   │   │   ├── files
│   │   │   │   │   └── my-keystone.te
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── cinder-workers
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── handlers
│   │   │   │       └── main.yml
│   │   │   ├── nova-db
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── handlers
│   │   │   │       └── main.yml
│   │   │   ├── neutron-l3
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── handlers
│   │   │   │       └── main.yml
│   │   │   ├── neutron-firewall
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── handlers
│   │   │   │       └── main.yml
│   │   │   ├── nova-workers
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── handlers
│   │   │   │       └── main.yml
│   │   │   ├── neutron-workers
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── grafana-snapshot
│   │   │       ├── templates
│   │   │       │   └── index.html.j2
│   │   │       ├── vars
│   │   │       │   └── main.yml
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── adjustment-db.yml
│   │   ├── cleanlogs.yml
│   │   ├── adjustment-l3.yml
│   │   ├── adjustment-keystone-token.yml
│   │   ├── adjustment-firewall_driver.yml
│   │   ├── adjustment-workers.yml
│   │   └── snapshot-general-performance-dashboard.yml
│   ├── install
│   │   ├── roles
│   │   │   ├── graphite
│   │   │   │   ├── files
│   │   │   │   │   ├── storage-aggregation.conf
│   │   │   │   │   ├── storage-schemas.conf
│   │   │   │   │   └── setup-graphite-db.exp
│   │   │   │   ├── templates
│   │   │   │   │   └── graphite-web.conf.j2
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── grafana_docker
│   │   │   │   ├── files
│   │   │   │   │   ├── docker-ps-names.sh
│   │   │   │   │   └── docker.repo
│   │   │   │   ├── templates
│   │   │   │   │   └── grafana-server.service.j2
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── logstash
│   │   │   │   ├── files
│   │   │   │   │   ├── 30-lumberjack-output.conf
│   │   │   │   │   ├── 10-syslog.conf
│   │   │   │   │   ├── logstash.repo
│   │   │   │   │   ├── 01-lumberjack-input.conf
│   │   │   │   │   ├── 02-beats-input.conf
│   │   │   │   │   ├── 30-elasticsearch-output.conf
│   │   │   │   │   ├── 10-syslog-filter.conf
│   │   │   │   │   └── filebeat-index-template.json
│   │   │   │   └── templates
│   │   │   │       └── openssl_extras.cnf.j2
│   │   │   ├── kibana
│   │   │   │   └── files
│   │   │   │       ├── filebeat-dashboards.zip
│   │   │   │       ├── kibana.repo
│   │   │   │       └── logstash.repo
│   │   │   ├── collectd-generic
│   │   │   │   ├── files
│   │   │   │   │   ├── custom-collectd.pp
│   │   │   │   │   └── cfme-http.conf
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   ├── vars
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       ├── ose-metrics.py.j2
│   │   │   │       ├── guest.collectd.conf.j2
│   │   │   │       ├── baremetal.collectd.conf.j2
│   │   │   │       ├── graphite.collectd.conf.j2
│   │   │   │       └── ose.collectd.conf.j2
│   │   │   ├── collectd-openstack
│   │   │   │   ├── files
│   │   │   │   │   ├── custom-collectd.pp
│   │   │   │   │   └── collectd-redis.sh
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── ceph.collectd.conf.j2
│   │   │   ├── browbeat
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── 00-browbeat.conf.j2
│   │   │   ├── connmon
│   │   │   │   ├── templates
│   │   │   │   │   └── connmon.cfg.j2
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── graphite_docker
│   │   │   │   ├── files
│   │   │   │   │   ├── docker.repo
│   │   │   │   │   └── storage-schemas.conf
│   │   │   │   └── templates
│   │   │   │       ├── graphite-web.service.j2
│   │   │   │       └── carbon-cache.service.j2
│   │   │   ├── common
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── filebeat
│   │   │   │   ├── files
│   │   │   │   │   └── filebeat.repo
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── elasticsearch
│   │   │   │   ├── files
│   │   │   │   │   ├── elasticsearch.repo
│   │   │   │   │   └── elasticsearch.in.sh
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── shaker-check
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── grafana
│   │   │   │   ├── templates
│   │   │   │   │   └── data_source.json.j2
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── heat
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── handlers
│   │   │   │       └── main.yml
│   │   │   ├── neutron
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── nginx
│   │   │   │   └── templates
│   │   │   │       ├── kibana.conf.j2
│   │   │   │       └── nginx.conf.j2
│   │   │   ├── nova
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── handlers
│   │   │   │       └── main.yml
│   │   │   ├── cinder
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── handlers
│   │   │   │       └── main.yml
│   │   │   ├── keystone
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── dashboard-openstack
│   │   │   │   ├── fix-ids.py
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── dashboard-generic
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── browbeat-network
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── grafana.yml
│   │   ├── graphite.yml
│   │   ├── elk-client.yml
│   │   ├── grafana-docker.yml
│   │   ├── graphite-docker.yml
│   │   ├── elk.yml
│   │   ├── browbeat_network.yml
│   │   ├── elk-openstack-client.yml
│   │   ├── browbeat.yml
│   │   ├── connmon.yml
│   │   ├── shaker_build.yml
│   │   ├── dashboards-openstack.yml
│   │   ├── dashboards-generic.yml
│   │   ├── filter_plugins
│   │   │   └── browbeat_install_filters.py
│   │   ├── collectd-openstack.yml
│   │   └── collectd-generic.yml
│   ├── check
│   │   ├── group_vars
│   │   │   ├── ceph
│   │   │   ├── all
│   │   │   ├── compute
│   │   │   ├── controller
│   │   │   └── undercloud
│   │   ├── templates
│   │   │   ├── mysql_report.j2
│   │   │   └── bug_report.j2
│   │   ├── roles
│   │   │   ├── neutron
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── compute
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── nova
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── common
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── ceph
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── undercloud
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── keystone
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── controller
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── README.md
│   │   ├── site.yml
│   │   └── browbeat-example-bug_report.log
│   ├── tune
│   │   ├── group_vars
│   │   │   ├── compute
│   │   │   └── controller
│   │   ├── README.md
│   │   ├── tune.yml
│   │   └── roles
│   │       ├── tuned
│   │       │   └── tasks
│   │       │       └── main.yml
│   │       └── udev_dhcp_all_interfaces
│   │           └── tasks
│   │               └── main.yml
│   ├── ansible.cfg
│   ├── gather
│   │   ├── group_vars
│   │   │   └── all
│   │   ├── dump_facts.j2
│   │   ├── roles
│   │   │   ├── rabbitmq
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── mysql
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── compute
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── undercloud
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── nova
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── keystone
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   └── site.yml
│   ├── README.cfme-allinone.md
│   └── README.collectd-generic.md
├── rally
│   ├── nova
│   │   ├── README.md
│   │   ├── nova-boot-list-cc.yml
│   │   └── nova-boot-snapshot-cc.yml
│   ├── cinder
│   │   ├── README.md
│   │   └── cinder-create-and-attach-volume-cc.yml
│   ├── neutron
│   │   ├── README.md
│   │   ├── neutron-create-list-network-cc.yml
│   │   ├── neutron-create-list-security-group-cc.yml
│   │   ├── neutron-create-list-port-cc.yml
│   │   ├── neutron-create-list-subnet-cc.yml
│   │   └── neutron-create-list-router-cc.yml
│   ├── authenticate
│   │   ├── README.md
│   │   ├── keystone-cc.yml
│   │   ├── validate_heat-cc.yml
│   │   ├── validate_nova-cc.yml
│   │   ├── validate_cinder-cc.yml
│   │   ├── validate_glance-cc.yml
│   │   ├── validate_monasca-cc.yml
│   │   └── validate_neutron-cc.yml
│   ├── keystonebasic
│   │   ├── README.md
│   │   ├── create_tenant-cc.yml
│   │   ├── create_user-cc.yml
│   │   ├── get_entities-cc.yml
│   │   ├── create_delete_user-cc.yml
│   │   ├── create_and_delete_role-cc.yml
│   │   ├── create_and_list_tenants-cc.yml
│   │   ├── create_and_list_users-cc.yml
│   │   ├── add_and_remove_user_role-cc.yml
│   │   ├── create_and_delete_service-cc.yml
│   │   ├── create_and_list_services-cc.yml
│   │   ├── create_user_update_password-cc.yml
│   │   ├── create_add_and_list_user_roles-cc.yml
│   │   ├── create_and_list_ec2credentials-cc.yml
│   │   ├── create_and_delete_ec2credentials-cc.yml
│   │   ├── create_update_and_delete_tenant-cc.yml
│   │   ├── create_tenant_with_users-cc.yml
│   │   └── create_user_set_enabled_and_delete-cc.yml
│   └── rally-plugins
│       ├── subnet-router-create
│       │   ├── subnet-router-create.yml
│       │   └── subnet-router-create.py
│       ├── netcreate-boot-ping
│       │   ├── netcreate_nova-boot-fip-ping.json
│       │   └── netcreate_nova-boot-fip-ping.py
│       ├── netcreate-boot
│       │   ├── netcreate_boot.yml
│       │   └── netcreate_boot.py
│       └── README.md
├── setup.cfg
├── log
│   └── .gitignore
├── requirements.txt
├── results
│   └── .gitignore
├── .gitreview
└── .gitignore
/lib/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ansible/browbeat/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/rally/nova/README.md:
--------------------------------------------------------------------------------
1 | # Nova scenarios
2 |
--------------------------------------------------------------------------------
/rally/cinder/README.md:
--------------------------------------------------------------------------------
1 | # Cinder scenarios
2 |
--------------------------------------------------------------------------------
/rally/neutron/README.md:
--------------------------------------------------------------------------------
1 | # Neutron scenarios
2 |
--------------------------------------------------------------------------------
/rally/authenticate/README.md:
--------------------------------------------------------------------------------
1 | # Authenticate scenarios
2 |
--------------------------------------------------------------------------------
/ansible/install/roles/graphite/files/storage-aggregation.conf:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/rally/keystonebasic/README.md:
--------------------------------------------------------------------------------
1 | # KeystoneBasic scenarios
2 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [pep8]
2 | ignore = E226,E302,E41,E111,E231,E203
3 | max-line-length = 100
4 |
--------------------------------------------------------------------------------
/log/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | ansible
2 | matplotlib
3 | python-dateutil==2.4.2
4 | pykwalify
5 | elasticsearch
6 |
--------------------------------------------------------------------------------
/ansible/check/group_vars/ceph:
--------------------------------------------------------------------------------
1 | ---
2 | ansible_become: true
3 | tuned_profile: throughput-performance
4 |
--------------------------------------------------------------------------------
/results/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/.gitreview:
--------------------------------------------------------------------------------
1 | [gerrit]
2 | host=review.gerrithub.io
3 | port=29418
4 | project=jtaleric/browbeat
5 | defaultbranch=master
6 |
--------------------------------------------------------------------------------
/ansible/tune/group_vars/compute:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Tuning vars for computes
4 | #
5 |
6 | tuned_profile: virtual-host
7 |
--------------------------------------------------------------------------------
/ansible/install/roles/grafana_docker/files/docker-ps-names.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | /usr/bin/docker ps -a --format '{{ .Names }}'
4 |
--------------------------------------------------------------------------------
/ansible/install/roles/logstash/files/30-lumberjack-output.conf:
--------------------------------------------------------------------------------
1 | output {
2 | elasticsearch { hosts => ["localhost:9200"] }
3 | }
4 |
--------------------------------------------------------------------------------
/ansible/tune/group_vars/controller:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Tuning vars for controllers
4 | #
5 |
6 | tuned_profile: throughput-performance
7 |
--------------------------------------------------------------------------------
/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | gathering = smart
3 | fact_caching_timeout = 86400
4 | fact_caching = jsonfile
5 | fact_caching_connection = /tmp/browbeat_fact_cache
6 |
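7 | # With "smart" gathering plus the jsonfile cache above, facts are gathered
8 | # once per host and reused for up to 24 hours (86400 s) across playbook runs.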
--------------------------------------------------------------------------------
/ansible/install/roles/kibana/files/filebeat-dashboards.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jtaleric/browbeat/HEAD/ansible/install/roles/kibana/files/filebeat-dashboards.zip
--------------------------------------------------------------------------------
/ansible/install/grafana.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to install grafana
4 | #
5 |
6 | - hosts: grafana
7 | remote_user: root
8 | roles:
9 | - { role: grafana }
10 |
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-generic/files/custom-collectd.pp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jtaleric/browbeat/HEAD/ansible/install/roles/collectd-generic/files/custom-collectd.pp
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-openstack/files/custom-collectd.pp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jtaleric/browbeat/HEAD/ansible/install/roles/collectd-openstack/files/custom-collectd.pp
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-generic/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Handlers for collectd-generic
4 | #
5 |
6 | - name: restart httpd
7 | service: name=httpd state=restarted
8 |
--------------------------------------------------------------------------------
/ansible/install/graphite.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to install graphite-web
4 | #
5 |
6 | - hosts: graphite
7 | remote_user: root
8 | roles:
9 | - { role: graphite }
10 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[cod]
2 |
3 | # Prevent hosts files from sneaking into the git repo
4 | ansible/hosts
5 | ansible/pbench-host-file
6 |
7 | log/*
8 |
9 | # Swap files
10 | *.sw[po]
11 |
--------------------------------------------------------------------------------
/ansible/install/elk-client.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to install the ELK client (filebeat)
4 | #
5 |
6 | - hosts: elk-client
7 | remote_user: root
8 | roles:
9 | - { role: filebeat }
10 |
--------------------------------------------------------------------------------
/ansible/install/grafana-docker.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to install grafana via docker
4 | #
5 |
6 | - hosts: grafana
7 | remote_user: root
8 | roles:
9 | - { role: grafana_docker }
10 |
--------------------------------------------------------------------------------
/ansible/install/roles/browbeat/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Browbeat installer handlers
4 | #
5 |
6 | - name: restart httpd
7 | service: name=httpd state=restarted
8 | become: true
9 |
--------------------------------------------------------------------------------
/ansible/check/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | # vi:syntax=yaml
3 | result_dir: "{{inventory_dir}}/"
4 |
5 | mysql_tuner_script: https://raw.githubusercontent.com/major/MySQLTuner-perl/master/mysqltuner.pl
6 |
--------------------------------------------------------------------------------
/ansible/tune/README.md:
--------------------------------------------------------------------------------
1 | # Browbeat OSP Performance Tuning Playbook
2 | This playbook aims to tune OSP deployed on Red Hat Enterprise Linux.
3 |
4 | The playbook in here is currently experimental.
5 |
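6 | A typical invocation (assuming an ansible inventory file named `hosts`; illustrative):
7 |
8 | ```
9 | ansible-playbook -i hosts tune/tune.yml
10 | ```
11 |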
--------------------------------------------------------------------------------
/ansible/install/graphite-docker.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to install graphite-web via docker
4 | #
5 |
6 | - hosts: graphite
7 | remote_user: root
8 | roles:
9 | - { role: graphite_docker }
10 |
--------------------------------------------------------------------------------
/ansible/install/roles/connmon/templates/connmon.cfg.j2:
--------------------------------------------------------------------------------
1 | [connmon_service_default]
2 | name: default
3 | csv_dump: /tmp/connmon_results.csv
4 | nodes:
5 | node1 hostname={{ connmon_host }}:5800 bind=0.0.0.0
6 |
--------------------------------------------------------------------------------
/ansible/install/roles/grafana_docker/files/docker.repo:
--------------------------------------------------------------------------------
1 | [dockerrepo]
2 | name=Docker Repository
3 | baseurl=https://yum.dockerproject.org/repo/main/centos/7
4 | enabled=1
5 | gpgcheck=1
6 | gpgkey=https://yum.dockerproject.org/gpg
7 |
--------------------------------------------------------------------------------
/ansible/install/roles/graphite_docker/files/docker.repo:
--------------------------------------------------------------------------------
1 | [dockerrepo]
2 | name=Docker Repository
3 | baseurl=https://yum.dockerproject.org/repo/main/centos/7
4 | enabled=1
5 | gpgcheck=1
6 | gpgkey=https://yum.dockerproject.org/gpg
7 |
--------------------------------------------------------------------------------
/ansible/install/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Browbeat Install Common
4 | #
5 |
6 | - name: Add DNS record
7 | become: true
8 | lineinfile: dest=/etc/resolv.conf state=present line="nameserver {{ dns_server }}" insertafter="^search"
9 |
--------------------------------------------------------------------------------
/ansible/install/roles/filebeat/files/filebeat.repo:
--------------------------------------------------------------------------------
1 | [elk-client]
2 | name=Elastic FileBeat Repository
3 | baseurl=https://packages.elastic.co/beats/yum/el/$basearch
4 | enabled=1
5 | gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
6 | gpgcheck=1
7 |
--------------------------------------------------------------------------------
/ansible/install/roles/kibana/files/kibana.repo:
--------------------------------------------------------------------------------
1 | [kibana-4.4]
2 | name=Kibana repository for 4.4.x packages
3 | baseurl=http://packages.elastic.co/kibana/4.4/centos
4 | gpgcheck=1
5 | gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
6 | enabled=1
7 |
--------------------------------------------------------------------------------
/ansible/tune/tune.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Tunes overcloud for browbeat/performance
4 | #
5 |
6 | - hosts: controller:compute
7 | gather_facts: false
8 | remote_user: heat-admin
9 | roles:
10 | - udev_dhcp_all_interfaces
11 | - tuned
12 |
--------------------------------------------------------------------------------
/ansible/install/roles/logstash/files/10-syslog.conf:
--------------------------------------------------------------------------------
1 | input {
2 | stdin {
3 | type => "syslog"
4 | }
5 | }
6 | output {
7 | # stdout {codec => rubydebug }
8 | elasticsearch {
9 | hosts => "localhost:9200"
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/ansible/install/elk.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to install the ELK stack
4 | #
5 |
6 | - hosts: elk
7 | remote_user: root
8 | roles:
9 | - { role: elasticsearch }
10 | - { role: logstash }
11 | - { role: nginx }
12 | - { role: kibana }
13 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/neutron-ovsdb/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Neutron handlers for browbeat adjustment
4 | #
5 |
6 | - name: restart neutron services
7 | service: name={{ item }} state=restarted
8 | with_items:
9 | - neutron-openvswitch-agent
10 |
--------------------------------------------------------------------------------
/ansible/install/roles/kibana/files/logstash.repo:
--------------------------------------------------------------------------------
1 | [logstash-2.2]
2 | name=logstash repository for 2.2 packages
3 | baseurl=http://packages.elasticsearch.org/logstash/2.2/centos
4 | gpgcheck=1
5 | gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
6 | enabled=1
7 |
--------------------------------------------------------------------------------
/ansible/install/roles/logstash/files/logstash.repo:
--------------------------------------------------------------------------------
1 | [logstash-2.2]
2 | name=logstash repository for 2.2 packages
3 | baseurl=http://packages.elasticsearch.org/logstash/2.2/centos
4 | gpgcheck=1
5 | gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
6 | enabled=1
7 |
--------------------------------------------------------------------------------
/ansible/browbeat/adjustment-db.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to adjust Nova database settings (greenlet pool size and max_overflow)
4 | #
5 | # Example:
6 | # ansible-playbook -i hosts browbeat/adjustment-db.yml -e "greenlet_pool_size=100 max_overflow=100"
7 | #
8 |
9 | - hosts: controller
10 |   remote_user: heat-admin
11 |   gather_facts: false
12 |   vars:
13 |     ansible_become: true
14 |     greenlet_pool_size: 100
15 |     max_overflow: 100
16 |   roles:
17 |     - nova-db
18 |
--------------------------------------------------------------------------------
/ansible/install/roles/elasticsearch/files/elasticsearch.repo:
--------------------------------------------------------------------------------
1 | [elasticsearch-2.x]
2 | name=Elasticsearch repository for 2.x packages
3 | baseurl=http://packages.elastic.co/elasticsearch/2.x/centos
4 | gpgcheck=1
5 | gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
6 | enabled=1
7 |
--------------------------------------------------------------------------------
/ansible/install/roles/logstash/files/01-lumberjack-input.conf:
--------------------------------------------------------------------------------
1 | input {
2 | lumberjack {
3 | port => 5043
4 | type => "logs"
5 | ssl_certificate => "/etc/pki/tls/certs/filebeat-forwarder.crt"
6 | ssl_key => "/etc/pki/tls/private/filebeat-forwarder.key"
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/ansible/tune/roles/tuned/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Sets correct tuned profile on each host
4 | # See https://bugzilla.redhat.com/show_bug.cgi?id=1246645
5 | #
6 |
7 | - name: Set tuned profile
8 | become: true
9 | command: tuned-adm profile {{ tuned_profile }}
10 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/keystone-workers/templates/keystone_ports.conf.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | Listen {{ item.public_ip_address }}:80
4 | {% if 'httpd' in item.deployment %}
5 | Listen {{ item.admin_ip_address }}:35357
6 | Listen {{ item.public_ip_address }}:5000
7 | {% endif %}
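8 |
9 | # Ports 5000 and 35357 are the keystone public and admin API endpoints; httpd
10 | # binds them only when the keystone deployment type includes 'httpd'.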
--------------------------------------------------------------------------------
/ansible/install/browbeat_network.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook for browbeat-network
4 | #
5 | # Creates public and private network for use with Perfkit and Shaker
6 | #
7 |
8 | - hosts: undercloud
9 | remote_user: "{{ local_remote_user }}"
10 | roles:
11 | - browbeat-network
12 |
--------------------------------------------------------------------------------
/ansible/install/roles/logstash/files/02-beats-input.conf:
--------------------------------------------------------------------------------
1 | input {
2 | beats {
3 | port => 5044
4 | ssl => true
5 | ssl_certificate => "/etc/pki/tls/certs/filebeat-forwarder.crt"
6 | ssl_key => "/etc/pki/tls/private/filebeat-forwarder.key"
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/ansible/install/roles/shaker-check/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Tasks to check requirements for installing shaker
4 | #
5 |
6 | - name: Check for heat
7 |   service: name={{ item }} state=started
8 | become: true
9 | with_items:
10 | - openstack-heat-api
11 | - openstack-heat-engine
12 |
--------------------------------------------------------------------------------
/ansible/gather/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | # Path of browbeat
3 | browbeat_path: /home/stack/browbeat
4 |
5 | # Home directory on undercloud
6 | home_dir: /home/stack
7 |
8 | # Login user for the remote hosts
9 | host_remote_user: heat-admin
10 |
11 | # Login user for the local/jump machine
12 | local_remote_user: stack
13 |
--------------------------------------------------------------------------------
/ansible/gather/dump_facts.j2:
--------------------------------------------------------------------------------
1 | {% for host in groups['controller'] %}
2 | {{hostvars[host]| to_nice_json}}
3 | {% endfor %}
4 | {% for host in groups['compute'] %}
5 | {{hostvars[host]| to_nice_json}}
6 | {% endfor %}
7 | {% for host in groups['undercloud'] %}
8 | {{hostvars[host]| to_nice_json}}
9 | {% endfor %}
10 |
11 |
12 |
13 |
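14 | {# Rendered by gather/site.yml into machine_facts.json, which
15 |    lib/Metadata.py then consumes to generate metadata jsons. #}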
--------------------------------------------------------------------------------
/ansible/install/elk-openstack-client.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to install the ELK client
4 | #
5 | - hosts: undercloud
6 | remote_user: "{{ local_remote_user }}"
7 | roles:
8 | - { role: filebeat }
9 |
10 | - hosts: controller,compute,ceph
11 | remote_user: "{{ host_remote_user }}"
12 | roles:
13 | - { role: filebeat }
14 |
--------------------------------------------------------------------------------
/ansible/install/roles/grafana/templates/data_source.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "name":"graphite",
3 | "type":"graphite",
4 | "url":"http://{{graphite_host}}:{{graphite_port}}/",
5 | "access":"proxy",
6 | "isDefault":true,
7 | "basicAuth":true,
8 | "basicAuthUser":"{{graphite_username}}",
9 | "basicAuthPassword":"{{graphite_password}}"
10 | }
--------------------------------------------------------------------------------
/ansible/install/roles/logstash/files/30-elasticsearch-output.conf:
--------------------------------------------------------------------------------
1 | output {
2 | elasticsearch {
3 | hosts => ["localhost:9200"]
4 | sniffing => true
5 | manage_template => false
6 | index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
7 | document_type => "%{[@metadata][type]}"
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/ansible/tune/roles/udev_dhcp_all_interfaces/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Removes 99-dhcp-all-interfaces.rules to prevent creating failed systemd resources
4 | # See https://bugzilla.redhat.com/show_bug.cgi?id=1293712
5 | #
6 |
7 | - name: Remove 99-dhcp-all-interfaces.rules
8 | become: true
9 | file: path=/etc/udev/rules.d/99-dhcp-all-interfaces.rules state=absent
10 |
--------------------------------------------------------------------------------
/ansible/install/browbeat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to install Browbeat (Rally + Shaker) on undercloud
4 | #
5 |
6 | - hosts: controller
7 | remote_user: "{{ host_remote_user }}"
8 | roles:
9 | - shaker-check
10 |
11 | - hosts: undercloud
12 | remote_user: "{{ local_remote_user }}"
13 | vars:
14 | results_in_httpd: true
15 | roles:
16 | - common
17 | - browbeat
18 |
--------------------------------------------------------------------------------
/ansible/browbeat/cleanlogs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to clean log files on controller nodes
4 | #
5 |
6 | - hosts: controller
7 | remote_user: heat-admin
8 | gather_facts: false
9 | tasks:
10 | - name: Clean Logs
11 | shell: for i in $(ls {{ item }}); do echo "" > $i; done
12 | with_items:
13 | - /var/log/keystone/*.log
14 | - /var/log/nova/*.log
15 | - /var/log/neutron/*.log
16 |
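17 | # Note: truncating with `echo "" > $i` (rather than deleting the files)
18 | # keeps the services' open log file handles valid, so no restarts are needed.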
--------------------------------------------------------------------------------
/ansible/browbeat/adjustment-l3.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to change number of Neutron l3 agents
4 | #
5 | # Change l3 agents Example:
6 | # ansible-playbook -i hosts browbeat/adjustment-l3.yml -e "max_l3_agents=3 min_l3_agents=3"
7 | #
8 |
9 | - hosts: controller
10 | remote_user: heat-admin
11 | gather_facts: false
12 | vars:
13 | ansible_become: true
14 | max_l3_agents: 3
15 | min_l3_agents: 2
16 | roles:
17 | - neutron-l3
18 |
--------------------------------------------------------------------------------
/ansible/gather/roles/rabbitmq/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Tasks to set rabbitmq facts for controllers
4 | #
5 | - name: Get rabbitmq file descriptors
6 | shell: rabbitmqctl status | grep file_descriptors | awk -F',' '{print $3}' | sed 's/.$//'
7 | register: rabbitmq_desc
8 | ignore_errors: true
9 |
10 | - name: Set rabbitmq file descriptors
11 | set_fact:
12 | openstack_rabbitmq_file_descriptors: "{{ rabbitmq_desc.stdout }}"
13 |
14 |
15 |
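16 | # The awk/sed pipeline above extracts the total_limit value from a
17 | # rabbitmqctl status line such as (illustrative values):
18 | #   {file_descriptors,[{total_limit,3996},{total_used,973},...]}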
--------------------------------------------------------------------------------
/ansible/browbeat/adjustment-keystone-token.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to change token type from uuid to fernet and back for keystone.
4 | #
5 | # Examples:
6 | # ansible-playbook -i hosts browbeat/adjustment-keystone-token.yml -e "token_provider=fernet"
7 | # ansible-playbook -i hosts browbeat/adjustment-keystone-token.yml -e "token_provider=uuid"
8 | #
9 |
10 | - hosts: controller
11 | remote_user: heat-admin
12 | vars:
13 | ansible_become: true
14 | roles:
15 | - keystone-token
16 |
--------------------------------------------------------------------------------
/ansible/install/roles/browbeat/templates/00-browbeat.conf.j2:
--------------------------------------------------------------------------------
1 | # Browbeat httpd config to serve results on undercloud
2 | # Installed via browbeat installer
3 |
4 | Listen 9000
5 |
6 | <VirtualHost *:9000>
7 |   ServerName browbeat-results
8 |   DocumentRoot "{{ browbeat_path }}/results"
9 |   <Directory "{{ browbeat_path }}/results">
10 |     Options Indexes FollowSymLinks
11 |     IndexOptions NameWidth=*
12 |     AllowOverride None
13 |     Require all granted
14 |   </Directory>
15 | </VirtualHost>
--------------------------------------------------------------------------------
/ansible/browbeat/adjustment-firewall_driver.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to change the Neutron firewall_driver on controllers and computes
4 | #
5 |
6 | - hosts: controller
7 |   remote_user: heat-admin
8 |   gather_facts: false
9 |   vars:
10 |     ansible_become: true
11 |     driver: neutron.agent.linux.openvswitch_firewall:OVSFirewallDriver
12 |   roles:
13 |     - neutron-firewall
14 |
15 | - hosts: compute
16 |   remote_user: heat-admin
17 |   gather_facts: false
18 |   vars:
19 |     ansible_become: true
20 |     driver: neutron.agent.linux.openvswitch_firewall:OVSFirewallDriver
21 |   roles:
22 |     - neutron-firewall
23 |
--------------------------------------------------------------------------------
/ansible/check/templates/mysql_report.j2:
--------------------------------------------------------------------------------
1 | # MYSQL Tuner Report
2 |
3 | {% for host in groups['undercloud'] %}
4 | -------------------------------------------
5 | | Results for host : {{ host }}
6 | -------------------------------------------
7 | {{hostvars[host]['mysql_out']['stdout']}}
8 | {% endfor %}
9 |
10 |
11 | {% for host in groups['controller'] %}
12 | -------------------------------------------
13 | | Results for host : {{ host }}
14 | -------------------------------------------
15 | {{hostvars[host]['mysql_out']['stdout']}}
16 | {% endfor %}
17 |
--------------------------------------------------------------------------------
/ansible/install/connmon.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to install connmon on undercloud/overcloud
4 | #
5 |
6 | - hosts: undercloud
7 | remote_user: "{{ local_remote_user }}"
8 | vars:
9 | ansible_become: true
10 | undercloud: true
11 | roles:
12 | - common
13 | - connmon
14 |
15 | - hosts: controller
16 | remote_user: "{{ host_remote_user }}"
17 | vars:
18 | ansible_become: true
19 | undercloud: false
20 | roles:
21 | - common
22 | - connmon
23 | - nova
24 | - neutron
25 | - keystone
26 | - cinder
27 | - heat
28 |
--------------------------------------------------------------------------------
/ansible/install/roles/heat/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Heat connmon tasks
4 | #
5 | - name: Check for connmon in heat.conf
6 | shell: grep -Eq 'connection\s?=\s?mysql:' /etc/heat/heat.conf
7 | register: heat_mysql
8 | ignore_errors: true
9 | changed_when: false
10 |
11 | - name: Enable Connmon in heat.conf
12 | shell: sed -i 's/mysql:/mysql+connmon:/g' /etc/heat/heat.conf
13 | when: heat_mysql.rc == 0
14 | notify:
15 | - unmanage heat services
16 | - restart heat services
17 | - manage heat services
18 | - cleanup heat services
19 |
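20 | # The sed above switches the SQLAlchemy dialect so connmon can intercept
21 | # database traffic; e.g. (illustrative values):
22 | #   connection = mysql://heat:secret@192.0.2.10/heat
23 | # becomes
24 | #   connection = mysql+connmon://heat:secret@192.0.2.10/heat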
--------------------------------------------------------------------------------
/ansible/install/roles/neutron/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Neutron handlers for browbeat install connmon
4 | #
5 |
6 | - name: unmanage neutron-server
7 | command: pcs resource unmanage neutron-server
8 | ignore_errors: true
9 |
10 | - name: restart neutron-server
11 | service: name=neutron-server state=restarted
12 |
13 | - name: manage neutron-server
14 | command: pcs resource manage neutron-server
15 | ignore_errors: true
16 |
17 | - name: cleanup neutron-server
18 | command: pcs resource cleanup neutron-server
19 | ignore_errors: true
20 |
--------------------------------------------------------------------------------
/ansible/install/roles/nginx/templates/kibana.conf.j2:
--------------------------------------------------------------------------------
1 | server {
2 | listen {{nginx_kibana_port}};
3 |
4 | server_name {{ansible_hostname}};
5 |
6 | auth_basic "Restricted Access";
7 | auth_basic_user_file /etc/nginx/htpasswd.users;
8 |
9 | location / {
10 | proxy_pass http://localhost:5601;
11 | proxy_http_version 1.1;
12 | proxy_set_header Upgrade $http_upgrade;
13 | proxy_set_header Connection 'upgrade';
14 | proxy_set_header Host $host;
15 | proxy_cache_bypass $http_upgrade;
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/ansible/install/roles/nova/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Nova connmon tasks
4 | #
5 |
6 | - name: Check for connmon in nova.conf
7 | shell: grep -Eq 'connection\s?=\s?mysql:' /etc/nova/nova.conf
8 | register: nova_mysql
9 | ignore_errors: true
10 | changed_when: false
11 |
12 | - name: Enable Connmon in nova.conf
13 | shell: sed -i 's/mysql:/mysql+connmon:/g' /etc/nova/nova.conf
14 | when: nova_mysql.rc == 0
15 | notify:
16 | - unmanage nova services
17 | - restart nova services
18 | - manage nova services
19 | - cleanup nova services
20 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/keystone-token/files/my-keystone.te:
--------------------------------------------------------------------------------
1 | module my-keystone 1.0;
2 |
3 | require {
4 | type etc_t;
5 | type keystone_t;
6 | class process execmem;
7 | class dir write;
8 | }
9 |
10 | #============= keystone_t ==============
11 | allow keystone_t etc_t:dir write;
12 |
13 | #!!!! This avc is allowed in the current policy
14 | allow keystone_t self:process execmem;
15 |
16 | require {
17 | type httpd_t;
18 | type etc_t;
19 | class dir write;
20 | }
21 |
22 | #============= httpd_t ==============
23 | allow httpd_t etc_t:dir write;
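24 |
25 | # Sketch of the usual build/load steps for a .te module (standard SELinux
26 | # toolchain; commands are illustrative, not part of the original file):
27 | #   checkmodule -M -m -o my-keystone.mod my-keystone.te
28 | #   semodule_package -o my-keystone.pp -m my-keystone.mod
29 | #   semodule -i my-keystone.pp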
--------------------------------------------------------------------------------
/ansible/browbeat/roles/neutron-ovsdb/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Configure the ovsdb driver
2 | ini_file:
3 | dest: "{{ item.file }}"
4 | mode: 0640
5 | section: "{{ item.section }}"
6 | option: "{{ item.option }}"
7 | value: "{{ item.value }}"
8 | backup: yes
9 | with_items:
10 | - { file: /etc/neutron/plugins/ml2/openvswitch_agent.ini, section: ovs, option: ovsdb_interface, value: "{{ driver }}" }
11 | notify:
12 | - unmanage neutron services
13 | - restart neutron services
14 | - manage neutron services
15 | - cleanup neutron services
16 |
--------------------------------------------------------------------------------
/ansible/check/roles/neutron/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check nova is configured in neutron config
3 | command: crudini --get /etc/neutron/neutron.conf DEFAULT nova_admin_tenant_id
4 | register: neutron_nova_creds
5 | failed_when: neutron_nova_creds.rc == 1
6 | changed_when: false
7 | ignore_errors: True
8 |
9 | - name: Check for rootwrap daemon
10 | command: crudini --get /etc/neutron/neutron.conf agent root_helper_daemon
11 | register: neutron_rootwrap_daemon
12 | failed_when: neutron_rootwrap_daemon.rc == 1
13 | changed_when: false
14 | ignore_errors: True
15 |
--------------------------------------------------------------------------------
/ansible/check/roles/compute/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Tests specific to compute hosts
4 | #
5 |
6 | - name: Check if swap device exists
7 | command: swapon -s
8 | register: bz1245714
9 | changed_when: no
10 | failed_when: "'dev' not in '{{ bz1245714.stdout }}'"
11 | ignore_errors: True
12 |
13 | - name: Check reserved_host_memory_mb
14 | shell: grep reserved_host_memory /etc/nova/nova.conf | grep -v "#" | cut -f2 -d =
15 | register: bz1282644
16 | failed_when: bz1282644.stdout|int < reserved_host_memory_check
17 | changed_when: no
18 | ignore_errors: True
19 |
20 |
--------------------------------------------------------------------------------
/ansible/check/roles/nova/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check Nova vif_plugging
3 | command: crudini --get /etc/nova/nova.conf DEFAULT vif_plugging_is_fatal
4 | register: bz1264740
5 | failed_when: "'True' not in '{{ bz1264740.stdout }}'"
6 | changed_when: false
7 | ignore_errors: True
8 |
9 | - name: Check Nova vif_plugging_timeout
10 | command: crudini --get /etc/nova/nova.conf DEFAULT vif_plugging_timeout
11 | register: nova_vif_timeout_result
12 | failed_when: nova_vif_timeout > nova_vif_timeout_result.stdout|int
13 | changed_when: false
14 | ignore_errors: True
15 |
--------------------------------------------------------------------------------
/ansible/install/roles/cinder/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Cinder connmon tasks
4 | #
5 |
6 | - name: Check for connmon in cinder.conf
7 | shell: grep -Eq 'connection\s?=\s?mysql:' /etc/cinder/cinder.conf
8 | register: cinder_mysql
9 | ignore_errors: true
10 | changed_when: false
11 |
12 | - name: Enable Connmon in cinder.conf
13 | shell: sed -i 's/mysql:/mysql+connmon:/g' /etc/cinder/cinder.conf
14 | when: cinder_mysql.rc == 0
15 | notify:
16 | - unmanage cinder services
17 | - restart cinder services
18 | - manage cinder services
19 | - cleanup cinder services
20 |
--------------------------------------------------------------------------------
/ansible/install/shaker_build.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to build shaker-image
4 | #
5 |
6 | - hosts: undercloud
7 | remote_user: "{{ local_remote_user }}"
8 | tasks:
9 | - name: build shaker image
10 | shell: >
11 | source {{ overcloudrc }}; source {{ shaker_venv }}/bin/activate;
12 | shaker-image-builder --flavor-name {{ shaker_flavor }} --image-builder-template
13 | {{ shaker_centos }}
14 | --os-region-name {{ shaker_region }}
15 | become: true
16 | register: image_result
17 | failed_when: image_result.rc != 0
18 |
--------------------------------------------------------------------------------
/ansible/install/roles/neutron/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Neutron connmon tasks
4 | #
5 |
6 | - name: Check for connmon in neutron.conf
7 | shell: grep -Eq 'connection\s?=\s?mysql:' /etc/neutron/neutron.conf
8 | register: neutron_mysql
9 | ignore_errors: true
10 | changed_when: false
11 |
12 | - name: Enable Connmon in neutron.conf
13 | shell: sed -i 's/mysql:/mysql+connmon:/g' /etc/neutron/neutron.conf
14 | when: neutron_mysql.rc == 0
15 | notify:
16 | - unmanage neutron-server
17 | - restart neutron-server
18 | - manage neutron-server
19 | - cleanup neutron-server
20 |
--------------------------------------------------------------------------------
/ansible/install/roles/graphite/files/storage-schemas.conf:
--------------------------------------------------------------------------------
1 | # Schema definitions for Whisper files. Entries are scanned in order,
2 | # and first match wins. This file is scanned for changes every 60 seconds.
3 | #
4 | # [name]
5 | # pattern = regex
6 | # retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ...
7 | #
8 | # Carbon's internal metrics. This entry should match what is specified in
9 | # CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings
10 | [carbon]
11 | pattern = ^carbon\.
12 | retentions = 60:90d
13 |
14 | [default]
15 | pattern = .*
16 | retentions = 10s:7d,60s:90d,1h:180d
17 |
18 |
--------------------------------------------------------------------------------
/ansible/install/roles/logstash/files/10-syslog-filter.conf:
--------------------------------------------------------------------------------
1 | filter {
2 | if [type] == "syslog" {
3 | grok {
4 | match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
5 | add_field => [ "received_at", "%{@timestamp}" ]
6 | add_field => [ "received_from", "%{host}" ]
7 | }
8 | syslog_pri { }
9 | date {
10 | match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
11 | }
12 | }
13 | }
14 |
15 |
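16 | # Example syslog line the grok pattern above matches (illustrative):
17 | #   Feb  8 12:34:56 overcloud-controller-0 neutron-server[2301]: processing request
18 | # yielding syslog_timestamp="Feb  8 12:34:56", syslog_hostname="overcloud-controller-0",
19 | # syslog_program="neutron-server", syslog_pid="2301".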
--------------------------------------------------------------------------------
/ansible/install/roles/graphite_docker/files/storage-schemas.conf:
--------------------------------------------------------------------------------
1 | # Schema definitions for Whisper files. Entries are scanned in order,
2 | # and first match wins. This file is scanned for changes every 60 seconds.
3 | #
4 | # [name]
5 | # pattern = regex
6 | # retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ...
7 | #
8 | # Carbon's internal metrics. This entry should match what is specified in
9 | # CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings
10 | [carbon]
11 | pattern = ^carbon\.
12 | retentions = 60:90d
13 |
14 | [default]
15 | pattern = .*
16 | retentions = 10s:7d,60s:90d,1h:180d
17 |
18 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/cinder-workers/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Cinder tasks for Browbeat
4 | # * Can change worker count
5 | #
6 |
7 | - name: Configure cinder.conf
8 | ini_file:
9 | dest: /etc/cinder/cinder.conf
10 | mode: 0640
11 | section: "{{ item.section }}"
12 | option: "{{ item.option }}"
13 | value: "{{ item.value }}"
14 | backup: yes
15 | with_items:
16 | - { section: DEFAULT, option: osapi_volume_workers, value: "{{ workers }}" }
17 | notify:
18 | - unmanage cinder services
19 | - restart cinder services
20 | - manage cinder services
21 | - cleanup cinder services
22 |
--------------------------------------------------------------------------------
/ansible/install/roles/logstash/templates/openssl_extras.cnf.j2:
--------------------------------------------------------------------------------
1 | [req]
2 | distinguished_name = req_distinguished_name
3 | x509_extensions = v3_req
4 | prompt = no
5 |
6 | [req_distinguished_name]
7 | C = TG
8 | ST = Togo
9 | L = Lome
10 | O = Private company
11 | CN = *
12 |
13 | [v3_req]
14 | subjectKeyIdentifier = hash
15 | authorityKeyIdentifier = keyid,issuer
16 | basicConstraints = CA:TRUE
17 | subjectAltName = @alt_names
18 |
19 | [alt_names]
20 | DNS.1 = *
21 | DNS.2 = *.*
22 | DNS.3 = *.*.*
23 | DNS.4 = *.*.*.*
24 | DNS.5 = *.*.*.*.*
25 | DNS.6 = *.*.*.*.*.*
26 | DNS.7 = *.*.*.*.*.*.*
27 | IP.1 = {{ ansible_default_ipv4.address }}
28 |
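29 | # Sketch of how the rendered config is typically consumed (standard
30 | # openssl req flags; key/cert paths match the logstash input configs):
31 | #   openssl req -x509 -nodes -newkey rsa:2048 -days 365 \
32 | #     -keyout /etc/pki/tls/private/filebeat-forwarder.key \
33 | #     -out /etc/pki/tls/certs/filebeat-forwarder.crt \
34 | #     -config openssl_extras.cnf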
--------------------------------------------------------------------------------
/ansible/check/README.md:
--------------------------------------------------------------------------------
1 | # Browbeat Ansible Checks
2 | The Browbeat Ansible checks playbook scans a given
3 | deployment for known configuration issues.
4 |
5 | ## Output from Checks
6 | Once the checks playbook completes, a report is
7 | generated in the checks directory:
8 |
9 | ```
10 | bug_report.log
11 | ```
12 |
13 | The report lists each host that was scanned and its
14 | known issues. Some issues have a Bugzilla (BZ)
15 | associated with them; use the BZ link to learn more
16 | about the bug and how it can impact your deployment.
17 | Checks without an associated BZ are simply
18 | suggestions.
19 |
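20 | ## Running the Checks
21 | A typical invocation (assuming an ansible inventory file named `hosts`; illustrative):
22 |
23 | ```
24 | ansible-playbook -i hosts check/site.yml
25 | ```
26 |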
--------------------------------------------------------------------------------
/ansible/check/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Tests common to Director/Controller/Compute/Ceph
4 | #
5 |
6 | - name: Get selinux mode
7 | command: getenforce
8 | changed_when: no
9 | register: sestatus
10 |
11 | - name: Check tuned running on host
12 | command: tuned-adm active
13 | register: tuned_result
14 | changed_when: no
15 | failed_when: tuned_result.rc == -1
16 | ignore_errors: True
17 |
18 | - name: Check tuned for correct profile on host
19 | command: tuned-adm active
20 | register: tuned_profile_result
21 | changed_when: no
22 | failed_when: "'{{ tuned_profile }}' not in '{{ tuned_profile_result.stdout }}'"
23 | ignore_errors: True
24 |
25 |
--------------------------------------------------------------------------------
/ansible/browbeat/adjustment-workers.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to change the number of workers for the nova, neutron, keystone, and cinder services
4 | #
5 | # Change Workers Example:
6 | # ansible-playbook -i hosts browbeat/adjustment-workers.yml -e "workers=12"
7 | #
8 | # Change Workers and Keystone Deployment Example:
9 | # ansible-playbook -i hosts browbeat/adjustment-workers.yml -e "workers=12 keystone_deployment=httpd"
10 | #
11 |
12 | - hosts: controller
13 | remote_user: heat-admin
14 | gather_facts: false
15 | vars:
16 | ansible_become: true
17 | workers: 24
18 | threads: 6
19 | roles:
20 | - nova-workers
21 | - neutron-workers
22 | - keystone-workers
23 | - cinder-workers
24 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/nova-db/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Nova tasks for Browbeat
4 | #
5 |
6 | - name: Ensure nova.conf is properly configured
7 | ini_file:
8 | dest: /etc/nova/nova.conf
9 | mode: 0640
10 | section: "{{ item.section }}"
11 | option: "{{ item.option }}"
12 | value: "{{ item.value }}"
13 | backup: yes
14 | with_items:
15 | - { section: DEFAULT, option: wsgi_default_pool_size, value: "{{ greenlet_pool_size }}" }
16 | - { section: api_database, option: max_overflow, value: "{{ max_overflow }}" }
17 | notify:
18 | - unmanage nova services
19 | - restart nova services
20 | - manage nova services
21 | - cleanup nova services
22 |
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-generic/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Vars for collectd-generic
4 | #
5 |
6 | collectd_packages:
7 | baremetal:
8 | - collectd
9 | - collectd-turbostat
10 | guest:
11 | - collectd
12 | cfme:
13 | - collectd
14 | - collectd-apache
15 | cfme-all-in-one:
16 | - collectd
17 | - collectd-postgresql
18 | - collectd-apache
19 | cfme-vmdb:
20 | - collectd
21 | - collectd-postgresql
22 | - collectd-apache
23 | graphite:
24 | - collectd
25 | - collectd-turbostat
26 | ose:
27 | - collectd
28 | - collectd-turbostat
29 | satellite6:
30 | - collectd
31 | - collectd-turbostat
32 | - collectd-postgresql
33 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/neutron-l3/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Neutron tasks for Browbeat
4 | #
5 |
6 | - name: Configure max/min L3 agents per router
7 | ini_file:
8 | dest: /etc/neutron/neutron.conf
9 | mode: 0640
10 | section: "{{ item.section }}"
11 | option: "{{ item.option }}"
12 | value: "{{ item.value }}"
13 | backup: yes
14 | with_items:
15 | - { section: DEFAULT, option: max_l3_agents_per_router, value: "{{ max_l3_agents }}" }
16 | - { section: DEFAULT, option: min_l3_agents_per_router, value: "{{ min_l3_agents }}" }
17 | notify:
18 | - unmanage neutron services
19 | - restart neutron services
20 | - manage neutron services
21 | - cleanup neutron services
22 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/neutron-firewall/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Configure the firewall driver
2 | ini_file:
3 | dest: "{{ item.file }}"
4 | mode: 0640
5 | section: "{{ item.section }}"
6 | option: "{{ item.option }}"
7 | value: "{{ item.value }}"
8 | backup: yes
9 | with_items:
10 | - { file: /etc/neutron/plugins/ml2/ml2_conf.ini, section: securitygroup, option: firewall_driver, value: "{{ driver }}" }
11 | - { file: /etc/neutron/plugins/ml2/openvswitch_agent.ini, section: securitygroup, option: firewall_driver, value: "{{ driver }}" }
12 | notify:
13 | - unmanage neutron services
14 | - restart neutron services
15 | - manage neutron services
16 | - cleanup neutron services
17 |
18 |
19 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/cinder-workers/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Cinder handlers for browbeat adjustment
4 | #
5 |
6 | - name: unmanage cinder services
7 | command: pcs resource unmanage {{ item }}
8 | with_items:
9 | - openstack-cinder-api
10 | ignore_errors: true
11 |
12 | - name: restart cinder services
13 | service: name={{ item }} state=restarted
14 | with_items:
15 | - openstack-cinder-api
16 |
17 | - name: manage cinder services
18 | command: pcs resource manage {{ item }}
19 | with_items:
20 | - openstack-cinder-api
21 | ignore_errors: true
22 |
23 | - name: cleanup cinder services
24 | command: pcs resource cleanup {{ item }}
25 | with_items:
26 | - openstack-cinder-api
27 | ignore_errors: true
28 |
--------------------------------------------------------------------------------
/ansible/gather/roles/mysql/tasks/main.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Get mysql facts
3 | #
4 | - name: Get max_connections on the database
5 | shell: mysql -e "show variables like 'max_connections';" | grep max_connections | awk '{print $2}'
6 | register: max_conn
7 | ignore_errors: true
8 |
9 | - name: Set max database connections
10 | set_fact:
11 | openstack_mysql_max_connections: "{{ max_conn.stdout }}"
12 |
13 | - name: Get file descriptors for the mysql process
14 | shell: cat /proc/$(pgrep mysqld_safe)/limits | grep "open files" | awk '{print $4}'
15 | register: mysql_desc
16 |
17 | - name: Set file descriptors fact for mysql
18 | set_fact:
19 | openstack_mysql_file_descriptors: "{{ mysql_desc.stdout }}"
20 |
21 |
22 |
--------------------------------------------------------------------------------
/ansible/gather/site.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: compute
3 | remote_user: "{{ host_remote_user }}"
4 | become: true
5 | roles:
6 | - compute
7 |
8 | - hosts: controller
9 | remote_user: "{{ host_remote_user }}"
10 | become: true
11 | roles:
12 | - nova
13 | - neutron
14 | - mysql
15 | - rabbitmq
16 |
17 | - hosts: undercloud
18 | remote_user: "{{ local_remote_user }}"
19 | become: true
20 | roles:
21 | - undercloud
22 |
23 |
24 | - hosts: localhost
25 | become: true
26 | tasks:
27 | - name: Dump all vars
28 | local_action: template src=dump_facts.j2 dest={{ browbeat_path }}/machine_facts.json
29 | - name: Generate metadata jsons
30 |       shell: python {{ browbeat_path }}/lib/Metadata.py {{ browbeat_path }}
31 |
32 |
33 |
--------------------------------------------------------------------------------
/rally/authenticate/keystone-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | Authenticate.keystone:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
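25 | # Illustrative standalone run against an existing Rally deployment (Browbeat
26 | # normally supplies concurrency/times when it renders this template):
27 | #   rally task start keystone-cc.yml --task-args '{"concurrency": 8, "times": 40}'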
--------------------------------------------------------------------------------
/rally/keystonebasic/create_tenant-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_tenant:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_user-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_user:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/get_entities-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.get_entities:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_delete_user-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_delete_user:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_and_delete_role-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_and_delete_role:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_and_list_tenants-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_and_list_tenants:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_and_list_users-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_and_list_users:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/add_and_remove_user_role-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.add_and_remove_user_role:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_and_delete_service-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_and_delete_service:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_and_list_services-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_and_list_services:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_user_update_password-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_user_update_password:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_add_and_list_user_roles-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_add_and_list_user_roles:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_and_list_ec2credentials-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_and_list_ec2credentials:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_and_delete_ec2credentials-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_and_delete_ec2credential:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_update_and_delete_tenant-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | KeystoneBasic.create_update_and_delete_tenant:
6 | -
7 | args: {}
8 | context:
9 | users:
10 | project_domain: "default"
11 | resource_management_workers: 30
12 | tenants: 1
13 | user_domain: "default"
14 | users_per_tenant: 8
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | sla:
20 | max_avg_duration: {{sla_max_avg_duration}}
21 | max_seconds_per_iteration: {{sla_max_seconds}}
22 | failure_rate:
23 | max: {{sla_max_failure}}
24 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/neutron-l3/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Neutron handlers for browbeat adjustment
4 | #
5 |
6 | - name: unmanage neutron services
7 | command: pcs resource unmanage {{ item }}
8 | with_items:
9 | - neutron-server
10 | - neutron-metadata-agent
11 | ignore_errors: true
12 |
13 | - name: restart neutron services
14 | service: name={{ item }} state=restarted
15 | with_items:
16 | - neutron-server
17 | - neutron-metadata-agent
18 |
19 | - name: manage neutron services
20 | command: pcs resource manage {{ item }}
21 | with_items:
22 | - neutron-server
23 | - neutron-metadata-agent
24 | ignore_errors: true
25 |
26 | - name: cleanup neutron services
27 | command: pcs resource cleanup {{ item }}
28 | with_items:
29 | - neutron-server
30 | - neutron-metadata-agent
31 | ignore_errors: true
32 |
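33 | # Listed order matters: the resources are unmanaged first so Pacemaker does
34 | # not react to the manual restart, then re-managed, and finally cleaned up
35 | # to clear any failed-action state.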
--------------------------------------------------------------------------------
/ansible/browbeat/roles/nova-workers/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Nova tasks for Browbeat
4 | # * Can change worker count
5 | #
6 |
7 | - name: Ensure nova.conf is properly configured
8 | ini_file:
9 | dest: /etc/nova/nova.conf
10 | mode: 0640
11 | section: "{{ item.section }}"
12 | option: "{{ item.option }}"
13 | value: "{{ item.value }}"
14 | backup: yes
15 | with_items:
16 | - { section: DEFAULT, option: ec2_workers, value: "{{ workers }}" }
17 | - { section: DEFAULT, option: osapi_compute_workers, value: "{{ workers }}" }
18 | - { section: DEFAULT, option: metadata_workers, value: "{{ workers }}" }
19 | - { section: conductor, option: workers, value: "{{ workers }}" }
20 | notify:
21 | - unmanage nova services
22 | - restart nova services
23 | - manage nova services
24 | - cleanup nova services
25 |
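26 | # The count arrives via the "workers" variable; passing, for example
27 | # (hypothetical value), -e 'workers=8' on the playbook run sets every
28 | # option above to 8 and fires the handler chain.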
--------------------------------------------------------------------------------
/ansible/install/roles/heat/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Heat handlers for browbeat install connmon
4 | #
5 |
6 | - name: unmanage heat services
7 | command: pcs resource unmanage {{ item }}
8 | with_items:
9 | - openstack-heat-api
10 | - openstack-heat-engine
11 | ignore_errors: true
12 |
13 | - name: restart heat services
14 | service: name={{ item }} state=restarted
15 | with_items:
16 | - openstack-heat-api
17 | - openstack-heat-engine
18 |
19 | - name: manage heat services
20 | command: pcs resource manage {{ item }}
21 | with_items:
22 | - openstack-heat-api
23 | - openstack-heat-engine
24 | ignore_errors: true
25 |
26 | - name: cleanup heat services
27 | command: pcs resource cleanup {{ item }}
28 | with_items:
29 | - openstack-heat-api
30 | - openstack-heat-engine
31 | ignore_errors: true
32 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/neutron-workers/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Neutron handlers for browbeat adjustment
4 | #
5 |
6 | - name: unmanage neutron services
7 | command: pcs resource unmanage {{ item }}
8 | with_items:
9 | - neutron-server
10 | - neutron-metadata-agent
11 | ignore_errors: true
12 |
13 | - name: restart neutron services
14 | service: name={{ item }} state=restarted
15 | with_items:
16 | - neutron-server
17 | - neutron-metadata-agent
18 |
19 | - name: manage neutron services
20 | command: pcs resource manage {{ item }}
21 | with_items:
22 | - neutron-server
23 | - neutron-metadata-agent
24 | ignore_errors: true
25 |
26 | - name: cleanup neutron services
27 | command: pcs resource cleanup {{ item }}
28 | with_items:
29 | - neutron-server
30 | - neutron-metadata-agent
31 | ignore_errors: true
32 |
--------------------------------------------------------------------------------
/rally/authenticate/validate_heat-cc.yml:
--------------------------------------------------------------------------------
1 | {% set repetitions = repetitions or 2 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | Authenticate.validate_heat:
7 | -
8 | args:
9 | repetitions: {{repetitions}}
10 | context:
11 | users:
12 | project_domain: "default"
13 | resource_management_workers: 30
14 | tenants: 1
15 | user_domain: "default"
16 | users_per_tenant: 8
17 | runner:
18 | concurrency: {{concurrency}}
19 | times: {{times}}
20 | type: "constant"
21 | sla:
22 | max_avg_duration: {{sla_max_avg_duration}}
23 | max_seconds_per_iteration: {{sla_max_seconds}}
24 | failure_rate:
25 | max: {{sla_max_failure}}
26 |
--------------------------------------------------------------------------------
/rally/authenticate/validate_nova-cc.yml:
--------------------------------------------------------------------------------
1 | {% set repetitions = repetitions or 2 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | Authenticate.validate_nova:
7 | -
8 | args:
9 | repetitions: {{repetitions}}
10 | context:
11 | users:
12 | project_domain: "default"
13 | resource_management_workers: 30
14 | tenants: 1
15 | user_domain: "default"
16 | users_per_tenant: 8
17 | runner:
18 | concurrency: {{concurrency}}
19 | times: {{times}}
20 | type: "constant"
21 | sla:
22 | max_avg_duration: {{sla_max_avg_duration}}
23 | max_seconds_per_iteration: {{sla_max_seconds}}
24 | failure_rate:
25 | max: {{sla_max_failure}}
26 |
--------------------------------------------------------------------------------
/rally/neutron/neutron-create-list-network-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | NeutronNetworks.create_and_list_networks:
6 | -
7 | args:
8 | network_create_args: ""
9 | runner:
10 | concurrency: {{concurrency}}
11 | times: {{times}}
12 | type: "constant"
13 | context:
14 | users:
15 | tenants: 1
16 | users_per_tenant: 8
17 | quotas:
18 | neutron:
19 | network: -1
20 | port: -1
21 | router: -1
22 | subnet: -1
23 | sla:
24 | max_avg_duration: {{sla_max_avg_duration}}
25 | max_seconds_per_iteration: {{sla_max_seconds}}
26 | failure_rate:
27 | max: {{sla_max_failure}}
28 |
--------------------------------------------------------------------------------
/rally/authenticate/validate_cinder-cc.yml:
--------------------------------------------------------------------------------
1 | {% set repetitions = repetitions or 2 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | Authenticate.validate_cinder:
7 | -
8 | args:
9 | repetitions: {{repetitions}}
10 | context:
11 | users:
12 | project_domain: "default"
13 | resource_management_workers: 30
14 | tenants: 1
15 | user_domain: "default"
16 | users_per_tenant: 8
17 | runner:
18 | concurrency: {{concurrency}}
19 | times: {{times}}
20 | type: "constant"
21 | sla:
22 | max_avg_duration: {{sla_max_avg_duration}}
23 | max_seconds_per_iteration: {{sla_max_seconds}}
24 | failure_rate:
25 | max: {{sla_max_failure}}
26 |
--------------------------------------------------------------------------------
/rally/authenticate/validate_glance-cc.yml:
--------------------------------------------------------------------------------
1 | {% set repetitions = repetitions or 2 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | Authenticate.validate_glance:
7 | -
8 | args:
9 | repetitions: {{repetitions}}
10 | context:
11 | users:
12 | project_domain: "default"
13 | resource_management_workers: 30
14 | tenants: 1
15 | user_domain: "default"
16 | users_per_tenant: 8
17 | runner:
18 | concurrency: {{concurrency}}
19 | times: {{times}}
20 | type: "constant"
21 | sla:
22 | max_avg_duration: {{sla_max_avg_duration}}
23 | max_seconds_per_iteration: {{sla_max_seconds}}
24 | failure_rate:
25 | max: {{sla_max_failure}}
26 |
--------------------------------------------------------------------------------
/rally/authenticate/validate_monasca-cc.yml:
--------------------------------------------------------------------------------
1 | {% set repetitions = repetitions or 2 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | Authenticate.validate_monasca:
7 | -
8 | args:
9 | repetitions: {{repetitions}}
10 | context:
11 | users:
12 | project_domain: "default"
13 | resource_management_workers: 30
14 | tenants: 1
15 | user_domain: "default"
16 | users_per_tenant: 8
17 | runner:
18 | concurrency: {{concurrency}}
19 | times: {{times}}
20 | type: "constant"
21 | sla:
22 | max_avg_duration: {{sla_max_avg_duration}}
23 | max_seconds_per_iteration: {{sla_max_seconds}}
24 | failure_rate:
25 | max: {{sla_max_failure}}
26 |
--------------------------------------------------------------------------------
/rally/authenticate/validate_neutron-cc.yml:
--------------------------------------------------------------------------------
1 | {% set repetitions = repetitions or 2 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | Authenticate.validate_neutron:
7 | -
8 | args:
9 | repetitions: {{repetitions}}
10 | context:
11 | users:
12 | project_domain: "default"
13 | resource_management_workers: 30
14 | tenants: 1
15 | user_domain: "default"
16 | users_per_tenant: 8
17 | runner:
18 | concurrency: {{concurrency}}
19 | times: {{times}}
20 | type: "constant"
21 | sla:
22 | max_avg_duration: {{sla_max_avg_duration}}
23 | max_seconds_per_iteration: {{sla_max_seconds}}
24 | failure_rate:
25 | max: {{sla_max_failure}}
26 |
--------------------------------------------------------------------------------
/rally/rally-plugins/subnet-router-create/subnet-router-create.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | NeutronPlugin.create_router_and_net:
6 | -
7 | args:
8 | network_create_args: {}
9 | num_networks: {{num_networks}}
10 | runner:
11 | concurrency: {{concurrency}}
12 | times: {{times}}
13 | type: "constant"
14 | context:
15 | users:
16 | tenants: 1
17 | users_per_tenant: 8
18 | quotas:
19 | neutron:
20 | network: -1
21 | port: -1
22 | router: -1
23 | subnet: -1
24 | sla:
25 | max_avg_duration: {{sla_max_avg_duration}}
26 | max_seconds_per_iteration: {{sla_max_seconds}}
27 | failure_rate:
28 | max: {{sla_max_failure}}
29 |
--------------------------------------------------------------------------------
/rally/rally-plugins/netcreate-boot-ping/netcreate_nova-boot-fip-ping.json:
--------------------------------------------------------------------------------
1 | {
2 | "NeutronBootFipPingPlugin.create_network_nova_boot_ping": [
3 | {
4 | "args": {
5 | "floating": True,
6 | "flavor": {
7 | "name": "{{flavor_name}}"
8 | },
9 | "image": {
10 | "name": "{{image_name}}"
11 | },
12 | "ext_net": {
13 | "id": "{{net_id}}"
14 | },
15 | "network_create_args": {}
16 | },
17 | "runner": {
18 | "type": "serial",
19 | "times": 1
20 | },
21 | "context": {
22 | "users": {
23 | "tenants": 1,
24 | "users_per_tenant": 1
25 | }
26 | }
27 | }
28 | ]
29 | }
30 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_tenant_with_users-cc.yml:
--------------------------------------------------------------------------------
1 | {% set users_per_tenant = users_per_tenant or 5 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | KeystoneBasic.create_tenant_with_users:
7 | -
8 | args:
9 | users_per_tenant: {{users_per_tenant}}
10 | context:
11 | users:
12 | project_domain: "default"
13 | resource_management_workers: 30
14 | tenants: 1
15 | user_domain: "default"
16 | users_per_tenant: 8
17 | runner:
18 | concurrency: {{concurrency}}
19 | times: {{times}}
20 | type: "constant"
21 | sla:
22 | max_avg_duration: {{sla_max_avg_duration}}
23 | max_seconds_per_iteration: {{sla_max_seconds}}
24 | failure_rate:
25 | max: {{sla_max_failure}}
26 |
--------------------------------------------------------------------------------
/rally/keystonebasic/create_user_set_enabled_and_delete-cc.yml:
--------------------------------------------------------------------------------
1 | {% set enabled_flag = enabled_flag or true %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | KeystoneBasic.create_user_set_enabled_and_delete:
7 | -
8 | args:
9 | enabled: {{enabled_flag}}
10 | context:
11 | users:
12 | project_domain: "default"
13 | resource_management_workers: 30
14 | tenants: 1
15 | user_domain: "default"
16 | users_per_tenant: 8
17 | runner:
18 | concurrency: {{concurrency}}
19 | times: {{times}}
20 | type: "constant"
21 | sla:
22 | max_avg_duration: {{sla_max_avg_duration}}
23 | max_seconds_per_iteration: {{sla_max_seconds}}
24 | failure_rate:
25 | max: {{sla_max_failure}}
26 |
--------------------------------------------------------------------------------
/ansible/install/dashboards-openstack.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Builds OpenStack dashboards based on your Ansible hosts
4 | #
5 |
6 | - hosts: localhost
7 | gather_facts: false
8 | vars:
9 | ansible_connection: local
10 | overwrite_existing: true
11 | dashboards:
12 | - template_name: openstack
13 | template_node_type: undercloud
14 | process_list_name: Openstack-Undercloud
15 | - template_name: openstack
16 | template_node_type: controller
17 | process_list_name: Openstack-Controller
18 | - template_name: openstack
19 | template_node_type: compute
20 | process_list_name: Openstack-Compute
21 | - template_name: openstack
22 | template_node_type: ceph
23 | process_list_name: Openstack-Ceph
24 | - template_name: openstack
25 | template_node_type: "*"
26 | process_list_name: Openstack
27 | roles:
28 | - dashboard-openstack
29 |
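30 | # One dashboard is rendered per list entry; the "*" node type entry covers
31 | # all node types in a single dashboard.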
--------------------------------------------------------------------------------
/ansible/browbeat/roles/keystone-token/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Keystone change token provider handlers
4 | #
5 |
6 | - name: pacemaker default unmanaged
7 | command: pcs property set is-managed-default=false
8 |
9 | - name: stop keystone service
10 | service: name=openstack-keystone state=stopped
11 | when: "'httpd' in '{{ keystone_deployment }}'"
12 |
13 | - name: restart httpd service
14 | service: name=httpd state=restarted
15 | when: "'httpd' in '{{ keystone_deployment }}'"
16 |
17 | - name: restart keystone service
18 | service: name=openstack-keystone state=restarted
19 | when: "'eventlet' in '{{ keystone_deployment }}'"
20 |
21 | - name: pacemaker default managed
22 | command: pcs property set is-managed-default=true
23 | when: "'eventlet' in '{{ keystone_deployment }}'"
24 |
25 | - name: pacemaker cleanup keystone
26 | command: pcs resource cleanup openstack-keystone
27 | when: "'eventlet' in '{{ keystone_deployment }}'"
--------------------------------------------------------------------------------
/ansible/gather/roles/compute/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Compute Tasks for gathering facts
4 | #
5 | - name: Get ovs version
6 | shell: ovs-vswitchd --version | grep vSwitch | awk '{print $4}'
7 | register: ovs_version
8 |
9 | - name: Set ovs version fact
10 | set_fact:
11 | openstack_ovs_version: "{{ ovs_version.stdout }}"
12 |
13 | - name: Get neutron ovs agent ovsdb setting
14 | shell: crudini --get /etc/neutron/plugins/ml2/openvswitch_agent.ini ovs ovsdb_interface
15 | register: ovsdb_status
16 | ignore_errors: true
17 |
18 | - name: Set Neutron OVS ovsdb fact
19 | set_fact:
20 | openstack_neutron_ovsdb: "{{ ovsdb_status.stdout }}"
21 | when: (ovsdb_status.stdout.find('native') != -1 or ovsdb_status.stdout.find('vsctl') != -1)
22 |
23 | - name: Set Neutron OVS ovsdb fact
24 | set_fact:
25 | openstack_neutron_ovsdb: "vsctl"
26 | when: (ovsdb_status.stdout.find('native') == -1 and ovsdb_status.stdout.find('vsctl') == -1)
27 |
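28 | # The second set_fact is the fallback branch: when crudini reports neither
29 | # "native" nor "vsctl", the agent default of "vsctl" is assumed.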
--------------------------------------------------------------------------------
/rally/neutron/neutron-create-list-security-group-cc.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | NeutronSecurityGroup.create_and_list_security_groups:
6 | -
7 | args:
8 | security_group_create_args: ""
9 | runner:
10 | concurrency: {{concurrency}}
11 | times: {{times}}
12 | type: "constant"
13 | context:
14 | users:
15 | tenants: 1
16 | users_per_tenant: 8
17 | quotas:
18 | neutron:
19 | network: -1
20 | port: -1
21 | router: -1
22 | subnet: -1
23 | security_group: -1
24 | sla:
25 | max_avg_duration: {{sla_max_avg_duration}}
26 | max_seconds_per_iteration: {{sla_max_seconds}}
27 | failure_rate:
28 | max: {{sla_max_failure}}
29 |
--------------------------------------------------------------------------------
/rally/rally-plugins/netcreate-boot/netcreate_boot.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | NeutronPlugin.create_network_nova_boot:
6 | -
7 | args:
8 | flavor:
9 | name: '{{flavor_name}}'
10 | image:
11 | name: '{{image_name}}'
12 | network_create_args: {}
13 | runner:
14 | concurrency: {{concurrency}}
15 | times: {{times}}
16 | type: "constant"
17 | context:
18 | users:
19 | tenants: 1
20 | users_per_tenant: 8
21 | quotas:
22 | neutron:
23 | network: -1
24 | port: -1
25 | router: -1
26 | subnet: -1
27 | sla:
28 | max_avg_duration: {{sla_max_avg_duration}}
29 | max_seconds_per_iteration: {{sla_max_seconds}}
30 | failure_rate:
31 | max: {{sla_max_failure}}
32 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/neutron-firewall/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Neutron handlers for browbeat adjustment
4 | #
5 |
6 | - name: unmanage neutron services
7 | command: pcs resource unmanage {{ item }}
8 | with_items:
9 | - neutron-openvswitch-agent
10 | - neutron-server
11 | - neutron-l3-agent
12 | ignore_errors: true
13 |
14 | - name: restart neutron services
15 | service: name={{ item }} state=restarted
16 | with_items:
17 | - neutron-openvswitch-agent
18 | - neutron-server
19 | - neutron-l3-agent
20 |
21 | - name: manage neutron services
22 | command: pcs resource manage {{ item }}
23 | with_items:
24 | - neutron-openvswitch-agent
25 | - neutron-server
26 | - neutron-l3-agent
27 | ignore_errors: true
28 |
29 | - name: cleanup neutron services
30 | command: pcs resource cleanup {{ item }}
31 | with_items:
32 | - neutron-openvswitch-agent
33 | - neutron-server
34 | - neutron-l3-agent
35 | ignore_errors: true
36 |
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-generic/files/cfme-http.conf:
--------------------------------------------------------------------------------
1 | # Deployed by Browbeat collectd-generic for cfme nodes.
2 | ## CFME HTTP Virtual Host Context
3 |
4 | # Timeout: The number of seconds before receives and sends time out.
5 | Timeout 120
6 |
7 | # HTTP Start-up error log
8 | ErrorLog /var/www/miq/vmdb/log/apache/miq_apache.log
9 |
10 | # Disable this section if using HTTP only
11 | RewriteEngine On
12 | Options SymLinksIfOwnerMatch
13 | RewriteCond %{SERVER_PORT} !^443$
14 | RewriteRule ^.*$ https://%{SERVER_NAME}%{REQUEST_URI} [L,R]
15 |
16 | # Collectd configuration enable mod_status and virtual host on port 80
17 |
18 | # Enable this section if using HTTP only
19 |
20 | # Include conf.d/cfme-redirects-ui
21 | # Include conf.d/cfme-redirects-ws
22 | ProxyPreserveHost on
23 |
24 |
25 | ExtendedStatus on
26 | <Location /server-status>
27 | SetHandler server-status
28 | Order deny,allow
29 | Deny from all
30 | Allow from 127.0.0.1
31 | </Location>
32 |
--------------------------------------------------------------------------------
/rally/neutron/neutron-create-list-port-cc.yml:
--------------------------------------------------------------------------------
1 | {% set ports_per_network = ports_per_network or 4 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | NeutronNetworks.create_and_list_ports:
7 | -
8 | args:
9 | network_create_args: ""
10 | ports_per_network: {{ports_per_network}}
11 | runner:
12 | concurrency: {{concurrency}}
13 | times: {{times}}
14 | type: "constant"
15 | context:
16 | users:
17 | tenants: 1
18 | users_per_tenant: 8
19 | quotas:
20 | neutron:
21 | network: -1
22 | port: -1
23 | router: -1
24 | subnet: -1
25 | sla:
26 | max_avg_duration: {{sla_max_avg_duration}}
27 | max_seconds_per_iteration: {{sla_max_seconds}}
28 | failure_rate:
29 | max: {{sla_max_failure}}
30 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/nova-db/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Nova handlers for browbeat adjustment
4 | #
5 |
6 | - name: unmanage nova services
7 | command: pcs resource unmanage {{ item }}
8 | with_items:
9 | - openstack-nova-api
10 | - openstack-nova-scheduler
11 | - openstack-nova-conductor
12 | ignore_errors: true
13 |
14 | - name: restart nova services
15 | service: name={{ item }} state=restarted
16 | with_items:
17 | - openstack-nova-api
18 | - openstack-nova-scheduler
19 | - openstack-nova-conductor
20 |
21 | - name: manage nova services
22 | command: pcs resource manage {{ item }}
23 | with_items:
24 | - openstack-nova-api
25 | - openstack-nova-scheduler
26 | - openstack-nova-conductor
27 | ignore_errors: true
28 |
29 | - name: cleanup nova services
30 | command: pcs resource cleanup {{ item }}
31 | with_items:
32 | - openstack-nova-api
33 | - openstack-nova-scheduler
34 | - openstack-nova-conductor
35 | ignore_errors: true
36 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/nova-workers/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Nova handlers for browbeat adjustment
4 | #
5 |
6 | - name: unmanage nova services
7 | command: pcs resource unmanage {{ item }}
8 | with_items:
9 | - openstack-nova-api
10 | - openstack-nova-scheduler
11 | - openstack-nova-conductor
12 | ignore_errors: true
13 |
14 | - name: restart nova services
15 | service: name={{ item }} state=restarted
16 | with_items:
17 | - openstack-nova-api
18 | - openstack-nova-scheduler
19 | - openstack-nova-conductor
20 |
21 | - name: manage nova services
22 | command: pcs resource manage {{ item }}
23 | with_items:
24 | - openstack-nova-api
25 | - openstack-nova-scheduler
26 | - openstack-nova-conductor
27 | ignore_errors: true
28 |
29 | - name: cleanup nova services
30 | command: pcs resource cleanup {{ item }}
31 | with_items:
32 | - openstack-nova-api
33 | - openstack-nova-scheduler
34 | - openstack-nova-conductor
35 | ignore_errors: true
36 |
--------------------------------------------------------------------------------
/ansible/install/roles/nova/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Nova handlers for browbeat install connmon
4 | #
5 |
6 | - name: unmanage nova services
7 | command: pcs resource unmanage {{ item }}
8 | with_items:
9 | - openstack-nova-api
10 | - openstack-nova-scheduler
11 | - openstack-nova-conductor
12 | ignore_errors: true
13 |
14 | - name: restart nova services
15 | service: name={{ item }} state=restarted
16 | with_items:
17 | - openstack-nova-api
18 | - openstack-nova-scheduler
19 | - openstack-nova-conductor
20 |
21 | - name: manage nova services
22 | command: pcs resource manage {{ item }}
23 | with_items:
24 | - openstack-nova-api
25 | - openstack-nova-scheduler
26 | - openstack-nova-conductor
27 | ignore_errors: true
28 |
29 | - name: cleanup nova services
30 | command: pcs resource cleanup {{ item }}
31 | with_items:
32 | - openstack-nova-api
33 | - openstack-nova-scheduler
34 | - openstack-nova-conductor
35 | ignore_errors: true
36 |
--------------------------------------------------------------------------------
/ansible/install/roles/graphite/files/setup-graphite-db.exp:
--------------------------------------------------------------------------------
1 | #!/usr/bin/expect
2 | #
3 | # non-interactive setup of the initial graphite.db sqlite3 database
4 | #
5 | # if run manually, the following is the expected output.
6 | # Would you like to create one now? (yes/no): yes
7 | # Username (leave blank to use 'root'):
8 | # Email address:
9 | # Password:
10 | # Password (again):
11 | # Superuser created successfully.
12 | # Installing custom SQL ...
13 | # Installing indexes ...
14 | # Installed 0 object(s) from 0 fixture(s)
15 |
16 | set timeout 20
17 | set superuser [lindex $argv 0]
18 | set password [lindex $argv 1]
19 | spawn /usr/lib/python2.7/site-packages/graphite/manage.py syncdb
20 | expect "Would you like to create one now? (yes/no):"
21 | send "yes\r";
22 | expect "Username (leave blank to use 'root'):"
23 | send "$superuser\r";
24 | expect "Email address:"
25 | send "\r";
26 | expect "Password:"
27 | send "$password\r";
28 | expect "Password (again):"
29 | send "$password\r";
30 |
31 | interact
32 |
33 |
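34 | # usage sketch (hypothetical credentials):
35 | #   ./setup-graphite-db.exp root secretpass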
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-generic/templates/ose-metrics.py.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import requests
4 | import time
5 |
6 | from requests.packages.urllib3.exceptions import InsecureRequestWarning
7 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
8 |
9 | interval = {{collectd_interval}}
10 | hostname = "{{inventory_hostname}}"
11 |
12 | while True:
13 | time_stamp = int(time.time())
14 | next_start = time_stamp + interval
15 | r = requests.get('https://127.0.0.1:8443/metrics', cert=('/etc/origin/master/admin.crt', '/etc/origin/master/admin.key'), verify=False)
16 | for values in [line.strip() for line in r.text.splitlines()]:
17 | if values and not values.startswith('#') and '{' not in values:  # skip blanks, comments, labeled metrics
18 | metric_name = values.split(' ')[0]
19 | metric_value = values.split(' ')[1]
20 | print 'PUTVAL {0}/ose/gauge-{1} {2}:{3}'.format(hostname, metric_name, time_stamp, metric_value)
21 | endtime = time.time()
22 | if endtime < next_start:
23 | time.sleep(next_start - endtime)
24 |
--------------------------------------------------------------------------------
/ansible/install/roles/keystone/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Keystone handlers for browbeat install connmon
4 | #
5 |
6 | #
7 | # Restart keystone when in httpd
8 | #
9 |
10 | - name: restart httpd
11 | service: name=httpd state=restarted
12 | when: "'httpd' == '{{ keystone_deployment }}'"
13 |
14 | #
15 | # Restart keystone when in eventlet
16 | #
17 |
18 | - name: unmanage keystone
19 | command: pcs resource unmanage openstack-keystone
20 | when: "'eventlet' == '{{ keystone_deployment }}'"
21 | ignore_errors: true
22 |
23 | - name: restart keystone
24 | service: name=openstack-keystone state=restarted
25 | when: "'eventlet' == '{{ keystone_deployment }}'"
26 |
27 | - name: manage keystone
28 | command: pcs resource manage openstack-keystone
29 | when: "'eventlet' == '{{ keystone_deployment }}'"
30 | ignore_errors: true
31 |
32 | - name: cleanup keystone
33 | command: pcs resource cleanup openstack-keystone
34 | when: "'eventlet' == '{{ keystone_deployment }}'"
35 | ignore_errors: true
36 |
--------------------------------------------------------------------------------
/ansible/check/roles/ceph/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Checks specific to ceph nodes
4 | #
5 |
6 | - name: Check Ceph cluster health status
7 | shell: ceph status
8 | register: ceph_status
9 | failed_when: "'HEALTH_OK' not in '{{ ceph_status.stdout }}'"
10 | changed_when: false
11 | ignore_errors: True
12 |
13 | - name: Verify RBD caching
14 | shell: ceph --admin-daemon `ls /var/run/ceph/ceph-osd.*.asok|tail -1` config show|grep '"rbd_cache":'|grep -i true|awk '{print tolower($0)}'
15 | register: ceph_rbd_caching
16 | failed_when: "'true' not in '{{ ceph_rbd_caching.stdout }}'"
17 | changed_when: false
18 | ignore_errors: True
19 |
20 | - name: Verify RBD cache writethrough
21 | shell: ceph --admin-daemon `ls /var/run/ceph/ceph-osd.*.asok|tail -1` config show|grep "rbd_cache_writethrough"|grep -i true|awk '{print tolower($0)}'
22 | register: ceph_rbd_cache_writethrough
23 | failed_when: "'true' not in '{{ ceph_rbd_cache_writethrough.stdout }}'"
24 | changed_when: false
25 | ignore_errors: True
26 |
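27 | # Every check sets ignore_errors so the play continues; a failing
28 | # failed_when condition only marks that individual check as failed.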
--------------------------------------------------------------------------------
/ansible/install/roles/cinder/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Cinder handlers for browbeat install connmon
4 | #
5 |
6 | - name: unmanage cinder services
7 | command: pcs resource unmanage {{ item }}
8 | with_items:
9 | - openstack-cinder-api
10 | - openstack-cinder-scheduler
11 | - openstack-cinder-volume
12 | ignore_errors: true
13 |
14 | - name: restart cinder services
15 | service: name={{ item }} state=restarted
16 | with_items:
17 | - openstack-cinder-api
18 | - openstack-cinder-scheduler
19 | - openstack-cinder-volume
20 |
21 | - name: manage cinder services
22 | command: pcs resource manage {{ item }}
23 | with_items:
24 | - openstack-cinder-api
25 | - openstack-cinder-scheduler
26 | - openstack-cinder-volume
27 | ignore_errors: true
28 |
29 | - name: cleanup cinder services
30 | command: pcs resource cleanup {{ item }}
31 | with_items:
32 | - openstack-cinder-api
33 | - openstack-cinder-scheduler
34 | - openstack-cinder-volume
35 | ignore_errors: true
36 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/keystone-workers/templates/keystone_wsgi.conf.j2:
--------------------------------------------------------------------------------
1 |
2 | ServerName keystone_wsgi_{{ item.interface }}
3 |
4 | ## Vhost docroot
5 | DocumentRoot "/var/www/cgi-bin/keystone"
6 |
7 | ## Directories, there should at least be a declaration for /var/www/cgi-bin/keystone
8 |
9 | <Directory "/var/www/cgi-bin/keystone">
10 | Options Indexes FollowSymLinks MultiViews
11 | AllowOverride None
12 | Require all granted
13 | </Directory>
14 |
15 | ## Logging
16 | ErrorLog "/var/log/httpd/keystone_wsgi_{{ item.interface }}_error.log"
17 | LogLevel info
18 | ServerSignature Off
19 | CustomLog "/var/log/httpd/keystone_wsgi_{{ item.interface }}_access.log" combined
20 | WSGIDaemonProcess keystone_{{ item.interface }} display-name=keystone-{{ item.interface }} group=keystone processes={{ item.processes }} threads={{ item.threads }} user=keystone
21 | WSGIProcessGroup keystone_{{ item.interface }}
22 | WSGIScriptAlias / "/var/www/cgi-bin/keystone/{{ item.interface }}"
23 |
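24 | # Rendered once per API interface (item.interface); each vhost gets its own
25 | # WSGI daemon group sized by item.processes and item.threads.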
--------------------------------------------------------------------------------
/rally/neutron/neutron-create-list-subnet-cc.yml:
--------------------------------------------------------------------------------
1 | {% set subnets_per_network = subnets_per_network or 2 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | NeutronNetworks.create_and_list_subnets:
7 | -
8 | args:
9 | network_create_args: ""
10 | subnet_create_args: ""
11 | subnet_cidr_start: "1.1.0.0/30"
12 | subnets_per_network: {{subnets_per_network}}
13 | runner:
14 | concurrency: {{concurrency}}
15 | times: {{times}}
16 | type: "constant"
17 | context:
18 | users:
19 | tenants: 1
20 | users_per_tenant: 8
21 | quotas:
22 | neutron:
23 | network: -1
24 | port: -1
25 | router: -1
26 | subnet: -1
27 | sla:
28 | max_avg_duration: {{sla_max_avg_duration}}
29 | max_seconds_per_iteration: {{sla_max_seconds}}
30 | failure_rate:
31 | max: {{sla_max_failure}}
32 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/keystone-workers/files/keystone_httpd:
--------------------------------------------------------------------------------
1 | # Copyright 2013 OpenStack Foundation
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 | # not use this file except in compliance with the License. You may obtain
5 | # a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 | # License for the specific language governing permissions and limitations
13 | # under the License.
14 |
15 | import os
16 |
17 | from keystone.server import wsgi as wsgi_server
18 |
19 |
20 | name = os.path.basename(__file__)
21 |
22 | # NOTE(ldbragst): 'application' is required in this context by WSGI spec.
23 | # The following is a reference to Python Paste Deploy documentation
24 | # http://pythonpaste.org/deploy/
25 | application = wsgi_server.initialize_application(name)
26 |
--------------------------------------------------------------------------------
/ansible/check/group_vars/compute:
--------------------------------------------------------------------------------
1 | ---
2 | ansible_become: true
3 | reserved_host_memory_check: 2048
4 | tuned_profile: virtual-host
5 | nova_vif_timeout: 300
6 |
7 | checks:
8 | bz1245714:
9 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1245714"
10 | name: "No Swap Space allocated"
11 | bz1282644:
12 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1282644"
13 | name: "increase reserved_host_memory_mb"
14 | tuned_profile_result:
15 | url: "none"
16 | name: "Ensure Tuned Profile is set to virtual-host"
17 | nova_vif_timeout_result:
18 | url: "none"
19 | name: "Nova VIF timeout should be >= 300"
20 | # neutron_nova_creds:
21 | # url: "https://bugzilla.redhat.com/show_bug.cgi?id=1264740"
22 | # name: "RHEL OSP Director must be configured with nova-event-callback by default"
23 | bz1264740:
24 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1264740"
25 | name: "RHEL OSP Director must be configured with nova-event-callback by default"
26 |
27 |
28 |
29 | # vi:syntax=yaml
30 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/keystone-workers/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Keystone handlers for browbeat adjustment
4 | #
5 |
6 | - name: pacemaker unmanaged default
7 | command: pcs property set is-managed-default=false
8 | ignore_errors: true
9 |
10 | - name: stop keystone eventlet
11 | service: name=openstack-keystone state=stopped
12 | when: "'httpd' in '{{ keystone_deployment }}'"
13 | ignore_errors: true
14 |
15 | - name: restart httpd
16 | service: name=httpd state=restarted
17 |
18 | - name: restart keystone
19 | service: name=openstack-keystone state=restarted
20 | when: "'eventlet' in '{{ keystone_deployment }}'"
21 |
22 | - name: pacemaker managed default
23 | command: pcs property set is-managed-default=true
24 | when: "'eventlet' in '{{ keystone_deployment }}'"
25 | ignore_errors: true
26 |
27 | - name: cleanup keystone
28 | command: pcs resource cleanup openstack-keystone
29 | when: "'eventlet' in '{{ keystone_deployment }}'"
30 | ignore_errors: true
31 |
32 | - name: cleanup httpd
33 | command: pcs resource cleanup httpd
34 | ignore_errors: true
35 |
--------------------------------------------------------------------------------
/rally/neutron/neutron-create-list-router-cc.yml:
--------------------------------------------------------------------------------
1 | {% set subnets_per_network = subnets_per_network or 2 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | NeutronNetworks.create_and_list_routers:
7 | -
8 | args:
9 | network_create_args: ""
10 | subnet_create_args: ""
11 | subnet_cidr_start: "1.1.0.0/30"
12 | subnets_per_network: {{subnets_per_network}}
13 | router_create_args: ""
14 | runner:
15 | concurrency: {{concurrency}}
16 | times: {{times}}
17 | type: "constant"
18 | context:
19 | users:
20 | tenants: 1
21 | users_per_tenant: 8
22 | quotas:
23 | neutron:
24 | network: -1
25 | port: -1
26 | router: -1
27 | subnet: -1
28 | sla:
29 | max_avg_duration: {{sla_max_avg_duration}}
30 | max_seconds_per_iteration: {{sla_max_seconds}}
31 | failure_rate:
32 | max: {{sla_max_failure}}
33 |
--------------------------------------------------------------------------------
/rally/nova/nova-boot-list-cc.yml:
--------------------------------------------------------------------------------
1 | {% set image_name = image_name or "centos7" %}
2 | {% set flavor_name = flavor_name or "m1.small" %}
3 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
4 | {% set sla_max_failure = sla_max_failure or 0 %}
5 | {% set sla_max_seconds = sla_max_seconds or 60 %}
6 | ---
7 | NovaServers.boot_and_list_server:
8 | -
9 | args:
10 | flavor:
11 | name: {{flavor_name}}
12 | image:
13 | name: {{image_name}}
14 | detailed: true
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | context:
20 | users:
21 | tenants: 1
22 | users_per_tenant: 1
23 | quotas:
24 | neutron:
25 | network: -1
26 | port: -1
27 | nova:
28 | instances: -1
29 | cores: -1
30 | ram: -1
31 | sla:
32 | max_avg_duration: {{sla_max_avg_duration}}
33 | max_seconds_per_iteration: {{sla_max_seconds}}
34 | failure_rate:
35 | max: {{sla_max_failure}}
36 |
--------------------------------------------------------------------------------
/ansible/install/roles/keystone/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Keystone connmon tasks
4 | #
5 |
6 | - name: Determine if keystone is deployed in eventlet
7 | shell: ps afx | grep "[Kk]eystone-all" -c
8 | register: deployed
9 | ignore_errors: true
10 | changed_when: false
11 |
12 | - name: Set keystone_deployment variable/fact to httpd
13 | set_fact: keystone_deployment='httpd'
14 | when: deployed.stdout|int == 0
15 |
16 | - name: Set keystone_deployment variable/fact to eventlet
17 | set_fact: keystone_deployment='eventlet'
18 | when: deployed.stdout|int > 0
19 |
20 | #
21 | # Configure connmon in keystone.conf
22 | #
23 |
24 | - name: Check for connmon in keystone.conf
25 | shell: grep -Eq 'connection\s?=\s?mysql:' /etc/keystone/keystone.conf
26 | register: keystone_mysql
27 | ignore_errors: true
28 | changed_when: false
29 |
30 | - name: Enable connmon in keystone.conf
31 | shell: sed -i 's/mysql:/mysql+connmon:/g' /etc/keystone/keystone.conf
32 | when: keystone_mysql.rc == 0
33 | notify:
34 | - restart httpd
35 | - unmanage keystone
36 | - restart keystone
37 | - manage keystone
38 | - cleanup keystone
39 |
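40 | # The sed rewrites "connection = mysql:..." to "mysql+connmon:...", pointing
41 | # SQLAlchemy at the connmon-wrapped driver so database calls can be timed.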
--------------------------------------------------------------------------------
/rally/nova/nova-boot-snapshot-cc.yml:
--------------------------------------------------------------------------------
1 | {% set image_name = image_name or "centos7" %}
2 | {% set flavor_name = flavor_name or "m1.small" %}
3 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
4 | {% set sla_max_failure = sla_max_failure or 0 %}
5 | {% set sla_max_seconds = sla_max_seconds or 60 %}
6 | ---
7 | NovaServers.snapshot_server:
8 | -
9 | args:
10 | flavor:
11 | name: "{{flavor_name}}"
12 | image:
13 | name: "{{image_name}}"
14 | force_delete: false
15 | detailed: true
16 | runner:
17 | concurrency: {{concurrency}}
18 | times: {{times}}
19 | type: "constant"
20 | context:
21 | users:
22 | tenants: 1
23 | users_per_tenant: 1
24 | quotas:
25 | neutron:
26 | network: -1
27 | port: -1
28 | nova:
29 | instances: -1
30 | cores: -1
31 | ram: -1
32 | sla:
33 | max_avg_duration: {{sla_max_avg_duration}}
34 | max_seconds_per_iteration: {{sla_max_seconds}}
35 | failure_rate:
36 | max: {{sla_max_failure}}
37 |
38 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/neutron-workers/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Neutron tasks for Browbeat
4 | # * Can change worker count
5 | #
6 |
7 | - name: Configure neutron.conf
8 | ini_file:
9 | dest: /etc/neutron/neutron.conf
10 | mode: 0640
11 | section: "{{ item.section }}"
12 | option: "{{ item.option }}"
13 | value: "{{ item.value }}"
14 | backup: yes
15 | with_items:
16 | - { section: DEFAULT, option: api_workers, value: "{{ workers }}" }
17 | - { section: DEFAULT, option: rpc_workers, value: "{{ workers }}" }
18 | notify:
19 | - unmanage neutron services
20 | - restart neutron services
21 | - manage neutron services
22 | - cleanup neutron services
23 |
24 | - name: Configure metadata_agent.ini
25 | ini_file:
26 | dest: /etc/neutron/metadata_agent.ini
27 | mode: 0640
28 | section: "{{ item.section }}"
29 | option: "{{ item.option }}"
30 | value: "{{ item.value }}"
31 | backup: yes
32 | with_items:
33 | - { section: DEFAULT, option: metadata_workers, value: "{{ workers }}" }
34 | notify:
35 | - unmanage neutron services
36 | - restart neutron services
37 | - manage neutron services
38 | - cleanup neutron services
39 |
--------------------------------------------------------------------------------
/ansible/install/roles/dashboard-openstack/fix-ids.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import argparse
3 | import json
4 |
5 |
6 | def main():
7 | """Script used to fix panel ids in static dashboards. Typically adding a new panel or row into
8 | a static dashboard will involve re-ordering all subsequent panels. This script automates that.
9 | """
10 | parser = argparse.ArgumentParser(description="Fix panel ids in grafana json dashboard.")
11 | parser.add_argument('inputfile', help='Input json file')
12 | parser.add_argument('outputfile', help='Output json file')
13 | args = parser.parse_args()
14 |
15 | with open(args.inputfile) as data_file:
16 | data = json.load(data_file)
17 |
18 | index = 0
19 | for row in data['dashboard']['rows']:
20 | for panel in row['panels']:
21 | index += 1
22 | if index != panel['id']:
23 | print("Fixing panel id ({}): {}".format(index, panel['title']))
24 | panel['id'] = index
25 |
26 | with open(args.outputfile, 'w') as outputfile:
27 | json.dump(data, outputfile, sort_keys=True, indent=2, separators=(',', ': '))
28 |
29 | if __name__ == "__main__":
30 | main()
31 |
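32 | # Example usage (hypothetical file names):
33 | #   python fix-ids.py openstack-dashboard.json openstack-dashboard-fixed.json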
--------------------------------------------------------------------------------
/rally/cinder/cinder-create-and-attach-volume-cc.yml:
--------------------------------------------------------------------------------
1 | {% set image_name = image_name or "centos7" %}
2 | {% set flavor_name = flavor_name or "m1.small" %}
3 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
4 | {% set sla_max_failure = sla_max_failure or 0 %}
5 | {% set sla_max_seconds = sla_max_seconds or 60 %}
6 | ---
7 | CinderVolumes.create_and_attach_volume:
8 | -
9 | args:
10 | size: 1
11 | image:
12 | name: {{image_name}}
13 | flavor:
14 | name: {{flavor_name}}
15 | runner:
16 | concurrency: {{concurrency}}
17 | times: {{times}}
18 | type: "constant"
19 | context:
20 | users:
21 | tenants: 2
22 | users_per_tenant: 2
23 | quotas:
24 | neutron:
25 | network: -1
26 | port: -1
27 | nova:
28 | instances: -1
29 | cores: -1
30 | ram: -1
31 | cinder:
32 | gigabytes: -1
33 | volumes: -1
34 | sla:
35 | max_avg_duration: {{sla_max_avg_duration}}
36 | max_seconds_per_iteration: {{sla_max_seconds}}
37 | failure_rate:
38 | max: {{sla_max_failure}}
39 |
--------------------------------------------------------------------------------
/ansible/install/roles/logstash/files/filebeat-index-template.json:
--------------------------------------------------------------------------------
1 | {
2 | "mappings": {
3 | "_default_": {
4 | "_all": {
5 | "enabled": true,
6 | "norms": {
7 | "enabled": false
8 | }
9 | },
10 | "dynamic_templates": [
11 | {
12 | "template1": {
13 | "mapping": {
14 | "doc_values": true,
15 | "ignore_above": 1024,
16 | "index": "not_analyzed",
17 | "type": "{dynamic_type}"
18 | },
19 | "match": "*"
20 | }
21 | }
22 | ],
23 | "properties": {
24 | "@timestamp": {
25 | "type": "date"
26 | },
27 | "message": {
28 | "type": "string",
29 | "index": "analyzed"
30 | },
31 | "offset": {
32 | "type": "long",
33 | "doc_values": "true"
34 | },
35 | "geoip" : {
36 | "type" : "object",
37 | "dynamic": true,
38 | "properties" : {
39 | "location" : { "type" : "geo_point" }
40 | }
41 | }
42 | }
43 | }
44 | },
45 | "settings": {
46 | "index.refresh_interval": "5s"
47 | },
48 | "template": "filebeat-*"
49 | }
50 |
--------------------------------------------------------------------------------
/ansible/install/roles/connmon/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Connmon Install
4 | #
5 |
6 | - name: Install pip
7 | easy_install: name=pip
8 |
9 | - name: Install connmon
10 | pip: name=connmon
11 |
12 | #
13 | # Connmon Setup
14 | #
15 |
16 | - name: Configure Connmon Host IP Address
17 | template:
18 | src: connmon.cfg.j2
19 | dest: /etc/connmon.cfg
20 | owner: root
21 | group: root
22 | mode: 0644
23 |
24 | - name: Install Screen for connmon
25 | yum: name=screen state=latest
26 | when: undercloud
27 |
28 | # To remove the screen session: screen -X -S connmond kill
29 | - name: Run connmond in screen session on undercloud
30 | command: screen -d -S connmond -m connmond
31 | when: undercloud
32 | changed_when: false
33 |
34 | - name: Change connmon result owner
35 | command: chown "{{ local_remote_user }}":"{{ local_remote_user }}" /tmp/connmon_results.csv
36 | when: undercloud
37 | changed_when: false
38 | ignore_errors: true
39 |
40 | - name: check iptables
41 | shell: iptables -nvL | grep -q "dpt:5800"
42 | changed_when: false
43 | when: undercloud
44 | register: connmon_port
45 | ignore_errors: true
46 |
47 | - name: open up iptables
48 | shell: /usr/sbin/iptables -I INPUT 1 -p tcp --dport 5800 -j ACCEPT
49 | when: undercloud and connmon_port.rc == 1
50 |
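51 | # A quick, hypothetical sanity check from the undercloud once this role runs:
52 | #   screen -ls | grep connmond     # collector screen session exists
53 | #   ss -tlnp | grep 5800           # collector port is listening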
--------------------------------------------------------------------------------
/ansible/install/roles/dashboard-generic/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Upload Generic Machine Dashboards to Grafana
4 | #
5 |
6 | - name: Remove existing dashboards
7 | command: "curl -X DELETE -H 'Content-Type: application/json' http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/dashboards/db/{{item.process_list_name|lower}}-general-system-performance"
8 | when: overwrite_existing
9 | with_items: "{{dashboards}}"
10 |
11 | - name: Ensure {{role_path}}/files directory exists
12 | file: path={{role_path}}/files state=directory
13 |
14 | - name: Generate dashboards
15 | template:
16 | src: "{{item.template_name}}_general_system_performance.json.j2"
17 | dest: "{{role_path}}/files/{{item.process_list_name}}_general_system_performance.json"
18 | with_items: "{{dashboards}}"
19 |
20 | - name: Upload dashboards to grafana
21 | command: "curl -X POST -H 'Content-Type: application/json' -d @{{role_path}}/files/{{item.process_list_name}}_general_system_performance.json http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/dashboards/db"
22 | with_items: "{{dashboards}}"
23 |
24 | - name: Remove leftover json file(s)
25 | file: path={{role_path}}/files/{{item.process_list_name}}_general_system_performance.json state=absent
26 | with_items: "{{dashboards}}"
27 |
--------------------------------------------------------------------------------
/ansible/install/dashboards-generic.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Builds and uploads dashboards to your grafana server for several host types
4 | #
5 |
6 | - hosts: localhost
7 | gather_facts: false
8 | vars:
9 | ansible_connection: local
10 | overwrite_existing: true
11 | dashboards:
12 | - template_name: baremetal
13 | process_list_name: Baremetal
14 | - template_name: guest
15 | process_list_name: Guest
16 | - template_name: cfme
17 | process_list_name: CFME
18 | - template_name: cfmeallinone
19 | process_list_name: CFME-All-In-One
20 | # - template_name: cfme
21 | # process_list_name: CFME-Amazon
22 | # - template_name: cfme
23 | # process_list_name: CFME-Azure
24 | - template_name: cfme
25 | process_list_name: CFME-Containers
26 | - template_name: cfme
27 | process_list_name: CFME-Microsoft
28 | - template_name: cfme
29 | process_list_name: CFME-Openstack
30 | - template_name: cfme
31 | process_list_name: CFME-RedHat
32 | - template_name: cfme
33 | process_list_name: CFME-VMware
34 | - template_name: graphite
35 | process_list_name: Graphite
36 | - template_name: ose
37 | process_list_name: OpenShift-Enterprise
38 | - template_name: satellite6
39 | process_list_name: Satellite6
40 | roles:
41 | - dashboard-generic
42 |
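43 | # Example invocation (inventory file name assumed):
44 | #   ansible-playbook -i hosts install/dashboards-generic.yml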
--------------------------------------------------------------------------------
/ansible/check/templates/bug_report.j2:
--------------------------------------------------------------------------------
1 | # Browbeat generated bug report
2 |
3 | {% for host in groups['undercloud'] %}
4 | ---------------------------------------
5 | | Issues for host : {{ host }}
6 | ---------------------------------------
7 | {% for check in hostvars[host]['checks'] %}
8 | {% if hostvars[host][check]['failed'] == true %}
9 | Bug: {{ check }}
10 | Name: {{ hostvars[host]['checks'][check]['name'] }}
11 | URL: {{ hostvars[host]['checks'][check]['url'] }}
12 |
13 | {% endif %}
14 | {% endfor %}
15 | {% endfor %}
16 |
17 | {% for host in groups['controller'] %}
18 | ---------------------------------------
19 | | Issues for host : {{ host }}
20 | ---------------------------------------
21 | {% for check in hostvars[host]['checks'] %}
22 | {% if hostvars[host][check]['failed'] == true %}
23 | Bug: {{ check }}
24 | Name: {{ hostvars[host]['checks'][check]['name'] }}
25 | URL: {{ hostvars[host]['checks'][check]['url'] }}
26 |
27 | {% endif %}
28 | {% endfor %}
29 | {% endfor %}
30 |
31 | {% for host in groups['compute'] %}
32 | ---------------------------------------
33 | | Issues for host : {{ host }}
34 | ---------------------------------------
35 | {% for check in hostvars[host]['checks'] %}
36 | {% if hostvars[host][check]['failed'] == true %}
37 | Bug: {{ check }}
38 | Name: {{ hostvars[host]['checks'][check]['name'] }}
39 | URL: {{ hostvars[host]['checks'][check]['url'] }}
40 |
41 | {% endif %}
42 | {% endfor %}
43 | {% endfor %}
44 |
--------------------------------------------------------------------------------
/rally/rally-plugins/netcreate-boot/netcreate_boot.py:
--------------------------------------------------------------------------------
1 | from rally.task import atomic
2 | from rally.task import scenario
3 | from rally.plugins.openstack.scenarios.nova import utils as nova_utils
4 | from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
5 | from rally.task import types
6 | from rally.task import utils as task_utils
7 | from rally.task import validation
8 |
9 | class NeutronPlugin(neutron_utils.NeutronScenario,
10 | nova_utils.NovaScenario,
11 | scenario.Scenario):
12 | @types.set(image=types.ImageResourceType,
13 | flavor=types.FlavorResourceType)
14 | @validation.image_valid_on_flavor("flavor", "image")
15 | @validation.required_openstack(users=True)
16 | @scenario.configure(context={"cleanup": ["nova","neutron"]})
17 | def create_network_nova_boot(self,image,flavor,num_networks=1,network_create_args=None,
18 | subnet_create_args=None,**kwargs):
19 | nets=[]
20 | for net in range(num_networks):  # iterate num_networks times so every network is created
21 | network = self._create_network(network_create_args or {})
22 | subnet = self._create_subnet(network, subnet_create_args or {})
23 | nets.append(network)
24 |
25 | kwargs["nics"] = []
26 | for net in nets:
27 | kwargs["nics"].append({'net-id':net['network']['id']})
28 |
29 | self._boot_server(image, flavor, **kwargs)
30 |
--------------------------------------------------------------------------------
/ansible/install/roles/grafana_docker/templates/grafana-server.service.j2:
--------------------------------------------------------------------------------
1 | # cat /etc/systemd/system/grafana-server.service
2 | # This is a systemd file to run this docker container under systemd.
3 | # To make this work:
4 | # * place this file in /etc/systemd/system and run the commands:
5 | #
6 | # systemctl daemon-reload
7 | # systemctl enable grafana-server
8 | # systemctl start grafana-server
9 | #
10 | [Unit]
11 | Description=grafana-server
12 | Requires=docker.service
13 | After=docker.service
14 |
15 | [Service]
16 | Type=simple
17 | TimeoutStartSec=5m
18 | # systemd syntax '=-' ignore errors from return codes.
19 | ExecStartPre=-/usr/bin/docker kill "grafana"
20 | ExecStartPre=-/usr/bin/docker rm "grafana"
21 | ExecStartPre=-/usr/bin/mkdir -p {{ persistent_grafana_data_path }}
22 | ExecStartPre=-/usr/bin/chmod 777 {{ persistent_grafana_data_path }}
23 |
24 | ExecStart=/usr/bin/docker run --name grafana -p {{ docker_grafana_port }}:3000 -v {{ persistent_grafana_data_path }}:/var/lib/grafana {{ grafana_docker_image }}
25 |
26 | ExecReload=-/usr/bin/docker stop "grafana"
27 | ExecReload=-/usr/bin/docker rm "grafana"
28 | ExecReload=/usr/bin/docker run --name grafana -p {{ docker_grafana_port }}:3000 -v {{ persistent_grafana_data_path }}:/var/lib/grafana {{ grafana_docker_image }}
29 |
30 | ExecStop=-/usr/bin/docker stop "grafana"
31 | ExecStop=-/usr/bin/docker rm "grafana"
32 |
33 | Restart=always
34 | RestartSec=30
35 |
36 | [Install]
37 | WantedBy=default.target
38 |
--------------------------------------------------------------------------------
/ansible/install/roles/graphite_docker/templates/graphite-web.service.j2:
--------------------------------------------------------------------------------
1 | # cat /etc/systemd/system/graphite-web.service
2 | # This is a systemd file to run this docker container under systemd.
3 | # To make this work:
4 | # * place this file in /etc/systemd/system and run the commands:
5 | #
6 | # systemctl daemon-reload
7 | # systemctl enable graphite-web
8 | # systemctl start graphite-web
9 | #
10 | [Unit]
11 | Description=graphite-web
12 | Requires=docker.service
13 | After=docker.service
14 |
15 | [Service]
16 | Type=simple
17 | TimeoutStartSec=5m
18 | # systemd syntax '=-' ignore errors from return codes.
19 | ExecStartPre=-/usr/bin/docker kill "graphite-web"
20 | ExecStartPre=-/usr/bin/docker rm "graphite-web"
21 | ExecStartPre=-/usr/bin/mkdir -p {{ persistent_carbon_data_path }}
22 | ExecStartPre=-/usr/bin/chmod 777 {{ persistent_carbon_data_path }}
23 |
24 | ExecStart=/usr/bin/docker run --name graphite-web -p {{ docker_graphite_port }}:80 -v {{ persistent_carbon_data_path }}:/var/lib/carbon/whisper {{ graphite_web_docker_image }}
25 |
26 | ExecReload=-/usr/bin/docker stop "graphite-web"
27 | ExecReload=-/usr/bin/docker rm "graphite-web"
28 | ExecReload=/usr/bin/docker run --name graphite-web -p {{ docker_graphite_port }}:80 -v {{ persistent_carbon_data_path }}:/var/lib/carbon/whisper {{ graphite_web_docker_image }}
29 |
30 | ExecStop=-/usr/bin/docker stop "graphite-web"
31 | ExecStop=-/usr/bin/docker rm "graphite-web"
32 |
33 | Restart=always
34 | RestartSec=30
35 |
36 | [Install]
37 | WantedBy=default.target
38 |
--------------------------------------------------------------------------------
/ansible/install/roles/graphite_docker/templates/carbon-cache.service.j2:
--------------------------------------------------------------------------------
1 | # cat /etc/systemd/system/carbon-cache.service
2 | # This is a systemd file to run this docker container under systemd.
3 | # To make this work:
4 | # * place this file in /etc/systemd/system and run the commands:
5 | #
6 | # systemctl daemon-reload
7 | # systemctl enable carbon-cache
8 | # systemctl start carbon-cache
9 | #
10 | [Unit]
11 | Description=carbon-cache
12 | Requires=docker.service
13 | After=docker.service
14 |
15 | [Service]
16 | Type=simple
17 | TimeoutStartSec=5m
18 | # systemd syntax '=-' ignore errors from return codes.
19 | ExecStartPre=-/usr/bin/docker kill "carbon-cache"
20 | ExecStartPre=-/usr/bin/docker rm "carbon-cache"
21 | ExecStartPre=-/usr/bin/mkdir -p {{ persistent_carbon_data_path }}
22 | ExecStartPre=-/usr/bin/chmod 777 {{ persistent_carbon_data_path }}
23 |
24 | ExecStart=/usr/bin/docker run --name carbon-cache -p {{ docker_carbon_cache_port }}:2003 -v {{ persistent_carbon_data_path }}:/var/lib/carbon/whisper {{ carbon_cache_docker_image }}
25 |
26 | ExecReload=-/usr/bin/docker stop "carbon-cache"
27 | ExecReload=-/usr/bin/docker rm "carbon-cache"
28 | ExecReload=/usr/bin/docker run --name carbon-cache -p {{ docker_carbon_cache_port }}:2003 -v {{ persistent_carbon_data_path }}:/var/lib/carbon/whisper {{ carbon_cache_docker_image }}
29 |
30 | ExecStop=-/usr/bin/docker stop "carbon-cache"
31 | ExecStop=-/usr/bin/docker rm "carbon-cache"
32 |
33 | Restart=always
34 | RestartSec=30
35 |
36 | [Install]
37 | WantedBy=default.target
38 |
--------------------------------------------------------------------------------
/ansible/install/roles/graphite/templates/graphite-web.conf.j2:
--------------------------------------------------------------------------------
1 | # Graphite Web Basic mod_wsgi vhost
2 | {% if graphite_port != 80 %}
3 | Listen {{graphite_port}}
4 | {% endif %}
5 | <VirtualHost *:{{graphite_port}}>
6 | DocumentRoot "/usr/share/graphite/webapp"
7 | ErrorLog /var/log/httpd/graphite-web-error.log
8 | CustomLog /var/log/httpd/graphite-web-access.log common
9 |
10 | # Header set Access-Control-Allow-Origin "*"
11 | # Header set Access-Control-Allow-Methods "GET, OPTIONS"
12 | # Header set Access-Control-Allow-Headers "origin, authorization, accept"
13 | # Header set Access-Control-Allow-Credentials true
14 |
15 | WSGIScriptAlias / /usr/share/graphite/graphite-web.wsgi
16 | WSGIImportScript /usr/share/graphite/graphite-web.wsgi process-group=%{GLOBAL} application-group=%{GLOBAL}
17 |
18 | <Location "/content/">
19 | SetHandler None
20 | </Location>
21 |
22 | Alias /media/ "/usr/lib/python2.7/site-packages/django/contrib/admin/media/"
23 | <Location "/media/">
24 | SetHandler None
25 | </Location>
26 |
27 | <Directory "/usr/share/graphite/">
28 | <IfModule mod_authz_core.c>
29 | # Apache 2.4
30 | Require all granted
31 | Require local
32 | </IfModule>
33 | <IfModule !mod_authz_core.c>
34 | # Apache 2.2
35 | Order Deny,Allow
36 | Deny from all
37 | Allow from 127.0.0.1
38 | Allow from ::1
39 | </IfModule>
40 | </Directory>
41 | </VirtualHost>
42 |
--------------------------------------------------------------------------------
/ansible/install/roles/browbeat-network/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Setup up network for browbeat
4 | #
5 |
6 | - name: Create browbeat public network
7 | shell: ". {{overcloudrc}}; neutron net-create {{browbeat_pub_net_name}} --router:external | grep -E ' id ' | awk '{print $4}'"
8 | register: public_net_id
9 |
10 | - name: Create browbeat public subnet
11 | shell: ". {{overcloudrc}}; neutron subnet-create {{public_net_id.stdout}} {{browbeat_pub_subnet}} --allocation-pool start={{browbeat_pub_pool_start}},end={{browbeat_pub_pool_end}} --gateway={{browbeat_pub_pool_gw}} --disable-dhcp"
12 |
13 | - name: Create browbeat private network
14 | shell: ". {{overcloudrc}}; neutron net-create {{browbeat_pri_net_name}} | grep -E ' id ' | awk '{print $4}'"
15 | register: private_net_id
16 |
17 | - name: Create browbeat private subnet
18 | shell: ". {{overcloudrc}}; neutron subnet-create {{private_net_id.stdout}} {{browbeat_pri_subnet}} --allocation-pool start={{browbeat_pri_pool_start}},end={{browbeat_pri_pool_end}} --gateway={{browbeat_pri_pool_gw}} --dns-nameserver {{browbeat_pri_pool_dns}} | grep -E ' id ' | awk '{print $4}'"
19 | register: private_subnet_id
20 |
21 | - name: Create browbeat router
22 | shell: ". {{overcloudrc}}; neutron router-create {{browbeat_router_name}} | grep -E ' id ' | awk '{print $4}'"
23 | register: router_id
24 |
25 | - name: Set browbeat router gateway
26 | shell: ". {{overcloudrc}}; neutron router-gateway-set {{router_id.stdout}} {{public_net_id.stdout}}"
27 |
28 | - name: Add browbeat router interface to browbeat private network
29 | shell: ". {{overcloudrc}}; neutron router-interface-add {{router_id.stdout}} {{private_subnet_id.stdout}}"
30 |
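31 | # Hypothetical manual teardown if the networks need recreating (neutron CLI era):
32 | #   neutron router-gateway-clear <router>; neutron router-interface-delete <router> <subnet>
33 | #   neutron router-delete <router>; neutron subnet-delete <subnet>; neutron net-delete <net>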
--------------------------------------------------------------------------------
/ansible/gather/roles/undercloud/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Tasks to set undercloud facts
4 | #
5 | - name: Get max_connections on the database
6 | shell: mysql -e "show variables like 'max_connections';" | grep max_connections | awk '{print $2}'
7 | register: max_conn
8 | ignore_errors: true
9 |
10 | - name: Set max database connections
11 | set_fact:
12 | openstack_mysql_max_connections: "{{ max_conn.stdout }}"
13 |
14 | - name : Get file descriptors for the mysql process
15 | shell: cat /proc/$(pgrep mysqld_safe)/limits | grep "open files" | awk '{print $4}'
16 | register: mysql_desc
17 |
18 | - name: Set file descriptors fact for mysql
19 | set_fact:
20 | openstack_mysql_file_descriptors: "{{ mysql_desc.stdout }}"
21 |
22 | - name : Get rabbitmq file descriptors
23 | shell: rabbitmqctl status | grep total_limit | awk -F',' '{print $2}' | sed 's/.$//'
24 | register: rabbitmq_desc
25 | ignore_errors: true
26 |
27 | - name: Set rabbitmq file descriptors
28 | set_fact:
29 | openstack_rabbitmq_file_descriptors: "{{ rabbitmq_desc.stdout }}"
30 |
31 | - name: Get Controller Nodes number
32 | shell: source ~/stackrc; nova list | grep controller | grep ACTIVE | wc -l
33 | register: controller_count
34 |
35 | - name: Set Controller number fact
36 | set_fact:
37 | osp_controllers_number: "{{ controller_count.stdout }}"
38 |
39 | - name: Get Compute Nodes number
40 | shell: source ~/stackrc; nova list | grep compute | grep ACTIVE | wc -l
41 | register: compute_count
42 |
43 | - name: Set Compute number fact
44 | set_fact:
45 | osp_computes_number: "{{ compute_count.stdout }}"
46 |
--------------------------------------------------------------------------------
/ansible/check/site.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to check OSP install for common performance tuning issues
4 | #
5 |
6 | - hosts: localhost
7 | name: Get MySQL Tuner Script
8 | gather_facts: false
9 | vars:
10 | ansible_connection: local
11 | tasks:
12 | - name: Get MySQL Tuner Script
13 | get_url: url={{mysql_tuner_script}} dest={{playbook_dir}}
14 |
15 | - hosts: undercloud
16 | name: Checking Undercloud for common Performance Issues
17 | remote_user: stack
18 | roles:
19 | - common
20 | - controller
21 | - undercloud
22 | - keystone
23 | - neutron
24 | - nova
25 |
26 | - hosts: controller
27 | name: Checking Controller Nodes for common Performance Issues
28 | remote_user: heat-admin
29 | roles:
30 | - common
31 | - controller
32 | - keystone
33 | - neutron
34 | - nova
35 |
36 | - hosts: compute
37 | name: Checking Compute Nodes for common Performance Issues
38 | remote_user: heat-admin
39 | roles:
40 | - common
41 | - compute
42 | - nova
43 |
44 | - hosts: ceph
45 | name: Checking Ceph Hosts for common Performance Issues
46 | remote_user: heat-admin
47 | roles:
48 | - common
49 | - ceph
50 |
51 | - hosts: localhost
52 | gather_facts: False
53 | become: false
54 | name: Generating bug report
55 | tasks:
56 | - local_action: template src=templates/bug_report.j2 dest={{result_dir}}/bug_report.log
57 | become: false
58 | - local_action: template src=templates/mysql_report.j2 dest={{result_dir}}/mysql_report.log
59 | become: false
60 | - replace: dest={{result_dir}}/mysql_report.log regexp='\[([^\s+]+)' replace=''
61 | - replace: dest={{result_dir}}/mysql_report.log regexp='\r' replace=''
62 |
--------------------------------------------------------------------------------
/rally/rally-plugins/subnet-router-create/subnet-router-create.py:
--------------------------------------------------------------------------------
1 | from rally.task import atomic
2 | from rally.task import scenario
3 | from rally.plugins.openstack.scenarios.nova import utils as nova_utils
4 | from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
5 | from rally.task import types
6 | from rally.task import utils as task_utils
7 | from rally.task import validation
8 |
9 | class NeutronPlugin(neutron_utils.NeutronScenario,
10 | scenario.Scenario):
11 | @types.set(image=types.ImageResourceType,
12 | flavor=types.FlavorResourceType)
13 | @validation.required_openstack(users=True)
14 | @scenario.configure(context={"cleanup": ["neutron"]})
15 | def create_router_and_net(self,num_networks=1,network_create_args=None,
16 | subnet_create_args=None,**kwargs):
17 | router = self._create_router({})
18 | subnets = []
19 | if num_networks == 1 :
20 | network = self._create_network(network_create_args or {})
21 | subnet = self._create_subnet(network, subnet_create_args or {})
22 | subnets.append(subnet)
23 | self._add_interface_router(subnet['subnet'],router['router'])
24 | else :
25 | for net in range(num_networks):  # iterate num_networks times so every network is created
26 | network = self._create_network(network_create_args or {})
27 | subnet = self._create_subnet(network, subnet_create_args or {})
28 | subnets.append(subnet)
29 | self._add_interface_router(subnet['subnet'],router['router'])
30 | for subnet in subnets :
31 | self._remove_interface_router(subnet['subnet'],router['router'])
32 |
--------------------------------------------------------------------------------
/rally/rally-plugins/README.md:
--------------------------------------------------------------------------------
1 | # Rally Plugins Browbeat can use
2 | ## Current plugins
3 | ### neutron-netcreate_nova-boot
4 | This Rally plugin utilizes both Neutron and Nova utilities. It will create a network, then launch a guest within that network. The plugin will also attempt to ping the guest to make sure connectivity works.
5 |
6 | #### Assumptions
7 | For this to work, we suggest using the admin tenant. With Rally this can be done by creating an env file with the ExistingUsers field - [example json](rally-neutron/admintenant-env.json). This plugin also assumes the following networking topology:
8 | ```
9 | [ Rally Host ] --- Link to Rally tenant network --- [ Router ] -- [ tenant networks ] -- Guests
10 | ```
11 | We suggest this method so you do not need a 1:1 mapping of connections to tenant networks.
12 |
13 | *** The below JSON needs updating to show that we need to pass a router to the plugin.
14 |
15 | #### Example json
16 | ```
17 | {% set flavor_name = flavor_name or "m1.flavorname" %}
18 | {
19 | "NeutronPlugin.create_network_nova_boot": [
20 | {
21 | "args": {
22 | "flavor": {
23 | "name": "{{flavor_name}}"
24 | },
25 | "image": {
26 | "name": "image_name"
27 | },
28 | "network_create_args": {}
29 | },
30 | "runner": {
31 | "type": "serial",
32 | "times": 5
33 | },
34 | "context": {
35 | "users": {
36 | "tenants": 1,
37 | "users_per_tenant": 1
38 | }
39 | }
40 | }
41 | ]
42 | }
43 | ```
44 |
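45 | #### Running the plugin
46 | Assuming the plugin file is on Rally's plugin path and the JSON above is saved as `netcreate_boot.json` (hypothetical name), the task can be started with something like:
47 | ```
48 | rally task start netcreate_boot.json
49 | ```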
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-openstack/files/collectd-redis.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | HOSTNAME="${COLLECTD_HOSTNAME:-`hostname -f`}"
4 | INTERVAL="${COLLECTD_INTERVAL:-10}"
5 | PORT=6379
6 |
7 | while true
8 | do
9 |
10 | info=$((echo info ; sleep 2) |nc -w 1 $HOSTNAME $PORT 2>&1)
11 | connected_clients=$(echo "$info" | egrep ^connected_clients| awk -F: '{ print $2 }' | sed 's/\r//g')
12 | connected_slaves=$(echo "$info" | egrep ^connected_slaves| awk -F: '{ print $2 }' | sed 's/\r//g')
13 | uptime=$(echo "$info" | egrep ^uptime_in_seconds| awk -F: '{ print $2 }' | sed 's/\r//g')
14 | used_memory=$(echo "$info" | egrep ^used_memory:| awk -F: '{ print $2 }' | sed 's/\r//g')
15 | changes_since_last_save=$(echo "$info" | egrep ^rdb_changes_since_last_save| awk -F: '{ print $2 }' | sed 's/\r//g')
16 | total_commands_processed=$(echo "$info" | egrep ^total_commands_processed| awk -F: '{ print $2 }' | sed 's/\r//g')
17 | keys=$(echo "$info" | egrep ^db0:keys| awk -F= '{ print $2 }' | awk -F, '{ print $1 }' | sed 's/\r//g')
18 |
19 | echo "PUTVAL $HOSTNAME/redis-$PORT/memcached_connections-clients interval=$INTERVAL N:$connected_clients"
20 | echo "PUTVAL $HOSTNAME/redis-$PORT/memcached_connections-slaves interval=$INTERVAL N:$connected_slaves"
21 | echo "PUTVAL $HOSTNAME/redis-$PORT/uptime interval=$INTERVAL N:$uptime"
22 | echo "PUTVAL $HOSTNAME/redis-$PORT/df-memory interval=$INTERVAL N:$used_memory:U"
23 | echo "PUTVAL $HOSTNAME/redis-$PORT/files-unsaved_changes interval=$INTERVAL N:$changes_since_last_save"
24 | echo "PUTVAL $HOSTNAME/redis-$PORT/memcached_command-total interval=$INTERVAL N:$total_commands_processed"
25 | echo "PUTVAL $HOSTNAME/redis-$PORT/memcached_items-db0 interval=$INTERVAL N:$keys"
26 |
27 | sleep "$INTERVAL"
28 | done
29 |
30 |
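31 | # This script is intended to be driven by collectd's exec plugin: each PUTVAL
32 | # line above uses the collectd plain-text protocol and reuses existing metric
33 | # types (memcached_*, df, files, uptime), so no custom types.db entry is needed.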
--------------------------------------------------------------------------------
/ansible/browbeat/snapshot-general-performance-dashboard.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Snapshot Dashboard
4 | #
5 | # Example Usage:
6 | # ansible-playbook -i hosts browbeat/snapshot-general-performance-dashboard.yml -e "grafana_ip=1.1.1.1 grafana_port=3000 from=1455649200000 to=1455656400000 results_dir=results/ var_cloud=openstack"
7 | #
8 | # Append snapshot_compute=true to run snapshots against computes.
9 | #
10 |
11 | - hosts: localhost
12 | gather_facts: false
13 | remote_user: stack
14 | vars:
15 | ansible_connection: local
16 | ansible_python_interpreter: "/usr/bin/python"
17 | host_type: undercloud
18 | host_suffix: ""
19 | hosts_in_group: "{{groups['undercloud']}}"
20 | disks_in_group: "{{disks['undercloud']}}"
21 | interfaces_in_group: "{{interfaces['undercloud']}}"
22 | roles:
23 | - grafana-snapshot
24 |
25 | - hosts: localhost
26 | gather_facts: false
27 | remote_user: stack
28 | vars:
29 | ansible_connection: local
30 | ansible_python_interpreter: "/usr/bin/python"
31 | host_type: controller
32 | host_suffix: ""
33 | hosts_in_group: "{{groups['controller']}}"
34 | disks_in_group: "{{disks['controller']}}"
35 | interfaces_in_group: "{{interfaces['controller']}}"
36 | roles:
37 | - grafana-snapshot
38 |
39 | - hosts: localhost
40 | gather_facts: false
41 | remote_user: stack
42 | vars:
43 | ansible_connection: local
44 | ansible_python_interpreter: "/usr/bin/python"
45 | host_type: compute
46 | host_suffix: ""
47 | hosts_in_group: "{{groups['compute']}}"
48 | disks_in_group: "{{disks['compute']}}"
49 | interfaces_in_group: "{{interfaces['compute']}}"
50 | roles:
51 | - { role: grafana-snapshot, when: snapshot_compute is defined }
52 |
--------------------------------------------------------------------------------
/ansible/README.cfme-allinone.md:
--------------------------------------------------------------------------------
1 | # Setting up a CFME or ManageIQ VM for All-In-One Performance Monitoring
2 |
3 | 1. Deploy ManageIQ/CFME appliance
4 | 2. Add an additional disk to host Graphite's whisper database and mount it at /var/lib/carbon
5 | 3. Clone browbeat
6 |
7 | ```
8 | [root@manageiq ~]# git clone https://github.com/jtaleric/browbeat.git
9 | [root@manageiq ~]# cd browbeat/ansible
10 | ```
11 | 4. Create ansible inventory file
12 |
13 | ```
14 | [graphite]
15 | localhost ansible_connection=local
16 |
17 | [grafana]
18 | localhost ansible_connection=local
19 |
20 | [cfme-all-in-one]
21 | localhost ansible_connection=local
22 | ```
23 | 5. Install ansible
24 |
25 | ```
26 | [root@manageiq ansible]# easy_install pip
27 | [root@manageiq ansible]# yum install -y python-devel gcc-c++
28 | [root@manageiq ansible]# pip install ansible
29 | ```
30 | 6. Setup installation variables at install/group_vars/all by modifying the following variables
31 |
32 | ```
33 | graphite_host: localhost
34 | graphite_port: 9000
35 | graphite_prefix: manageiq
36 | grafana_host: localhost
37 | grafana_port: 9001
38 | ```
39 | 7. Run playbooks for collectd/graphite/grafana install
40 |
41 | ```
42 | [root@manageiq ansible]# ansible-playbook -i hosts install/graphite.yml
43 | [root@manageiq ansible]# ansible-playbook -i hosts install/grafana.yml
44 | [root@manageiq ansible]# ansible-playbook -i hosts install/collectd-generic.yml --tags="cfme-all-in-one"
45 | ```
46 | 8. Upload dashboards via ansible
47 |
48 | ```
49 | [root@manageiq ansible]# ansible-playbook -i hosts install/dashboards-generic.yml
50 | ```
51 |
52 | 9. Enjoy your now performance-monitored CFME/ManageIQ appliance; view Grafana dashboards at http://(manageiq-ip-address):9001/
53 |
--------------------------------------------------------------------------------
/ansible/install/filter_plugins/browbeat_install_filters.py:
--------------------------------------------------------------------------------
1 | def dict_remove(the_dict, item):
2 | """Remove an item from a dictionary."""
3 | del the_dict[item]
4 | return the_dict
5 |
6 |
7 | def hosts_to_dictionary(arg):
8 | """Changes list format of hosts to dictionary format. The key of the dictionary is the index
9 | of the host. The index is defined by the host's suffix, example: overcloud-controller-10 is 10.
10 | If there is no suffix, I use an incremented value above 1000000."""
11 |
12 | dictionary = {}
13 | nonindex = 1000000
14 | for item in arg:
15 | if '-' in item:
16 | idx = item.rindex('-')
17 | dictionary[int(item[idx + 1:])] = item
18 | else:
19 | nonindex += 1
20 | dictionary[nonindex] = item
21 | return dictionary
22 |
23 |
24 | def ini_value(key_value):
25 | """Strips key= from key=value from ini configuration data"""
26 | equals_idx = key_value.index('=') + 1
27 | return key_value[equals_idx:]
28 |
29 |
30 | def to_grafana_refid(number):
31 | """Convert a number to a string starting at character a and incrementing. This only accounts
32 | for a to zz, anything greater than zz is probably too much to graph anyway."""
33 | character1 = ''
34 | idx = -1
35 | while number > 25:
36 | idx = idx + 1
37 | number -= 26
38 | else:
39 | if idx != -1:
40 | character1 = chr(idx + 65)
41 | return character1 + chr(number + 65)
42 |
43 |
44 | class FilterModule(object):
45 | def filters(self):
46 | return {
47 | 'dict_remove': dict_remove,
48 | 'ini_value': ini_value,
49 | 'hosts_to_dictionary': hosts_to_dictionary,
50 | 'to_grafana_refid': to_grafana_refid,
51 | }
52 |
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-generic/templates/guest.collectd.conf.j2:
--------------------------------------------------------------------------------
1 | # Installed by Browbeat Ansible Installer
2 | # Config type: {{config_type}}
3 |
4 | # Interval default is 10s
5 | Interval {{collectd_interval}}
6 |
7 | # Hostname for this machine, if not defined, use gethostname(2) system call
8 | Hostname "{{inventory_hostname}}"
9 |
10 | # Allow collectd to log
11 | LoadPlugin syslog
12 |
13 | # Loaded Plugins:
14 | LoadPlugin write_graphite
15 | LoadPlugin cpu
16 | LoadPlugin df
17 | LoadPlugin disk
18 | LoadPlugin interface
19 | LoadPlugin irq
20 | LoadPlugin load
21 | LoadPlugin memory
22 | LoadPlugin processes
23 | LoadPlugin swap
24 | LoadPlugin unixsock
25 | LoadPlugin uptime
26 |
27 | # Open unix domain socket for collectdctl
28 | <Plugin unixsock>
29 | SocketFile "/var/run/collectd-unixsock"
30 | SocketGroup "collectd"
31 | SocketPerms "0770"
32 | DeleteSocket true
33 | </Plugin>
34 |
35 | # Graphite Host Configuration
36 | <Plugin write_graphite>
37 | <Node "graphite">
38 | Host "{{graphite_host}}"
39 | Port "2003"
40 | Prefix "{{graphite_prefix}}."
41 | Protocol "tcp"
42 | LogSendErrors true
43 | StoreRates true
44 | AlwaysAppendDS false
45 | EscapeCharacter "_"
46 | </Node>
47 | </Plugin>
48 |
49 | <Plugin df>
50 | ValuesPercentage true
51 | </Plugin>
52 |
53 | <Plugin disk>
54 | Disk "/^[hsv]d[a-z]+[0-9]?$/"
55 | IgnoreSelected false
56 | </Plugin>
57 |
58 | <Plugin processes>
59 | # Example, collect on all httpd processes:
60 | Process "httpd"
61 |
62 | # Collect on collectd process
63 | ProcessMatch "collectd" "/usr/sbin/collectd.+-C.+/etc/collectd.conf"
64 | </Plugin>
65 |
66 | <Plugin swap>
67 | ReportBytes true
68 | ValuesPercentage true
69 | </Plugin>
70 |
71 | # Include other collectd configuration files
72 | Include "/etc/collectd.d"
73 |
--------------------------------------------------------------------------------
/ansible/install/collectd-openstack.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Playbook to install collectd on undercloud/overcloud
4 | #
5 |
6 | - hosts: undercloud
7 | remote_user: "{{ local_remote_user }}"
8 | vars:
9 | config_type: undercloud
10 | roles:
11 | - { role: common, when: collectd_undercloud }
12 | - { role: collectd-openstack, when: collectd_undercloud }
13 | tasks:
14 | - name: Collectd off if not collectd_undercloud
15 | service: name=collectd state=stopped enabled=false
16 | become: true
17 | when: not collectd_undercloud
18 |
19 | - hosts: controller
20 | remote_user: "{{ host_remote_user }}"
21 | vars:
22 | config_type: controller
23 | roles:
24 | - { role: common, when: collectd_controller }
25 | - { role: collectd-openstack, when: collectd_controller }
26 | tasks:
27 | - name: Collectd off if not collectd_controller
28 | service: name=collectd state=stopped enabled=false
29 | become: true
30 | when: not collectd_controller
31 |
32 | - hosts: ceph
33 | remote_user: "{{ host_remote_user }}"
34 | vars:
35 | config_type: ceph
36 | roles:
37 | - { role: common, when: collectd_ceph }
38 | - { role: collectd-openstack, when: collectd_ceph }
39 | tasks:
40 | - name: Collectd off if not collectd_ceph
41 | service: name=collectd state=stopped enabled=false
42 | become: true
43 | when: not collectd_ceph
44 |
45 | - hosts: compute
46 | remote_user: "{{ host_remote_user }}"
47 | vars:
48 | config_type: compute
49 | roles:
50 | - { role: common, when: collectd_compute }
51 | - { role: collectd-openstack, when: collectd_compute }
52 | tasks:
53 | - name: Collectd off if not collectd_compute
54 | service: name=collectd state=stopped enabled=false
55 | become: true
56 | when: not collectd_compute
57 |
--------------------------------------------------------------------------------
/ansible/install/roles/nginx/templates/nginx.conf.j2:
--------------------------------------------------------------------------------
1 | # For more information on configuration, see:
2 | # * Official English Documentation: http://nginx.org/en/docs/
3 | # * Official Russian Documentation: http://nginx.org/ru/docs/
4 |
5 | user nginx;
6 | worker_processes auto;
7 | error_log /var/log/nginx/error.log;
8 | pid /run/nginx.pid;
9 |
10 | events {
11 | worker_connections 1024;
12 | }
13 |
14 | http {
15 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
16 | '$status $body_bytes_sent "$http_referer" '
17 | '"$http_user_agent" "$http_x_forwarded_for"';
18 |
19 | access_log /var/log/nginx/access.log main;
20 |
21 | sendfile on;
22 | tcp_nopush on;
23 | tcp_nodelay on;
24 | keepalive_timeout 65;
25 | types_hash_max_size 2048;
26 |
27 | include /etc/nginx/mime.types;
28 | default_type application/octet-stream;
29 |
30 | # Load modular configuration files from the /etc/nginx/conf.d directory.
31 | # See http://nginx.org/en/docs/ngx_core_module.html#include
32 | # for more information.
33 | include /etc/nginx/conf.d/*.conf;
34 |
35 | server {
36 | listen {{elk_server_ssl_cert_port}} default_server;
37 | listen [::]:{{elk_server_ssl_cert_port}} default_server;
38 | server_name _;
39 | root /usr/share/nginx/html;
40 |
41 | # Load configuration files for the default server block.
42 | include /etc/nginx/default.d/*.conf;
43 |
44 | location / {
45 | }
46 |
47 | error_page 404 /404.html;
48 | location = /40x.html {
49 | }
50 |
51 | error_page 500 502 503 504 /50x.html;
52 | location = /50x.html {
53 | }
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/ansible/install/roles/filebeat/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # install/run filebeat elk client for browbeat
4 | #
5 |
6 | - name: Copy filebeat yum repo file
7 | copy:
8 | src=filebeat.repo
9 | dest=/etc/yum.repos.d/filebeat.repo
10 | owner=root
11 | group=root
12 | mode=0644
13 | become: true
14 |
15 | - name: Import Filebeat GPG Key
16 | rpm_key: key=http://packages.elastic.co/GPG-KEY-elasticsearch
17 | state=present
18 | become: true
19 |
20 | - name: Install filebeat rpms
21 | yum: name={{ item }} state=present
22 | become: true
23 | with_items:
24 | - filebeat
25 |
26 | - name: Generate filebeat configuration template
27 | template:
28 | src=filebeat.yml.j2
29 | dest=/etc/filebeat/filebeat.yml
30 | owner=root
31 | group=root
32 | mode=0644
33 | become: true
34 | register: filebeat_needs_restart
35 |
36 | - name: Check ELK server SSL client certificate
37 | stat: path=/etc/pki/tls/certs/filebeat-forwarder.crt
38 | ignore_errors: true
39 | register: elk_client_ssl_cert_exists
40 |
41 | # Set standard nginx ports if we're not pointing towards an undercloud
42 | - name: Assign ELK nginx port value for SSL client certificate
43 | set_fact:
44 | elk_server_ssl_cert_port: 8080
45 | when: elk_server_ssl_cert_port is none
46 |
47 | - name: Install ELK server SSL client certificate
48 | shell: curl http://"{{ elk_server }}":{{ elk_server_ssl_cert_port }}/filebeat-forwarder.crt > /etc/pki/tls/certs/filebeat-forwarder.crt
49 | become: true
50 | when: not elk_client_ssl_cert_exists.stat.exists
51 |
52 | - name: Start filebeat service
53 | command: systemctl start filebeat.service
54 | ignore_errors: true
55 | become: true
56 | when: filebeat_needs_restart.changed
57 |
58 | - name: Setup filebeat service
59 | service: name=filebeat state=started enabled=true
60 | become: true
61 |
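62 | # Hypothetical smoke test after this role completes:
63 | #   systemctl status filebeat
64 | #   curl -s 'http://<elk_server>:9200/filebeat-*/_count'   # documents arriving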
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-generic/templates/baremetal.collectd.conf.j2:
--------------------------------------------------------------------------------
1 | # Installed by Browbeat Ansible Installer
2 | # Config type: {{config_type}}
3 |
4 | # Interval default is 10s
5 | Interval {{collectd_interval}}
6 |
7 | # Hostname for this machine, if not defined, use gethostname(2) system call
8 | Hostname "{{inventory_hostname}}"
9 |
10 | # Allow collectd to log
11 | LoadPlugin syslog
12 |
13 | # Loaded Plugins:
14 | LoadPlugin write_graphite
15 | LoadPlugin cpu
16 | LoadPlugin df
17 | LoadPlugin disk
18 | LoadPlugin interface
19 | LoadPlugin irq
20 | LoadPlugin load
21 | LoadPlugin memory
22 | LoadPlugin numa
23 | LoadPlugin processes
24 | LoadPlugin swap
25 | LoadPlugin turbostat
26 | LoadPlugin unixsock
27 | LoadPlugin uptime
28 |
29 | # Open unix domain socket for collectdctl
30 | <Plugin unixsock>
31 | SocketFile "/var/run/collectd-unixsock"
32 | SocketGroup "collectd"
33 | SocketPerms "0770"
34 | DeleteSocket true
35 | </Plugin>
36 |
37 | # Graphite Host Configuration
38 | <Plugin write_graphite>
39 | <Node "graphite">
40 | Host "{{graphite_host}}"
41 | Port "2003"
42 | Prefix "{{graphite_prefix}}."
43 | Protocol "tcp"
44 | LogSendErrors true
45 | StoreRates true
46 | AlwaysAppendDS false
47 | EscapeCharacter "_"
48 | </Node>
49 | </Plugin>
50 |
51 | <Plugin df>
52 | ValuesPercentage true
53 | </Plugin>
54 |
55 | <Plugin disk>
56 | Disk "/^[hsv]d[a-z]+[0-9]?$/"
57 | IgnoreSelected false
58 | </Plugin>
59 |
60 | <Plugin processes>
61 | # Example, collect on all httpd processes:
62 | Process "httpd"
63 |
64 | # Collect on collectd process
65 | ProcessMatch "collectd" "/usr/sbin/collectd.+-C.+/etc/collectd.conf"
66 | </Plugin>
67 |
68 | <Plugin swap>
69 | ReportBytes true
70 | ValuesPercentage true
71 | </Plugin>
72 |
73 | # Include other collectd configuration files
74 | Include "/etc/collectd.d"
75 |
--------------------------------------------------------------------------------
/ansible/gather/roles/nova/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Tasks to get nova facts (worker counts fall back to the CPU count when unset or invalid)
4 | #
5 | - name: Get Nova API Workers
6 | command: crudini --get /etc/nova/nova.conf DEFAULT osapi_compute_workers
7 | register: nova_api
8 | ignore_errors: true
9 |
10 | - name: Set nova API Workers fact
11 | set_fact:
12 | openstack_nova_api_workers: "{{ facter_processorcount }}"
13 | when: (nova_api.stdout =="" or nova_api.stdout|int < 1)
14 |
15 | - name: Set nova API Workers fact
16 | set_fact:
17 | openstack_nova_api_workers: "{{ nova_api.stdout }}"
18 | when: (nova_api.stdout !="" and nova_api.stdout|int >= 1)
19 |
20 | - name: Get Nova conductor workers
21 | command: crudini --get /etc/nova/nova.conf conductor workers
22 | register: nova_conductor
23 | ignore_errors: true
24 |
25 | - name: Set Nova conductor workers
26 | set_fact:
27 | openstack_nova_conductor_workers: "{{ facter_processorcount }}"
28 | when: (nova_conductor.stdout == "" or nova_conductor.stdout|int < 1)
29 |
30 | - name: Set Nova conductor workers
31 | set_fact:
32 | openstack_nova_conductor_workers: "{{ nova_conductor.stdout }}"
33 | when: (nova_conductor.stdout != "" and nova_conductor.stdout|int >= 1)
34 |
35 | - name: Get Nova metadata workers
36 | command: crudini --get /etc/nova/nova.conf DEFAULT metadata_workers
37 | register: nova_metadata
38 | ignore_errors: true
39 |
40 | - name: Set Nova metadata workers
41 | set_fact:
42 | openstack_nova_metadata_workers: "{{ facter_processorcount }}"
43 | when: (nova_metadata.stdout == "" or nova_metadata.stdout|int < 1)
44 |
45 | - name: Set Nova metadata workers
46 | set_fact:
47 | openstack_nova_metadata_workers: "{{ nova_metadata.stdout }}"
48 | when: (nova_metadata.stdout != "" and nova_metadata.stdout|int >= 1)
49 |
50 |
51 |
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-generic/templates/graphite.collectd.conf.j2:
--------------------------------------------------------------------------------
1 | # Installed by Browbeat Ansible Installer
2 | # Config type: {{config_type}}
3 |
4 | # Interval default is 10s
5 | Interval {{collectd_interval}}
6 |
7 | # Hostname for this machine, if not defined, use gethostname(2) system call
8 | Hostname "{{inventory_hostname}}"
9 |
10 | # Allow collectd to log
11 | LoadPlugin syslog
12 |
13 | # Loaded Plugins:
14 | LoadPlugin write_graphite
15 | LoadPlugin cpu
16 | LoadPlugin df
17 | LoadPlugin disk
18 | LoadPlugin interface
19 | LoadPlugin irq
20 | LoadPlugin load
21 | LoadPlugin memory
22 | LoadPlugin numa
23 | LoadPlugin processes
24 | LoadPlugin swap
25 | LoadPlugin turbostat
26 | LoadPlugin unixsock
27 | LoadPlugin uptime
28 |
29 | # Open unix domain socket for collectdctl
30 | <Plugin unixsock>
31 | SocketFile "/var/run/collectd-unixsock"
32 | SocketGroup "collectd"
33 | SocketPerms "0770"
34 | DeleteSocket true
35 | </Plugin>
36 |
37 | # Graphite Host Configuration
38 | <Plugin write_graphite>
39 | <Node "graphite">
40 | Host "{{graphite_host}}"
41 | Port "2003"
42 | Prefix "{{graphite_prefix}}."
43 | Protocol "tcp"
44 | LogSendErrors true
45 | StoreRates true
46 | AlwaysAppendDS false
47 | EscapeCharacter "_"
48 | </Node>
49 | </Plugin>
50 |
51 | <Plugin df>
52 | ValuesPercentage true
53 | </Plugin>
54 |
55 | <Plugin disk>
56 | Disk "/^[hsv]d[a-z]+[0-9]?$/"
57 | IgnoreSelected false
58 | </Plugin>
59 |
60 | <Plugin processes>
61 | ProcessMatch "carbon-cache" "python.+carbon-cache"
62 | Process "grafana-server"
63 | Process "httpd"
64 | # Collect on collectd process
65 | ProcessMatch "collectd" "/usr/sbin/collectd.+-C.+/etc/collectd.conf"
66 | </Plugin>
67 |
68 | <Plugin swap>
69 | ReportBytes true
70 | ValuesPercentage true
71 | </Plugin>
72 |
73 | # Include other collectd configuration files
74 | Include "/etc/collectd.d"
75 |
--------------------------------------------------------------------------------
/ansible/README.collectd-generic.md:
--------------------------------------------------------------------------------
1 | # Installing and configuring collectd agent on other machines
2 |
3 | Collectd configurations are built for these types of machines:
4 | * baremetal
5 | * guest
6 | * cfme
7 | * cfme-vmdb
8 | * cfme-all-in-one
9 | * graphite/grafana
10 | * ose
11 | * satellite6
12 |
13 | To install collectd agent and configure collectd to send metrics to your Graphite server, simply add the host to your ansible inventory file under the correct group.
14 |
15 | Complete Example Inventory file:
16 | ```
17 | [undercloud]
18 | undercloud
19 |
20 | [controller]
21 | overcloud-controller-0
22 | overcloud-controller-1
23 | overcloud-controller-2
24 |
25 | [compute]
26 | overcloud-compute-0
27 | overcloud-compute-1
28 |
29 | [ceph]
30 | overcloud-cephstorage-0
31 |
32 | [baremetal]
33 | x.x.x.x # An ip address or fqdn or specified host in ~/.ssh/config
34 |
35 | [guest]
36 | x.x.x.x # An ip address or fqdn or specified vm in ~/.ssh/config
37 |
38 | [cfme]
39 | x.x.x.x # An ip address of a Red Hat Cloud Forms appliance or ManageIQ appliance
40 |
41 | [cfme-vmdb]
42 | x.x.x.x # An ip address of a Red Hat Cloud Forms appliance with vmdb
43 |
44 | [cfme-all-in-one]
45 | x.x.x.x # An ip address of a Red Hat Cloud Forms appliance or ManageIQ appliance with Graphite and Grafana
46 |
47 | [graphite]
48 | x.x.x.x # An ip address of a Graphite/Grafana Server
49 |
50 | [ose]
51 | x.x.x.x # An ip address of a Red Hat Openshift Enterprise Node
52 |
53 | [satellite6]
54 | x.x.x.x # An ip address of a Red Hat Satellite 6 Server
55 | ```
56 |
57 | Example running the collectd-generic playbook on the above specified cfme machine:
58 | ```
59 | # ansible-playbook -i hosts install/collectd-generic.yml --tags "cfme"
60 | ```
61 | Replace "cfme" with whatever machines you intend to install collectd on.
62 |
63 |
64 | Note: Openstack host groups (undercloud, controller, compute, ceph) are ignored with the collectd-generic.yml playbook.
65 |
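66 | Multiple machine types can be covered in one run with a comma-separated tag list (standard Ansible behavior), e.g.:
67 | ```
68 | # ansible-playbook -i hosts install/collectd-generic.yml --tags "guest,baremetal"
69 | ```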
--------------------------------------------------------------------------------
/ansible/install/roles/elasticsearch/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Install/run elasticsearch for browbeat
4 | #
5 |
6 | - name: Copy elasticsearch yum repo file
7 | copy:
8 | src=elasticsearch.repo
9 | dest=/etc/yum.repos.d/elasticsearch.repo
10 | owner=root
11 | group=root
12 | mode=0644
13 | become: true
14 |
15 | - name: Install elasticsearch and openjdk
16 | yum: name={{ item }} state=present
17 | become: true
18 | with_items:
19 | - elasticsearch
20 | - java-openjdk-headless
21 |
22 | - name: Check if system memory is greater than 64G
23 | debug: msg="System memory is {{ansible_memory_mb.real.total | int}} MB so setting heapsize to the 32G upper limit"
24 | when: ansible_memory_mb.real.total|int >= 65536
25 |
26 | - name: Apply heapsize tuning for systems with greater than 64G memory
27 | lineinfile: dest=/usr/share/elasticsearch/bin/elasticsearch.in.sh \
28 | line="ES_HEAP_SIZE=32g" insertafter="^ES_CLASSPATH="
29 | when: ansible_memory_mb.real.total|int >= 65536
30 | register: elasticsearch_updated
31 |
32 | - name: Print extended documentation for heapsize tuning
33 | debug: msg="Refer to https://www.elastic.co/guide/en/elasticsearch/guide/current/_limiting_memory_usage.html"
34 | when: ansible_memory_mb.real.total|int >= 65536
35 |
36 | - name: Update elasticsearch startup with heap size
37 | become: true
38 | lineinfile: dest=/usr/share/elasticsearch/bin/elasticsearch.in.sh \
39 | line="ES_HEAP_SIZE={{ (ansible_memory_mb.real.total / 2) | int }}m" insertafter="^ES_CLASSPATH="
40 | when: ansible_memory_mb.real.total|int < 65536
41 | register: elasticsearch_updated
42 |
43 | - name: Start elasticsearch service
44 | command: systemctl start elasticsearch.service
45 | ignore_errors: true
46 | when: elasticsearch_updated.changed
47 |
48 | - name: Setup elasticsearch service
49 | service: name=elasticsearch state=started enabled=true
50 | become: true
51 |
--------------------------------------------------------------------------------
/ansible/check/roles/undercloud/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Performance checks specific to the undercloud host
4 | #
5 |
6 | - name: Check max_connections on the database
7 | shell: mysql -e "show variables like 'max_connections';" | grep max_connections | awk '{print $2}'
8 | register: bz1266253
9 | changed_when: no
10 | failed_when: bz1266253.stdout|int < mariadb_max_connections
11 | ignore_errors: yes
12 |
13 | - name: Suggested buffer_pool_size
14 | shell: mysql -Bse "SELECT CEILING(Total_InnoDB_Bytes*1.6/POWER(1024,2)) RIBPS FROM (SELECT SUM(data_length+index_length) Total_InnoDB_Bytes FROM information_schema.tables WHERE engine='InnoDB') A;"
15 | register: suggested_buffer_pool_size
16 | changed_when: no
17 | ignore_errors: yes
18 |
19 | - name : Current buffer_pool_size
20 | shell: echo $(mysql -Bse " select @@innodb_buffer_pool_size")/1024/1024 | bc
21 | register: buffer_pool_size
22 | failed_when: buffer_pool_size.stdout|int < suggested_buffer_pool_size.stdout|int
23 | changed_when: no
24 | ignore_errors: yes
25 |
26 | - name : File descriptors for the mysql process
27 | shell: cat /proc/$(pgrep mysqld_safe)/limits | grep "open files" | awk '{print $4}'
28 | register: mysqld_safe_soft_fd
29 | failed_when: mysqld_safe_soft_fd.stdout|int < mysqld_soft_fd
30 | changed_when: no
31 | ignore_errors: yes
32 |
33 | - name : Check rabbitmq file descriptors
34 | shell: rabbitmqctl status | grep total_limit | awk -F',' '{print $2}' | sed 's/.$//'
35 | register: bz1282491
36 | changed_when: no
37 | failed_when: bz1282491.stdout|int < rabbitmq_fd
38 | ignore_errors: yes
39 |
40 | - name : Check rabbitmq for partitions
41 | shell: rabbitmqctl cluster_status | grep partitions -A 1 | grep -q controller
42 | register: rabbit_partitioned
43 | changed_when: no
44 | failed_when: rabbit_partitioned.rc == 0
45 |
46 | - name: Run MySQL Tuner script
47 | script: mysqltuner.pl --nocolor
48 | register: mysql_out
49 | ignore_errors: yes
50 |
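51 | # For reference, the suggested buffer_pool_size query above sizes the InnoDB
52 | # buffer pool at roughly 1.6x the current InnoDB data+index footprint (in MB),
53 | # which is what the "Current buffer_pool_size" task is compared against.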
--------------------------------------------------------------------------------
/lib/Tools.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import shutil
4 | from subprocess import Popen, PIPE
5 |
6 |
7 | class Tools:
8 |
9 | def __init__(self, config=None):
10 | self.logger = logging.getLogger('browbeat.Tools')
11 | self.config = config
12 | return None
13 |
14 | # Run command, return stdout as result
15 | def run_cmd(self, cmd):
16 | self.logger.debug("Running command : %s" % cmd)
17 | process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
18 | stdout, stderr = process.communicate()
19 | if len(stderr) > 0:
20 | return None
21 | else:
22 | return stdout.strip()
23 |
24 | # Find Command on host
25 | def find_cmd(self, cmd):
26 | _cmd = "which %s" % cmd
27 | self.logger.debug('Find Command : Command : %s' % _cmd)
28 | command = self.run_cmd(_cmd)
29 | if command is None:
30 | self.logger.error("Unable to find %s" % cmd)
31 | # Without the binary there is no fallback, so abort the run
32 | raise Exception("Unable to find command : '%s'" % cmd)
33 | else:
34 | return command.strip()
35 |
36 | def create_run_dir(self, results_dir, run):
37 | try:
38 | os.makedirs("%s/run-%s" % (results_dir, run))
39 | return "%s/run-%s" % (results_dir, run)
40 | except OSError as e:
41 | return False
42 |
43 | # Create directory for results
44 | def create_results_dir(self, results_dir, timestamp, service, scenario):
45 | try:
46 | os.makedirs("{}/{}/{}/{}".format(results_dir,
47 | timestamp, service, scenario))
48 | self.logger.debug("{}/{}/{}/{}".format(os.path.dirname(results_dir), timestamp, service,
49 | scenario))
50 | return "{}/{}/{}/{}".format(os.path.dirname(results_dir), timestamp, service, scenario)
51 | except OSError as e:
52 | return False
53 |
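54 | # Example usage (hypothetical):
55 | #   tools = Tools()
56 | #   rally_bin = tools.find_cmd("rally")   # absolute path, raises if missing
57 | #   kernel = tools.run_cmd("uname -r")    # stdout string, or None if stderr was non-empty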
--------------------------------------------------------------------------------
/ansible/browbeat/roles/grafana-snapshot/templates/index.html.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 | {{item}}
4 |
10 |
11 |
12 |
13 |
{{item}} - System Performance Snapshot
14 |
15 | Undercloud:
16 | {% for host in groups['undercloud'] %}
17 |
{{host}}
18 | {% endfor %}
19 |
20 | Controllers:
21 | {% for host in groups['controller'] %}
22 |
{{host}}
23 | {% endfor %}
24 | {% if snapshot_compute is defined and snapshot_compute %}
25 |
26 | Computes:
27 | {% for host in groups['compute'] %}
28 |
{{host}}
29 | {% endfor %}
30 | {% endif %}
31 |
32 |
33 | Disks :
34 | {% for disk in disks_in_group %}
35 | {{disk}}
36 | {% endfor %}
37 |
38 | Interfaces :
39 | {% for interface in interfaces_in_group %}
40 | {{interface}}
41 | {% endfor %}
42 |
43 | {% for panel in general_panels %}
44 | 
45 | {% endfor %}
46 |
47 | {% for disk in disks_in_group %}
48 | {% for panel in disk_panels %}
49 |
50 |

51 | {% endfor %}
52 | {% endfor %}
53 |
54 | {% for interface in interfaces_in_group %}
55 | {% for panel in interface_panels %}
56 |
57 |

58 | {% endfor %}
59 | {% endfor %}
60 |
61 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/grafana-snapshot/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Grafana Snapshot vars
4 | #
5 |
6 | # Task prepends "disk-" prefix for you
7 | disks:
8 | undercloud:
9 | - vda
10 | controller:
11 | - sda
12 | - sda2
13 | compute:
14 | - sda
15 | - sda2
16 |
17 | disk_panels:
18 | - panelId: 9
19 | name: disk-iops
20 | - panelId: 10
21 | name: disk-throughput
22 | - panelId: 14
23 | name: disk-io-time
24 |
25 | # Task prepends "interface-" prefix for you
26 | interfaces:
27 | undercloud:
28 | - br-ctlplane
29 | # - br-int
30 | - ens3
31 | - ens7
32 | - lo
33 | # - ovs-system
34 | controller:
35 | - br-ex
36 | - br-int
37 | - br-tun
38 | - br-vlan
39 | - enp3s0f0
40 | - enp3s0f1
41 | - enp4s0f0
42 | - enp4s0f1
43 | # - ens1f0
44 | # - ens1f1
45 | - lo
46 | # - ovs-system
47 | - vlan201
48 | - vlan202
49 | - vlan203
50 | - vlan204
51 | compute:
52 | - br-ex
53 | - br-int
54 | - br-tun
55 | - br-vlan
56 | - enp3s0f0
57 | - enp3s0f1
58 | - enp4s0f0
59 | - enp4s0f1
60 | # - ens1f0
61 | # - ens1f1
62 | - lo
63 | # - ovs-system
64 | - vlan201
65 | - vlan202
66 | - vlan203
67 | - vlan204
68 |
69 | interface_panels:
70 | - panelId: 16
71 | name: network-pps
72 | - panelId: 17
73 | name: network-throughput
74 |
75 | general_panels:
76 | - panelId: 1
77 | name: all-cpu
78 | - panelId: 2
79 | name: all-cpu-sum
80 | - panelId: 5
81 | name: memory-bytes
82 | - panelId: 6
83 | name: memory-percentage
84 | - panelId: 7
85 | name: swap-usage
86 | - panelId: 19
87 | name: conntrack
88 | - panelId: 25
89 | name: numa
90 | - panelId: 26
91 | name: irq
92 | - panelId: 27
93 | name: interrupts
94 | - panelId: 28
95 | name: load-uptime
96 | - panelId: 29
97 | name: processes
98 | - panelId: 126
99 | name: tail-errors
100 |
--------------------------------------------------------------------------------
/ansible/check/group_vars/controller:
--------------------------------------------------------------------------------
1 | ---
2 | ansible_become: true
3 | tuned_profile: throughput-performance
4 | rabbitmq_fd: 1600
5 | haproxy_max_connections: 4096
6 | mariadb_max_connections: 4096
7 | mysqld_soft_fd: 16384
8 | nova_vif_timeout: 300
9 | netdev_max_backlog: 100000
10 |
11 | checks:
12 | bz1095811:
13 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1095811"
14 | name: "Network connectivity issues after 1000 netns"
15 | bz1282491:
16 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1282491"
17 | name: "update default file descriptor setting"
18 | bz1281584:
19 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1281584"
20 | name: "Director does not create an haproxy configuration that conforms to our best-practice recommendations"
21 | bz1266253:
22 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1266253"
23 | name: "increase mariadb max_connection default value"
24 | buffer_pool_size:
25 | url: "none"
26 | name: "mariadb buffer pool size tuning"
27 | mysqld_safe_soft_fd:
28 | url: "none"
29 | name: "mariadb file descriptor setting not high enough"
30 | bz1293712:
31 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1293712"
32 | name: "/etc/udev/rules.d/99-dhcp-all-interfaces.rules causes a slow and miserable degradation until things fail"
33 | nova_vif_timeout_result:
34 | url: "none"
35 | name: "Nova VIF timeout should be >= 300"
36 | bz1264740:
37 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1264740"
38 | name: "RHEL OSP Director must be configure with nova-event-callback by default"
39 | rabbit_partitioned:
40 | url: "none"
41 | name: "Rabbit is currently partitioned - YMMV... Good luck."
42 | tuned_profile_result:
43 | url: "none"
44 | name: "Ensure Tuned Profile is set to throughput-performance"
45 | neutron_rootwrap_daemon:
46 | url: "none"
47 | name: "Ensure rootwrap has daemon mode enabled"
48 |
49 | # vi:syntax=yaml
50 |
--------------------------------------------------------------------------------
/ansible/check/group_vars/undercloud:
--------------------------------------------------------------------------------
1 | ---
2 | ansible_become: true
3 | tuned_profile: throughput-performance
4 | rabbitmq_fd: 1600
5 | haproxy_max_connections: 4096
6 | mariadb_max_connections: 4096
7 | mysqld_soft_fd: 16384
8 | nova_vif_timeout: 300
9 | netdev_max_backlog: 100000
10 | keystone_threads: 6
11 | keystone_processes: 2
12 |
13 | checks:
14 | bz1282491:
15 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1282491"
16 | name: "update default file descriptor setting"
17 | bz1281584:
18 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1281584"
19 | name: "Director does not create an haproxy configuration that conforms to our best-practice recommendations"
20 | bz1266253:
21 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1266253"
22 | name: "increase mariadb max_connection default value"
23 | buffer_pool_size:
24 | url: "none"
25 | name: "mariadb buffer pool size tuning"
26 | mysqld_safe_soft_fd:
27 | url: "none"
28 | name: "mariadb file descriptor setting not high enough"
29 | bz1293712:
30 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1293712"
31 | name: "/etc/udev/rules.d/99-dhcp-all-interfaces.rules causes a slow and miserable degradation until things fail"
32 | nova_vif_timeout_result:
33 | url: "none"
34 | name: "Nova VIF timeout should be >= 300"
35 | bz1264740:
36 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1264740"
37 | name: "RHEL OSP Director must be configure with nova-event-callback by default"
38 | rabbit_partitioned:
39 | url: "none"
40 | name: "Rabbit is currently partitioned - YMMV... Good luck."
41 | tuned_profile_result:
42 | url: "none"
43 | name: "Ensure Tuned Profile is set to throughput-performance"
44 | neutron_rootwrap_daemon:
45 | url: "none"
46 | name: "Ensure rootwrap has daemon mode enabled"
47 | bz1330980:
48 | url: "https://bugzilla.redhat.com/show_bug.cgi?id=1330980"
49 | name: "Undercloud deployed with 1 keystone worker and cpu_count for threads"
50 | # vi:syntax=yaml
51 |
--------------------------------------------------------------------------------
/ansible/browbeat/roles/grafana-snapshot/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Generate Snapshots
4 | #
5 |
6 | - name: Generate General Snapshots
7 | shell: "mkdir -p /home/stack/browbeat/{{results_dir}}/{{item[0]}}/; curl -X GET 'http://{{grafana_ip}}:{{grafana_port}}/render/dashboard-solo/db/openstack-general-system-performance?panelId={{item[1].panelId}}&from={{from}}&to={{to}}&var-Cloud={{var_cloud}}&var-Node={{item[0]}}{{host_suffix}}&var-Interface=interface-test&var-Disk=disk-sda&width=1200' > /home/stack/browbeat/{{results_dir}}/{{item[0]}}/{{item[0]}}-{{item[1].name}}.png"
8 | with_nested:
9 | - "{{ hosts_in_group }}"
10 | - "{{ general_panels }}"
11 |
12 | - name: Generate Disk Snapshots
13 | shell: "mkdir -p /home/stack/browbeat/{{results_dir}}/{{item[0]}}/;curl -X GET 'http://{{grafana_ip}}:{{grafana_port}}/render/dashboard-solo/db/openstack-general-system-performance?panelId={{item[2].panelId}}&from={{from}}&to={{to}}&var-Cloud={{var_cloud}}&var-Node={{item[0]}}{{host_suffix}}&var-Interface=interface-test&var-Disk=disk-{{item[1]}}&width=1200' > /home/stack/browbeat/{{results_dir}}/{{item[0]}}/{{item[0]}}-{{item[2].name}}-{{item[1]}}.png"
14 | with_nested:
15 | - "{{ hosts_in_group }}"
16 | - "{{ disks_in_group }}"
17 | - "{{ disk_panels }}"
18 |
19 | - name: Generate Interface Snapshots
20 | shell: "mkdir -p /home/stack/browbeat/{{results_dir}}/{{item[0]}}/;curl -X GET 'http://{{grafana_ip}}:{{grafana_port}}/render/dashboard-solo/db/openstack-general-system-performance?panelId={{item[2].panelId}}&from={{from}}&to={{to}}&var-Cloud={{var_cloud}}&var-Node={{item[0]}}{{host_suffix}}&var-Interface=interface-{{item[1]}}&var-Disk=disk-sda&width=1200' > /home/stack/browbeat/{{results_dir}}/{{item[0]}}/{{item[0]}}-{{item[2].name}}-{{item[1]}}.png"
21 | with_nested:
22 | - "{{ hosts_in_group }}"
23 | - "{{ interfaces_in_group }}"
24 | - "{{ interface_panels }}"
25 |
26 | - name: Generate index.html per host
27 | template:
28 | src=index.html.j2
29 | dest=/home/stack/browbeat/{{results_dir}}/{{item}}/index.html
30 | owner=stack
31 | group=stack
32 | mode=0644
33 | with_items:
34 | - "{{groups[host_type]}}"
35 |
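All three shell tasks above hit Grafana's `render/dashboard-solo` endpoint with different panel ids and template variables; a sketch of that URL construction in Python (IP, port, and timestamp values are placeholders):

def render_url(grafana_ip, grafana_port, panel_id, from_ts, to_ts, cloud, node,
               interface='interface-test', disk='disk-sda', width=1200):
    # Mirrors the URL the curl commands above request.
    base = ('http://{}:{}/render/dashboard-solo/db/'
            'openstack-general-system-performance').format(grafana_ip, grafana_port)
    return ('{}?panelId={}&from={}&to={}&var-Cloud={}&var-Node={}'
            '&var-Interface={}&var-Disk={}&width={}').format(
                base, panel_id, from_ts, to_ts, cloud, node, interface, disk, width)

print(render_url('1.1.1.1', 3000, 9, 1462000000000, 1462000600000,
                 'openstack', 'overcloud-controller-0'))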
--------------------------------------------------------------------------------
/lib/Connmon.py:
--------------------------------------------------------------------------------
1 | from Tools import *
2 |
3 |
4 | class Connmon:
5 |
6 | def __init__(self, config):
7 | self.logger = logging.getLogger('browbeat.Connmon')
8 | self.config = config
9 | self.tools = Tools(self.config)
10 | return None
11 |
12 | # Start connmond
13 | def start_connmon(self, retry=None):
14 | self.stop_connmon()
15 | tool = "connmond"
16 | connmond = self.tools.find_cmd(tool)
17 | if not connmond:
18 | self.logger.error("Unable to find {}".format(tool))
19 | as_sudo = self.config['connmon']['sudo']
20 | cmd = ""
21 | if as_sudo:
22 | cmd += "sudo "
23 | cmd += "screen -X -S connmond kill"
24 | self.tools.run_cmd(cmd)
25 | self.logger.info("Starting connmond")
26 | cmd = ""
27 | cmd += "{} --config /etc/connmon.cfg > /tmp/connmond 2>&1 &".format(
28 | connmond)
29 | self.tools.run_cmd(cmd)
30 | if self.check_connmon_results() is False:
31 | if retry is None:
32 | self.start_connmon(retry=True)
33 | else:
34 | return False
35 | else:
36 | return True
37 |
38 | def check_connmon_results(self, result_file='/tmp/connmon_results.csv'):
39 | return os.path.isfile(result_file)
40 |
41 | # Stop connmond
42 | def stop_connmon(self):
43 | self.logger.info("Stopping connmond")
44 | return self.tools.run_cmd("pkill -9 connmond")
45 |
46 | # Create Connmon graphs
47 | def connmon_graphs(self, result_dir, test_name):
48 | cmd = "python graphing/connmonplot.py {}/connmon/{}.csv".format(result_dir,
49 | test_name)
50 | return self.tools.run_cmd(cmd)
51 |
52 | # Move connmon results
53 | def move_connmon_results(self, result_dir, test_name):
54 | path = "%s/connmon" % result_dir
55 | if not os.path.exists(path):
56 | os.mkdir(path)
57 | return shutil.move("/tmp/connmon_results.csv",
58 | "{}/connmon/{}.csv".format(result_dir, test_name))
59 |
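A sketch of how this class is driven around a workload run; the config dict is a minimal stand-in for Browbeat's parsed YAML and the result paths are hypothetical:

from Connmon import Connmon

config = {'connmon': {'sudo': True}}  # minimal stand-in for the browbeat config
connmon = Connmon(config)

connmon.start_connmon()                 # restarts connmond with /etc/connmon.cfg
# ... run the workload under test ...
connmon.stop_connmon()
if connmon.check_connmon_results():
    connmon.move_connmon_results('results/run-1', 'my-test')
    connmon.connmon_graphs('results/run-1', 'my-test')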
--------------------------------------------------------------------------------
/ansible/install/collectd-generic.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Generic playbook to install collectd on various machine types; use tags to select the machine type:
4 | #
5 | # Examples:
6 | #
7 | # ansible-playbook -i hosts install/collectd-generic.yml --tags="baremetal"
8 | # ansible-playbook -i hosts install/collectd-generic.yml --tags="guest"
9 | # ansible-playbook -i hosts install/collectd-generic.yml --tags="cfme-all-in-one"
10 | # ansible-playbook -i hosts install/collectd-generic.yml --tags="cfme-vmdb"
11 | # ansible-playbook -i hosts install/collectd-generic.yml --tags="cfme"
12 | # ansible-playbook -i hosts install/collectd-generic.yml --tags="graphite"
13 | # ansible-playbook -i hosts install/collectd-generic.yml --tags="ose"
14 | # ansible-playbook -i hosts install/collectd-generic.yml --tags="satellite6"
15 | # ansible-playbook -i hosts install/collectd-generic.yml --tags="baremetal,guest"
16 | #
17 |
18 | - hosts: baremetal
19 | remote_user: root
20 | vars:
21 | config_type: baremetal
22 | roles:
23 | - collectd-generic
24 | tags: baremetal
25 |
26 | - hosts: guest
27 | remote_user: root
28 | vars:
29 | config_type: guest
30 | roles:
31 | - collectd-generic
32 | tags: guest
33 |
34 | # Cloud Forms Database appliances with Graphite/Grafana
35 | - hosts: cfme-all-in-one
36 | remote_user: root
37 | vars:
38 | config_type: cfme-all-in-one
39 | roles:
40 | - collectd-generic
41 | tags: cfme-all-in-one
42 |
43 | # Cloud Forms Database appliances
44 | - hosts: cfme-vmdb
45 | remote_user: root
46 | vars:
47 | config_type: cfme-vmdb
48 | roles:
49 | - collectd-generic
50 | tags: cfme-vmdb
51 |
52 | # Cloud Forms Worker appliances
53 | - hosts: cfme
54 | remote_user: root
55 | vars:
56 | config_type: cfme
57 | roles:
58 | - collectd-generic
59 | tags: cfme
60 |
61 | - hosts: graphite
62 | remote_user: root
63 | vars:
64 | config_type: graphite
65 | roles:
66 | - collectd-generic
67 | tags: graphite
68 |
69 | - hosts: ose
70 | remote_user: root
71 | vars:
72 | config_type: ose
73 | roles:
74 | - collectd-generic
75 | tags: ose
76 |
77 | - hosts: satellite6
78 | remote_user: root
79 | vars:
80 | config_type: satellite6
81 | roles:
82 | - collectd-generic
83 | tags: satellite6
84 |
--------------------------------------------------------------------------------
/lib/Elastic.py:
--------------------------------------------------------------------------------
1 | from elasticsearch import Elasticsearch
2 | import logging
3 | import json
4 | import pprint
5 | import numpy
6 | import datetime
7 |
8 | class Elastic:
9 | """
10 | """
11 | def __init__(self,config,tool="browbeat") :
12 | self.config = config
13 | self.logger = logging.getLogger('browbeat.Elastic')
14 | self.es = Elasticsearch([
15 | {'host': self.config['elasticsearch']['host'],
16 | 'port': self.config['elasticsearch']['port']}],
17 | send_get_body_as='POST'
18 | )
19 | today = datetime.datetime.today()
20 | self.index = "{}-{}".format(tool,today.strftime('%Y.%m.%d'))
21 |
22 | """
23 | """
24 | def load_json(self,result):
25 | json_data = None
26 | self.logger.info("Loading JSON")
27 | json_data = json.loads(result)
28 | return json_data
29 |
30 | """
31 | """
32 | def load_json_file(self,result):
33 | json_data = None
34 | self.logger.info("Loading JSON file : {}".format(result))
35 | try:
36 | with open(result) as jdata:
37 | json_data = json.load(jdata)
38 | except (IOError, OSError) as e:
39 | self.logger.error("Error loading JSON file : {}".format(result))
40 | return False
41 | return json_data
42 |
43 | """
44 | """
45 | def combine_metadata(self,result):
46 | if len(self.config['elasticsearch']['metadata_files']) > 0:
47 | meta = self.config['elasticsearch']['metadata_files']
48 | for _meta in meta:
49 | try:
50 | with open(_meta['file']) as jdata:
51 | result[_meta['name']] = json.load(jdata)
52 | except (IOError, OSError) as e:
53 | self.logger.error("Error loading Metadata file : {}".format(_meta['file']))
54 | return False
55 | return result
56 |
57 | """
58 | """
59 | def index_result(self,result,_type='result',_id=None) :
60 | return self.es.index(index=self.index,
61 | id=_id,
62 | body=result,
63 | doc_type=_type,
64 | refresh=True
65 | )
66 |
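Usage amounts to loading a result, attaching metadata, and indexing it; a short sketch with placeholder connection details:

from Elastic import Elastic

config = {'elasticsearch': {'host': 'elk.example.com',   # placeholder host
                            'port': 9200,
                            'metadata_files': []}}
es = Elastic(config, tool='browbeat')

result = es.load_json('{"scenario": "authenticate", "status": "pass"}')
result = es.combine_metadata(result)
if result:
    es.index_result(result, _type='result')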
--------------------------------------------------------------------------------
/ansible/check/roles/keystone/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Keystone tasks for performance checks
4 | #
5 |
6 | - name: Check Keystone cron job
7 | shell: crontab -l -u keystone | grep -v "#"
8 | register: keystone_cron_result
9 | changed_when: false
10 | failed_when: "'token_flush' not in '{{ keystone_cron_result.stdout }}'"
11 | ignore_errors: True
12 |
13 | - name: Check Keystone Token Provider
14 | command: crudini --get /etc/keystone/keystone.conf token provider
15 | register: keystone_token_provider
16 | changed_when: false
17 | ignore_errors: True
18 |
19 | - debug: msg="Keystone Token Provider:{{ keystone_token_provider.stdout }}"
20 |
21 | - name: Determine if Keystone is deployed in eventlet
22 | shell: ps afx | grep "[Kk]eystone-all" -c
23 | register: keystone_in_eventlet
24 | changed_when: false
25 | ignore_errors: True
26 |
27 | - name: Set keystone_deployment variable to httpd
28 | set_fact: keystone_deployment='httpd'
29 | when: keystone_in_eventlet.stdout|int == 0
30 |
31 | - name: Set keystone_deployment variable to eventlet
32 | set_fact: keystone_deployment='eventlet'
33 | when: keystone_in_eventlet.stdout|int > 0
34 |
35 | - debug: msg="Keystone deployed in:{{ keystone_deployment }}"
36 |
37 | - name: Keystone HTTP admin processes
38 | command: egrep -o "processes=[0-9]+" /etc/httpd/conf.d/*keystone*admin.conf | egrep -o "[0-9]+"
39 | register: bz1330980
40 | failed_when: bz1330980.stdout|int < keystone_processes
41 | when: keystone_in_eventlet.stdout|int == 0
42 | ignore_errors: True
44 |
45 | - name: Keystone HTTP main processes
46 | command: egrep -o "processes=[0-9]+" /etc/httpd/conf.d/*keystone*main.conf | egrep -o "[0-9]+"
47 | register: bz1330980
48 | failed_when: bz1330980.stdout|int < keystone_processes
49 | when: keystone_in_eventlet.stdout|int == 0
50 | ignore_errors: True
51 |
52 | - name: Keystone HTTP admin threads
53 | command: egrep -o "threads=[0-9]+" /etc/httpd/conf.d/*keystone*admin.conf | egrep -o "[0-9]+"
54 | register: bz1330980
55 | failed_when: bz1330980.stdout|int < keystone_threads
56 | when: keystone_in_eventlet.stdout|int == 0
57 | ignore_errors: True
58 |
59 | - name: Keystone HTTP main threads
60 | command: egrep -o "threads=[0-9]+" /etc/httpd/conf.d/*keystone*main.conf | egrep -o "[0-9]+"
61 | register: bz1330980
62 | failed_when: bz1330980.stdout|int < keystone_threads
63 | when: keystone_in_eventlet.stdout|int == 0
64 | ignore_errors: True
65 |
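The four httpd tasks above all extract a `processes=`/`threads=` number and compare it against the group_vars thresholds; the same check expressed in Python (the WSGI line is hypothetical, thresholds as in group_vars/undercloud):

import re

def wsgi_value(conf_line, key):
    # Pull e.g. processes=2 out of a WSGIDaemonProcess directive.
    match = re.search(r'{}=(\d+)'.format(key), conf_line)
    return int(match.group(1)) if match else 0

line = 'WSGIDaemonProcess keystone_admin processes=2 threads=6'  # hypothetical
keystone_processes, keystone_threads = 2, 6  # thresholds from group_vars/undercloud

print(wsgi_value(line, 'processes') >= keystone_processes)  # bz1330980 check passes
print(wsgi_value(line, 'threads') >= keystone_threads)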
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-generic/templates/ose.collectd.conf.j2:
--------------------------------------------------------------------------------
1 | # Installed by Browbeat Ansible Installer
2 | # Config type: {{config_type}}
3 |
4 | # Interval default is 10s
5 | Interval {{collectd_interval}}
6 |
7 | # Hostname for this machine, if not defined, use gethostname(2) system call
8 | Hostname "{{inventory_hostname}}"
9 |
10 | # Allow collectd to log
11 | LoadPlugin syslog
12 |
13 | # Loaded Plugins:
14 | LoadPlugin write_graphite
15 | LoadPlugin cpu
16 | LoadPlugin df
17 | LoadPlugin disk
18 | # LoadPlugin exec
19 | LoadPlugin interface
20 | LoadPlugin irq
21 | LoadPlugin load
22 | LoadPlugin memory
23 | LoadPlugin numa
24 | LoadPlugin processes
25 | LoadPlugin swap
26 | LoadPlugin turbostat
27 | LoadPlugin unixsock
28 | LoadPlugin uptime
29 |
30 | # Open unix domain socket for collectdctl
31 | <Plugin unixsock>
32 | SocketFile "/var/run/collectd-unixsock"
33 | SocketGroup "collectd"
34 | SocketPerms "0770"
35 | DeleteSocket true
36 | </Plugin>
37 |
38 | # Graphite Host Configuration
39 | <Plugin write_graphite>
40 | <Node "graphite">
41 | Host "{{graphite_host}}"
42 | Port "2003"
43 | Prefix "{{graphite_prefix}}."
44 | Protocol "tcp"
45 | LogSendErrors true
46 | StoreRates true
47 | AlwaysAppendDS false
48 | EscapeCharacter "_"
49 | </Node>
50 | </Plugin>
51 |
52 | <Plugin df>
53 | ValuesPercentage true
54 | </Plugin>
55 |
56 | <Plugin disk>
57 | Disk "/^[hsv]d[a-z]+[0-9]?$/"
58 | IgnoreSelected false
59 | </Plugin>
60 |
61 | # <Plugin exec>
62 | # Exec "nobody:root" "/usr/local/bin/ose-metrics.py"
63 | # </Plugin>
64 |
65 | <Plugin processes>
66 | # Openshift Master
67 | ProcessMatch "openshift master" "/usr/bin/openshift.+start.+master"
68 | ProcessMatch "openshift node" "/usr/bin/openshift.+start.+node"
69 |
70 | ProcessMatch "etcd" "/usr/bin/etcd"
71 |
72 | ProcessMatch "ovsdb-server monitor" "ovsdb-server.+monitoring"
73 | ProcessMatch "ovsdb-server" "ovsdb-server.+/etc/openvswitch/conf.db"
74 | ProcessMatch "ovsdb-vswitchd monitor" "ovs-vswitchd.+monitoring"
75 | ProcessMatch "ovsdb-vswitchd" "ovs-vswitchd.+unix:/var/run/openvswitch/db.sock"
76 |
77 | ProcessMatch "docker daemon" "/usr/bin/docker.+daemon"
78 |
79 | ProcessMatch "pod" "pod"
80 |
81 | # Collect on collectd process
82 | ProcessMatch "collectd" "/usr/sbin/collectd.+-C.+/etc/collectd.conf"
83 | </Plugin>
84 |
85 | <Plugin swap>
86 | ReportBytes true
87 | ValuesPercentage true
88 | </Plugin>
89 |
90 | # Include other collectd configuration files
91 | Include "/etc/collectd.d"
92 |
--------------------------------------------------------------------------------
/ansible/install/roles/grafana_docker/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Install/run grafana-server for browbeat
4 | #
5 |
6 | - name: Import EPEL GPG Key
7 | rpm_key: key=https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
8 | state=present
9 |
10 | - name: Check for EPEL repo
11 | yum: name=https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
12 | state=present
13 |
14 | - name: disable firewalld
15 | service: name=firewalld state=stopped enabled=false
16 | become: true
17 | ignore_errors: true
18 |
19 | - name: Install repo file for docker
20 | copy:
21 | src=docker.repo
22 | dest=/etc/yum.repos.d/docker.repo
23 | owner=root
24 | group=root
25 | mode=0644
26 | become: true
27 |
28 | - name: Install docker rpm
29 | yum: name={{ item }} state=present
30 | become: true
31 | with_items:
32 | - docker-engine
33 |
34 | # Start docker service
35 |
36 | - name: Setup docker service
37 | service: name=docker state=started enabled=true
38 | become: true
39 |
40 | - name: ensure data directory exists
41 | file: path={{ persistent_grafana_data_path }} state=directory mode=0777
42 |
43 | - name: ensure docker overrides directory for grafana
44 | file: path=/etc/docker/grafana state=directory mode=0755
45 |
46 | - name: Install docker-ps script
47 | copy:
48 | src=docker-ps-names.sh
49 | dest=/usr/local/bin/docker-ps-names.sh
50 | owner=root
51 | group=root
52 | mode=0755
53 | become: true
54 |
55 | - name: check active containers
56 | command: /usr/local/bin/docker-ps-names.sh
57 | register: docker_ps_a
58 |
59 | - name: start grafana docker container
60 | command: "{{item}}"
61 | ignore_errors: true
62 | with_items:
63 | - docker kill grafana
64 | - docker rm grafana
65 | - docker run -d --name grafana -p {{ docker_grafana_port }}:3000 -v {{ persistent_grafana_data_path }}:/var/lib/grafana {{ grafana_docker_image }}
66 | when: '"grafana" not in docker_ps_a.stdout'
67 |
68 | - name: Setup grafana systemd config
69 | template:
70 | src=grafana-server.service.j2
71 | dest=/etc/systemd/system/grafana-server.service
72 | owner=root
73 | group=root
74 | mode=0644
75 | become: true
76 | register: systemd_grafana_needs_restart
77 |
78 | - name: bounce systemd and grafana-server container
79 | shell: /usr/bin/systemctl daemon-reload && /usr/bin/systemctl enable grafana-server && /usr/bin/systemctl restart grafana-server
80 | become: true
81 | when: systemd_grafana_needs_restart.changed
82 |
83 | - name: Disable EPEL Repo
84 | ini_file: dest=/etc/yum.repos.d/epel.repo
85 | section=epel
86 | option=enabled
87 | value=0
88 |
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-openstack/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Install/run collectd for browbeat
4 | #
5 |
6 | - name: Check for EPEL
7 | shell: rpm -qa | grep -q epel-release
8 | ignore_errors: true
9 | register: epel_installed
10 |
11 | #
12 | # (akrzos) Installing EPEL on rhel7 with the yum module has proven unreliable,
13 | # so EPEL is installed via rpm here. This does display a warning in Ansible output.
14 | #
15 | - name: Install EPEL rpm
16 | command: rpm -ivh {{ epel7_rpm }}
17 | become: true
18 | when: epel_installed.rc != 0
19 |
20 | #
21 | # (akrzos) The yum module works from this point on because the EPEL repo
22 | # now exists.
23 | #
24 | - name: Install collectd rpms
25 | yum: name={{ item }} state=present
26 | become: true
27 | with_items:
28 | - collectd
29 | - collectd-turbostat
30 | - collectd-mysql
31 |
32 | - name: Configure collectd.conf
33 | template:
34 | src={{config_type}}.collectd.conf.j2
35 | dest=/etc/collectd.conf
36 | owner=root
37 | group=root
38 | mode=0644
39 | become: true
40 |
41 | - name: Copy collectd-redis.sh
42 | copy:
43 | src=collectd-redis.sh
44 | dest=/usr/local/bin/collectd-redis.sh
45 | owner=root
46 | group=root
47 | mode=0755
48 | become: true
49 |
50 | #
51 | # Configure selinux bits
52 | #
53 | - name: Check for collectd permissive
54 | shell: semodule -l | grep -q permissive_collectd_t
55 | become: true
56 | register: collectd_permissive
57 | ignore_errors: true
58 | changed_when: false
59 |
60 | - name: Set permissive for collectd
61 | shell: semanage permissive -a collectd_t
62 | become: true
63 | when: collectd_permissive.rc != 0
64 |
65 | #
66 | # Additional policy bits may be needed for exec
67 | #
68 | - name: Collectd policy customization
69 | copy:
70 | src=custom-collectd.pp
71 | dest=/root/custom-collectd.pp
72 | owner=root
73 | group=root
74 | mode=0644
75 | become: true
76 |
77 | - name: Check for collectd custom
78 | shell: semodule -l | grep -q custom-collectd
79 | become: true
80 | register: collectd_custom
81 | ignore_errors: true
82 | changed_when: false
83 |
84 | - name: Set custom policy for collectd
85 | shell: semodule -i /root/custom-collectd.pp
86 | become: true
87 | when: collectd_custom.rc != 0
88 |
89 | #
90 | # Start collectd service
91 | #
92 | - name: Setup collectd service
93 | service: name=collectd state=restarted enabled=true
94 | become: true
95 |
96 | - name: Disable EPEL
97 | shell: rpm -e epel-release
98 | ignore_errors: true
99 | become: true
100 |
--------------------------------------------------------------------------------
/ansible/install/roles/grafana/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Install/run grafana-server for browbeat
4 | #
5 |
6 | # Check that grafana_host and graphite_host are set prior to running the playbook
7 | - name: Check Graphite/Grafana Host IP Address
8 | fail:
9 | msg="** Edit grafana_host and graphite_host in ../install/group_vars/all.yml before running **"
10 | when: ((grafana_host is none) or (graphite_host is none))
11 |
12 | - name: Import EPEL GPG Key
13 | rpm_key: key=https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
14 | state=present
15 |
16 | - name: Check for EPEL repo
17 | yum: name=https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
18 | state=present
19 |
20 | - name: Install grafana rpms
21 | yum: name={{ item }} state=present
22 | become: true
23 | with_items:
24 | - https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm
25 |
26 | - name: Set grafana server port
27 | ini_file:
28 | dest=/etc/grafana/grafana.ini
29 | section={{item.section}}
30 | option={{item.option}}
31 | value={{item.value}}
32 | with_items:
33 | - section: server
34 | option: http_port
35 | value: "{{grafana_port}}"
36 | - section: auth.anonymous
37 | option: enabled
38 | value: true
39 | become: true
40 |
41 | # disable firewalld (might need to create specific firewall rules or leave it to admin to do via iptables)
42 |
43 | - name: disable firewalld
44 | service: name=firewalld state=stopped enabled=false
45 | become: true
46 |
47 | #
48 | # setup the grafana-server service
49 | #
50 | - name: Setup grafana-server service
51 | service: name=grafana-server state=started enabled=true
52 | become: true
53 | ignore_errors: true
54 |
55 | - name: Wait for grafana to be ready
56 | wait_for: host={{grafana_host}} port={{grafana_port}} delay=5 timeout=30
57 |
58 | #
59 | # Add graphite server as a default datasource
60 | #
61 | - name: Ensure {{role_path}}/files directory exists
62 | file: path={{role_path}}/files state=directory
63 | connection: local
64 |
65 | - name: Create data_source.json
66 | template:
67 | src: data_source.json.j2
68 | dest: "{{role_path}}/files/data_source.json"
69 | connection: local
70 |
71 | - name: Create Data Source on grafana server
72 | command: "curl -X POST -H 'Content-Type: application/json' -d @{{role_path}}/files/data_source.json http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/datasources"
73 | connection: local
74 |
75 | - name: Remove leftover json file
76 | file: path={{role_path}}/files/data_source.json state=absent
77 | connection: local
78 |
79 | - name: Disable EPEL Repo
80 | ini_file: dest=/etc/yum.repos.d/epel.repo
81 | section=epel
82 | option=enabled
83 | value=0
84 |
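The datasource task above is a single authenticated POST; a Python 2 stdlib sketch of the same call (credentials, hosts, and payload fields are assumptions mirroring what data_source.json.j2 presumably renders):

import base64
import json
import urllib2  # Python 2, matching the rest of lib/

grafana = 'http://localhost:3000'                 # placeholder grafana_host:grafana_port
auth = base64.b64encode('admin:admin')            # placeholder grafana credentials

payload = {'name': 'graphite',                    # assumed datasource definition
           'type': 'graphite',
           'url': 'http://graphite.example.com:80',
           'access': 'proxy',
           'isDefault': True}

request = urllib2.Request(grafana + '/api/datasources',
                          data=json.dumps(payload),
                          headers={'Content-Type': 'application/json',
                                   'Authorization': 'Basic ' + auth})
print(urllib2.urlopen(request).read())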
--------------------------------------------------------------------------------
/ansible/check/roles/controller/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Performance checks specific to controller hosts
4 | #
5 |
6 | - name: Check max_connections on the database
7 | shell: mysql -e "show variables like 'max_connections';" | grep max_connections | awk '{print $2}'
8 | register: bz1266253
9 | changed_when: no
10 | failed_when: bz1266253.stdout|int < mariadb_max_connections
11 | ignore_errors: yes
12 |
13 | - name: Suggested buffer_pool_size
14 | shell: mysql -Bse "SELECT CEILING(Total_InnoDB_Bytes*1.6/POWER(1024,2)) RIBPS FROM (SELECT SUM(data_length+index_length) Total_InnoDB_Bytes FROM information_schema.tables WHERE engine='InnoDB') A;"
15 | register: suggested_buffer_pool_size
16 | changed_when: no
17 | ignore_errors: yes
18 |
19 | - name: Current buffer_pool_size
20 | shell: echo $(mysql -Bse " select @@innodb_buffer_pool_size")/1024/1024 | bc
21 | register: buffer_pool_size
22 | failed_when: buffer_pool_size.stdout|int < suggested_buffer_pool_size.stdout|int
23 | changed_when: no
24 | ignore_errors: yes
25 |
26 | - name: File descriptors for the mysql process
27 | shell: cat /proc/$(pgrep mysqld_safe)/limits | grep "open files" | awk '{print $4}'
28 | register: mysqld_safe_soft_fd
29 | failed_when: mysqld_safe_soft_fd.stdout|int < mysqld_soft_fd
30 | changed_when: no
31 | ignore_errors: yes
32 |
33 | - name: Check rabbitmq file descriptors
34 | shell: rabbitmqctl status | grep file_descriptors | awk -F',' '{print $3}' | sed 's/.$//'
35 | register: bz1282491
36 | changed_when: no
37 | failed_when: bz1282491.stdout|int < rabbitmq_fd
38 | ignore_errors: yes
39 |
40 | - name: Check HAProxy Default maxconn
41 | shell: cat /etc/haproxy/haproxy.cfg | grep -iPzo '(?s)defaults.*?\n(\n|$)' | grep maxconn | awk '{print $2}'
42 | register: bz1281584
43 | failed_when: bz1281584.stdout|int < mariadb_max_connections
44 | changed_when: no
45 | ignore_errors: yes
46 |
47 | - name: Check netns tuning
48 | shell: sysctl net.core.netdev_max_backlog | awk '{print $3}'
49 | register: bz1095811
50 | failed_when: bz1095811.stdout|int < netdev_max_backlog
51 | changed_when: no
52 | ignore_errors: yes
53 |
54 | - name: Check udev performance issue
55 | shell: grep -q 'SUBSYSTEM=="net", ACTION=="add", TAG+="systemd", ENV{SYSTEMD_WANTS}+="dhcp-interface@$name.service"' /etc/udev/rules.d/99-dhcp-all-interfaces.rules
56 | register: bz1293712
57 | changed_when: no
58 | ignore_errors: yes
59 | failed_when: bz1293712.rc == 0
60 |
61 | - name: Check rabbitmq for partitions
62 | shell: rabbitmqctl cluster_status | grep partitions -A 1 | grep -q controller
63 | register: rabbit_partitioned
64 | changed_when: no
65 | failed_when: rabbit_partitioned.rc == 0
66 |
67 | - name: Run MySQL Tuner script
68 | script: mysqltuner.pl --nocolor
69 | register: mysql_out
70 | ignore_errors: yes
71 |
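The two buffer-pool tasks above encode a rule of thumb: the recommended `innodb_buffer_pool_size` is roughly 1.6x the total InnoDB data-plus-index footprint, rounded up to whole MiB. The same arithmetic in Python (byte counts are hypothetical):

import math

def suggested_buffer_pool_mib(total_innodb_bytes):
    # CEILING(Total_InnoDB_Bytes * 1.6 / POWER(1024,2)) from the SQL above.
    return int(math.ceil(total_innodb_bytes * 1.6 / 1024 ** 2))

current_mib = 128                                     # hypothetical current setting
suggested = suggested_buffer_pool_mib(3 * 1024 ** 3)  # hypothetical 3 GiB of InnoDB data
print("suggested: {} MiB".format(suggested))
print("check fails: {}".format(current_mib < suggested))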
--------------------------------------------------------------------------------
/ansible/install/roles/collectd-openstack/templates/ceph.collectd.conf.j2:
--------------------------------------------------------------------------------
1 | # Installed by Browbeat Ansible Installer
2 | # Config type: {{config_type}}
3 |
4 | # Interval default is 10s
5 | Interval {{collectd_interval}}
6 |
7 | # Hostname for this machine, if not defined, use gethostname(2) system call
8 | Hostname "{{inventory_hostname}}"
9 |
10 | # Allow collectd to log
11 | LoadPlugin syslog
12 |
13 | # Loaded Plugins:
14 | LoadPlugin write_graphite
15 | LoadPlugin cpu
16 | LoadPlugin conntrack
17 | LoadPlugin df
18 | LoadPlugin disk
19 | LoadPlugin exec
20 | LoadPlugin interface
21 | LoadPlugin irq
22 | LoadPlugin load
23 | LoadPlugin match_regex
24 | LoadPlugin memory
25 | LoadPlugin numa
26 | LoadPlugin processes
27 | LoadPlugin swap
28 | LoadPlugin tail
29 | LoadPlugin turbostat
30 | LoadPlugin unixsock
31 | LoadPlugin uptime
32 |
33 | # Open unix domain socket for collectdctl
34 | <Plugin unixsock>
35 | SocketFile "/var/run/collectd-unixsock"
36 | SocketGroup "collectd"
37 | SocketPerms "0770"
38 | DeleteSocket true
39 | </Plugin>
40 |
41 | PreCacheChain "PreCache"
42 | <Chain "PreCache">
43 | <Rule>
44 | <Match "regex">
45 | Plugin "^interface$"
46 | PluginInstance "^tap*"
47 | </Match>
48 | Target "stop"
49 | </Rule>
50 | <Rule>
51 | <Match "regex">
52 | Plugin "^interface$"
53 | PluginInstance "^q.*"
54 | </Match>
55 | Target "stop"
56 | </Rule>
57 | Target "return"
58 | </Chain>
59 |
60 | # Graphite Host Configuration
61 | <Plugin write_graphite>
62 | <Node "graphite">
63 | Host "{{graphite_host}}"
64 | Port "2003"
65 | Prefix "{{graphite_prefix}}."
66 | Protocol "tcp"
67 | LogSendErrors true
68 | StoreRates true
69 | AlwaysAppendDS false
70 | EscapeCharacter "_"
71 | </Node>
72 | </Plugin>
73 |
74 | <Plugin df>
75 | ValuesPercentage true
76 | </Plugin>
77 |
78 | <Plugin disk>
79 | Disk "/^[hsv]d[a-z]+[0-9]?$/"
80 | IgnoreSelected false
81 | </Plugin>
82 |
83 | <Plugin processes>
84 | # Ceph
85 | ProcessMatch "ceph-mon" "^/usr/bin/ceph-mon"
86 | ProcessMatch "ceph-osd" "^/usr/bin/ceph-osd"
87 | ProcessMatch "diamond" "python.+diamond"
88 | ProcessMatch "salt-minion" "python.+salt-minion"
89 |
90 | ProcessMatch "collectd" "/usr/sbin/collectd.+-C.+/etc/collectd.conf"
91 | Process "corosync"
92 | Process "httpd"
93 | Process "memcached"
94 | ProcessMatch "ovs-vswitchd" "ovs-vswitchd.+openvswitch"
95 | ProcessMatch "ovsdb-server" "ovsdb-server.+openvswitch"
96 | ProcessMatch "qemu-kvm" "/usr/libexec/qemu-kvm"
97 | </Plugin>
98 |
99 | <Plugin swap>
100 | ReportBytes true
101 | ValuesPercentage true
102 | </Plugin>
103 |
104 | # Tail plugin configuration
105 | <Plugin "tail">
106 | {# Add ceph logs to tail #}
107 | </Plugin>
108 |
109 | # Include other collectd configuration files
110 | Include "/etc/collectd.d"
111 |
--------------------------------------------------------------------------------
/ansible/install/roles/dashboard-openstack/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Generate Openstack collectd to graphite dashboards
4 | #
5 |
6 | - name: Generate Individual Machine Dashboards
7 | template:
8 | src: "{{role_path}}/templates/{{item.template_name}}_general_system_performance.json.j2"
9 | dest: "{{role_path}}/files/{{item.process_list_name}}_general_system_performance.json"
10 | with_items: "{{dashboards}}"
11 |
12 | - name: Generate All Openstack Nodes CPU/Memory/Disk/Network Dashboards
13 | template:
14 | src: "{{item}}.json.j2"
15 | dest: "{{role_path}}/files/{{item}}.json"
16 | with_items:
17 | - all_cpu_graphs
18 | - all_memory_graphs
19 | - all_disk_graphs
20 | - all_network_graphs
21 |
22 | - name: Remove Existing Individual Machine Dashboards
23 | command: "curl -X DELETE -H 'Content-Type: application/json' http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/dashboards/db/{{item.process_list_name|lower}}-general-system-performance"
24 | when: overwrite_existing
25 | with_items: "{{dashboards}}"
26 |
27 | - name: Remove Existing All Openstack Nodes CPU/Memory/Disk/Network Dashboards
28 | command: "curl -X DELETE -H 'Content-Type: application/json' http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/dashboards/db/{{item}}"
29 | when: overwrite_existing
30 | with_items:
31 | - "{{dashboard_cloud_name}}-all-nodes-cpu"
32 | - "{{dashboard_cloud_name}}-all-nodes-memory"
33 | - "{{dashboard_cloud_name}}-all-nodes-disk"
34 | - "{{dashboard_cloud_name}}-all-nodes-network"
35 | - cloud-system-performance-comparsion
36 |
37 | - name: Upload dashboards to Grafana
38 | command: "curl -X POST -H 'Content-Type: application/json' -d @{{role_path}}/files/{{item.process_list_name}}_general_system_performance.json http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/dashboards/db"
39 | with_items: "{{dashboards}}"
40 |
41 | - name: Upload Dashboards to Grafana
42 | command: "curl -X POST -H 'Content-Type: application/json' -d @{{item}} http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/dashboards/db"
43 | with_items:
44 | - "{{role_path}}/files/all_cpu_graphs.json"
45 | - "{{role_path}}/files/all_memory_graphs.json"
46 | - "{{role_path}}/files/all_disk_graphs.json"
47 | - "{{role_path}}/files/all_network_graphs.json"
48 | - "{{role_path}}/files/cloud_system_performance_comparsion.json"
49 |
50 | - name: Remove leftover json file(s) from Individual Machine Dashboards
51 | file: path={{role_path}}/files/{{item.process_list_name}}_general_system_performance.json state=absent
52 | with_items: "{{dashboards}}"
53 |
54 | - name: Remove leftover json file(s) from All Openstack Nodes CPU/Memory/Disk/Network Dashboards
55 | file: path={{role_path}}/files/{{item}} state=absent
56 | with_items:
57 | - all_cpu_graphs.json
58 | - all_memory_graphs.json
59 | - all_disk_graphs.json
60 | - all_network_graphs.json
61 |
--------------------------------------------------------------------------------
/lib/WorkloadBase.py:
--------------------------------------------------------------------------------
1 | from abc import ABCMeta, abstractmethod
2 | import os
3 | import logging
4 | import yaml
5 | import collections
6 | class WorkloadBase:
7 | __metaclass__ = ABCMeta
8 | success = 0
9 | failure = 0
10 | total_tests = 0
11 | total_scenarios = 0
12 | browbeat = {}
13 |
14 | @abstractmethod
15 | def update_scenarios(self):
16 | pass
17 |
18 | @abstractmethod
19 | def update_tests(self):
20 | pass
21 |
22 | @abstractmethod
23 | def update_pass_tests(self):
24 | pass
25 |
26 | @abstractmethod
27 | def update_fail_tests(self):
28 | pass
29 |
30 | def update_total_scenarios(self):
31 | WorkloadBase.total_scenarios += 1
32 |
33 | def update_total_tests(self):
34 | WorkloadBase.total_tests += 1
35 |
36 | def update_total_pass_tests(self):
37 | WorkloadBase.success += 1
38 |
39 | def update_total_fail_tests(self):
40 | WorkloadBase.failure += 1
41 |
42 | def workload_logger(self, result_dir, workload):
43 | base = result_dir.split('/')
44 | if not os.path.isfile("{}/{}/browbeat-{}-run.log".format(base[0], base[1], workload)):
45 | file_handler = logging.FileHandler(
46 | "{}/{}/browbeat-{}-run.log".format(base[0], base[1], workload))
47 | file_handler.setLevel(logging.DEBUG)
48 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)5s - %(message)s')
49 | file_handler.setFormatter(formatter)
50 | self.logger.addHandler(file_handler)
51 | return None
52 |
53 | def get_time_dict(self, to_time, from_time, benchmark, test_name, workload, status):
54 | time_diff = (to_time - from_time)
55 | if workload not in WorkloadBase.browbeat:
56 | WorkloadBase.browbeat[workload] = {}
57 | if benchmark not in WorkloadBase.browbeat[workload]:
58 | WorkloadBase.browbeat[workload][benchmark] = {}
59 | if 'tests' not in WorkloadBase.browbeat[workload][benchmark]:
60 | WorkloadBase.browbeat[workload][benchmark]['tests'] = []
61 | WorkloadBase.browbeat[workload][benchmark]['tests'].append(
62 | {'Test name': test_name, 'Time': time_diff, 'status': status})
63 |
64 | @staticmethod
65 | def print_report(result_dir, time_stamp):
66 | with open(os.path.join(result_dir, time_stamp + '.report'), 'w') as yaml_file:
67 | yaml_file.write("Browbeat Report Card\n")
68 | if not WorkloadBase.browbeat:
69 | yaml_file.write("No tests were enabled")
70 | else:
71 | yaml_file.write(yaml.dump(WorkloadBase.browbeat, default_flow_style=False))
72 |
73 | @staticmethod
74 | def print_summary():
75 | print("Total scenarios executed:{}".format(WorkloadBase.total_scenarios))
76 | print("Total tests executed:{}".format(WorkloadBase.total_tests))
77 | print("Total tests passed:{}".format(WorkloadBase.success))
78 | print("Total tests failed:{}".format(WorkloadBase.failure))
79 |
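A minimal concrete workload showing what the abstract base expects of subclasses; the workload itself is a hypothetical no-op:

import time
from WorkloadBase import WorkloadBase

class DummyWorkload(WorkloadBase):
    # Each concrete workload reports its own scenario/test counts.
    def update_scenarios(self):
        self.update_total_scenarios()

    def update_tests(self):
        self.update_total_tests()

    def update_pass_tests(self):
        self.update_total_pass_tests()

    def update_fail_tests(self):
        self.update_total_fail_tests()

workload = DummyWorkload()
start = time.time()
# ... run one test here ...
workload.update_tests()
workload.update_pass_tests()
workload.get_time_dict(time.time(), start, 'dummy-benchmark', 'test-1', 'dummy', 'pass')
WorkloadBase.print_summary()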
--------------------------------------------------------------------------------
/ansible/check/browbeat-example-bug_report.log:
--------------------------------------------------------------------------------
1 | # Browbeat generated bug report
2 | ---------------------------------------
3 | | Issues for host : overcloud-controller-0
4 | ---------------------------------------
5 | Bug: bz1095811
6 | Name: Network connectivity issues after 1000 netns
7 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1095811
8 |
9 | Bug: nova_vif_timeout_result
10 | Name: Nova VIF timeout should be >= 300
11 | URL: none
12 |
13 | Bug: bz1264740
14 | Name: RHEL OSP Director must be configured with nova-event-callback by default
15 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1264740
16 |
17 | ---------------------------------------
18 | | Issues for host : overcloud-controller-1
19 | ---------------------------------------
20 | Bug: bz1095811
21 | Name: Network connectivity issues after 1000 netns
22 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1095811
23 |
24 | Bug: nova_vif_timeout_result
25 | Name: Nova VIF timeout should be >= 300
26 | URL: none
27 |
28 | Bug: bz1264740
29 | Name: RHEL OSP Director must be configured with nova-event-callback by default
30 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1264740
31 |
32 | ---------------------------------------
33 | | Issues for host : overcloud-controller-2
34 | ---------------------------------------
35 | Bug: bz1095811
36 | Name: Network connectivity issues after 1000 netns
37 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1095811
38 |
39 | Bug: nova_vif_timeout_result
40 | Name: Nova VIF timeout should be >= 300
41 | URL: none
42 |
43 | Bug: bz1264740
44 | Name: RHEL OSP Director must be configured with nova-event-callback by default
45 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1264740
46 |
47 |
48 | ---------------------------------------
49 | | Issues for host : overcloud-novacompute-0
50 | ---------------------------------------
51 | Bug: bz1282644
52 | Name: increase reserved_host_memory_mb
53 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1282644
54 |
55 | Bug: bz1264740
56 | Name: RHEL OSP Director must be configured with nova-event-callback by default
57 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1264740
58 |
59 | Bug: tuned_profile_result
60 | Name: Ensure Tuned Profile is set to {{ tuned_profile }}
61 | URL: none
62 |
63 | Bug: bz1245714
64 | Name: No Swap Space allocated
65 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1245714
66 |
67 | Bug: nova_vif_timeout_result
68 | Name: Nova VIF timeout should be >= 300
69 | URL: none
70 |
71 | ---------------------------------------
72 | | Issues for host : overcloud-novacompute-1
73 | ---------------------------------------
74 | Bug: bz1282644
75 | Name: increase reserved_host_memory_mb
76 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1282644
77 |
78 | Bug: bz1264740
79 | Name: RHEL OSP Director must be configured with nova-event-callback by default
80 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1264740
81 |
82 | Bug: tuned_profile_result
83 | Name: Ensure Tuned Profile is set to {{ tuned_profile }}
84 | URL: none
85 |
86 | Bug: bz1245714
87 | Name: No Swap Space allocated
88 | URL: https://bugzilla.redhat.com/show_bug.cgi?id=1245714
89 |
90 | Bug: nova_vif_timeout_result
91 | Name: Nova VIF timeout should be >= 300
92 | URL: none
93 |
94 |
--------------------------------------------------------------------------------
/ansible/install/roles/elasticsearch/files/elasticsearch.in.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # check in case a user was using this mechanism
4 | if [ "x$ES_CLASSPATH" != "x" ]; then
5 | cat >&2 << EOF
6 | Error: Don't modify the classpath with ES_CLASSPATH. Best is to add
7 | additional elements via the plugin mechanism, or if code must really be
8 | added to the main classpath, add jars to lib/ (unsupported).
9 | EOF
10 | exit 1
11 | fi
12 |
13 | ES_CLASSPATH="$ES_HOME/lib/elasticsearch-2.2.0.jar:$ES_HOME/lib/*"
14 |
15 | if [ "x$ES_MIN_MEM" = "x" ]; then
16 | ES_MIN_MEM=8g
17 | fi
18 | if [ "x$ES_MAX_MEM" = "x" ]; then
19 | ES_MAX_MEM=8g
20 | fi
21 | if [ "x$ES_HEAP_SIZE" != "x" ]; then
22 | ES_MIN_MEM=$ES_HEAP_SIZE
23 | ES_MAX_MEM=$ES_HEAP_SIZE
24 | fi
25 |
26 | # min and max heap sizes should be set to the same value to avoid
27 | # stop-the-world GC pauses during resize, and so that we can lock the
28 | # heap in memory on startup to prevent any of it from being swapped
29 | # out.
30 | JAVA_OPTS="$JAVA_OPTS -Xms${ES_MIN_MEM}"
31 | JAVA_OPTS="$JAVA_OPTS -Xmx${ES_MAX_MEM}"
32 |
33 | # new generation
34 | if [ "x$ES_HEAP_NEWSIZE" != "x" ]; then
35 | JAVA_OPTS="$JAVA_OPTS -Xmn${ES_HEAP_NEWSIZE}"
36 | fi
37 |
38 | # max direct memory
39 | if [ "x$ES_DIRECT_SIZE" != "x" ]; then
40 | JAVA_OPTS="$JAVA_OPTS -XX:MaxDirectMemorySize=${ES_DIRECT_SIZE}"
41 | fi
42 |
43 | # set to headless, just in case
44 | JAVA_OPTS="$JAVA_OPTS -Djava.awt.headless=true"
45 |
46 | # Force the JVM to use IPv4 stack
47 | if [ "x$ES_USE_IPV4" != "x" ]; then
48 | JAVA_OPTS="$JAVA_OPTS -Djava.net.preferIPv4Stack=true"
49 | fi
50 |
51 | # Add gc options. ES_GC_OPTS is unsupported, for internal testing
52 | if [ "x$ES_GC_OPTS" = "x" ]; then
53 | ES_GC_OPTS="$ES_GC_OPTS -XX:+UseParNewGC"
54 | ES_GC_OPTS="$ES_GC_OPTS -XX:+UseConcMarkSweepGC"
55 | ES_GC_OPTS="$ES_GC_OPTS -XX:CMSInitiatingOccupancyFraction=75"
56 | ES_GC_OPTS="$ES_GC_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
57 | fi
58 |
59 | JAVA_OPTS="$JAVA_OPTS $ES_GC_OPTS"
60 |
61 | # GC logging options
62 | if [ -n "$ES_GC_LOG_FILE" ]; then
63 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDetails"
64 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCTimeStamps"
65 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDateStamps"
66 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintClassHistogram"
67 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintTenuringDistribution"
68 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCApplicationStoppedTime"
69 | JAVA_OPTS="$JAVA_OPTS -Xloggc:$ES_GC_LOG_FILE"
70 |
71 | # Ensure that the directory for the log file exists: the JVM will not create it.
72 | mkdir -p "`dirname \"$ES_GC_LOG_FILE\"`"
73 | fi
74 |
75 | # Causes the JVM to dump its heap on OutOfMemory.
76 | JAVA_OPTS="$JAVA_OPTS -XX:+HeapDumpOnOutOfMemoryError"
78 | # The path to the heap dump location; note the directory must exist and have enough
78 | # space for a full heap dump.
79 | #JAVA_OPTS="$JAVA_OPTS -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof"
80 |
81 | # Disables explicit GC
82 | JAVA_OPTS="$JAVA_OPTS -XX:+DisableExplicitGC"
83 |
84 | # Ensure UTF-8 encoding by default (e.g. filenames)
85 | JAVA_OPTS="$JAVA_OPTS -Dfile.encoding=UTF-8"
86 |
87 | # Use our provided JNA always versus the system one
88 | JAVA_OPTS="$JAVA_OPTS -Djna.nosys=true"
89 |
--------------------------------------------------------------------------------
/ansible/gather/roles/keystone/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Tasks to set keystone facts
4 | #
5 |
6 | - name: Get keystone provider
7 | command: crudini --get /etc/keystone/keystone.conf token provider
8 | register: keystone_provider
9 |
10 | - name: Set keystone provider fact
11 | set_fact:
12 | openstack_keystone_token_provider: "{{ keystone_provider.stdout }}"
13 |
14 | - name: Determine if Keystone is deployed in eventlet
15 | shell: ps afx | grep "[Kk]eystone-all" -c
16 | register: keystone_in_eventlet
17 | changed_when: false
18 | ignore_errors: True
19 |
20 | - name: Set keystone_deployment variable to httpd
21 | set_fact: openstack_keystone_deployment='httpd'
22 | when: keystone_in_eventlet.stdout|int == 0
23 |
24 | - name: Set keystone_deployment variable to eventlet
25 | set_fact: openstack_keystone_deployment='eventlet'
26 | when: keystone_in_eventlet.stdout|int > 0
27 |
28 | - name: Determine number of public workers for eventlet
29 | shell: crudini --get /etc/keystone/keystone.conf eventlet_server public_workers
30 | register: keystone_public_workers
31 | when: keystone_in_eventlet.stdout|int > 0
32 |
33 | - name: Determine number of admin workers for eventlet
34 | shell: crudini --get /etc/keystone/keystone.conf eventlet_server admin_workers
35 | register: keystone_admin_workers
36 | when: keystone_in_eventlet.stdout|int > 0
37 |
38 | - name: Set keystone eventlet worker facts
39 | set_fact:
40 | openstack_keystone_admin_workers: "{{ keystone_admin_workers.stdout }}"
41 | openstack_keystone_public_workers: "{{ keystone_public_workers.stdout }}"
42 | when: keystone_in_eventlet.stdout|int > 0
43 |
44 | - name: Determine number of keystone admin processes for httpd
45 | shell: grep processes /etc/httpd/conf.d/10-keystone_wsgi_admin.conf | awk '{print $5}'| awk -F= '{print $2}'
46 | register: keystone_admin_worker_processes
47 | when: keystone_in_eventlet.stdout|int == 0
48 |
49 | - name: Determine number of keystone admin threads for httpd
50 | shell: grep threads /etc/httpd/conf.d/10-keystone_wsgi_admin.conf | awk '{print $6}'| awk -F= '{print $2}'
51 | register: keystone_admin_worker_threads
52 | when: keystone_in_eventlet.stdout|int == 0
53 |
54 | - name: Determine number of keystone main threads for httpd
55 | shell: grep threads /etc/httpd/conf.d/10-keystone_wsgi_main.conf | awk '{print $6}'| awk -F= '{print $2}'
56 | register: keystone_main_worker_threads
57 | when: keystone_in_eventlet.stdout|int == 0
58 |
59 | - name: Determine number of keystone main processes for httpd
60 | shell: grep processes /etc/httpd/conf.d/10-keystone_wsgi_main.conf | awk '{print $5}'| awk -F= '{print $2}'
61 | register: keystone_main_worker_processes
62 | when: keystone_in_eventlet.stdout|int == 0
63 |
64 | - name: Set keystone httpd worker facts
65 | set_fact:
66 | openstack_keystone_admin_workers_processes: "{{ keystone_admin_worker_processes.stdout }}"
67 | openstack_keystone_admin_workers_threads: "{{ keystone_admin_worker_threads.stdout }}"
68 | openstack_keystone_main_workers_processes: "{{ keystone_main_worker_processes.stdout }}"
69 | openstack_keystone_main_workers_threads: "{{ keystone_main_worker_threads.stdout }}"
70 | when: keystone_in_eventlet.stdout|int == 0
71 |
--------------------------------------------------------------------------------
/lib/Grafana.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import subprocess
3 |
4 |
5 | class Grafana:
6 |
7 | def __init__(self, config):
8 | self.logger = logging.getLogger('browbeat.Grafana')
9 | self.config = config
10 | self.cloud_name = self.config['grafana']['cloud_name']
11 | self.hosts_file = self.config['ansible']['hosts']
12 | self.grafana_ip = self.config['grafana']['grafana_ip']
13 | self.grafana_port = self.config['grafana']['grafana_port']
14 | self.playbook = self.config['ansible']['grafana_snapshot']
15 | self.grafana_url = {}
16 |
17 | def extra_vars(self, from_ts, to_ts, result_dir, test_name):
18 | extra_vars = 'grafana_ip={} '.format(self.config['grafana']['grafana_ip'])
19 | extra_vars += 'grafana_port={} '.format(self.config['grafana']['grafana_port'])
20 | extra_vars += 'from={} '.format(from_ts)
21 | extra_vars += 'to={} '.format(to_ts)
22 | extra_vars += 'results_dir={}/{} '.format(result_dir, test_name)
23 | extra_vars += 'var_cloud={} '.format(self.cloud_name)
24 | if self.config['grafana']['snapshot']['snapshot_compute']:
25 | extra_vars += 'snapshot_compute=true '
26 | return extra_vars
27 |
28 | def grafana_urls(self):
29 | return self.grafana_url
30 |
31 | def create_grafana_urls(self, time):
32 | if 'grafana' in self.config and self.config['grafana']['enabled']:
33 | from_ts = time['from_ts']
34 | to_ts = time['to_ts']
35 | url = 'http://{}:{}/dashboard/db/'.format(
36 | self.grafana_ip, self.grafana_port)
37 | for dashboard in self.config['grafana']['dashboards']:
38 | self.grafana_url[dashboard] = '{}{}?from={}&to={}&var-Cloud={}'.format(
39 | url, dashboard, from_ts, to_ts, self.cloud_name)
40 |
41 | def print_dashboard_url(self,test_name):
42 | for dashboard in self.grafana_url:
43 | self.logger.info('{} - Grafana Dashboard {} URL: {}'.format(test_name, dashboard,
44 | self.grafana_url[dashboard]))
45 |
46 | def log_snapshot_playbook_cmd(self, from_ts, to_ts, result_dir, test_name):
47 | if 'grafana' in self.config and self.config['grafana']['enabled']:
48 | extra_vars = self.extra_vars(
49 | from_ts, to_ts, result_dir, test_name)
50 | snapshot_cmd = 'ansible-playbook -i {} {} -e "{}"'.format(
51 | self.hosts_file, self.playbook, extra_vars)
52 | self.logger.info('Snapshot command: {}'.format(snapshot_cmd))
53 |
54 | def run_playbook(self, from_ts, to_ts, result_dir, test_name):
55 | if 'grafana' in self.config and self.config['grafana']['enabled']:
56 | if self.config['grafana']['snapshot']['enabled']:
57 | extra_vars = self.extra_vars(
58 | from_ts, to_ts, result_dir, test_name)
59 | subprocess_cmd = ['ansible-playbook', '-i', self.hosts_file, self.playbook, '-e',
60 | '{}'.format(extra_vars)]
61 | snapshot_log = open('{}/snapshot.log'.format(result_dir), 'a+')
62 | self.logger.info('Running ansible to create snapshots for: {}'.format(test_name))
63 | subprocess.Popen(subprocess_cmd, stdout=snapshot_log, stderr=subprocess.STDOUT)
64 |
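A sketch of the class in use; every config value below is a placeholder for the corresponding browbeat config entry, and the playbook path matches the file under ansible/browbeat/:

from Grafana import Grafana

config = {'ansible': {'hosts': 'ansible/hosts',
                      'grafana_snapshot':
                          'ansible/browbeat/snapshot-general-performance-dashboard.yml'},
          'grafana': {'enabled': True,
                      'cloud_name': 'openstack',
                      'grafana_ip': '1.1.1.1',
                      'grafana_port': 3000,
                      'dashboards': ['openstack-general-system-performance'],
                      'snapshot': {'enabled': True, 'snapshot_compute': False}}}

grafana = Grafana(config)
grafana.create_grafana_urls({'from_ts': 1462000000000, 'to_ts': 1462000600000})
grafana.print_dashboard_url('my-test')
grafana.run_playbook(1462000000000, 1462000600000, 'results/run-1', 'my-test')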
--------------------------------------------------------------------------------
/rally/rally-plugins/netcreate-boot-ping/netcreate_nova-boot-fip-ping.py:
--------------------------------------------------------------------------------
1 | from rally.task import atomic
2 | from rally.task import scenario
3 | from rally.plugins.openstack.scenarios.nova import utils as nova_utils
4 | from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
5 | from rally.plugins.openstack.scenarios.vm import utils as vm_utils
6 | from rally.task import types
7 | from rally.task import utils as task_utils
8 | from rally.task import validation
9 |
10 | class NeutronBootFipPingPlugin(neutron_utils.NeutronScenario,
11 | vm_utils.VMScenario,
12 | scenario.Scenario):
13 | #
14 | # Create network
15 | # Create subnet
16 | # Attach to router
17 | # Attach guest to new network
18 | # List
19 | # Ping
20 | # Cleanup
21 | #
22 | @types.set(image=types.ImageResourceType,
23 | flavor=types.FlavorResourceType)
24 | @validation.image_valid_on_flavor("flavor", "image")
25 | @validation.required_openstack(users=True)
26 | @scenario.configure(context={"cleanup": ["nova", "neutron"],
27 | "keypair": {}, "allow_ssh": {}})
28 | def create_network_nova_boot_ping(self,image,flavor,ext_net,floating=False,router=None,
29 | network_create_args=None,subnet_create_args=None,
30 | **kwargs):
31 | if router is None:
32 | router = self._create_router({},ext_net)
33 |
34 | network = self._create_network(network_create_args or {})
35 | subnet = self._create_subnet(network, subnet_create_args or {})
36 | self._add_interface_router(subnet['subnet'],router['router'])
37 | kwargs["nics"] = [{ 'net-id': network['network']['id']}]
38 | _address = None
39 | if floating:
40 | _guest = self._boot_server_with_fip(image, flavor, True, ext_net, **kwargs)
41 | _address = _guest[1]['ip']
42 | else:
43 | self._boot_server(image, flavor,**kwargs)
44 | _address = ""
45 |
46 | if _address:
47 | self._wait_for_ping(_address)
48 |
49 |
50 |
51 |
52 | @atomic.action_timer("neutronPlugin.create_router")
53 | def _create_router(self, router_create_args, external_gw=False):
54 | """Create neutron router.
55 |
56 | :param router_create_args: POST /v2.0/routers request options
57 | :returns: neutron router dict
58 | """
59 | router_create_args["name"] = self.generate_random_name()
60 |
61 | if isinstance(external_gw, dict) and 'id' in external_gw:
62 | for network in self._list_networks():
63 | if network.get("router:external"):
64 | if network.get("id") == external_gw["id"]:
65 | external_network = network
66 | gw_info = {"network_id": external_network["id"],
67 | "enable_snat": True}
68 | router_create_args.setdefault("external_gateway_info",
69 | gw_info)
70 |
71 | else:
72 | if external_gw:
73 | for network in self._list_networks():
74 | if network.get("router:external"):
75 | external_network = network
76 | gw_info = {"network_id": external_network["id"],
77 | "enable_snat": True}
78 | router_create_args.setdefault("external_gateway_info",
79 | gw_info)
80 |
81 | return self.clients("neutron").create_router(
82 | {"router": router_create_args})
83 |
84 |
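A Rally task would invoke the scenario above with arguments along these lines (shown as the Python structure Rally's JSON task file deserializes to; image, flavor, and network values are placeholders):

task = {
    "NeutronBootFipPingPlugin.create_network_nova_boot_ping": [{
        "args": {
            "image": {"name": "centos7"},      # placeholder image
            "flavor": {"name": "m1.small"},    # placeholder flavor
            "ext_net": {"id": "a1b2c3d4"},     # placeholder external network id
            "floating": True
        },
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
        "context": {"users": {"tenants": 2, "users_per_tenant": 2}}
    }]
}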
--------------------------------------------------------------------------------
/ansible/install/roles/graphite/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Install/run graphite-web for browbeat
4 | #
5 |
6 | - name: Import EPEL GPG Key
7 | rpm_key: key=https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
8 | state=present
9 |
10 | - name: Check for EPEL repo
11 | yum: name=https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
12 | state=present
13 |
14 | - name: Install graphite rpms
15 | yum: name={{ item }} state=present
16 | become: true
17 | with_items:
18 | - graphite-web
19 | - python-carbon
20 | - expect
21 |
22 | # moved to grafana specific playbook
23 | # - https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm
24 |
25 | - name: Check for graphite.db sqlite
26 | shell: ls /var/lib/graphite-web/graphite.db
27 | ignore_errors: true
28 | register: graphite_db_installed
29 |
30 | - name: Copy setup-graphite-db.exp
31 | copy:
32 | src=setup-graphite-db.exp
33 | dest=/root/setup-graphite-db.exp
34 | owner=root
35 | group=root
36 | mode=0755
37 | become: true
38 |
39 | - name: Create initial graphite db
40 | shell: /root/setup-graphite-db.exp {{ graphite_username }} {{ graphite_password }} && chown apache:apache /var/lib/graphite-web/graphite.db
41 | become: true
42 | when: graphite_db_installed.rc != 0
43 | register: graphite_db_created
44 |
45 | - name: Setup httpd graphite-web config
46 | template:
47 | src=graphite-web.conf.j2
48 | dest=/etc/httpd/conf.d/graphite-web.conf
49 | owner=root
50 | group=root
51 | mode=0644
52 | become: true
53 | register: graphite_web_config
54 |
55 |
56 | # Start graphite-web service
57 |
58 | - name: Setup httpd service
59 | service: name=httpd state=started enabled=true
60 | become: true
61 |
62 | # disable firewalld (might need to create specific firewall rules or leave it to admin to do via iptables)
63 |
64 | - name: disable firewalld
65 | service: name=firewalld state=stopped enabled=false
66 | become: true
67 | ignore_errors: true
68 |
69 | # remove silly welcome from apache (if it exists)
70 | - name: Remove httpd welcome config
71 | become: true
72 | file: path=/etc/httpd/conf.d/welcome.conf state=absent
73 | register: welcome_conf_removed
74 |
75 | - name: Bounce Apache
76 | service: name=httpd state=restarted enabled=true
77 | become: true
78 | when: graphite_db_created.changed or graphite_web_config.changed or welcome_conf_removed.changed
79 |
80 | #
81 | # setup the python-carbon service
82 | #
83 |
84 | - name: Setup carbon-cache service
85 | service: name=carbon-cache state=started enabled=true
86 | become: true
87 |
88 | - name: copy carbon storage schema config
89 | copy:
90 | src=storage-schemas.conf
91 | dest=/etc/carbon/storage-schemas.conf
92 | owner=root
93 | group=root
94 | mode=0644
95 | become: true
96 |   register: carbon_schemas_config
97 |
98 | - name: copy carbon storage aggregation config
99 | copy:
100 | src=storage-aggregation.conf
101 | dest=/etc/carbon/storage-aggregation.conf
102 | owner=root
103 | group=root
104 | mode=0644
105 | become: true
106 |   register: carbon_aggregation_config
107 |
108 | - name: copy carbon config
109 | copy:
110 | src=carbon.conf
111 | dest=/etc/carbon/carbon.conf
112 | owner=root
113 | group=root
114 | mode=0644
115 | become: true
116 |   register: carbon_conf_config
117 |
118 | - name: bounce carbon cache
119 | service: name=carbon-cache state=restarted enabled=true
120 | become: true
121 |   when: carbon_schemas_config.changed or carbon_aggregation_config.changed or carbon_conf_config.changed
122 |
123 | - name: Disable EPEL Repo
124 |   become: true
125 |   ini_file: dest=/etc/yum.repos.d/epel.repo
126 |             section=epel
127 |             option=enabled value=0
128 |
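For reference, the register/when restart plumbing above can also be written as a notify/handler pair, which fires once at the end of the play no matter how many tasks changed, avoiding both the variable-overwrite pitfall and redundant restarts. A minimal sketch, assuming a handlers/main.yml were added to this role (the tree shows none for graphite):

    # tasks/main.yml (sketch)
    - name: Setup httpd graphite-web config
      template: src=graphite-web.conf.j2 dest=/etc/httpd/conf.d/graphite-web.conf
      become: true
      notify: restart httpd

    # handlers/main.yml (hypothetical file)
    - name: restart httpd
      service: name=httpd state=restarted
      become: true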
--------------------------------------------------------------------------------
/ansible/browbeat/roles/keystone-token/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Keystone tasks to change token type
4 | #
5 |
6 | - name: Determine if keystone is deployed in eventlet
7 | shell: ps afx | grep "[Kk]eystone-all" -c
8 | register: deployed
9 | when: keystone_deployment is undefined
10 | ignore_errors: true
11 | changed_when: false
12 |
13 | - name: Set keystone_deployment variable/fact to httpd
14 | set_fact: keystone_deployment='httpd'
15 | when: keystone_deployment is undefined and deployed.stdout|int == 0
16 |
17 | - name: Set keystone_deployment variable/fact to eventlet
18 | set_fact: keystone_deployment='eventlet'
19 | when: keystone_deployment is undefined
20 |
21 | #
22 | # Get Token type
23 | #
24 |
25 | - name: Check Keystone Token Provider
26 | command: crudini --get /etc/keystone/keystone.conf token provider
27 | register: keystone_token_provider
28 | changed_when: false
29 | ignore_errors: True
30 |
31 | - name: Set current_token_provider variable/fact to uuid
32 | set_fact: current_token_provider='uuid'
33 |   when: "'uuid' in keystone_token_provider.stdout"
34 |
35 | - name: Set current_token_provider variable/fact to fernet
36 | set_fact: current_token_provider='fernet'
37 |   when: "'fernet' in keystone_token_provider.stdout"
38 |
39 | - name: Set current_token_provider variable/fact to pkiz
40 | set_fact: current_token_provider='pkiz'
41 |   when: "'pkiz' in keystone_token_provider.stdout"
42 |
43 | #
44 | # Tasks to change token provider if necessary:
45 | #
46 |
47 | - name: Change token provider
48 | command: crudini --set /etc/keystone/keystone.conf token provider "keystone.token.providers.{{ token_provider }}.Provider"
49 |   when: current_token_provider | default('') != token_provider
50 | notify:
51 | - pacemaker default unmanaged
52 | - stop keystone service
53 | - restart httpd service
54 | - restart keystone service
55 | - pacemaker default managed
56 | - pacemaker cleanup keystone
57 |
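# Note: each name in the notify list above must match a handler defined in
# this role's handlers/main.yml (the file exists in the tree, but its
# contents are not shown here). As an assumption, one such handler might
# look like:
#
# - name: restart httpd service
#   service: name=httpd state=restarted
#   become: true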
58 | #
59 | # fernet token setup:
60 | #
61 |
62 | - name: Create fernet keys directory
63 | file:
64 | path=/etc/keystone/fernet-keys
65 | state=directory
66 | owner=keystone
67 | group=keystone
68 | mode=0700
69 |   when: token_provider == 'fernet'
70 |
71 | - name: Setup fernet keys
72 | command: keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
73 |   when: token_provider == 'fernet' and inventory_hostname == groups['controller'][0]
74 |
75 | - name: Get fernet keys
76 | fetch: src=/etc/keystone/fernet-keys/{{ item }} dest=roles/keystone-token/files/{{ item }} flat=yes
77 | with_items:
78 | - 0
79 | - 1
80 |   when: token_provider == 'fernet' and inventory_hostname == groups['controller'][0]
81 | changed_when: false
82 |
83 | - name: Copy fernet keys
84 |   copy: src={{ item }} dest=/etc/keystone/fernet-keys/ owner=keystone group=keystone mode=0600
85 |   with_items:
86 |     - 0
87 |     - 1
88 |   when: token_provider == 'fernet' and inventory_hostname != groups['controller'][0]
89 |
90 | - name: Copy keystone type enforcement file
91 | copy:
92 | src: my-keystone.te
93 | dest: /root/my-keystone.te
94 |   when: token_provider == 'fernet'
95 |
96 | - name: Create keystone.mod file
97 | command: checkmodule -M -m -o /root/my-keystone.mod /root/my-keystone.te
98 |   when: token_provider == 'fernet'
99 |
100 | - name: Create keystone.pp file
101 | command: semodule_package -o /root/my-keystone.pp -m /root/my-keystone.mod
102 |   when: token_provider == 'fernet'
103 |
104 | - name: Install keystone selinux policy
105 |   command: semodule -i /root/my-keystone.pp
106 |   when: token_provider == 'fernet'
107 |
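A quick way to confirm the change took effect is to read the provider back with the same crudini call the tasks use. A minimal verification sketch (a hypothetical task, not part of this role):

    - name: Verify token provider
      command: crudini --get /etc/keystone/keystone.conf token provider
      register: provider_check
      changed_when: false
      failed_when: token_provider not in provider_check.stdout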
--------------------------------------------------------------------------------