├── tests └── __init__.py ├── browbeat ├── __init__.py ├── workloads │ └── __init__.py └── schema │ ├── shaker.yml │ └── rally.yml ├── ansible ├── kubeconfig_paths ├── install │ ├── roles │ │ ├── rally │ │ │ ├── files │ │ │ │ ├── browbeat-rally │ │ │ │ │ ├── browbeat_rally │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── db │ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ │ └── models.py │ │ │ │ │ └── setup.py │ │ │ │ └── create_lock_table.py │ │ │ └── templates │ │ │ │ └── rally.conf.j2 │ │ ├── statsd-ironic │ │ │ └── defaults │ │ │ │ └── main.yml │ │ ├── logstash │ │ │ ├── files │ │ │ │ ├── 30-lumberjack-output.conf │ │ │ │ ├── 10-syslog.conf │ │ │ │ ├── logstash.repo │ │ │ │ ├── 01-lumberjack-input.conf │ │ │ │ ├── 30-elasticsearch-output.conf │ │ │ │ └── 10-syslog-filter.conf │ │ │ └── templates │ │ │ │ ├── 02-beats-input.conf.j2 │ │ │ │ └── openssl_extras.cnf.j2 │ │ ├── firewall │ │ │ └── handlers │ │ │ │ └── main.yml │ │ ├── grafana-prometheus-dashboards │ │ │ ├── files │ │ │ │ └── README.rst │ │ │ └── templates │ │ │ │ └── partials │ │ │ │ └── description.yaml │ │ ├── index-ocp-data │ │ │ ├── vars │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── check_oc.yml │ │ ├── collectd-openstack │ │ │ ├── files │ │ │ │ ├── custom-collectd.pp │ │ │ │ ├── ovs_flows.sh │ │ │ │ └── ovn_monitoring.sh │ │ │ └── templates │ │ │ │ └── 00-browbeat_mod_status.conf.j2 │ │ ├── collectd-rhoso │ │ │ ├── templates │ │ │ │ ├── logs.yml.j2 │ │ │ │ └── collectd_svc_template.yaml.j2 │ │ │ ├── files │ │ │ │ ├── entrypoint.sh │ │ │ │ ├── prometheus_cluster_rolebinding.yaml │ │ │ │ ├── collectd_svc_monitor.yaml │ │ │ │ └── prometheus_cluster_role.yaml │ │ │ ├── tasks │ │ │ │ └── gen_configs.yml │ │ │ └── vars │ │ │ │ └── constants.yml │ │ ├── statsd-install │ │ │ ├── templates │ │ │ │ ├── statsd_config.js.j2 │ │ │ │ └── statsd.service.j2 │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── browbeat-results │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ └── 00-browbeat.conf.j2 │ │ ├── repo │ │ │ ├── templates │ │ │ │ └── browbeat.repo.j2 │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── grafana │ │ │ ├── files │ │ │ │ └── grafana.repo │ │ │ └── templates │ │ │ │ └── data_source.json.j2 │ │ ├── fluentd │ │ │ └── files │ │ │ │ └── fluentd.repo │ │ ├── epel │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── stockpile │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── workloads │ │ │ └── templates │ │ │ │ ├── custom-cirros.file │ │ │ │ ├── octavia-userdata.file │ │ │ │ ├── abench-user.file │ │ │ │ ├── stress-ng-user.file │ │ │ │ └── linpack-user.file │ │ ├── curator │ │ │ ├── files │ │ │ │ └── curator.repo │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── elasticsearch │ │ │ └── files │ │ │ │ └── elasticsearch.repo │ │ ├── collectd-generic │ │ │ └── vars │ │ │ │ └── main.yml │ │ ├── grafana-dashboards │ │ │ └── templates │ │ │ │ ├── partials │ │ │ │ ├── description.yaml │ │ │ │ ├── ovs_flows.yaml │ │ │ │ ├── gnocchi_backlog.yaml.j2 │ │ │ │ └── neutron_resources.yaml │ │ │ │ ├── cloud_gnocchi_status.yaml.j2 │ │ │ │ └── iostat.yaml.j2 │ │ ├── graphite │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── files │ │ │ │ └── storage-schemas.conf │ │ ├── flavors │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── vars │ │ │ │ └── main.yml │ │ ├── rsyslog-templates │ │ │ ├── templates │ │ │ │ ├── rsyslog.conf.j2 │ │ │ │ ├── 05-outputs.conf.j2 │ │ │ │ └── 01-modules.conf.j2 │ │ │ └── defaults │ │ │ │ └── main.yml │ │ ├── kibana-visualization │ │ │ └── templates │ │ │ │ ├── 
markdown.json.j2 │ │ │ │ └── dashboard.json.j2 │ │ ├── containers │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── common │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── e2e-benchmarking │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── no-sshd-dns │ │ │ └── tasks │ │ │ │ └── main.yml │ │ └── collectd │ │ │ └── templates │ │ │ └── 00-browbeat_mod_status.conf.j2 │ ├── index-ocp-data.yml │ ├── es-template.yml │ ├── ironic-statsd.yml │ ├── repos.yml │ ├── grafana.yml │ ├── graphite.yml │ ├── statsd.yml │ ├── check-collectd-config.yml │ ├── pre-collectd.yml │ ├── rsyslog-aggregator.yml │ ├── collectd-rhoso.yaml │ ├── browbeat.yml │ ├── stop-collectd-rhoso.yml │ ├── cleanup_sqlalchemy_collectd.yml │ ├── rsyslog-logging.yml │ ├── grafana-prometheus-dashboards.yml │ ├── collectd.yml │ ├── browbeat_rhoso.yaml │ ├── stop-collectd.yml │ ├── start-collectd.yml │ ├── browbeat_rhosp.yaml │ └── shaker_build.yml ├── oooq │ ├── roles │ │ ├── collectd-undercloud │ │ │ ├── templates │ │ │ │ ├── hosts.j2 │ │ │ │ └── ssh-config.j2 │ │ │ ├── vars │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── collectd │ │ │ ├── vars │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── pre-install-setup │ │ │ └── defaults │ │ │ │ └── main.yml │ │ ├── browbeat-classify │ │ │ └── files │ │ │ │ └── uuid_extract.sh │ │ ├── template-configs │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── vars │ │ │ │ └── main.yml │ │ ├── grafana-dashboard-setup │ │ │ ├── vars │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── oooq-metadata │ │ │ └── vars │ │ │ │ └── main.yml │ │ ├── browbeat-run │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── gather-metadata │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── ci-network │ │ │ └── defaults │ │ │ │ └── main.yml │ │ └── bug-check │ │ │ └── tasks │ │ │ └── main.yml │ ├── configure-browbeat.yml │ ├── disable-ssh-dns.yml │ ├── overcloud-metrics.yml │ ├── undercloud-metrics.yml │ ├── install-browbeat.yml │ ├── quickstart-browbeat.yml │ ├── browbeat-minimal.yml │ ├── baremetal-virt-undercloud-int-browbeat.yml │ └── baremetal-virt-undercloud-tripleo-browbeat.yml ├── browbeat │ ├── group_vars │ │ └── all.yml │ ├── roles │ │ ├── ceilometer-polling │ │ │ └── templates │ │ │ │ └── polling.yaml.j2 │ │ ├── neutron-ovsdb │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── keystone-workers │ │ │ └── templates │ │ │ │ └── keystone_ports.conf.j2 │ │ ├── apache-config │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ └── prefork.conf.j2 │ │ ├── run-task-at │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── nova-config │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── keystone-token │ │ │ └── files │ │ │ │ └── my-keystone.te │ │ ├── cinder-workers │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── handlers │ │ │ │ └── main.yml │ │ ├── nova-db │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── neutron-l3 │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── neutron-firewall │ │ │ └── tasks │ │ │ │ └── main.yml │ │ └── nova-workers │ │ │ └── tasks │ │ │ └── main.yml │ ├── adjustment-db.yml │ ├── cleanlogs.yml │ ├── adjustment-l3.yml │ ├── ntp-sync.yml │ ├── adjustment-firewall_driver.yml │ ├── clean-gnocchi-resources.yml │ ├── adjustment-keystone-token.yml │ ├── odl-open-transactions.yml │ ├── install-at.yml │ ├── adjustment-apache.yml │ └── adjustment-haproxy.yml ├── tune │ ├── group_vars │ │ ├── compute │ │ └── controller │ ├── README.rst │ ├── tune.yml │ └── roles │ │ ├── tuned │ │ └── tasks │ │ │ └── main.yml │ │ └── udev_dhcp_all_interfaces │ │ └── tasks │ │ └── main.yml ├── 
ansible.cfg ├── common_logging │ ├── roles │ │ ├── browbeat_logging │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ └── filebeat_setup │ │ │ └── tasks │ │ │ └── main.yml │ ├── browbeat_logging.yml │ └── install_logging.yml ├── README.testing.rst ├── gather │ └── group_vars │ │ └── all.yml ├── logs │ └── roles │ │ ├── fetch-logs │ │ └── tasks │ │ │ └── main.yml │ │ ├── openvswitch │ │ └── tasks │ │ │ └── main.yml │ │ └── httpd │ │ └── tasks │ │ └── main.yml └── install_e2e_benchmarking.sh ├── rally ├── neutron │ └── README.rst ├── rally-plugins │ ├── workloads │ │ └── pbench-uperf.png │ ├── netcreate-boot │ │ └── README.rst │ ├── pbench-fio │ │ ├── templates │ │ │ ├── read.job.j2 │ │ │ └── write.job.j2 │ │ └── ansible │ │ │ ├── pbench_agent_tool_meister_firewall.yml │ │ │ └── pbench_agent_install.yaml │ ├── gnocchi │ │ ├── gnocchi-metric-list.yml │ │ ├── gnocchi-resource-list.yml │ │ ├── gnocchi-capabilities-list.yml │ │ ├── gnocchi-resource-type-list.yml │ │ ├── gnocchi-archive-policy-list.yml │ │ ├── gnocchi-create-resource-type.yml │ │ ├── gnocchi-create-archive-policy.yml │ │ ├── gnocchi-archive-policy-rule-list.yml │ │ ├── gnocchi-create-archive-policy-rule.yml │ │ ├── gnocchi-create-delete-archive-policy.yml │ │ ├── gnocchi-create-delete-resource-type.yml │ │ ├── gnocchi-create-delete-archive-policy-rule.yml │ │ ├── gnocchi-status-get.yml │ │ ├── gnocchi-create-resource.yml │ │ ├── gnocchi-create-delete-resource.yml │ │ ├── gnocchi-create-metric.yml │ │ ├── gnocchi-create-delete-metric.yml │ │ ├── gnocchi-metric-aggregation.yml │ │ └── gnocchi-metric-get-measures.yml │ └── neutron │ │ ├── securitygroup_port.yml │ │ └── router_subnet_create_delete.yml ├── nova │ └── README.rst ├── heat │ ├── templates │ │ ├── random-strings.yaml.template │ │ ├── updated-random-strings-delete.yaml.template │ │ ├── updated-random-strings-add.yaml.template │ │ └── updated-random-strings-replace.yaml.template │ └── create-update-delete-stack.yaml ├── CeilometerAlarms │ └── list_alarms-cc.yml ├── barbican │ ├── create-and-list-secret.yaml │ ├── create-and-delete-secret.yaml │ ├── create-and-delete-asymmetric.yaml │ └── create-and-delete-certificate.yaml ├── CeilometerEvents │ ├── create_user_and_get_event-cc.yml │ ├── create_user_and_list_events-cc.yml │ └── create_user_and_list_event_types-cc.yml ├── CeilometerTraits │ ├── create_user_and_list_traits-cc.yml │ └── create_user_and_list_trait_descriptions-cc.yml ├── authenticate │ ├── validate_octavia-cc.yml │ ├── keystone-cc.yml │ ├── validate_heat-cc.yml │ ├── validate_nova-cc.yml │ ├── validate_cinder-cc.yml │ ├── validate_glance-cc.yml │ ├── validate_monasca-cc.yml │ ├── validate_neutron-cc.yml │ └── validate_ceilometer-cc.yml ├── manila │ ├── create-share-network-and-delete.yaml │ ├── create-share-network-and-list.yaml │ ├── create-share-and-extend.yaml │ └── create-share-and-shrink.yaml ├── keystonebasic │ ├── create_tenant-cc.yml │ ├── create_user-cc.yml │ ├── get_entities-cc.yml │ ├── create_delete_user-cc.yml │ ├── create_and_get_role-cc.yml │ ├── create_and_delete_role-cc.yml │ ├── create_and_list_tenants-cc.yml │ ├── create_and_list_users-cc.yml │ ├── add_and_remove_user_role-cc.yml │ ├── create_and_delete_service-cc.yml │ ├── create_and_list_services-cc.yml │ ├── create_user_update_password-cc.yml │ ├── create_add_and_list_user_roles-cc.yml │ ├── create_and_list_ec2credentials-cc.yml │ ├── create_and_delete_ec2credentials-cc.yml │ ├── create_update_and_delete_tenant-cc.yml │ ├── 
authenticate_user_and_validate_token-cc.yml │ ├── create_add_list_roles-cc.yml │ ├── create_and_update_user-cc.yml │ ├── create_tenant_with_users-cc.yml │ └── create_user_set_enabled_and_delete-cc.yml ├── CeilometerResource │ └── get_tenant_resources-cc.yml ├── glance │ └── list-images-cc.yml ├── CeilometerStats │ └── create_meter_and_get_stats-cc.yml ├── swift │ ├── create-container-and-object-then-delete-all.yaml │ ├── create-container-and-object-then-list-objects.yaml │ └── list-objects-in-containers.yaml ├── cinder │ └── cinder-create-and-list-volume.yml └── octavia │ ├── octavia-create-list-loadbalancers.yml │ ├── octavia-create-show-loadbalancers.yml │ ├── octavia-create-delete-loadbalancers.yml │ ├── octavia-create-update-loadbalancers.yml │ └── octavia-create-stats-show-loadbalancers.yml ├── .gitreview ├── log └── .gitignore ├── requirements.yml ├── results └── .gitignore ├── visualization ├── Keystone │ └── README.rst ├── Neutron │ ├── README.rst │ └── visualization │ │ ├── NeutronResults.json │ │ ├── NeutronErrors.json │ │ └── NeutronNumberOfAPIWorkers.json ├── OpenStack-Workers │ ├── README.rst │ └── dashboard │ │ └── OpenStack-Worker.json ├── Performance-Dashboard │ ├── README.rst │ └── visualization │ │ ├── Action.json │ │ ├── ResultsPerCloudName.json │ │ ├── VersionDataTable.json │ │ ├── ErrorCountPerUUID.json │ │ ├── ResultPerUUID.json │ │ ├── Times.json │ │ ├── Concurrency.json │ │ └── RallyScenario.json ├── Shaker │ ├── dashboard │ │ └── .gitignore │ └── visualization │ │ └── .gitignore ├── README.rst └── Network-Performance │ └── visualization │ ├── Browbeat-UUID.json │ ├── Browbeat-Shaker-Cloud.json │ ├── Browbeat-Shaker-UUID.json │ ├── Browbeat-Shaker-Test.json │ ├── Browbeat-Shaker-Executor.json │ ├── Browbeat-Shaker-Scenario.json │ ├── Browbeat-Shaker-Result.json │ ├── Browbeat-Shaker-Density.json │ ├── Browbeat-Shaker-Compute.json │ ├── Browbeat-Shaker-Concurrency.json │ ├── Browbeat-Shaker-Placement.json │ └── Browbeat-Shaker-Distribution.json ├── metadata └── .gitignore ├── doc └── source │ ├── images │ ├── Duplicate_Atomic_Actions_Duration_Line_Chart.png │ ├── Resource_Atomic_Actions_Duration_Line_Chart.png │ └── Per_Iteration_Duration_Stacked_Area_Chart │ │ ├── Iteration1.png │ │ ├── Iteration2.png │ │ ├── Iteration3.png │ │ └── Iteration4.png │ ├── introduction.rst │ └── index.rst ├── elastic ├── v7ilm_policy │ └── template-browbeat-rally.json.j2 ├── templates │ └── README.rst ├── v5templates │ └── README.rst └── README.rst ├── ci-scripts ├── config │ └── tripleo │ │ └── install-and-check │ │ └── all.yml ├── tripleo │ └── config │ │ └── nodes │ │ └── 1ctlr_1comp.yml └── molecule │ └── test-molecule.sh ├── bindep.txt ├── browbeat-containers ├── collectd-openstack │ └── files │ │ ├── ovs_flows.sh │ │ └── ovn_monitoring.sh ├── collectd-guest │ └── Dockerfile ├── collectd-baremetal │ └── Dockerfile └── collectd-rhoso │ └── files │ └── ovn_monitoring.sh ├── tools-requirements.txt ├── .pre-commit-config.yaml ├── setup.py ├── ocp_on_osp ├── tasks │ └── create_flavors.yml └── vars │ └── flavors.yaml ├── requirements.txt └── utils └── cleanup_rally_resources.sh /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /browbeat/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /browbeat/workloads/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ansible/kubeconfig_paths: -------------------------------------------------------------------------------- 1 | /home/stack/.kube/config 2 | -------------------------------------------------------------------------------- /rally/neutron/README.rst: -------------------------------------------------------------------------------- 1 | Neutron scenarios 2 | ================= 3 | -------------------------------------------------------------------------------- /ansible/install/roles/rally/files/browbeat-rally/browbeat_rally/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ansible/install/roles/rally/files/browbeat-rally/browbeat_rally/db/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitreview: -------------------------------------------------------------------------------- 1 | [gerrit] 2 | host=review.opendev.org 3 | port=29418 4 | project=x/browbeat.git 5 | -------------------------------------------------------------------------------- /ansible/oooq/roles/collectd-undercloud/templates/hosts.j2: -------------------------------------------------------------------------------- 1 | [undercloud] 2 | undercloud 3 | -------------------------------------------------------------------------------- /log/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore 5 | -------------------------------------------------------------------------------- /requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: community.general 4 | - name: ansible.posix 5 | -------------------------------------------------------------------------------- /results/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore 5 | -------------------------------------------------------------------------------- /ansible/oooq/roles/collectd/vars/main.yml: -------------------------------------------------------------------------------- 1 | graphite_host_template: "1.2.3.4.5" 2 | graphite_prefix_template: "CI" 3 | -------------------------------------------------------------------------------- /ansible/browbeat/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # If Pacemaker is managing services or not 3 | pacemaker_controlled: false 4 | -------------------------------------------------------------------------------- /ansible/tune/group_vars/compute: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Tuning vars for computes 4 | # 5 | 6 | tuned_profile: virtual-host 7 | -------------------------------------------------------------------------------- /ansible/install/index-ocp-data.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: yes 4 | roles: 5 | - index-ocp-data 6 | 
-------------------------------------------------------------------------------- /ansible/install/roles/statsd-ironic/defaults/main.yml: -------------------------------------------------------------------------------- 1 | statsd_host: localhost 2 | statsd_port: 8125 3 | statsd_enabled: false 4 | -------------------------------------------------------------------------------- /ansible/oooq/roles/collectd-undercloud/vars/main.yml: -------------------------------------------------------------------------------- 1 | graphite_host_template: "1.2.3.4.5" 2 | graphite_prefix_template: "CI" 3 | -------------------------------------------------------------------------------- /visualization/Keystone/README.rst: -------------------------------------------------------------------------------- 1 | # OpenStack Browbeat Keystone Kibana page 2 | ------------------------------------------ 3 | -------------------------------------------------------------------------------- /ansible/install/roles/logstash/files/30-lumberjack-output.conf: -------------------------------------------------------------------------------- 1 | output { 2 | elasticsearch { hosts => ["localhost:9200"] } 3 | } 4 | -------------------------------------------------------------------------------- /visualization/Neutron/README.rst: -------------------------------------------------------------------------------- 1 | # OpenStack Browbeat Neutron Kibana page 2 | ------------------------------------------ 3 | 4 | -------------------------------------------------------------------------------- /visualization/OpenStack-Workers/README.rst: -------------------------------------------------------------------------------- 1 | # OpenStack Browbeat Worker Kibana page 2 | ---------------------------------------- 3 | -------------------------------------------------------------------------------- /ansible/tune/group_vars/controller: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Tuning vars for controllers 4 | # 5 | 6 | tuned_profile: throughput-performance 7 | -------------------------------------------------------------------------------- /ansible/oooq/roles/pre-install-setup/defaults/main.yml: -------------------------------------------------------------------------------- 1 | browbeat_dependencies: 2 | - rsync 3 | - ansible 4 | -------------------------------------------------------------------------------- /ansible/oooq/configure-browbeat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure Browbeat 3 | hosts: Undercloud 4 | roles: 5 | - browbeat/pre-install-setup 6 | -------------------------------------------------------------------------------- /ansible/oooq/roles/collectd-undercloud/templates/ssh-config.j2: -------------------------------------------------------------------------------- 1 | Host undercloud 2 | HostName localhost 3 | Port 22 4 | User stack 5 | -------------------------------------------------------------------------------- /rally/rally-plugins/workloads/pbench-uperf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloud-bulldozer/browbeat/HEAD/rally/rally-plugins/workloads/pbench-uperf.png -------------------------------------------------------------------------------- /visualization/Performance-Dashboard/README.rst: -------------------------------------------------------------------------------- 1 | # OpenStack Browbeat Performance-CI Kibana 
page
2 | -------------------------------------------------
3 |
--------------------------------------------------------------------------------
/ansible/install/es-template.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: localhost
4 | remote_user: "{{ local_remote_user }}"
5 | roles:
6 | - { role: es-template }
7 |
--------------------------------------------------------------------------------
/ansible/install/roles/firewall/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart iptables
2 | service:
3 | name: iptables
4 | state: restarted
5 | become: true
6 |
7 |
--------------------------------------------------------------------------------
/visualization/Shaker/dashboard/.gitignore:
--------------------------------------------------------------------------------
1 | # This .gitignore is included to ensure this directory exists
2 | *
3 | # Ignores everything, except this file
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/ansible/oooq/disable-ssh-dns.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Disable sshd dns
3 | hosts: overcloud
4 | vars:
5 | disable_ssh_dns: true
6 | roles:
7 | - browbeat/no-sshd-dns
8 |
--------------------------------------------------------------------------------
/ansible/oooq/roles/browbeat-classify/files/uuid_extract.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | grep -o 'UUID: [^,]*' $1 | tail -1 | grep -o ' [^,]*' | tr -d ' ' | sed 's/\x27/ /g'
4 |
5 |
6 |
--------------------------------------------------------------------------------
/metadata/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory (placeholder directory)
2 | # version.json is provided as an example
3 | *
4 | # Except this file
5 | !.gitignore
6 |
--------------------------------------------------------------------------------
/visualization/Shaker/visualization/.gitignore:
--------------------------------------------------------------------------------
1 | # This .gitignore is included to ensure this directory exists
2 | *
3 | # Ignores everything, except this file
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/ansible/install/roles/grafana-prometheus-dashboards/files/README.rst:
--------------------------------------------------------------------------------
1 | This directory stores the descriptions of Grafana dashboards in YAML format; they are removed once uploaded.
2 | -------------------------------------------------------------------------------- /ansible/install/roles/index-ocp-data/vars/main.yml: -------------------------------------------------------------------------------- 1 | required_vars: 2 | - es_server 3 | - es_index 4 | - uuid 5 | - job_name 6 | - metrics 7 | - start_time 8 | - end_time 9 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-openstack/files/custom-collectd.pp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloud-bulldozer/browbeat/HEAD/ansible/install/roles/collectd-openstack/files/custom-collectd.pp -------------------------------------------------------------------------------- /doc/source/images/Duplicate_Atomic_Actions_Duration_Line_Chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloud-bulldozer/browbeat/HEAD/doc/source/images/Duplicate_Atomic_Actions_Duration_Line_Chart.png -------------------------------------------------------------------------------- /doc/source/images/Resource_Atomic_Actions_Duration_Line_Chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloud-bulldozer/browbeat/HEAD/doc/source/images/Resource_Atomic_Actions_Duration_Line_Chart.png -------------------------------------------------------------------------------- /ansible/browbeat/roles/ceilometer-polling/templates/polling.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - name: some_pollsters 4 | interval: {{polling_interval}} 5 | meters: 6 | - "*" 7 | -------------------------------------------------------------------------------- /doc/source/images/Per_Iteration_Duration_Stacked_Area_Chart/Iteration1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloud-bulldozer/browbeat/HEAD/doc/source/images/Per_Iteration_Duration_Stacked_Area_Chart/Iteration1.png -------------------------------------------------------------------------------- /doc/source/images/Per_Iteration_Duration_Stacked_Area_Chart/Iteration2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloud-bulldozer/browbeat/HEAD/doc/source/images/Per_Iteration_Duration_Stacked_Area_Chart/Iteration2.png -------------------------------------------------------------------------------- /doc/source/images/Per_Iteration_Duration_Stacked_Area_Chart/Iteration3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloud-bulldozer/browbeat/HEAD/doc/source/images/Per_Iteration_Duration_Stacked_Area_Chart/Iteration3.png -------------------------------------------------------------------------------- /doc/source/images/Per_Iteration_Duration_Stacked_Area_Chart/Iteration4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloud-bulldozer/browbeat/HEAD/doc/source/images/Per_Iteration_Duration_Stacked_Area_Chart/Iteration4.png -------------------------------------------------------------------------------- /ansible/install/roles/collectd-rhoso/templates/logs.yml.j2: -------------------------------------------------------------------------------- 1 | container_logs: 2 | {% for instance, filename 
in filenames.items() %} 3 | {{ instance }}: "/var/log/containers/{{ filename }}.log" 4 | {% endfor %} 5 | -------------------------------------------------------------------------------- /ansible/install/roles/statsd-install/templates/statsd_config.js.j2: -------------------------------------------------------------------------------- 1 | { 2 | graphitePort: 2003 3 | , graphiteHost: "{{graphite_host}}" 4 | , port: {{statsd_port}} 5 | , backends: [ "./backends/graphite" ] 6 | } 7 | -------------------------------------------------------------------------------- /ansible/oooq/roles/template-configs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Template Browbeat configuration 3 | template: 4 | "src={{ browbeat_config_file }} \ 5 | dest={{ ansible_env.HOME }}/browbeat/browbeat-config.yaml" 6 | -------------------------------------------------------------------------------- /ansible/install/roles/browbeat-results/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Browbeat Results handlers 4 | # 5 | 6 | - name: restart httpd 7 | service: 8 | name: httpd 9 | state: restarted 10 | become: true 11 | -------------------------------------------------------------------------------- /ansible/install/roles/repo/templates/browbeat.repo.j2: -------------------------------------------------------------------------------- 1 | # Deployed by Browbeat 2 | 3 | {% for key in repos %} 4 | [{{key}}] 5 | name={{key}} 6 | baseurl={{repos[key].baseurl}} 7 | gpgcheck=0 8 | enabled=1 9 | {% endfor %} 10 | -------------------------------------------------------------------------------- /ansible/install/roles/grafana/files/grafana.repo: -------------------------------------------------------------------------------- 1 | [grafana] 2 | name=grafana 3 | baseurl=https://packagecloud.io/grafana/stable/el/7/$basearch 4 | enabled=1 5 | gpgcheck=1 6 | gpgkey=https://grafanarel.s3.amazonaws.com/RPM-GPG-KEY-grafana 7 | -------------------------------------------------------------------------------- /ansible/install/roles/fluentd/files/fluentd.repo: -------------------------------------------------------------------------------- 1 | [treasuredata] 2 | name=TreasureData 3 | baseurl=http://packages.treasuredata.com/2/redhat/\$releasever/\$basearch 4 | gpgcheck=1 5 | gpgkey=https://packages.treasuredata.com/GPG-KEY-td-agent 6 | -------------------------------------------------------------------------------- /ansible/tune/README.rst: -------------------------------------------------------------------------------- 1 | Browbeat OSP Performance Tuning Playbook 2 | ======================================== 3 | 4 | This playbook aims to tune OSP deployed on Red Hat Enterprise Linux. 5 | 6 | The playbook in here is currently experimental. 
7 | -------------------------------------------------------------------------------- /ansible/oooq/roles/grafana-dashboard-setup/vars/main.yml: -------------------------------------------------------------------------------- 1 | grafana_enabled_template: false 2 | grafana_host_template: 1.2.3.4 3 | grafana_username_template: admin 4 | grafana_password_template: admin 5 | graphite_prefix_template: "browbeat-ci" 6 | 7 | -------------------------------------------------------------------------------- /ansible/install/ironic-statsd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # enables Ironic StatsD metrics and points at the StatsD host configured in groupvars/all.yml 4 | 5 | - hosts: Undercloud 6 | remote_user: "{{ local_remote_user }}" 7 | roles: 8 | - statsd-ironic 9 | -------------------------------------------------------------------------------- /ansible/tune/tune.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Tunes overcloud for browbeat/performance 4 | # 5 | 6 | - hosts: Controller:Compute 7 | gather_facts: false 8 | remote_user: heat-admin 9 | roles: 10 | - udev_dhcp_all_interfaces 11 | - tuned 12 | -------------------------------------------------------------------------------- /ansible/install/roles/epel/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Handler to clean up EPEL whenever it is used 4 | # 5 | - name: remove_epel 6 | package: 7 | name: epel-release 8 | state: absent 9 | ignore_errors: true 10 | become: true 11 | -------------------------------------------------------------------------------- /ansible/install/roles/logstash/files/10-syslog.conf: -------------------------------------------------------------------------------- 1 | input { 2 | stdin { 3 | type => "syslog" 4 | } 5 | } 6 | output { 7 | # stdout {codec => rubydebug } 8 | elasticsearch { 9 | hosts => "localhost:9200" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /ansible/install/roles/repo/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Task to deploy a repo file 4 | # 5 | 6 | - name: Add custom repos 7 | template: 8 | src: "templates/browbeat.repo.j2" 9 | dest: /etc/yum.repos.d/browbeat.repo 10 | become: true 11 | -------------------------------------------------------------------------------- /ansible/install/roles/stockpile/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Clone stockpile 4 | git: 5 | repo: 'http://github.com/cloud-bulldozer/stockpile.git' 6 | dest: "{{ browbeat_path }}/ansible/gather/stockpile" 7 | version: master 8 | force: yes 9 | -------------------------------------------------------------------------------- /ansible/install/repos.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Add repos 4 | # 5 | # Can be used in conjunction with doing an upgrade 6 | # 7 | 8 | - hosts: overcloud 9 | remote_user: "{{ host_remote_user }}" 10 | gather_facts: false 11 | roles: 12 | - { role: repo } 13 | -------------------------------------------------------------------------------- /ansible/install/roles/workloads/templates/custom-cirros.file: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | sudo echo "nameserver {{ 
dns_server }}" > /etc/resolv.conf 4 | cd /home/cirros/ && {{ octavia_test_bin_download_cmd }} "{{ octavia_test_bin_path }}" 5 | echo "Browbeat workload installed" 6 | -------------------------------------------------------------------------------- /elastic/v7ilm_policy/template-browbeat-rally.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "policy": { 3 | "phases": { 4 | "delete": { 5 | "min_age": "{{ age }}", 6 | "actions": { 7 | "delete": {} 8 | } 9 | } 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/neutron-ovsdb/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Neutron handlers for browbeat adjustment 4 | # 5 | 6 | - name: restart neutron services 7 | service: name={{ item }} state=restarted 8 | with_items: 9 | - neutron-openvswitch-agent 10 | -------------------------------------------------------------------------------- /ansible/install/roles/logstash/files/logstash.repo: -------------------------------------------------------------------------------- 1 | [logstash-2.2] 2 | name=logstash repository for 2.2 packages 3 | baseurl=http://packages.elasticsearch.org/logstash/2.2/centos 4 | gpgcheck=1 5 | gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch 6 | enabled=1 7 | -------------------------------------------------------------------------------- /ci-scripts/config/tripleo/install-and-check/all.yml: -------------------------------------------------------------------------------- 1 | dns_server: 192.168.23.1 2 | browbeat_pub_subnet: 192.0.2.0/24 3 | browbeat_pub_pool_start: 192.0.2.100 4 | browbeat_pub_pool_end: 192.0.2.200 5 | browbeat_pub_pool_gw: 192.0.2.1 6 | browbeat_pri_pool_dns: 192.168.23.1 7 | -------------------------------------------------------------------------------- /ansible/browbeat/adjustment-db.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # 4 | 5 | - hosts: Controller 6 | remote_user: heat-admin 7 | gather_facts: false 8 | vars: 9 | ansible_become: true 10 | greenlet_pool_size: 100 11 | max_overflow: 100 12 | roles: 13 | - nova-db 14 | -------------------------------------------------------------------------------- /ansible/install/grafana.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to install Grafana 4 | # 5 | 6 | - hosts: Grafana 7 | remote_user: root 8 | roles: 9 | - {role: epel, when: ansible_distribution_major_version == '7'} 10 | - grafana 11 | environment: "{{proxy_env}}" 12 | -------------------------------------------------------------------------------- /ansible/install/roles/curator/files/curator.repo: -------------------------------------------------------------------------------- 1 | [curator-3] 2 | name=CentOS/RHEL 7 repository for Elasticsearch Curator 3.x packages 3 | baseurl=http://packages.elastic.co/curator/3/centos/7 4 | gpgcheck=1 5 | gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch 6 | enabled=1 7 | -------------------------------------------------------------------------------- /ansible/install/roles/elasticsearch/files/elasticsearch.repo: -------------------------------------------------------------------------------- 1 | [elasticsearch-2.x] 2 | name=Elasticsearch repository for 2.x packages 3 | baseurl=http://packages.elastic.co/elasticsearch/2.x/centos 4 | gpgcheck=1 5 | 
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch 6 | enabled=1 7 | -------------------------------------------------------------------------------- /ansible/install/roles/logstash/files/01-lumberjack-input.conf: -------------------------------------------------------------------------------- 1 | input { 2 | lumberjack { 3 | port => 5043 4 | type => "logs" 5 | ssl_certificate => "/etc/pki/tls/certs/filebeat-forwarder.crt" 6 | ssl_key => "/etc/pki/tls/private/filebeat-forwarder.key" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /ansible/tune/roles/tuned/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Sets correct tuned profile on each host 4 | # See https://bugzilla.redhat.com/show_bug.cgi?id=1246645 5 | # 6 | 7 | - name: Set tuned profile 8 | become: true 9 | command: tuned-adm profile {{ tuned_profile }} 10 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/keystone-workers/templates/keystone_ports.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | Listen {{ item.public_ip_address }}:80 4 | {% if 'httpd' in item.deployment %} 5 | Listen {{ item.admin_ip_address }}:35357 6 | Listen {{ item.public_ip_address }}:5000 7 | {% endif %} -------------------------------------------------------------------------------- /ansible/install/roles/collectd-rhoso/files/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Determine the configuration file based on the node name 4 | CONFIG_FILE="/etc/config/${NODE_NAME}.conf" 5 | # Start the main process with the selected configuration file 6 | exec collectd -f -C $CONFIG_FILE 7 | -------------------------------------------------------------------------------- /ansible/install/graphite.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to install Carbon and Graphite 4 | # 5 | 6 | - hosts: Graphite 7 | remote_user: root 8 | roles: 9 | - {role: epel, when: ansible_distribution_major_version == '7'} 10 | - graphite 11 | environment: "{{proxy_env}}" 12 | -------------------------------------------------------------------------------- /rally/rally-plugins/netcreate-boot/README.rst: -------------------------------------------------------------------------------- 1 | Browbeat Rally Plugin: netcreate-boot 2 | ====================================== 3 | 4 | Functions: 5 | ---------- 6 | - Create 'N' Neutron network 7 | - Create 'N' Neutron subnet 8 | - Launch Nova instance attached to all the networks created 9 | -------------------------------------------------------------------------------- /ansible/oooq/roles/oooq-metadata/vars/main.yml: -------------------------------------------------------------------------------- 1 | dlrn_hash: "Not a pipeline build" 2 | rhos_puddle: "Not a pipeline build" 3 | logs_link: "https://thirdparty.logs.rdoproject.org/jenkins-{{ lookup('env','JOB_NAME') }}-{{ lookup('env','BUILD_NUMBER') }}/" 4 | instackenv: "/home/stack/instackenv.json" 5 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-generic/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Vars for collectd-generic 4 | # 5 | 6 | collectd_packages: 7 | baremetal: 8 | - 
collectd
9 | - collectd-turbostat
10 | guest:
11 | - collectd
12 | graphite:
13 | - collectd
14 | - collectd-turbostat
15 |
--------------------------------------------------------------------------------
/ansible/install/roles/grafana-dashboards/templates/partials/description.yaml:
--------------------------------------------------------------------------------
1 | - title: description row
2 | height: 50px
3 | panels:
4 | - title: Browbeat provided Dashboard
5 | content: "**This dashboard is provided by Browbeat and managed via Grafyaml**"
6 | type: text
7 |
--------------------------------------------------------------------------------
/ansible/oooq/roles/browbeat-run/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Run Browbeat
4 | shell:
5 | "source {{ ansible_env.HOME }}/browbeat/.browbeat-venv/bin/activate; \
6 | cd {{ ansible_env.HOME }}/browbeat/; \
7 | python browbeat.py all | tee {{ ansible_env.HOME }}/browbeat/log/stdout-ci-run.log"
8 |
--------------------------------------------------------------------------------
/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | callback_whitelist = profile_tasks
3 | fact_caching_timeout = 86400
4 | fact_caching = jsonfile
5 | fact_caching_connection = /tmp/browbeat_fact_cache
6 | gathering = smart
7 | roles_path = ./browbeat/roles:./install/roles:
8 | timeout = 30
9 | host_key_checking = False
10 |
11 |
--------------------------------------------------------------------------------
/ansible/install/roles/grafana-prometheus-dashboards/templates/partials/description.yaml:
--------------------------------------------------------------------------------
1 | - title: description row
2 | height: 50px
3 | panels:
4 | - title: Browbeat provided Dashboard
5 | content: "**This dashboard is provided by Browbeat and managed via Grafyaml**"
6 | type: text
7 |
--------------------------------------------------------------------------------
/elastic/templates/README.rst:
--------------------------------------------------------------------------------
1 | ElasticSearch / Kibana Template
2 | ----------------------------------
3 |
4 | Template to instruct Elasticsearch & Kibana not to process some of our fields. For example, our UUIDs would turn into multiple strings due to the default tokenizer's use of '-', '.', '/', etc. as token separators.
5 |
--------------------------------------------------------------------------------
/rally/nova/README.rst:
--------------------------------------------------------------------------------
1 | Nova scenarios
2 | ==============
3 |
4 | We have one required field to make all these Nova scenarios work*:
5 | net_id - The network ID we should attach all these guests to.
6 |
7 | * The Neutron network(s) MUST be set to --shared. If they are not set to --shared, the Rally workload will fail.
8 | -------------------------------------------------------------------------------- /ansible/install/roles/epel/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # epel rpm for collectd packages 2 | epel_rpm: https://download.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm 3 | epel_rpmkey: https://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }} 4 | -------------------------------------------------------------------------------- /ansible/install/roles/logstash/templates/02-beats-input.conf.j2: -------------------------------------------------------------------------------- 1 | input { 2 | beats { 3 | port => {{logstash_syslog_port}} 4 | ssl => true 5 | ssl_certificate => "/etc/pki/tls/certs/filebeat-forwarder.crt" 6 | ssl_key => "/etc/pki/tls/private/filebeat-forwarder.key" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /ansible/oooq/roles/template-configs/vars/main.yml: -------------------------------------------------------------------------------- 1 | elastic_enabled: false 2 | elastic_host: "1.2.3.4.5" 3 | grafana_enabled: false 4 | grafana_host: "1.2.3.4.5" 5 | browbeat_config_file: "browbeat-basic.yaml.j2" 6 | browbeat_cloud_name: "browbeat_ci" 7 | overcloud_size: "{{num_nodes.stdout|int}}" 8 | ntp_server: "pool.ntp.org" 9 | -------------------------------------------------------------------------------- /ansible/install/statsd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Installs a StatsD server on the StatsD host with the default port, does not secure it 4 | 5 | - hosts: statsd 6 | remote_user: root 7 | roles: 8 | - {role: epel, when: ansible_distribution_major_version == '7'} 9 | - statsd-install 10 | environment: "{{proxy_env}}" 11 | -------------------------------------------------------------------------------- /ansible/common_logging/roles/browbeat_logging/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart filebeat 2 | service: 3 | name: filebeat 4 | state: restarted 5 | become: true 6 | 7 | - name: stop filebeat 8 | service: 9 | name: filebeat 10 | state: stopped 11 | become: true 12 | when: not logging_status|bool 13 | 14 | -------------------------------------------------------------------------------- /bindep.txt: -------------------------------------------------------------------------------- 1 | # This is a cross-platform list tracking distribution packages needed by tests; 2 | # see http://docs.openstack.org/infra/bindep/ for additional information. 
3 | libffi-dev [platform:dpkg] 4 | libffi-devel [platform:rpm] 5 | libssl-dev [platform:dpkg] 6 | openssl-devel [platform:rpm] 7 | virtual/libffi [platform:gentoo] 8 | 9 | -------------------------------------------------------------------------------- /browbeat-containers/collectd-openstack/files/ovs_flows.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | HOSTNAME="${COLLECTD_HOSTNAME:-`hostname -f`}" 3 | INTERVAL="${COLLECTD_INTERVAL:-15}" 4 | 5 | while sleep "$INTERVAL"; do 6 | VALUE=$(sudo ovs-ofctl dump-flows br-int | wc -l) 7 | echo "PUTVAL \"$HOSTNAME/ovs-flows/gauge-ovs_flows\" interval=$INTERVAL N:$VALUE" 8 | done 9 | -------------------------------------------------------------------------------- /ansible/install/check-collectd-config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | tasks: 4 | - name: Check if graphite_host is empty in group_vars/all.yml 5 | include_vars: 6 | file: "group_vars/all.yml" 7 | name: group_vars 8 | failed_when: group_vars['is_rhoso_deployment'] is not true and group_vars['graphite_host'] is none 9 | -------------------------------------------------------------------------------- /ansible/install/roles/grafana/templates/data_source.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "name":"graphite", 3 | "type":"graphite", 4 | "url":"http://{{graphite_host}}:{{graphite_port}}/", 5 | "access":"proxy", 6 | "isDefault":true, 7 | "basicAuth":true, 8 | "basicAuthUser":"{{graphite_username}}", 9 | "basicAuthPassword":"{{graphite_password}}" 10 | } -------------------------------------------------------------------------------- /ansible/install/roles/logstash/files/30-elasticsearch-output.conf: -------------------------------------------------------------------------------- 1 | output { 2 | elasticsearch { 3 | hosts => ["localhost:9200"] 4 | sniffing => true 5 | manage_template => false 6 | index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" 7 | document_type => "%{[@metadata][type]}" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-openstack/files/ovs_flows.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | HOSTNAME="${COLLECTD_HOSTNAME:-`hostname -f`}" 3 | INTERVAL="${COLLECTD_INTERVAL:-15}" 4 | 5 | while sleep "$INTERVAL"; do 6 | VALUE=$(sudo ovs-ofctl dump-flows br-int | wc -l) 7 | echo "PUTVAL \"$HOSTNAME/ovs-flows/gauge-ovs_flows\" interval=$INTERVAL N:$VALUE" 8 | done 9 | 10 | -------------------------------------------------------------------------------- /ansible/install/pre-collectd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: undercloud 3 | roles: 4 | - { role: osp_version } 5 | tags: undercloud, controller, compute 6 | tasks: 7 | - name: set fact collectd_container 8 | set_fact: 9 | collectd_container: "{{ (rhosp_major|int > 14)| ternary(true, false) }}" 10 | tags: undercloud, controller, compute 11 | -------------------------------------------------------------------------------- /ansible/oooq/overcloud-metrics.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup Overcloud Metrics 3 | hosts: overcloud 4 | vars: 5 | - config_type: "{{group_names[0]}}" 6 | roles: 7 | - browbeat/common 8 | - browbeat/osp_version 9 | - browbeat/epel 10 
| - browbeat/collectd-openstack 11 | - browbeat/rsyslog-install 12 | - browbeat/rsyslog-templates 13 | -------------------------------------------------------------------------------- /ansible/install/roles/workloads/templates/octavia-userdata.file: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | PORT=80 4 | NUM_POOLS=2 5 | 6 | echo "Running test_server binary" 7 | chmod 777 /home/cirros/test_server.bin 8 | sudo su && echo 1 > /proc/sys/vm/overcommit_memory 9 | 10 | for i in $(seq 0 $(($NUM_POOLS-1))); do 11 | /home/cirros/test_server.bin -port $(($PORT+ $i)) & 12 | done 13 | -------------------------------------------------------------------------------- /ansible/tune/roles/udev_dhcp_all_interfaces/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Removes 99-dhcp-all-interfaces.rules to prevent creating failed systemd resources 4 | # See https://bugzilla.redhat.com/show_bug.cgi?id=1293712 5 | # 6 | 7 | - name: Remove 99-dhcp-all-interfaces.rules 8 | become: true 9 | file: path=/etc/udev/rules.d/99-dhcp-all-interfaces.rules state=absent 10 | -------------------------------------------------------------------------------- /tools-requirements.txt: -------------------------------------------------------------------------------- 1 | setuptools>=80.9.0;python_version>='3.9' 2 | setuptools<80.9.0;python_version<'3.9' 3 | wheel 4 | elasticsearch 5 | scapy 6 | pyrsistent==0.16.0;python_version<'3' 7 | pyrsistent>=0.17.0;python_version>='3' 8 | paramiko==2.8.0 9 | python-manilaclient<=3.0.0 10 | python-novaclient<18.3.0 11 | oslo.utils>=3.36.0 12 | oslo.log>=3.36.0 13 | oslo.serialization 14 | -------------------------------------------------------------------------------- /ansible/install/roles/graphite/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Carbon and Graphite Handlers 4 | # 5 | 6 | - name: restart apache 7 | service: 8 | name: httpd 9 | state: restarted 10 | enabled: true 11 | become: true 12 | 13 | - name: restart carbon-cache 14 | service: 15 | name: carbon-cache 16 | state: restarted 17 | enabled: true 18 | become: true 19 | -------------------------------------------------------------------------------- /rally/heat/templates/random-strings.yaml.template: -------------------------------------------------------------------------------- 1 | heat_template_version: 2014-10-16 2 | 3 | description: Test template for rally create-update-delete scenario 4 | 5 | resources: 6 | test_string_one: 7 | type: OS::Heat::RandomString 8 | properties: 9 | length: 20 10 | test_string_two: 11 | type: OS::Heat::RandomString 12 | properties: 13 | length: 20 14 | -------------------------------------------------------------------------------- /ansible/install/rsyslog-aggregator.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to install and configure a rsyslog aggregation server 4 | # 5 | 6 | 7 | - hosts: elasticsearch 8 | remote_user: root 9 | vars: 10 | rsyslog_elasticsearch_server: "localhost" 11 | rsyslog_aggregator_server: "localhost" 12 | rsyslog_aggregator: true 13 | roles: 14 | - rsyslog-install 15 | - rsyslog-templates 16 | -------------------------------------------------------------------------------- /elastic/v5templates/README.rst: -------------------------------------------------------------------------------- 1 | ElasticSearch / Kibana 
Template
2 | ----------------------------------
3 |
4 | To use v5 templates, set 'elastic5: true' in ansible/install/group_vars/all
5 |
6 | Template to instruct Elasticsearch & Kibana not to process some of our fields. For example, our UUIDs would turn into multiple strings due to the default tokenizer's use of '-', '.', '/', etc. as token separators.
7 |
--------------------------------------------------------------------------------
/ansible/install/roles/flavors/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Add flavors to OpenStack Cloud
4 | #
5 |
6 | # Ignore errors here in case the flavors already exist.
7 | - name: Add flavors to overcloud
8 | shell: . {{ overcloudrc }}; openstack flavor create --ram {{item.memory}} --disk {{item.disk}} --vcpus {{item.cpu}} {{item.name}}
9 | with_items: "{{browbeat_flavors}}"
10 | ignore_errors: true
11 |
--------------------------------------------------------------------------------
/browbeat-containers/collectd-guest/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM quay.io/centos/centos:stream8
2 |
3 | RUN dnf update -y && \
4 | dnf clean all && \
5 | dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
6 | dnf install -y centos-release-opstools && \
7 | dnf install -y collectd
8 |
9 | ADD config/collectd.conf /etc/collectd.conf
10 |
11 | CMD ["collectd", "-f"]
12 |
--------------------------------------------------------------------------------
/ansible/oooq/undercloud-metrics.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Setup Undercloud Metrics
3 | hosts: Undercloud
4 | vars:
5 | - config_type: "{{group_names[0]}}"
6 | - statsd_host: "{{ graphite_host }}"
7 | roles:
8 | - browbeat/common
9 | - browbeat/epel
10 | - browbeat/collectd-openstack
11 | - browbeat/rsyslog-install
12 | - browbeat/rsyslog-templates
13 | - browbeat/statsd-ironic
14 |
--------------------------------------------------------------------------------
/doc/source/introduction.rst:
--------------------------------------------------------------------------------
1 | ============
2 | Introduction
3 | ============
4 |
5 | This started as a project to help determine the number of database
6 | connections a given OpenStack deployment uses via stress tests. It has
7 | since grown into a set of Ansible playbooks to help check deployments
8 | for known issues, install tools, run performance stress workloads and
9 | change parameters of the overcloud.
10 |
--------------------------------------------------------------------------------
/rally/heat/templates/updated-random-strings-delete.yaml.template:
--------------------------------------------------------------------------------
1 | heat_template_version: 2014-10-16
2 |
3 | description: >
4 | Test template for create-update-delete-stack scenario in rally.
5 | The template deletes one resource from the stack defined by random-strings.yaml.template.
6 | 7 | resources: 8 | test_string_one: 9 | type: OS::Heat::RandomString 10 | properties: 11 | length: 20 12 | -------------------------------------------------------------------------------- /ansible/install/roles/workloads/templates/abench-user.file: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo yum clean all 4 | sudo yum install -y httpd 5 | sudo systemctl enable httpd 6 | sudo systemctl start httpd 7 | sudo cp /usr/share/httpd/noindex/index.html /var/www/html/. 8 | sudo chown apache:apache /var/www/html 9 | 10 | sudo sed -i 's/disable_root: 1/disable_root: 0/g' /etc/cloud/cloud.cfg 11 | echo "Browbeat workload installed" 12 | -------------------------------------------------------------------------------- /ansible/browbeat/cleanlogs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to clean log files on controller nodes 4 | # 5 | 6 | - hosts: Controller 7 | remote_user: heat-admin 8 | gather_facts: false 9 | tasks: 10 | - name: Clean Logs 11 | shell: for i in $(ls {{ item }}); do echo "" > $i; done 12 | with_items: 13 | - /var/log/keystone/*.log 14 | - /var/log/nova/*.log 15 | - /var/log/neutron/*.log 16 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-openstack/files/ovn_monitoring.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | HOSTNAME="${COLLECTD_HOSTNAME:-`hostname -f`}" 3 | INTERVAL="${COLLECTD_INTERVAL:-15}" 4 | 5 | while sleep "$INTERVAL"; do 6 | VALUE=$(sudo ovsdb-client dump --no-headings unix:/var/lib/openvswitch/ovn/ovn$1_db.sock $2 | wc -l) 7 | VALUE=$[VALUE-1] 8 | echo "PUTVAL \"$HOSTNAME/ovn-$1db-$2/gauge-ovn_$1db_$2\" interval=$INTERVAL N:$VALUE" 9 | done 10 | -------------------------------------------------------------------------------- /browbeat-containers/collectd-openstack/files/ovn_monitoring.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | HOSTNAME="${COLLECTD_HOSTNAME:-`hostname -f`}" 3 | INTERVAL="${COLLECTD_INTERVAL:-15}" 4 | 5 | while sleep "$INTERVAL"; do 6 | VALUE=$(sudo ovsdb-client dump --no-headings unix:/var/lib/openvswitch/ovn/ovn$1_db.sock $2 | wc -l) 7 | VALUE=$[VALUE-1] 8 | echo "PUTVAL \"$HOSTNAME/ovn-$1db-$2/gauge-ovn_$1db_$2\" interval=$INTERVAL N:$VALUE" 9 | done 10 | -------------------------------------------------------------------------------- /ansible/install/collectd-rhoso.yaml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | gather_facts: true 3 | vars: 4 | ansible_user: "{{ browbeat_user }}" 5 | ansible_python_interpreter: "{{ python_interpreter }}" 6 | roles: 7 | - { role: collectd-rhoso } 8 | environment: "{{proxy_env}}" 9 | 10 | - name: trigger the cronjob to index data from OCP 11 | import_playbook: toggle-indexing-cron-job.yml 12 | vars: 13 | cron_state: "present" 14 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-rhoso/files/prometheus_cluster_rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-cluster-role-binding 5 | subjects: 6 | - kind: ServiceAccount 7 | name: prometheus-k8s 8 | namespace: openshift-monitoring 9 | roleRef: 10 | kind: ClusterRole 11 | 
name: prometheus-cluster-role 12 | apiGroup: rbac.authorization.k8s.io 13 | 14 | -------------------------------------------------------------------------------- /ansible/install/browbeat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | tasks: 4 | - debug: 5 | msg: "redirecting to browbeat installation based on deployment type" 6 | 7 | - name: Install browbeat 8 | import_playbook: browbeat_rhoso.yaml 9 | vars: 10 | rally_undercloud_enabled: false 11 | when: is_rhoso_deployment 12 | 13 | - name: Install browbeat 14 | import_playbook: browbeat_rhosp.yaml 15 | when: not is_rhoso_deployment 16 | -------------------------------------------------------------------------------- /ansible/install/roles/rsyslog-templates/templates/rsyslog.conf.j2: -------------------------------------------------------------------------------- 1 | # Browbeat Rsyslog config defaults blown away for consistency in logging 2 | 3 | # Include all config files in /etc/rsyslog.d/ │ 4 | $IncludeConfig /etc/rsyslog.d/*.conf 5 | 6 | # Prevents messages from getting dumped to /var/log/messages 7 | stop 8 | -------------------------------------------------------------------------------- /ansible/oooq/install-browbeat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Browbeat 3 | hosts: Undercloud 4 | vars: 5 | results_in_httpd: false 6 | statsd_host: "{{ graphite_host }}" 7 | roles: 8 | - browbeat/common 9 | - browbeat/stockpile 10 | - browbeat/browbeat 11 | - browbeat/firewall 12 | - browbeat/rally 13 | - browbeat/shaker 14 | - browbeat/flavors 15 | - browbeat/images 16 | - browbeat/template-configs 17 | -------------------------------------------------------------------------------- /browbeat-containers/collectd-baremetal/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/centos/centos:stream8 2 | 3 | RUN dnf update -y && \ 4 | dnf clean all && \ 5 | dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \ 6 | dnf install -y centos-release-opstools && \ 7 | dnf install -y collectd collectd-turbostat collectd-disk 8 | 9 | ADD config/collectd.conf /etc/collectd.conf 10 | 11 | CMD ["collectd", "-f"] 12 | -------------------------------------------------------------------------------- /ansible/browbeat/adjustment-l3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to change number of Neutron l3 agents 4 | # 5 | # Change l3 agents Example: 6 | # ansible-playbook -i hosts browbeat/adjustment-l3.yml -e "max_l3_agents=3 min_l3_agents=3" 7 | # 8 | 9 | - hosts: Controller 10 | remote_user: heat-admin 11 | gather_facts: false 12 | vars: 13 | ansible_become: true 14 | max_l3_agents: 3 15 | min_l3_agents: 2 16 | roles: 17 | - neutron-l3 18 | -------------------------------------------------------------------------------- /ci-scripts/tripleo/config/nodes/1ctlr_1comp.yml: -------------------------------------------------------------------------------- 1 | # This is a workaround for 2 | # https://github.com/openstack/tripleo-quickstart/commit/4b5918ced49a4907f299931a7d026137ac0804fb 3 | # Define a single controller node and a single compute node. 
4 | overcloud_nodes: 5 | - name: control_0 6 | flavor: control 7 | virtualbmc_port: 6230 8 | 9 | - name: compute_0 10 | flavor: compute 11 | virtualbmc_port: 6231 12 | 13 | node_count: 2 14 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/apache-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Tasks to deploy new prefork.conf settings for httpd 4 | # 5 | 6 | - name: Push new prefork.conf 7 | become: true 8 | template: 9 | src: prefork.conf.j2 10 | dest: /etc/httpd/conf.modules.d/prefork.conf 11 | mode: 0644 12 | owner: root 13 | group: root 14 | backup: true 15 | 16 | - name: Restart httpd 17 | systemd: 18 | name: httpd 19 | state: restarted 20 | -------------------------------------------------------------------------------- /browbeat/schema/shaker.yml: -------------------------------------------------------------------------------- 1 | # This schema defines how a Shaker workload is formatted 2 | name: Shaker workload schema 3 | type: map 4 | allowempty: True 5 | mapping: 6 | # Required items to be a Shaker workload 7 | enabled: 8 | type: bool 9 | required: True 10 | file: 11 | type: str 12 | required: True 13 | name: 14 | type: str 15 | required: True 16 | type: 17 | type: str 18 | required: True 19 | enum: ["shaker"] 20 | -------------------------------------------------------------------------------- /ansible/install/roles/workloads/templates/stress-ng-user.file: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo yum clean all 4 | sudo yum install -y epel-release 5 | sudo yum install -y stress-ng 6 | sudo sed -i 's/disable_root: 1/disable_root: 0/g' /etc/cloud/cloud.cfg 7 | sudo sed -i 's/^.*sleep 10" //g' /root/.ssh/authorized_keys 8 | sudo cat /root/.ssh/authorized_keys 9 | sudo cat /etc/cloud/cloud.cfg 10 | sudo chattr +i /etc/cloud/cloud.cfg 11 | echo "Browbeat workload installed" 12 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-rhoso/templates/collectd_svc_template.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ospcp-collectd-service-{{ idx }} 5 | namespace: ospperf 6 | labels: 7 | app: ospcp-collectd 8 | spec: 9 | selector: 10 | statefulset.kubernetes.io/pod-name: osp-controlplane-collectd-{{ idx }} 11 | ports: 12 | - name: http 13 | protocol: TCP 14 | port: 9104 15 | targetPort: 9104 16 | type: ClusterIP 17 | -------------------------------------------------------------------------------- /ansible/install/stop-collectd-rhoso.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | tasks: 3 | - name: stop collectd on OCP nodes.
4 | shell: | 5 | oc scale -n ospperf statefulset osp-controlplane-collectd --replicas=0 6 | environment: 7 | KUBECONFIG: "{{ kubeconfig_path }}" 8 | ignore_errors: yes 9 | 10 | - name: stop the cronjob that indexes data from OCP 11 | import_playbook: toggle-indexing-cron-job.yml 12 | vars: 13 | cron_state: "absent" 14 | -------------------------------------------------------------------------------- /rally/rally-plugins/pbench-fio/templates/read.job.j2: -------------------------------------------------------------------------------- 1 | [global] 2 | bs = $@ 3 | runtime = {{ runtime }} 4 | ioengine = libaio 5 | iodepth = {{ io_depth }} 6 | direct = 1 7 | startdelay= {{ start_delay }} 8 | clocksource = clock_gettime 9 | directory = /mnt/fio/ 10 | write_bw_log = fio 11 | write_iops_log = fio 12 | write_lat_log = fio 13 | log_avg_msec = 10000 14 | write_hist_log = fio 15 | 16 | [rhcs-read-test] 17 | rw = $@ 18 | size = {{ workload_size }} 19 | numjobs = {{ num_jobs }} 20 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-rhoso/files/collectd_svc_monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: ospcp-collectd-monitor 5 | namespace: openshift-monitoring 6 | spec: 7 | endpoints: 8 | - honorLabels: true 9 | interval: 30s 10 | path: /metrics 11 | port: http 12 | scheme: http 13 | namespaceSelector: 14 | matchNames: 15 | - ospperf 16 | selector: 17 | matchLabels: 18 | app: ospcp-collectd 19 | -------------------------------------------------------------------------------- /ansible/install/roles/kibana-visualization/templates/markdown.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "title": "{{item.title}}", 3 | "visState": "{\"title\":\"{{item.title}}\",\"type\":\"markdown\",\"params\":{\"markdown\":\"{{item.markdown}}\"},\"aggs\":[],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/common_logging/roles/browbeat_logging/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: check if filebeat config is present 2 | stat: 3 | path: /etc/filebeat/filebeat.yml 4 | register: filebeat_config 5 | 6 | - name: insert browbeat uuid 7 | lineinfile: 8 | path: /etc/filebeat/filebeat.yml 9 | regexp: '^\s+browbeat_uuid' 10 | line: ' browbeat_uuid: "{{browbeat_uuid}}"' 11 | become: true 12 | when: filebeat_config.stat.exists 13 | notify: 14 | - restart filebeat 15 | - stop filebeat 16 | -------------------------------------------------------------------------------- /ansible/README.testing.rst: -------------------------------------------------------------------------------- 1 | Running browbeat ansible unit tests 2 | =================================== 3 | 4 | Running ansible molecule unit tests 5 | ----------------------------------- 6 | 7 | - Ensure that you have docker installed: 8 | 9 | https://docs.docker.com/install/ 10 | 11 | - Run tox -e molecule 12 | 13 | Adding ansible molecule unit tests 14 | ---------------------------------- 15 | 16 | - cd ansible/install/roles/ 17 | molecule init scenario --role-name --driver-name docker
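As a sketch only (the role name below is illustrative, not part of this command's required arguments), a freshly created scenario can then be exercised for a single role the same way ci-scripts/molecule/test-molecule.sh loops over every role::

    $ cd ansible/install/roles/grafana
    $ molecule init scenario --role-name grafana --driver-name docker
    $ molecule test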
18 | -------------------------------------------------------------------------------- /ansible/browbeat/ntp-sync.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to force ntp time sync 4 | # 5 | # Versions tested: Newton, Ocata, Pike 6 | # 7 | # Example: 8 | # 9 | # ansible-playbook -i hosts browbeat/ntp-sync.yml -e 'ntp_server=clock.walkabout.com' 10 | # 11 | 12 | - hosts: overcloud 13 | remote_user: "{{ host_remote_user }}" 14 | gather_facts: false 15 | vars_files: 16 | - ../install/group_vars/all.yml 17 | tasks: 18 | - name: Sync NTP Time 19 | command: ntpdate -u {{ntp_server}} 20 | become: true 21 | -------------------------------------------------------------------------------- /ansible/gather/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Adjust Browbeat user if you are deploying Browbeat on a different machine than the Undercloud 4 | browbeat_user: "{{ ansible_user | default('stack') }}" 5 | # Login user for the local/jump machine (Typically Undercloud) 6 | local_remote_user: "{{ ansible_user | default('stack') }}" 7 | # Login user for the Overcloud hosts 8 | host_remote_user: heat-admin 9 | 10 | home_dir: "/home/{{ browbeat_user }}" 11 | browbeat_path: "{{ home_dir }}/browbeat" 12 | container_cli: docker 13 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-rhoso/files/prometheus_cluster_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-cluster-role 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["services", "endpoints", "pods"] 8 | verbs: ["get", "list", "watch"] 9 | - apiGroups: ["extensions"] 10 | resources: ["ingresses"] 11 | verbs: ["get", "list", "watch"] 12 | - apiGroups: ["networking.k8s.io"] 13 | resources: ["ingresses"] 14 | verbs: ["get", "list", "watch"] 15 | 16 | -------------------------------------------------------------------------------- /ansible/browbeat/adjustment-firewall_driver.yml: -------------------------------------------------------------------------------- 1 | - hosts: Controller 2 | remote_user: heat-admin 3 | gather_facts: false 4 | vars: 5 | ansible_become: true 6 | driver: neutron.agent.linux.openvswitch_firewall:OVSFirewallDriver 7 | roles: 8 | - neutron-firewall 9 | 10 | - hosts: Compute 11 | remote_user: heat-admin 12 | gather_facts: false 13 | vars: 14 | ansible_become: true 15 | driver: neutron.agent.linux.openvswitch_firewall:OVSFirewallDriver 16 | roles: 17 | - neutron-firewall 18 | 19 | 20 | -------------------------------------------------------------------------------- /ansible/install/cleanup_sqlalchemy_collectd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to clean up sqlalchemy collectd configuration on controller hosts. 4 | # The sqlalchemy collectd configuration on containers from a previous deployment 5 | # causes issues in the next overcloud deployment. 
6 | 7 | - hosts: Controller 8 | strategy: free 9 | remote_user: "{{ host_remote_user }}" 10 | 11 | roles: 12 | - { role: osp_version } 13 | - { role: containers } 14 | - { role: common } 15 | - { role: cleanup_sqlalchemy_collectd } 16 | -------------------------------------------------------------------------------- /ansible/install/roles/containers/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set browbeat_containers_path 3 | set_fact: 4 | browbeat_containers_path: "{{ ansible_user_dir }}/browbeat/browbeat-containers" 5 | 6 | - name: Set container_cli (OSP < 15) 7 | set_fact: 8 | container_cli: docker 9 | when: rhosp_version is version('15.0', '<') and osp_version is version('12.0', '>=') 10 | 11 | - name: Set container_cli (OSP >= 15) 12 | set_fact: 13 | container_cli: podman 14 | when: rhosp_version is version('15.0', '>=') 15 | 16 | -------------------------------------------------------------------------------- /ansible/install/rsyslog-logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to install and configure rsyslog on the overcloud/undercloud 4 | # 5 | 6 | 7 | - hosts: Undercloud 8 | remote_user: "{{ local_remote_user }}" 9 | vars: 10 | ansible_ssh_pipelining: yes 11 | roles: 12 | - rsyslog-install 13 | - rsyslog-templates 14 | 15 | - hosts: overcloud 16 | remote_user: "{{ host_remote_user }}" 17 | serial: 10 18 | vars: 19 | ansible_ssh_pipelining: yes 20 | roles: 21 | - rsyslog-install 22 | - rsyslog-templates 23 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/run-task-at/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Tasks to kick a task off at a specific time using the at daemon 4 | # 5 | 6 | - name: Create job file 7 | become: true 8 | shell: "echo '#!/bin/bash\n {{the_task}} '>/root/browbeat-sync.sh" 9 | 10 | - name: Set execute on file 11 | become: true 12 | file: 13 | path: /root/browbeat-sync.sh 14 | owner: root 15 | group: root 16 | mode: 0744 17 | 18 | - name: Create at job 19 | become: true 20 | command: "at -f /root/browbeat-sync.sh {{task_time}}" 21 | -------------------------------------------------------------------------------- /ansible/oooq/quickstart-browbeat.yml: -------------------------------------------------------------------------------- 1 | # This is the playbook used by the `quickstart.sh` script.
2 | 3 | - import_playbook: quickstart-extras.yml 4 | 5 | - import_playbook: configure-browbeat.yml 6 | 7 | - import_playbook: undercloud-metrics.yml 8 | 9 | - import_playbook: overcloud-metrics.yml 10 | 11 | - import_playbook: install-browbeat.yml 12 | 13 | - import_playbook: gather-metadata.yml 14 | 15 | - name: Run Browbeat 16 | hosts: Undercloud 17 | roles: 18 | - browbeat/browbeat-run 19 | - browbeat/browbeat-classify 20 | -------------------------------------------------------------------------------- /rally/heat/create-update-delete-stack.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | HeatStacks.create_update_delete_stack: 3 | - 4 | args: 5 | template_path: "rally/heat/templates/random-strings.yaml.template" 6 | updated_template_path: {{updated_template_path}} 7 | runner: 8 | type: "constant" 9 | times: {{times}} 10 | concurrency: {{concurrency}} 11 | context: 12 | users: 13 | tenants: 2 14 | users_per_tenant: 3 15 | sla: 16 | failure_rate: 17 | max: 0 18 | -------------------------------------------------------------------------------- /rally/rally-plugins/pbench-fio/templates/write.job.j2: -------------------------------------------------------------------------------- 1 | [global] 2 | bs = $@ 3 | runtime = {{ runtime }} 4 | ioengine = libaio 5 | iodepth = {{ io_depth }} 6 | direct = 1 7 | startdelay= {{ start_delay }} 8 | clocksource = clock_gettime 9 | directory = /mnt/fio/ 10 | write_bw_log = fio 11 | write_iops_log = fio 12 | write_lat_log = fio 13 | log_avg_msec = 10000 14 | write_hist_log = fio 15 | create_on_open = 1 16 | create_serialize = 0 17 | 18 | [rhcs-write-test] 19 | rw = $@ 20 | size = {{ workload_size }} 21 | numjobs = {{ num_jobs }} 22 | -------------------------------------------------------------------------------- /ansible/install/roles/browbeat-results/templates/00-browbeat.conf.j2: -------------------------------------------------------------------------------- 1 | # Browbeat httpd config to serve results on undercloud 2 | # Installed via browbeat installer 3 | 4 | Listen {{ browbeat_results_port }} 5 | 6 | ServerName browbeat-results 7 | DocumentRoot "{{ browbeat_path }}/results" 8 | 9 | Options Indexes FollowSymLinks 10 | IndexOptions NameWidth=* 11 | AllowOverride None 12 | Require all granted 13 | 14 | 15 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/nova-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Configure nova.conf tasks 4 | # 5 | 6 | - name: (Newton, Ocata, Pike, Train) Configure nova.conf 7 | become: true 8 | ini_file: 9 | dest: "{{nova_config_file}}" 10 | mode: 0640 11 | # (akrzos) Commented out Group as to prevent in Pike incorrect permissions on config file 12 | # group: nova 13 | section: "{{ item.section }}" 14 | option: "{{ item.option }}" 15 | value: "{{ item.value }}" 16 | backup: yes 17 | with_items: 18 | - "{{nova_configuration}}" 19 | -------------------------------------------------------------------------------- /elastic/README.rst: -------------------------------------------------------------------------------- 1 | ElasticSearch Configuration for Browbeat 2 | ----------------------------------------- 3 | 4 | **+ templates/** 5 | 6 | Will contain Elasticsearch templates to account for things like the Browbeat UUID. 7 | These Templates will be installed if you run through our Elasticsearch installer. 
If 8 | you already have an Elasticsearch host, you can install them by running the following:: 9 | 10 | $ cd ../ansible 11 | $ vi install/group_vars/all.yml 12 | # Update your es_ip 13 | $ ansible-playbook -i hosts install/es-template.yml 14 | 15 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/keystone-token/files/my-keystone.te: -------------------------------------------------------------------------------- 1 | module my-keystone 1.0; 2 | 3 | require { 4 | type etc_t; 5 | type keystone_t; 6 | class process execmem; 7 | class dir write; 8 | } 9 | 10 | #============= keystone_t ============== 11 | allow keystone_t etc_t:dir write; 12 | 13 | #!!!! This avc is allowed in the current policy 14 | allow keystone_t self:process execmem; 15 | 16 | require { 17 | type httpd_t; 18 | type etc_t; 19 | class dir write; 20 | } 21 | 22 | #============= httpd_t ============== 23 | allow httpd_t etc_t:dir write; -------------------------------------------------------------------------------- /ansible/oooq/roles/gather-metadata/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Gathers Browbeat metadata and inserts it into elasticsearch 3 | # and flat files in browbeat/results 4 | 5 | - name: Gather Metadata 6 | shell: 7 | "cd {{ ansible_env.HOME }}/browbeat/ansible; \ 8 | ansible-playbook -i hosts \ 9 | gather/site.yml > {{ ansible_env.HOME }}/browbeat/results/metadata.log" 10 | register: metadata_run 11 | until: metadata_run.rc == 0 12 | retries: 2 13 | delay: 60 14 | environment: 15 | ANSIBLE_SSH_ARGS: "-F {{ ansible_env.HOME }}/browbeat/ansible/ssh-config" 16 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/neutron-ovsdb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Configure the ovsdb driver 2 | ini_file: 3 | dest: "{{ item.file }}" 4 | mode: 0640 5 | section: "{{ item.section }}" 6 | option: "{{ item.option }}" 7 | value: "{{ item.value }}" 8 | backup: yes 9 | with_items: 10 | - { file: /etc/neutron/plugins/ml2/openvswitch_agent.ini, section: ovs, option: ovsdb_interface, value: "{{ driver }}" } 11 | notify: 12 | - unmanage neutron services 13 | - restart neutron services 14 | - manage neutron services 15 | - cleanup neutron services 16 | -------------------------------------------------------------------------------- /ansible/install/grafana-prometheus-dashboards.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Builds and Uploads Dashboards for Browbeat analysis of System Performance Metrics 3 | # Templated General Dashboards (Ex.
OpenStack General System Performance) 4 | 5 | - hosts: localhost 6 | gather_facts: false 7 | vars: 8 | ansible_connection: local 9 | upload_general: true 10 | general_dashboards: 11 | - template_name: openstack 12 | template_node_type: "*" 13 | process_list_name: OpenStack 14 | roles: 15 | - grafana-prometheus-dashboards 16 | environment: "{{proxy_env}}" 17 | -------------------------------------------------------------------------------- /ansible/common_logging/browbeat_logging.yml: -------------------------------------------------------------------------------- 1 | - hosts: Undercloud 2 | remote_user: "{{ local_remote_user }}" 3 | vars_files: 4 | - ../install/group_vars/all.yml 5 | roles: 6 | - { role: browbeat_logging } 7 | 8 | - hosts: Controller 9 | remote_user: "{{ host_remote_user }}" 10 | vars_files: 11 | - ../install/group_vars/all.yml 12 | roles: 13 | - { role: browbeat_logging } 14 | 15 | - hosts: Compute 16 | remote_user: "{{ host_remote_user }}" 17 | vars_files: 18 | - ../install/group_vars/all.yml 19 | roles: 20 | - { role: browbeat_logging } 21 | 22 | -------------------------------------------------------------------------------- /ansible/oooq/roles/ci-network/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Public network that shaker utilizes 3 | browbeat_pub_net_name: browbeat_public 4 | browbeat_pub_subnet: 1.1.1.1/22 5 | browbeat_pub_pool_start: 1.1.1.1 6 | browbeat_pub_pool_end: 1.1.1.1 7 | browbeat_pub_pool_gw: 1.1.1.1 8 | # Private subnet 9 | browbeat_pri_net_name: browbeat_private 10 | browbeat_pri_subnet: 172.16.10.0/24 11 | browbeat_pri_pool_start: 172.16.10.2 12 | browbeat_pri_pool_end: 172.16.10.100 13 | browbeat_pri_pool_gw: 172.16.10.1 14 | browbeat_pri_pool_dns: 8.8.8.8 15 | 16 | browbeat_router_name: browbeat_router 17 | -------------------------------------------------------------------------------- /ansible/oooq/browbeat-minimal.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Browbeat integration test 3 | # Check upstream zuul CI 4 | 5 | - import_playbook: configure-browbeat.yml 6 | when: enable_minimal_browbeat|default(false)|bool 7 | 8 | - import_playbook: install-browbeat.yml 9 | when: enable_minimal_browbeat|default(false)|bool 10 | 11 | - import_playbook: disable-ssh-dns.yml 12 | when: enable_minimal_browbeat|default(false)|bool 13 | 14 | - name: Run Browbeat 15 | hosts: Undercloud 16 | roles: 17 | - { role: browbeat/browbeat-run, 18 | when: enable_minimal_browbeat|default(false)|bool } 19 | -------------------------------------------------------------------------------- /ansible/browbeat/clean-gnocchi-resources.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Clean Gnocchi resources 4 | # 5 | # Used in case of adding a new default archive-policy and thus all resources should 6 | # use the same new archive-policy. 7 | # 8 | 9 | - hosts: Undercloud 10 | remote_user: "{{ local_remote_user }}" 11 | gather_facts: false 12 | vars_files: 13 | - ../install/group_vars/all.yml 14 | tasks: 15 | - name: Clean Gnocchi Resources 16 | shell: ".
{{ overcloudrc }}; gnocchi resource list -c type -c id -c revision_end | grep None | awk '{print $2}' | xargs -I % gnocchi resource delete %" 17 | -------------------------------------------------------------------------------- /ansible/logs/roles/fetch-logs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Tar the logs directory 3 | archive: 4 | path: /home/{{host_remote_user}}/{{ansible_hostname}} 5 | dest: /home/{{host_remote_user}}/{{ansible_hostname}}.gz 6 | 7 | - name: Remove logs directory 8 | file: 9 | path: /home/{{host_remote_user}}/{{ansible_hostname}} 10 | state: absent 11 | 12 | - name: Fetch logs 13 | fetch: 14 | src: /home/{{host_remote_user}}/{{ansible_hostname}}.gz 15 | dest: 16 | /home/{{browbeat_user}}/logs/{{inventory_hostname}}-{{ansible_date_time.epoch}}.gz 17 | flat: yes 18 | 19 | -------------------------------------------------------------------------------- /ansible/install/collectd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set fact collectd_container 3 | import_playbook: pre-collectd.yml 4 | when: not is_rhoso_deployment 5 | 6 | - name: Run containerized collectd (Stein and greater recommended) 7 | import_playbook: collectd-container.yml 8 | when: hostvars['undercloud']['collectd_container'] 9 | 10 | - name: Run collectd installed through RPMs 11 | import_playbook: collectd-baremetal.yml 12 | when: not hostvars['undercloud']['collectd_container'] 13 | 14 | - name: Run collectd rhoso 15 | import_playbook: collectd-rhoso.yaml 16 | when: is_rhoso_deployment 17 | -------------------------------------------------------------------------------- /ansible/install/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Browbeat Install Common 4 | # 5 | - name: Check external connectivity 6 | command: ping google.com -c 1 -q 7 | register: ping 8 | ignore_errors: true 9 | become: true 10 | tags: 11 | # Skip ANSIBLE0012 Commands should not change things if nothing needs doing 12 | # Need to check external activity every time 13 | - skip_ansible_lint 14 | 15 | - name: Add DNS record 16 | become: true 17 | lineinfile: dest=/etc/resolv.conf state=present line="nameserver {{ dns_server }}" insertafter="^search" 18 | when: ping.rc != 0 19 | -------------------------------------------------------------------------------- /ansible/install/roles/e2e-benchmarking/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Clone e2e-benchmarking 4 | git: 5 | repo: 'http://github.com/cloud-bulldozer/e2e-benchmarking.git' 6 | dest: "{{ browbeat_path }}/ansible/gather/e2e-benchmarking" 7 | version: master 8 | force: yes 9 | 10 | - name: Deploy benchmark operator and make changes to scripts 11 | shell: | 12 | export KUBECONFIG={{item}} 13 | ./install_e2e_benchmarking.sh 14 | loop: "{{ lookup('file', '{{ browbeat_path }}/ansible/kubeconfig_paths').splitlines() }}" 15 | args: 16 | chdir: "{{ browbeat_path }}/ansible" 17 | -------------------------------------------------------------------------------- /ansible/install/roles/grafana-dashboards/templates/cloud_gnocchi_status.yaml.j2: -------------------------------------------------------------------------------- 1 | #jinja2:lstrip_blocks: True 2 | --- 3 | dashboard: 4 | title: Cloud Gnocchi Status 5 | templating: 6 | - name: Cloud 7 | query: "*" 8 | refresh: true 9 | type: query 
10 | - name: Node 11 | query: "$Cloud.*" 12 | refresh: true 13 | type: query 14 | time: 15 | from: now-1h 16 | to: now 17 | rows: 18 | {% include 'partials/description.yaml' %} 19 | 20 | {% set partial_panel = {'collapse': 'false'} %} 21 | {% include 'partials/gnocchi_backlog.yaml.j2' %} 22 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-metric-list.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.metric_list: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-resource-list.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.resource_list: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /ansible/install/roles/no-sshd-dns/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Disables DNS lookup in the overcloud sshd config file. Speeds up operations in environments with slow dns servers hugely. 
2 | --- 3 | 4 | - name: Disable DNS resolution in Overcloud sshd config 5 | lineinfile: 6 | dest: /etc/ssh/sshd_config 7 | line: "UseDNS no" 8 | state: present 9 | insertbefore: BOF 10 | when: disable_ssh_dns 11 | become: true 12 | become_user: root 13 | 14 | - name: Restart sshd service 15 | service: name=sshd state=restarted 16 | when: disable_ssh_dns 17 | become: true 18 | become_user: root 19 | -------------------------------------------------------------------------------- /ansible/oooq/baremetal-virt-undercloud-int-browbeat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Browbeat integration test 3 | 4 | - import_playbook: baremetal-prep-virthost.yml 5 | 6 | - import_playbook: configure-browbeat.yml 7 | 8 | - import_playbook: baremetal-quickstart-extras.yml 9 | 10 | - import_playbook: install-browbeat.yml 11 | 12 | - import_playbook: disable-ssh-dns.yml 13 | 14 | - import_playbook: undercloud-metrics.yml 15 | - import_playbook: overcloud-metrics.yml 16 | 17 | - import_playbook: gather-metadata.yml 18 | 19 | - name: Run Browbeat 20 | hosts: Undercloud 21 | roles: 22 | - browbeat/browbeat-run 23 | -------------------------------------------------------------------------------- /rally/rally-plugins/pbench-fio/ansible/pbench_agent_tool_meister_firewall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Open ports for redis and tool data sink 3 | hosts: servers 4 | remote_user: cloud-user 5 | become: true 6 | 7 | roles: 8 | - pbench_firewall_open_ports 9 | 10 | tasks: 11 | - name: enable port 8765 12 | firewalld: 13 | port: 8765/tcp 14 | permanent: true 15 | state: enabled 16 | 17 | - name: restart firewalld and redis 18 | systemd: 19 | name: "{{ item }}" 20 | state: restarted 21 | loop: 22 | - firewalld 23 | - redis 24 | -------------------------------------------------------------------------------- /ansible/oooq/baremetal-virt-undercloud-tripleo-browbeat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: baremetal-prep-virthost.yml 3 | 4 | - import_playbook: configure-browbeat.yml 5 | 6 | - import_playbook: baremetal-quickstart-extras.yml 7 | 8 | - import_playbook: install-browbeat.yml 9 | 10 | - import_playbook: disable-ssh-dns.yml 11 | 12 | - import_playbook: undercloud-metrics.yml 13 | - import_playbook: overcloud-metrics.yml 14 | 15 | - import_playbook: gather-metadata.yml 16 | 17 | - name: Run Browbeat 18 | hosts: Undercloud 19 | roles: 20 | - browbeat/browbeat-run 21 | # - browbeat/browbeat-classify 22 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-capabilities-list.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.capabilities_list: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-resource-type-list.yml: 
-------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.resource_type_list: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /ansible/browbeat/adjustment-keystone-token.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to change token type from uuid to fernet and back for keystone. 4 | # 5 | # Examples: 6 | # ansible-playbook -i hosts browbeat/adjustment-keystone-token.yml -e "token_provider=fernet" 7 | # ansible-playbook -i hosts browbeat/adjustment-keystone-token.yml -e "token_provider=uuid" 8 | # 9 | 10 | - hosts: Controller 11 | remote_user: heat-admin 12 | pre_tasks: 13 | - name: Check for variable (token_provider) 14 | fail: msg="token_provider not defined" 15 | when: token_provider is undefined 16 | roles: 17 | - keystone-token 18 | -------------------------------------------------------------------------------- /ansible/install/browbeat_rhoso.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to install Browbeat(Rally) for RHOSO 4 | # 5 | 6 | - hosts: localhost 7 | gather_facts: true 8 | vars: 9 | ansible_user: "{{ browbeat_user }}" 10 | ansible_python_interpreter: "{{ python_interpreter }}" 11 | roles: 12 | - browbeat-rhoso-prep 13 | - browbeat 14 | - { role: browbeat-results, when: browbeat_results_in_httpd|bool} 15 | - firewall 16 | - rally 17 | - { role: flavors, when: browbeat_create_flavors|bool} 18 | - { role: images, when: browbeat_upload_guest_images|bool} 19 | environment: "{{proxy_env}}" 20 | -------------------------------------------------------------------------------- /ansible/install/roles/graphite/files/storage-schemas.conf: -------------------------------------------------------------------------------- 1 | # Schema definitions for Whisper files. Entries are scanned in order, 2 | # and first match wins. This file is scanned for changes every 60 seconds. 3 | # 4 | # [name] 5 | # pattern = regex 6 | # retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ... 7 | # 8 | # Carbon's internal metrics. This entry should match what is specified in 9 | # CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings 10 | [carbon] 11 | pattern = ^carbon\. 
12 | retentions = 60:90d 13 | 14 | [default] 15 | pattern = .* 16 | retentions = 10s:7d,60s:90d,1h:180d 17 | 18 | -------------------------------------------------------------------------------- /ansible/install/roles/logstash/files/10-syslog-filter.conf: -------------------------------------------------------------------------------- 1 | filter { 2 | if [type] == "syslog" { 3 | grok { 4 | match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" } 5 | add_field => [ "received_at", "%{@timestamp}" ] 6 | add_field => [ "received_from", "%{host}" ] 7 | } 8 | syslog_pri { } 9 | date { 10 | match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] 11 | } 12 | } 13 | } 14 | 15 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-archive-policy-list.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.archive_policy_list: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-create-resource-type.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.create_resource_type: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /ansible/oooq/roles/bug-check/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Checks the cloud for known bugs and produces a bug report 3 | # not functional on osp8 or earlier, therefore errors are ignored 4 | 5 | - name: Check Cloud for Bugs 6 | shell: 7 | "cd {{ ansible_env.HOME }}/browbeat/ansible; \ 8 | ansible-playbook -i hosts \ 9 | check/site.yml > {{ ansible_env.HOME }}/browbeat/results/check.log" 10 | register: check_run 11 | ignore_errors: true 12 | until: check_run.rc == 0 13 | retries: 2 14 | delay: 60 15 | environment: 16 | ANSIBLE_SSH_ARGS: "-F {{ ansible_env.HOME }}/browbeat/ansible/ssh-config" 17 | -------------------------------------------------------------------------------- /rally/heat/templates/updated-random-strings-add.yaml.template: -------------------------------------------------------------------------------- 1 | heat_template_version: 2014-10-16 2 | 3 | description: > 4 | Test template for create-update-delete-stack scenario in rally. 5 | The template updates the stack defined by random-strings.yaml.template with additional resource. 
6 | 7 | resources: 8 | test_string_one: 9 | type: OS::Heat::RandomString 10 | properties: 11 | length: 20 12 | test_string_two: 13 | type: OS::Heat::RandomString 14 | properties: 15 | length: 20 16 | test_string_three: 17 | type: OS::Heat::RandomString 18 | properties: 19 | length: 20 20 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-create-archive-policy.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.create_archive_policy: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /ansible/browbeat/odl-open-transactions.yml: -------------------------------------------------------------------------------- 1 | - hosts: Controller 2 | remote_user: "{{host_remote_user}}" 3 | vars_files: 4 | - ../install/group_vars/all.yml 5 | tasks: 6 | - name: Get open transactions 7 | shell: > 8 | sshpass -p {{ karaf_password }} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@localhost "trace:transactions" > /tmp/open-transactions.txt 9 | - name: Copy the open transactions 10 | fetch: 11 | src: /tmp/open-transactions.txt 12 | dest: "{{home_dir}}/open-transactions-{{ansible_hostname}}.txt" 13 | flat: yes 14 | 15 | 16 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-archive-policy-rule-list.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.archive_policy_rule_list: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-create-archive-policy-rule.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.create_archive_policy_rule: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-create-delete-archive-policy.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 
%} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.create_delete_archive_policy: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-create-delete-resource-type.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.create_delete_resource_type: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: {{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /ansible/install/roles/index-ocp-data/tasks/check_oc.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if oc is installed 3 | shell: "which oc" 4 | register: oc_location 5 | ignore_errors: true 6 | 7 | - name: Fail if oc is not installed 8 | fail: 9 | msg: "oc is not installed" 10 | when: oc_location.rc != 0 11 | 12 | - name: Check if logged into Kubernetes cluster 13 | shell: "oc version" 14 | register: oc_version 15 | ignore_errors: true 16 | environment: 17 | KUBECONFIG: "{{ kubeconfig_path }}" 18 | 19 | - name: Fail if not logged in 20 | fail: 21 | msg: "Not logged in to Kubernetes cluster" 22 | when: oc_version.rc != 0 23 | -------------------------------------------------------------------------------- /ansible/install/roles/logstash/templates/openssl_extras.cnf.j2: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_req 4 | prompt = no 5 | 6 | [req_distinguished_name] 7 | C = TG 8 | ST = Togo 9 | L = Lome 10 | O = Private company 11 | CN = * 12 | 13 | [v3_req] 14 | subjectKeyIdentifier = hash 15 | authorityKeyIdentifier = keyid,issuer 16 | basicConstraints = CA:TRUE 17 | subjectAltName = @alt_names 18 | 19 | [alt_names] 20 | DNS.1 = * 21 | DNS.2 = *.* 22 | DNS.3 = *.*.* 23 | DNS.4 = *.*.*.* 24 | DNS.5 = *.*.*.*.* 25 | DNS.6 = *.*.*.*.*.* 26 | DNS.7 = *.*.*.*.*.*.* 27 | IP.1 = {{ ansible_default_ipv4.address }} 28 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-create-delete-archive-policy-rule.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatGnocchi.create_delete_archive_policy_rule: 6 | - 7 | args: {} 8 | runner: 9 | concurrency: {{concurrency}} 10 | times: {{times}} 11 | type: "constant" 12 | context: {} 13 | sla: 14 | max_avg_duration: {{sla_max_avg_duration}} 15 | max_seconds_per_iteration: {{sla_max_seconds}} 16 | failure_rate: 17 | max: 
{{sla_max_failure}} 18 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | repos: 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v2.1.0 5 | hooks: 6 | - id: mixed-line-ending 7 | - id: check-byte-order-marker 8 | - id: check-executables-have-shebangs 9 | - id: check-merge-conflict 10 | - id: debug-statements 11 | - repo: https://github.com/ansible/ansible-lint 12 | rev: v25.6.1 13 | hooks: 14 | - id: ansible-lint 15 | always_run: true 16 | pass_filenames: false 17 | entry: ansible-lint --force-color -v 18 | additional_dependencies: 19 | - ansible-core 20 | - yamllint 21 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/cinder-workers/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Cinder tasks for Browbeat 4 | # * Can change worker count 5 | # 6 | 7 | - name: Configure cinder.conf 8 | become: true 9 | ini_file: 10 | dest: /etc/cinder/cinder.conf 11 | mode: 0640 12 | section: "{{ item.section }}" 13 | option: "{{ item.option }}" 14 | value: "{{ item.value }}" 15 | backup: yes 16 | with_items: 17 | - { section: DEFAULT, option: osapi_volume_workers, value: "{{ workers }}" } 18 | notify: 19 | - unmanage cinder services 20 | - restart cinder services 21 | - manage cinder services 22 | - cleanup cinder services 23 | -------------------------------------------------------------------------------- /ci-scripts/molecule/test-molecule.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | # Run molecule tests. Any arguments passed to this script will be passed onto 5 | # molecule. 6 | 7 | set -e 8 | 9 | molecules="$(find ansible/install/roles/ -name molecule -type d)" 10 | 11 | failed=0 12 | ran=0 13 | for molecule in $molecules; do 14 | pushd $(dirname $molecule) 15 | if ! 
molecule test --all $*; then 16 | failed=$((failed + 1)) 17 | fi 18 | ran=$((ran + 1)) 19 | popd 20 | done 21 | 22 | if [[ $failed -ne 0 ]]; then 23 | echo "Failed $failed / $ran molecule tests" 24 | exit 1 25 | fi 26 | 27 | echo "Ran $ran molecule tests successfully" 28 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/apache-config/templates/prefork.conf.j2: -------------------------------------------------------------------------------- 1 | # Deployed by Browbeat 2 | 3 | StartServers {{httpd_startservers}} 4 | MinSpareServers {{httpd_minspareservers}} 5 | MaxSpareServers {{httpd_maxspareservers}} 6 | ServerLimit {{httpd_serverlimit}} 7 | MaxClients {{httpd_maxclients}} 8 | MaxRequestsPerChild {{httpd_maxrequestsperchild}} 9 | 10 | 11 | # Defaults: 12 | # httpd_startservers: 8 13 | # httpd_minspareservers: 5 14 | # httpd_maxspareservers: 20 15 | # httpd_serverlimit: 256 16 | # httpd_maxclients: 256 17 | # httpd_maxrequestsperchild: 4000 18 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-rhoso/tasks/gen_configs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: generate config files 3 | debug: 4 | msg: "generating config files for {{ node }}" 5 | 6 | - name: include node specific vars 7 | include_vars: "{{ node }}.yml" 8 | 9 | - name: render collectd template with db_conf 10 | template: 11 | src: "controlplane.collectd.conf.j2" 12 | dest: "/tmp/{{ node }}.conf" 13 | vars: 14 | db_conf: true 15 | when: idx == 0 16 | 17 | - name: render collectd template without db_conf 18 | template: 19 | src: "controlplane.collectd.conf.j2" 20 | dest: "/tmp/{{ node }}.conf" 21 | vars: 22 | db_conf: false 23 | when: idx != 0 24 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. browbeat documentation master file, created by 2 | sphinx-quickstart on Tue Jul 9 22:26:36 2013. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Browbeat documentation 7 | ======================================================== 8 | 9 | Contents: 10 | 11 | .. 
toctree:: 12 | :maxdepth: 2 13 | 14 | introduction 15 | installation 16 | usage 17 | plugins 18 | charts 19 | developing 20 | contributing 21 | 22 | Indices and tables 23 | ================== 24 | 25 | * :ref:`genindex` 26 | * :ref:`modindex` 27 | * :ref:`search` 28 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-status-get.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | {% set detailed = detailed or False %} 5 | --- 6 | BrowbeatGnocchi.status_get: 7 | - 8 | args: 9 | detailed: {{detailed}} 10 | runner: 11 | concurrency: {{concurrency}} 12 | times: {{times}} 13 | type: "constant" 14 | context: {} 15 | sla: 16 | max_avg_duration: {{sla_max_avg_duration}} 17 | max_seconds_per_iteration: {{sla_max_seconds}} 18 | failure_rate: 19 | max: {{sla_max_failure}} 20 | -------------------------------------------------------------------------------- /ansible/install/roles/statsd-install/templates/statsd.service.j2: -------------------------------------------------------------------------------- 1 | # This is a systemd file to make statsd work well 2 | # To make this work: 3 | # * place this file in /etc/systemd/system and run the commands: 4 | # 5 | # Credit for this template goes to the venerable Kambiz 6 | # 7 | # systemctl daemon-reload 8 | # systemctl enable graphite-web 9 | # systemctl start graphite-web 10 | # 11 | [Unit] 12 | Description=statsd 13 | 14 | [Service] 15 | Type=simple 16 | TimeoutStartSec=5m 17 | 18 | ExecStart=statsd /etc/statsd/conf.js 19 | 20 | ExecReload=statsd /etc/statsd/conf.js 21 | 22 | Restart=always 23 | RestartSec=30 24 | 25 | [Install] 26 | WantedBy=default.target 27 | -------------------------------------------------------------------------------- /rally/CeilometerAlarms/list_alarms-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | CeilometerAlarms.list_alarms: 6 | - 7 | context: 8 | users: 9 | tenants: 2 10 | users_per_tenant: 2 11 | runner: 12 | concurrency: {{concurrency}} 13 | times: {{times}} 14 | type: "constant" 15 | sla: 16 | max_avg_duration: {{sla_max_avg_duration}} 17 | max_seconds_per_iteration: {{sla_max_seconds}} 18 | failure_rate: 19 | max: {{sla_max_failure}} 20 | -------------------------------------------------------------------------------- /rally/barbican/create-and-list-secret.yaml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BarbicanSecrets.create_and_list: 6 | - 7 | runner: 8 | type: "constant" 9 | times: {{times}} 10 | concurrency: {{concurrency}} 11 | context: 12 | users: 13 | tenants: 1 14 | users_per_tenant: 1 15 | sla: 16 | max_avg_duration: {{sla_max_avg_duration}} 17 | max_seconds_per_iteration: {{sla_max_seconds}} 18 | failure_rate: 19 | max: {{sla_max_failure}} 20 | -------------------------------------------------------------------------------- /rally/barbican/create-and-delete-secret.yaml: 
-------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BarbicanSecrets.create_and_delete: 6 | - 7 | runner: 8 | type: "constant" 9 | times: {{times}} 10 | concurrency: {{concurrency}} 11 | context: 12 | users: 13 | tenants: 1 14 | users_per_tenant: 1 15 | sla: 16 | max_avg_duration: {{sla_max_avg_duration}} 17 | max_seconds_per_iteration: {{sla_max_seconds}} 18 | failure_rate: 19 | max: {{sla_max_failure}} 20 | -------------------------------------------------------------------------------- /ansible/install/stop-collectd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set fact collectd_container 3 | import_playbook: pre-collectd.yml 4 | when: not is_rhoso_deployment 5 | 6 | - name: Run containerized collectd (Stein and greater recommended) 7 | import_playbook: stop-collectd-container.yml 8 | when: hostvars['undercloud']['collectd_container'] and not is_rhoso_deployment 9 | 10 | - name: Run collectd installed through RPMs 11 | import_playbook: stop-collectd-baremetal.yml 12 | when: not hostvars['undercloud']['collectd_container'] and not is_rhoso_deployment 13 | 14 | - name: stop collectd on RHOSO ocp workers 15 | import_playbook: stop-collectd-rhoso.yml 16 | when: is_rhoso_deployment 17 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/nova-db/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Nova tasks for Browbeat 4 | # 5 | 6 | - name: Ensure nova.conf is properly configured 7 | ini_file: 8 | dest: /etc/nova/nova.conf 9 | mode: 0640 10 | section: "{{ item.section }}" 11 | option: "{{ item.option }}" 12 | value: "{{ item.value }}" 13 | backup: yes 14 | with_items: 15 | - { section: DEFAULT, option: wsgi_default_pool_size, value: "{{ greenlet_pool_size }}" } 16 | - { section: api_database, option: max_overflow, value: "{{ max_overflow }}" } 17 | notify: 18 | - unmanage nova services 19 | - restart nova services 20 | - manage nova services 21 | - cleanup nova services 22 | -------------------------------------------------------------------------------- /ansible/install/roles/workloads/templates/linpack-user.file: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo echo "nameserver {{ dns_server }}" > /etc/resolv.conf 3 | if [ $? -gt 0 ] 4 | then 5 | exit 1 6 | fi 7 | sudo curl -O {{ linpack_url }} 8 | sudo tar -xvzf {{ linpack_url | basename }} 9 | sudo mkdir /opt/linpack 10 | sudo cp {{ linpack_url | basename | replace(".tgz", "") }}{{ linpack_path}}* /opt/linpack 11 | if [ $? -gt 0 ] 12 | then 13 | exit 1 14 | fi 15 | 16 | # Allow for root access 17 | sudo sed -i 's/disable_root: 1/disable_root: 0/g' /etc/cloud/cloud.cfg 18 | cat /etc/cloud/cloud.cfg | grep disable_root 19 | if [ $? 
-gt 0 ] 20 | then 21 | exit 1 22 | fi 23 | 24 | echo "Browbeat workload installed" 25 | -------------------------------------------------------------------------------- /rally/barbican/create-and-delete-asymmetric.yaml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BarbicanOrders.create_asymmetric_and_delete: 6 | - 7 | runner: 8 | type: "constant" 9 | times: {{times}} 10 | concurrency: {{concurrency}} 11 | context: 12 | users: 13 | tenants: 1 14 | users_per_tenant: 1 15 | sla: 16 | max_avg_duration: {{sla_max_avg_duration}} 17 | max_seconds_per_iteration: {{sla_max_seconds}} 18 | failure_rate: 19 | max: {{sla_max_failure}} 20 | -------------------------------------------------------------------------------- /ansible/install/start-collectd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set fact collectd_container 3 | import_playbook: pre-collectd.yml 4 | when: not is_rhoso_deployment 5 | 6 | - name: Start containerized collectd (Stein and greater recommended) 7 | import_playbook: start-collectd-container.yml 8 | when: hostvars['undercloud']['collectd_container'] and not is_rhoso_deployment 9 | 10 | - name: Start collectd installed through RPMs 11 | import_playbook: start-collectd-baremetal.yml 12 | when: not hostvars['undercloud']['collectd_container'] and not is_rhoso_deployment 13 | 14 | - name: Start collectd on RHOSO OCP workers 15 | import_playbook: start-collectd-rhoso.yml 16 | when: is_rhoso_deployment 17 | -------------------------------------------------------------------------------- /rally/barbican/create-and-delete-certificate.yaml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BarbicanContainers.create_certificate_and_delete: 6 | - 7 | runner: 8 | type: "constant" 9 | times: {{times}} 10 | concurrency: {{concurrency}} 11 | context: 12 | users: 13 | tenants: 1 14 | users_per_tenant: 1 15 | sla: 16 | max_avg_duration: {{sla_max_avg_duration}} 17 | max_seconds_per_iteration: {{sla_max_seconds}} 18 | failure_rate: 19 | max: {{sla_max_failure}} 20 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-create-resource.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | {% set resource_type = resource_type or 'generic' %} 5 | --- 6 | BrowbeatGnocchi.create_resource: 7 | - 8 | args: 9 | resource_type: {{resource_type}} 10 | runner: 11 | concurrency: {{concurrency}} 12 | times: {{times}} 13 | type: "constant" 14 | context: {} 15 | sla: 16 | max_avg_duration: {{sla_max_avg_duration}} 17 | max_seconds_per_iteration: {{sla_max_seconds}} 18 | failure_rate: 19 | max: {{sla_max_failure}} 20 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/neutron-l3/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Neutron tasks for 
Browbeat 4 | # 5 | 6 | - name: Configure min_l3_agents 7 | ini_file: 8 | dest: /etc/neutron/neutron.conf 9 | mode: 0640 10 | section: "{{ item.section }}" 11 | option: "{{ item.option }}" 12 | value: "{{ item.value }}" 13 | backup: yes 14 | with_items: 15 | - { section: DEFAULT, option: max_l3_agents_per_router, value: "{{ max_l3_agents }}" } 16 | - { section: DEFAULT, option: min_l3_agents_per_router, value: "{{ min_l3_agents }}" } 17 | notify: 18 | - unmanage neutron services 19 | - restart neutron services 20 | - manage neutron services 21 | - cleanup neutron services 22 | -------------------------------------------------------------------------------- /ansible/common_logging/roles/filebeat_setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install filebeat RPM 2 | yum: 3 | name: "{{ filebeat_url }}" 4 | state: present 5 | become: true 6 | 7 | - name: include vars 8 | include_vars: 9 | file: vars/{{rhosp_major}}.yml 10 | 11 | - name: Template the filebeat configuration file 12 | template: 13 | src: filebeat.yml.j2 14 | dest: /etc/filebeat/filebeat.yml 15 | owner: root 16 | group: root 17 | mode: 0644 18 | become: true 19 | 20 | - name: Start and enable filebeat 21 | service: 22 | name: filebeat 23 | state: started 24 | enabled: yes 25 | become: true 26 | when: start_filebeat is defined and start_filebeat|bool 27 | 28 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
12 | 13 | import setuptools 14 | 15 | setuptools.setup( 16 | setup_requires=['pbr'], 17 | pbr=True) 18 | -------------------------------------------------------------------------------- /rally/CeilometerEvents/create_user_and_get_event-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | CeilometerEvents.create_user_and_get_event: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | tenants: 2 11 | users_per_tenant: 2 12 | runner: 13 | concurrency: {{concurrency}} 14 | times: {{times}} 15 | type: "constant" 16 | sla: 17 | max_avg_duration: {{sla_max_avg_duration}} 18 | max_seconds_per_iteration: {{sla_max_seconds}} 19 | failure_rate: 20 | max: {{sla_max_failure}} 21 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-create-delete-resource.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | {% set resource_type = resource_type or 'generic' %} 5 | --- 6 | BrowbeatGnocchi.create_delete_resource: 7 | - 8 | args: 9 | resource_type: {{resource_type}} 10 | runner: 11 | concurrency: {{concurrency}} 12 | times: {{times}} 13 | type: "constant" 14 | context: {} 15 | sla: 16 | max_avg_duration: {{sla_max_avg_duration}} 17 | max_seconds_per_iteration: {{sla_max_seconds}} 18 | failure_rate: 19 | max: {{sla_max_failure}} 20 | -------------------------------------------------------------------------------- /visualization/README.rst: -------------------------------------------------------------------------------- 1 | # OpenStack Browbeat Kibana visualization 2 | ----------------------------------------- 3 | To import these visualizations, you need Kibana 4.1 or greater. 4 | 5 | ## How to install? 6 | ------------------- 7 | Use the provided Ansible playbook in ansible/install to install the Kibana visualizations. 8 | 9 | ## Assumptions 10 | -------------- 11 | This work assumes you are using: 12 | 13 | - Browbeat to run Rally workloads 14 | - Browbeat Gather scripts for Metadata 15 | 16 | Without the two items above, your mileage may vary. 17 | 18 | Also, this work assumes you are using the default browbeat-rally-YYYY.MM.DD ElasticSearch index. If that is not the case, update the JSON files. 
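For reference, a typical install run is sketched below; the inventory file and playbook name are assumptions rather than a verbatim command from this repository, so use whichever playbook under ansible/install applies the kibana-visualization role in your checkout::

    # hypothetical invocation, run from the ansible/ directory of a Browbeat checkout
    ansible-playbook -i hosts install/kibana.yml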
19 | -------------------------------------------------------------------------------- /rally/CeilometerEvents/create_user_and_list_events-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | CeilometerEvents.create_user_and_list_events: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | tenants: 2 11 | users_per_tenant: 2 12 | runner: 13 | concurrency: {{concurrency}} 14 | times: {{times}} 15 | type: "constant" 16 | sla: 17 | max_avg_duration: {{sla_max_avg_duration}} 18 | max_seconds_per_iteration: {{sla_max_seconds}} 19 | failure_rate: 20 | max: {{sla_max_failure}} 21 | -------------------------------------------------------------------------------- /rally/CeilometerTraits/create_user_and_list_traits-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | CeilometerTraits.create_user_and_list_traits: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | tenants: 2 11 | users_per_tenant: 2 12 | runner: 13 | concurrency: {{concurrency}} 14 | times: {{times}} 15 | type: "constant" 16 | sla: 17 | max_avg_duration: {{sla_max_avg_duration}} 18 | max_seconds_per_iteration: {{sla_max_seconds}} 19 | failure_rate: 20 | max: {{sla_max_failure}} 21 | -------------------------------------------------------------------------------- /ansible/install/roles/kibana-visualization/templates/dashboard.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "title": "{{item.title}}", 3 | "hits": 0, 4 | "description": "", 5 | "panelsJSON": "[{% for panel in item.panels %} {\"id\":\"{{panel.id}}\",\"type\":\"{{panel.type}}\",\"panelIndex\":{{panel.panelIndex}},\"size_x\":{{panel.size_x}},\"size_y\":{{panel.size_y}},\"col\":{{panel.col}},\"row\":{{panel.row}}}{% if not loop.last %},{% endif %}{% endfor %}]", 6 | "optionsJSON": "{\"darkTheme\":{{item.darkTheme}}}", 7 | "uiStateJSON": "{}", 8 | "version": 1, 9 | "timeRestore": false, 10 | "kibanaSavedObjectMeta": { 11 | "searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}}}]}" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /rally/CeilometerEvents/create_user_and_list_event_types-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | CeilometerEvents.create_user_and_list_event_types: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | tenants: 2 11 | users_per_tenant: 2 12 | runner: 13 | concurrency: {{concurrency}} 14 | times: {{times}} 15 | type: "constant" 16 | sla: 17 | max_avg_duration: {{sla_max_avg_duration}} 18 | max_seconds_per_iteration: {{sla_max_seconds}} 19 | failure_rate: 20 | max: {{sla_max_failure}} 21 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/neutron-firewall/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Configure the firewall driver 2 | ini_file: 
3 | dest: "{{ item.file }}" 4 | mode: 0640 5 | section: "{{ item.section }}" 6 | option: "{{ item.option }}" 7 | value: "{{ item.value }}" 8 | backup: yes 9 | with_items: 10 | - { file: /etc/neutron/plugins/ml2/ml2_conf.ini, section: securitygroup, option: firewall_driver, value: "{{ driver }}" } 11 | - { file: /etc/neutron/plugins/ml2/openvswitch_agent.ini, section: securitygroup, option: firewall_driver, value: "{{ driver }}" } 12 | notify: 13 | - unmanage neutron services 14 | - restart neutron services 15 | - manage neutron services 16 | - cleanup neutron services 17 | 18 | 19 | -------------------------------------------------------------------------------- /rally/CeilometerTraits/create_user_and_list_trait_descriptions-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | CeilometerTraits.create_user_and_list_trait_descriptions: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | tenants: 2 11 | users_per_tenant: 2 12 | runner: 13 | concurrency: {{concurrency}} 14 | times: {{times}} 15 | type: "constant" 16 | sla: 17 | max_avg_duration: {{sla_max_avg_duration}} 18 | max_seconds_per_iteration: {{sla_max_seconds}} 19 | failure_rate: 20 | max: {{sla_max_failure}} 21 | -------------------------------------------------------------------------------- /rally/rally-plugins/pbench-fio/ansible/pbench_agent_install.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install pbench-agent 3 | hosts: servers 4 | remote_user: cloud-user 5 | become: yes 6 | module_defaults: 7 | ansible.builtin.get_url: 8 | validate_certs: false 9 | 10 | # The default value ('production') can be overridden by cenv, a host-specific 11 | # inventory variable. 12 | vars: 13 | pbench_configuration_environment: "{{ cenv | default('production') }}" 14 | 15 | roles: 16 | - pbench_repo_install 17 | - pbench_agent_install 18 | - pbench_agent_config 19 | 20 | tasks: 21 | - name: register pbench across servers 22 | shell: | 23 | source /etc/profile.d/pbench-agent.sh 24 | pbench-register-tool-set 25 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-rhoso/vars/constants.yml: -------------------------------------------------------------------------------- 1 | # Only logfiles of the containers listed below are captured 2 | # by the tail plugin. 3 | # 4 | # key: container name 5 | # value: instance name used by the tail plugin to push data into Graphite 6 | container_instance_map: 7 | nova-api-log: nova-api 8 | nova-metadata-log: nova-metadata-api 9 | nova-scheduler-scheduler: nova-scheduler 10 | nova-cell1-novncproxy-novncproxy: nova-novncproxy 11 | cinder-api-log: cinder-api 12 | cinder-scheduler: cinder-scheduler 13 | glance-api: glance-api 14 | keystone-api: keystone 15 | ovsdbserver-nb: ovsdb-server-nb 16 | ovsdbserver-sb: ovsdb-server-sb 17 | neutron-api: neutron-server 18 | ovn-controller: ovn-controller 19 | ovn-northd: ovn-northd 20 | -------------------------------------------------------------------------------- /ansible/install/roles/rally/files/create_lock_table.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 
3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | from browbeat_rally.db import schema 13 | 14 | 15 | if __name__ == "__main__": 16 | schema.schema_create() 17 | -------------------------------------------------------------------------------- /ansible/browbeat/install-at.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to install and enable atd 4 | # 5 | # Versions tested: Newton, Ocata, Pike 6 | # 7 | # This allows you to synchronize a script/command across multiple machines. 8 | # Example: Synchronized restarting of ceilometer polling across computes 9 | # and controllers. 10 | # 11 | 12 | - hosts: overcloud 13 | remote_user: "{{ host_remote_user }}" 14 | gather_facts: false 15 | vars_files: 16 | - ../install/group_vars/all.yml 17 | roles: 18 | - repo 19 | tasks: 20 | - name: Install at 21 | package: 22 | name: at 23 | become: true 24 | 25 | - name: Start atd 26 | service: 27 | name: atd 28 | enabled: true 29 | state: restarted 30 | become: true 31 | -------------------------------------------------------------------------------- /ansible/install/roles/grafana-dashboards/templates/partials/ovs_flows.yaml: -------------------------------------------------------------------------------- 1 | - title: OVS Flows 2 | collapse: true 3 | height: 200px 4 | showTitle: true 5 | panels: 6 | - title: OVS Flows 7 | type: graph 8 | legend: 9 | alignAsTable: true 10 | avg: true 11 | current: true 12 | max: true 13 | min: true 14 | rightSide: true 15 | show: true 16 | total: false 17 | values: true 18 | nullPointMode: 'null' 19 | targets: 20 | - target: alias($Cloud.$Node.ovs-flows.gauge-ovs_flows, 'br-int flows') 21 | yaxes: 22 | - format: short 23 | - format: short 24 | -------------------------------------------------------------------------------- /ocp_on_osp/tasks/create_flavors.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: import flavors data 3 | include_vars: 4 | dir: "{{ playbook_dir }}/vars" 5 | files_matching: flavors.yaml 6 | - name: create flavors 7 | os_nova_flavor: 8 | cloud: "overcloud" 9 | state: present 10 | name: "{{ item.name }}" 11 | ram: "{{ item.ram }}" 12 | vcpus: "{{ item.vcpus }}" 13 | disk: "{{ item.disk }}" 14 | with_items: "{{ flavors }}" 15 | 16 | - name: create pci flavors 17 | os_nova_flavor: 18 | cloud: "overcloud" 19 | state: present 20 | name: "{{ item.name }}" 21 | ram: "{{ item.ram }}" 22 | vcpus: "{{ item.vcpus }}" 23 | disk: "{{ item.disk }}" 24 | extra_specs: "'pci_passthrough:alias'='nvme:1'" 25 | with_items: "{{ nvme_flavors }}" 26 | -------------------------------------------------------------------------------- /rally/heat/templates/updated-random-strings-replace.yaml.template: -------------------------------------------------------------------------------- 1 | heat_template_version: 2014-10-16 2 | 3 | description: > 4 | Test template for create-update-delete-stack scenario in rally. 
5 | The template deletes one resource from the stack defined by 6 | random-strings.yaml.template and re-creates it with the updated parameters 7 | (so-called update-replace). That happens because some parameters cannot be 8 | changed without resource re-creation. The template allows to measure performance 9 | of update-replace operation. 10 | 11 | resources: 12 | test_string_one: 13 | type: OS::Heat::RandomString 14 | properties: 15 | length: 20 16 | test_string_two: 17 | type: OS::Heat::RandomString 18 | properties: 19 | length: 40 20 | -------------------------------------------------------------------------------- /ansible/oooq/roles/collectd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #role to install CollectD on all nodes 3 | 4 | - name: Install CollectD 5 | shell: 6 | "cd {{ ansible_env.HOME }}/browbeat/ansible; \ 7 | ansible-playbook -i hosts \ 8 | --extra-vars collectd_compute=true \ 9 | --extra-vars graphite_host={{ graphite_host_template }} \ 10 | --extra-vars graphite_prefix={{ graphite_prefix_template }} \ 11 | --extra-vars dns_server={{ dns_server }} \ 12 | install/collectd.yml \ 13 | > {{ ansible_env.HOME }}/browbeat/results/collecd_install.log" 14 | register: collectd_install 15 | until: collectd_install.rc == 0 16 | retries: 2 17 | delay: 60 18 | environment: 19 | ANSIBLE_SSH_ARGS: "-F {{ ansible_env.HOME }}/browbeat/ansible/ssh-config" 20 | -------------------------------------------------------------------------------- /ansible/install/roles/rsyslog-templates/defaults/main.yml: -------------------------------------------------------------------------------- 1 | openstack_services: 2 | - ceilometer 3 | - cinder 4 | - cluster 5 | - congress 6 | - glance 7 | - gnocchi 8 | - heat 9 | - horizon 10 | - httpd 11 | - ironic 12 | - ironic-inspector 13 | - keystone 14 | - mariadb 15 | - mongodb 16 | - mysql 17 | - neutron 18 | - nova 19 | - openvswitch 20 | - ovs 21 | - rabbitmq 22 | - rabbitmq 23 | - redis 24 | - swift 25 | - zaqar 26 | rsyslog_elasticsearch_server: "" 27 | rsyslog_elasticsearch_port: "9200" 28 | rsyslog_aggregator_server: "" 29 | rsyslog_aggregator_port: "7894" 30 | rsyslog_cloud_name: "{{graphite_prefix}}" 31 | disk_backed_rsyslog: false 32 | rsyslog_forwarding: true 33 | rsyslog_aggregator: false 34 | -------------------------------------------------------------------------------- /rally/authenticate/validate_octavia-cc.yml: -------------------------------------------------------------------------------- 1 | {% set repetitions = repetitions or 2 %} 2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 3 | {% set sla_max_failure = sla_max_failure or 0 %} 4 | {% set sla_max_seconds = sla_max_seconds or 60 %} 5 | --- 6 | Authenticate.validate_octavia: 7 | - 8 | args: 9 | repetitions: {{repetitions}} 10 | context: 11 | users: 12 | tenants: 1 13 | users_per_tenant: 8 14 | runner: 15 | concurrency: {{concurrency}} 16 | times: {{times}} 17 | type: "constant" 18 | sla: 19 | max_avg_duration: {{sla_max_avg_duration}} 20 | max_seconds_per_iteration: {{sla_max_seconds}} 21 | failure_rate: 22 | max: {{sla_max_failure}} 23 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # to avoid dependency conflicts, try to use ranges, eventually banning a 2 | # buggy minor version (!=) or capping (<) once you have proof it breaks. 
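# For example (illustrative only; 'somepkg' below is a placeholder, not a real Browbeat dependency):
#   somepkg>=1.2            # floor at the oldest version known to work
#   somepkg>=1.2,!=1.4.3    # additionally ban a single broken release
#   somepkg>=1.2,<2.0       # cap only once a newer release is proven to break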
3 | ansible>=2.4.1 4 | elasticsearch==7.13 5 | numpy<2.0.0 6 | pyrsistent==0.16.0;python_version<'3' 7 | pyrsistent>=0.17.0;python_version>='3' 8 | grafyaml>=0.0.7 9 | openstacksdk 10 | python-dateutil>=2.4.2 11 | python-openstackclient==3.11.0;python_version<'3' 12 | python-openstackclient>=3.11.0;python_version>='3' 13 | pykwalify==1.7.0;python_version<'3' 14 | pykwalify>=1.8.0;python_version>='3' 15 | gspread==3.7.0;python_version<'3' 16 | gspread>=4.0.0;python_version>='3' 17 | oauth2client==1.4.1;python_version<'3' 18 | oauth2client;python_version>='3' 19 | gspread_formatting 20 | pandas 21 | gspread_dataframe 22 | -------------------------------------------------------------------------------- /visualization/Performance-Dashboard/visualization/Action.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat Action", 3 | "visState": "{\"title\":\"Concurrency\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"action\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/install/roles/curator/tasks/main.yml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | # 4 | # install curator tool for managing elasticsearch 5 | # 6 | 7 | - name: Copy curator yum repo file 8 | copy: 9 | src=curator.repo 10 | dest=/etc/yum.repos.d/curator.repo 11 | owner=root 12 | group=root 13 | mode=0644 14 | become: true 15 | when: install_curator_tool 16 | 17 | - name: Import curator GPG Key 18 | rpm_key: key=http://packages.elastic.co/GPG-KEY-elasticsearch 19 | state=present 20 | when: install_curator_tool 21 | 22 | - name: Install curator and python-setuptools 23 | package: 24 | name: "{{ item }}" 25 | state: present 26 | become: true 27 | with_items: 28 | - python-elasticsearch-curator 29 | - python-setuptools 30 | when: install_curator_tool 31 | -------------------------------------------------------------------------------- /rally/manila/create-share-network-and-delete.yaml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | ManilaShares.create_share_network_and_delete: 6 | - 7 | runner: 8 | type: "constant" 9 | times: {{ times }} 10 | concurrency: {{ concurrency }} 11 | context: 12 | quotas: 13 | manila: 14 | share_networks: -1 15 | users: 16 | tenants: 2 17 | users_per_tenant: 1 18 | sla: 19 | max_avg_duration: {{ sla_max_avg_duration }} 20 | max_seconds_per_iteration: {{ sla_max_seconds }} 21 | failure_rate: 22 | max: {{ sla_max_failure }} 23 | -------------------------------------------------------------------------------- /utils/cleanup_rally_resources.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set +e 3 | 4 | source ~/overcloudrc 5 | for i in 
`openstack server list --all -c ID -f value`; do openstack server delete $i; done 6 | for i in `openstack floating ip list -c ID -f value`; do openstack floating ip delete $i; done 7 | for i in `openstack router list -c ID -f value`; do openstack router unset --external-gateway $i; done 8 | for router in `openstack router list -c ID -f value`; do 9 | subnet=`openstack router show $router -c interfaces_info -f json | jq -r '.interfaces_info[0].subnet_id'` 10 | openstack router remove subnet $router $subnet 11 | done 12 | for i in `openstack router list -c ID -f value`; do openstack router delete $i; done 13 | for i in `openstack network list -c ID -f value`; do openstack network delete $i; done 14 | -------------------------------------------------------------------------------- /ansible/install/roles/grafana-dashboards/templates/partials/gnocchi_backlog.yaml.j2: -------------------------------------------------------------------------------- 1 | - title: Gnocchi Backlog 2 | collapse: {{partial_panel.collapse}} 3 | height: 200px 4 | showTitle: true 5 | panels: 6 | - title: $Node Metrics/Measures Backlog 7 | type: graph 8 | fill: 0 9 | legend: 10 | alignAsTable: true 11 | avg: true 12 | current: true 13 | max: true 14 | min: true 15 | rightSide: true 16 | show: true 17 | total: false 18 | values: true 19 | nullPointMode: 'connected' 20 | steppedLine: true 21 | targets: 22 | - target: aliasByMetric(aliasSub($Cloud.$Node.gnocchi_status.*, 'gauge-', '')) 23 | -------------------------------------------------------------------------------- /ansible/logs/roles/openvswitch/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check that openvswitch is installed 3 | stat: 4 | path: /etc/openvswitch/default.conf 5 | register: openvswitch_config 6 | 7 | - name: Set openvswitch log location (non-containerized) 8 | set_fact: 9 | openvswitch_logs: /var/log/openvswitch 10 | when: openvswitch_config.stat.exists 11 | 12 | - name: Check if log folder exists 13 | stat: 14 | path: "{{openvswitch_logs}}" 15 | register: logs_path 16 | 17 | - name: Copy logs to directory on host 18 | synchronize: 19 | src: "{{openvswitch_logs}}" 20 | dest: /home/{{host_remote_user}}/{{ansible_hostname}} 21 | delegate_to: "{{ inventory_hostname }}" 22 | when: openvswitch_config.stat.exists and logs_path.stat.isdir is defined and logs_path.stat.isdir 23 | 24 | -------------------------------------------------------------------------------- /rally/authenticate/keystone-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | Authenticate.keystone: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /ansible/common_logging/install_logging.yml: -------------------------------------------------------------------------------- 1 | - hosts: Undercloud 2 | remote_user: "{{ local_remote_user }}" 3 | vars_files: 
4 | - ../install/group_vars/all.yml 5 | vars: 6 | config_type: undercloud 7 | roles: 8 | - { role: osp_version } 9 | - { role: filebeat_setup } 10 | 11 | - hosts: Controller 12 | remote_user: "{{ host_remote_user }}" 13 | vars_files: 14 | - ../install/group_vars/all.yml 15 | vars: 16 | config_type: controller 17 | roles: 18 | - { role: osp_version } 19 | - { role: filebeat_setup } 20 | 21 | - hosts: Compute 22 | remote_user: "{{ host_remote_user }}" 23 | vars_files: 24 | - ../install/group_vars/all.yml 25 | vars: 26 | config_type: compute 27 | roles: 28 | - { role: osp_version } 29 | - { role: filebeat_setup } 30 | 31 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-UUID.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-UUID", 3 | "visState": "{\"title\":\"New Visualization\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"browbeat_uuid\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/install/roles/rally/files/browbeat-rally/setup.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
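# Note: a minimal packaging sketch; this setup.py exists so the browbeat_rally helper
# module (see browbeat_rally/db and create_lock_table.py, which imports it to create
# the Rally lock table) can be installed with pip alongside Rally.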
12 | 13 | from setuptools import setup, find_packages 14 | 15 | setup( 16 | name='browbeat_rally', 17 | version='0.0.1', 18 | packages=find_packages(),) 19 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_tenant-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_tenant: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_user-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_user: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /rally/keystonebasic/get_entities-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.get_entities: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /visualization/Performance-Dashboard/visualization/ResultsPerCloudName.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat Results per cloudname", 3 | "visState": "{\"title\":\"New Visualization\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"cloud_name\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": 
"{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /visualization/Performance-Dashboard/visualization/VersionDataTable.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat Version data table", 3 | "visState": "{\"title\":\"version data table\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"version.osp_series\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd/templates/00-browbeat_mod_status.conf.j2: -------------------------------------------------------------------------------- 1 | # Installed by Browbeat Ansible Installer 2 | 3 | LoadModule status_module modules/mod_status.so 4 | 5 | {% if 'Undercloud' in group_names %} 6 | Listen {{apache_undercloud_mod_status_port}} 7 | {% endif %} 8 | {% if 'Controller' in group_names %} 9 | Listen {{apache_controller_mod_status_port}} 10 | {% endif %} 11 | 12 | ExtendedStatus on 13 | {% if 'Undercloud' in group_names %} 14 | 15 | {% endif %} 16 | {% if 'Controller' in group_names %} 17 | 18 | {% endif %} 19 | 20 | SetHandler server-status 21 | Order deny,allow 22 | Deny from all 23 | Allow from 127.0.0.1 24 | 25 | 26 | -------------------------------------------------------------------------------- /ansible/install/roles/statsd-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install StatsD 3 | package: 4 | name: statsd 5 | state: present 6 | become: True 7 | 8 | - name: Create StatsD configuration folder 9 | file: 10 | path: /etc/statsd 11 | state: directory 12 | become: True 13 | 14 | - name: Template configuration 15 | template: 16 | src: statsd_config.js.j2 17 | dest: /etc/statsd/config.js 18 | become: True 19 | 20 | - name: Template StatsD service file 21 | template: 22 | src: statsd.service.j2 23 | dest: /etc/systemd/system/statsd.service.j2 24 | owner: root 25 | group: root 26 | mode: 0644 27 | become: True 28 | 29 | - name: bounce systemd and setup StatsD to run on startup 30 | systemd: 31 | name: statsd 32 | enabled: yes 33 | state: restarted 34 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_delete_user-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_delete_user: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | 
type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /visualization/Neutron/visualization/NeutronResults.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Neutron results: Top 10 results", 3 | "visState": "{\"title\":\"Neutron errors: Top 10 Errors\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"action\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"_type: result\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/install/roles/rally/templates/rally.conf.j2: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | # If set to true, the logging level will be set to DEBUG instead of 3 | # the default INFO level. (boolean value) 4 | # Note: This option can be changed without restarting. 5 | #debug = false 6 | 7 | [benchmark] 8 | 9 | # Server boot timeout (floating point value) 10 | #nova_server_boot_timeout = 300.0 11 | 12 | [cleanup] 13 | # Number of cleanup threads to run (integer value) 14 | #cleanup_threads = 20 15 | 16 | [database] 17 | # The SQLAlchemy connection string to use to connect to the database. 
18 | # (string value) 19 | # Deprecated group/name - [DEFAULT]/sql_connection 20 | # Deprecated group/name - [DATABASE]/sql_connection 21 | # Deprecated group/name - [sql]/connection 22 | #connection = 23 | connection=sqlite:////tmp/{{browbeat_user}}.sqlite 24 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_and_get_role-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_and_get_role: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-Cloud.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-Cloud", 3 | "visState": "{\"aggs\":[{\"id\":\"1\",\"params\":{},\"schema\":\"metric\",\"type\":\"count\"},{\"id\":\"2\",\"params\":{\"field\":\"cloud_name\",\"order\":\"desc\",\"orderBy\":\"1\",\"size\":5},\"schema\":\"bucket\",\"type\":\"terms\"}],\"listeners\":{},\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"title\":\"New Visualization\",\"type\":\"table\"}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}},\"filter\":[]}" 9 | } 10 | } 11 | 12 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-UUID.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-UUID", 3 | "visState": "{\"title\":\"Browbeat-Shaker-Scenario\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"shaker_uuid\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/browbeat/adjustment-apache.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to adjust Apache prefork settings 4 | # 5 | # Versions tested: Newton, Ocata 6 | # 7 | # Example: 8 | # 9 | # ansible-playbook -i hosts browbeat/adjustment-httpd.yml -e 'httpd_startservers=8 httpd_minspareservers=5 httpd_maxspareservers=20 httpd_serverlimit=256 
httpd_maxclients=256 httpd_maxrequestsperchild=4000' 10 | # 11 | 12 | - hosts: Controller 13 | remote_user: "{{ host_remote_user }}" 14 | gather_facts: false 15 | vars_files: 16 | - ../install/group_vars/all.yml 17 | vars: 18 | # Defaults per Ocata (OSP11) 19 | httpd_startservers: 8 20 | httpd_minspareservers: 5 21 | httpd_maxspareservers: 20 22 | httpd_serverlimit: 256 23 | httpd_maxclients: 256 24 | httpd_maxrequestsperchild: 4000 25 | roles: 26 | - apache-config 27 | -------------------------------------------------------------------------------- /ansible/install/browbeat_rhosp.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to install Browbeat (Rally + Shaker) on undercloud 4 | # 5 | 6 | - hosts: Browbeat 7 | remote_user: "{{ browbeat_user }}" 8 | roles: 9 | - common 10 | - osp_version 11 | - browbeat 12 | - stockpile 13 | - { role: browbeat-results, when: browbeat_results_in_httpd|bool} 14 | - firewall 15 | - rally 16 | - shaker 17 | - { role: flavors, when: browbeat_create_flavors|bool} 18 | - { role: images, when: browbeat_upload_guest_images|bool} 19 | - { role: workloads, when: install_browbeat_workloads|bool} 20 | - { role: e2e-benchmarking, when: install_e2e_benchmarking|bool} 21 | environment: "{{proxy_env}}" 22 | 23 | - hosts: Controller*, Compute* 24 | remote_user: "{{ host_remote_user }}" 25 | roles: 26 | - no-sshd-dns 27 | -------------------------------------------------------------------------------- /ansible/install/roles/collectd-openstack/templates/00-browbeat_mod_status.conf.j2: -------------------------------------------------------------------------------- 1 | # Installed by Browbeat Ansible Installer 2 | 3 | LoadModule status_module modules/mod_status.so 4 | 5 | {% if 'Undercloud' in group_names %} 6 | Listen {{apache_undercloud_mod_status_port}} 7 | {% endif %} 8 | {% if 'Controller' in group_names %} 9 | Listen {{apache_controller_mod_status_port}} 10 | {% endif %} 11 | 12 | ExtendedStatus on 13 | {% if 'Undercloud' in group_names %} 14 | 15 | {% endif %} 16 | {% if 'Controller' in group_names %} 17 | 18 | {% endif %} 19 | 20 | SetHandler server-status 21 | Order deny,allow 22 | Deny from all 23 | Allow from 127.0.0.1 24 | 25 | 26 | -------------------------------------------------------------------------------- /ansible/install_e2e_benchmarking.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ansible_dir=`pwd` 4 | cd gather/e2e-benchmarking/workloads/kube-burner 5 | 6 | create_operator_deploy_script() { 7 | cat > deploy_operator.sh <<- EOM 8 | #!/usr/bin/bash -e 9 | 10 | set -e 11 | 12 | . common.sh 13 | 14 | deploy_operator 15 | exit 0 16 | EOM 17 | } 18 | 19 | remove_unnecessary_calls_from_scripts() { 20 | find . -type f -name '*fromgit.sh' | xargs sed -i -e 's/deploy_operator//g' 21 | find . -type f -name '*fromgit.sh' | xargs sed -i -e 's/check_running_benchmarks//g' 22 | find . 
-type f -name '*fromgit.sh' | xargs sed -i -e 's/rm -rf benchmark-operator//g' 23 | } 24 | 25 | create_operator_deploy_script 26 | sudo chmod 775 deploy_operator.sh 27 | ./deploy_operator.sh 28 | remove_unnecessary_calls_from_scripts 29 | cd $ansible_dir 30 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_and_delete_role-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_and_delete_role: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_and_list_tenants-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_and_list_tenants: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_and_list_users-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_and_list_users: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /rally/rally-plugins/neutron/securitygroup_port.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | BrowbeatPlugin.securitygroup_port: 6 | - 7 | args: 8 | network_create_args: {} 9 | runner: 10 | concurrency: {{concurrency}} 11 | times: {{times}} 12 | type: "constant" 13 | context: 14 | users: 15 | tenants: 1 16 | users_per_tenant: 8 17 | quotas: 18 | neutron: 19 | network: -1 20 | 
port: -1 21 | security_group: -1 22 | sla: 23 | max_avg_duration: {{sla_max_avg_duration}} 24 | max_seconds_per_iteration: {{sla_max_seconds}} 25 | failure_rate: 26 | max: {{sla_max_failure}} 27 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-Test.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-Test", 3 | "visState": "{\"title\":\"Browbeat-Shaker-Executor\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"record.test\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"shaker\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /visualization/Performance-Dashboard/visualization/ErrorCountPerUUID.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat Error count per browbeat-uuid", 3 | "visState": "{\"title\":\"New Visualization\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"browbeat_uuid\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"_type:error\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/install/roles/grafana-dashboards/templates/partials/neutron_resources.yaml: -------------------------------------------------------------------------------- 1 | - title: Neutron Resources 2 | collapse: true 3 | height: 200px 4 | showTitle: true 5 | panels: 6 | - title: $Cloud - Neutron $Node Resources 7 | type: graph 8 | legend: 9 | avg: false 10 | current: false 11 | max: false 12 | min: false 13 | show: true 14 | total: false 15 | values: false 16 | nullPointMode: 'null' 17 | targets: 18 | - target: "$Cloud.$Node.ovsagent_monitoring.gauge-qdhcp_ns_total-count" 19 | - target: "$Cloud.$Node.ovsagent_monitoring.gauge-qrouter_ns_total-count" 20 | - target: "$Cloud.$Node.ovsagent_monitoring.gauge-tap_interface_total-count" 21 | -------------------------------------------------------------------------------- /rally/keystonebasic/add_and_remove_user_role-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.add_and_remove_user_role: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: 
"default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_and_delete_service-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_and_delete_service: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_and_list_services-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_and_list_services: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /visualization/Performance-Dashboard/visualization/ResultPerUUID.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Result count per browbeat-uuid", 3 | "visState": "{\"title\":\"result count per browbeat-uuid\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"browbeat_uuid\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"_type:result\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/install/roles/flavors/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Var data for browbeat install. 
4 | # 5 | 6 | browbeat_flavors: 7 | - name: m1.xtiny 8 | cpu: 1 9 | memory: 64 10 | disk: 1 11 | - name: m1.tiny-cirros 12 | cpu: 1 13 | memory: 128 14 | disk: 1 15 | - name: m1.tiny-centos 16 | cpu: 1 17 | memory: 224 18 | disk: 8 19 | - name: m1.tiny-centos84 20 | cpu: 1 21 | memory: 285 22 | disk: 12 23 | - name: m1.tiny 24 | cpu: 1 25 | memory: 512 26 | disk: 1 27 | - name: m1.small 28 | cpu: 1 29 | memory: 2048 30 | disk: 20 31 | - name: m1.medium 32 | cpu: 2 33 | memory: 4096 34 | disk: 40 35 | - name: m1.large 36 | cpu: 4 37 | memory: 8192 38 | disk: 80 39 | - name: m1.xlarge 40 | cpu: 8 41 | memory: 16384 42 | disk: 160 43 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_user_update_password-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_user_update_password: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-Executor.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-Executor", 3 | "visState": "{\"title\":\"Browbeat-Shaker-Density\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"record.executor\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"shaker\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-Scenario.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-Scenario", 3 | "visState": "{\"title\":\"Browbeat-Shaker-UUID\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"browbeat_scenario\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"shaker\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- 
/visualization/Neutron/visualization/NeutronErrors.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Neutron errors: Top 10 Errors", 3 | "visState": "{\"title\":\"Neutron errors: Top 10 Errors\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"error_type\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"_type: error AND action: neutron*\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/install/roles/grafana-dashboards/templates/iostat.yaml.j2: -------------------------------------------------------------------------------- 1 | #jinja2:lstrip_blocks: True 2 | --- 3 | dashboard: 4 | title: IOStat 5 | templating: 6 | - name: Cloud 7 | query: "*" 8 | refresh: true 9 | type: query 10 | # - name: NodeType 11 | # options: 12 | # - undercloud 13 | # - controller 14 | # - compute 15 | # - cephstorage 16 | # - objectstorage 17 | # - blockstorage 18 | # - "*" 19 | # type: custom 20 | - name: Node 21 | query: "$Cloud.*" 22 | refresh: true 23 | type: query 24 | time: 25 | from: now-1h 26 | to: now 27 | rows: 28 | {% include 'partials/description.yaml' %} 29 | 30 | {% set partial_panel = {'collapse': 'false'} %} 31 | {% include 'partials/iostat.yaml.j2' %} 32 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_add_and_list_user_roles-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_add_and_list_user_roles: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_and_list_ec2credentials-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_and_list_ec2credentials: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | 
-------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-Result.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-Scenario", 3 | "visState": "{\"title\":\"Browbeat-Shaker-UUID\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"browbeat_scenario\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"shaker\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | 12 | -------------------------------------------------------------------------------- /visualization/OpenStack-Workers/dashboard/OpenStack-Worker.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "OpenStack - Worker Dashboard", 3 | "hits": 0, 4 | "description": "", 5 | "panelsJSON": "[{\"col\":1,\"id\":\"NovaAPI\",\"row\":1,\"size_x\":12,\"size_y\":5,\"type\":\"visualization\"},{\"col\":1,\"id\":\"NeutronAPI\",\"row\":2,\"size_x\":12,\"size_y\":5,\"type\":\"visualization\"},{\"col\":1,\"id\":\"NeutronTimes\",\"row\":3,\"size_x\":12,\"size_y\":5,\"type\":\"visualization\"},{\"col\":1,\"id\":\"KeystoneAPI\",\"row\":4,\"size_x\":12,\"size_y\":5,\"type\":\"visualization\"}]", 6 | "optionsJSON": "{\"darkTheme\":true}", 7 | "version": 1, 8 | "timeRestore": false, 9 | "kibanaSavedObjectMeta": { 10 | "searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_and_delete_ec2credentials-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_and_delete_ec2credential: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_update_and_delete_tenant-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_update_and_delete_tenant: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: 
{{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-Density.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-Density", 3 | "visState": "{\"title\":\"Browbeat-Shaker-Density\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"accommodation.density\",\"size\":50,\"order\":\"asc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"shaker\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | 12 | -------------------------------------------------------------------------------- /browbeat-containers/collectd-rhoso/files/ovn_monitoring.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | HOSTNAME="${COLLECTD_HOSTNAME:-`hostname -f`}" 3 | INTERVAL="${COLLECTD_INTERVAL:-15}" 4 | 5 | if [ "$1" = "sb" ]; then 6 | IP=$OVN_SBDB_IP 7 | PORT=$OVN_SBDB_PORT 8 | DB="ovnsb" 9 | else 10 | IP=$OVN_NBDB_IP 11 | PORT=$OVN_NBDB_PORT 12 | DB="ovnnb" 13 | fi 14 | 15 | PRIVATE_KEY="/etc/pki/$DB/tls/private/ovndb.key" 16 | CERTIFICATE="/etc/pki/$DB/tls/certs/ovndb.crt" 17 | CA_CERT="/etc/pki/$DB/tls/certs/ovndbca.crt" 18 | 19 | while sleep "$INTERVAL"; do 20 | VALUE=$(sudo ovsdb-client dump --no-headings ssl:$IP:$PORT \ 21 | --private-key=$PRIVATE_KEY \ 22 | --certificate=$CERTIFICATE \ 23 | --ca-cert=$CA_CERT \ 24 | $2 | wc -l) 25 | VALUE=$[VALUE-1] 26 | echo "PUTVAL \"$HOSTNAME/ovn-$1db-$2/gauge-ovn_$1db_$2\" interval=$INTERVAL N:$VALUE" 27 | done 28 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-Compute.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-Compute", 3 | "visState": "{\"title\":\"Browbeat-Shaker-Distribution\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"accommodation.compute_nodes\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"shaker\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-Concurrency.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-Concurrency", 3 | "visState": 
"{\"title\":\"Browbeat-Shaker-Concurrency\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"record.concurrency\",\"size\":50,\"order\":\"asc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"shaker\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-Placement.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-Placement", 3 | "visState": "{\"title\":\"Browbeat-Shaker-Placement\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"accommodation.placement\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"shaker\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/install/shaker_build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to build shaker-image 4 | # 5 | 6 | - hosts: Browbeat 7 | remote_user: "{{ browbeat_user }}" 8 | tasks: 9 | - name: Check external connectivity 10 | command: ping -c 1 google.com 11 | register: ping 12 | ignore_errors: true 13 | 14 | - name: Fail if no external connectivity 15 | fail: 16 | msg: "There should be external connectivity to build shaker image" 17 | when: ping.rc != 0 18 | 19 | - name: Build shaker image 20 | shell: > 21 | source {{ overcloudrc }}; source {{ shaker_venv }}/bin/activate; 22 | shaker-image-builder --image-builder-mode dib --image-builder-distro 23 | {{shaker_image}} --os-region-name {{ shaker_region }} 24 | register: image_result 25 | failed_when: image_result.rc != 0 26 | -------------------------------------------------------------------------------- /rally/keystonebasic/authenticate_user_and_validate_token-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.authenticate_user_and_validate_token: 6 | - 7 | args: {} 8 | context: 9 | users: 10 | project_domain: "default" 11 | resource_management_workers: 30 12 | tenants: 1 13 | user_domain: "default" 14 | users_per_tenant: 8 15 | runner: 16 | concurrency: {{concurrency}} 17 | times: {{times}} 18 | type: "constant" 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | 
-------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-create-metric.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | {% set metric_name = metric_name or '' %} 5 | {% set archive_policy_name = archive_policy_name or '' %} 6 | {% set unit = unit or '' %} 7 | --- 8 | BrowbeatGnocchi.create_metric: 9 | - 10 | args: 11 | metric_name: {{metric_name}} 12 | archive_policy_name: {{archive_policy_name}} 13 | unit: {{unit}} 14 | runner: 15 | concurrency: {{concurrency}} 16 | times: {{times}} 17 | type: "constant" 18 | context: {} 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /visualization/Network-Performance/visualization/Browbeat-Shaker-Distribution.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Browbeat-Shaker-Distribution", 3 | "visState": "{\"title\":\"Browbeat-Shaker-Placement\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showMeticsAtAllLevels\":false,\"showPartialRows\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"accommodation.distribution\",\"size\":50,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}", 4 | "uiStateJSON": "{}", 5 | "description": "", 6 | "version": 1, 7 | "kibanaSavedObjectMeta": { 8 | "searchSourceJSON": "{\"index\":\"[browbeat-shaker-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"shaker\",\"analyze_wildcard\":true}},\"filter\":[]}" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /ansible/install/roles/epel/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Tasks install EPEL packages 4 | # 5 | 6 | - name: Remove old EPEL 7 | package: 8 | name: epel-release 9 | state: absent 10 | become: true 11 | 12 | # The fedoraproject CDN has problems sometimes, this will keep trying 13 | # for up to 10 minutes before failing. 
14 | - name: Import EPEL GPG Key 15 | rpm_key: 16 | state: present 17 | key: "{{ epel_rpmkey }}" 18 | become: true 19 | register: import_result 20 | until: import_result is success 21 | retries: 10 22 | delay: 10 23 | 24 | # Same as above but with the Centos CDN 25 | - name: Check for EPEL repo 26 | package: 27 | name: "{{ epel_rpm }}" 28 | state: present 29 | become: true 30 | register: install_result 31 | until: install_result is success 32 | retries: 10 33 | delay: 10 34 | notify: remove_epel 35 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-create-delete-metric.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | {% set metric_name = metric_name or '' %} 5 | {% set archive_policy_name = archive_policy_name or '' %} 6 | {% set unit = unit or '' %} 7 | --- 8 | BrowbeatGnocchi.create_delete_metric: 9 | - 10 | args: 11 | metric_name: {{metric_name}} 12 | archive_policy_name: {{archive_policy_name}} 13 | unit: {{unit}} 14 | runner: 15 | concurrency: {{concurrency}} 16 | times: {{times}} 17 | type: "constant" 18 | context: {} 19 | sla: 20 | max_avg_duration: {{sla_max_avg_duration}} 21 | max_seconds_per_iteration: {{sla_max_seconds}} 22 | failure_rate: 23 | max: {{sla_max_failure}} 24 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/nova-workers/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Nova tasks for Browbeat 4 | # * Can change worker count 5 | # 6 | 7 | - name: Ensure nova.conf is properly configured 8 | become: true 9 | ini_file: 10 | dest: /etc/nova/nova.conf 11 | mode: 0640 12 | section: "{{ item.section }}" 13 | option: "{{ item.option }}" 14 | value: "{{ item.value }}" 15 | backup: yes 16 | with_items: 17 | - { section: DEFAULT, option: ec2_workers, value: "{{ workers }}" } 18 | - { section: DEFAULT, option: osapi_compute_workers, value: "{{ workers }}" } 19 | - { section: DEFAULT, option: metadata_workers, value: "{{ workers }}" } 20 | - { section: conductor, option: workers, value: "{{ workers }}" } 21 | notify: 22 | - unmanage nova services 23 | - restart nova services 24 | - manage nova services 25 | - cleanup nova services 26 | -------------------------------------------------------------------------------- /rally/manila/create-share-network-and-list.yaml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | ManilaShares.create_share_network_and_list: 6 | - 7 | args: 8 | detailed: True 9 | search_opts: 10 | name: "rally" 11 | runner: 12 | type: "constant" 13 | times: {{ times }} 14 | concurrency: {{ concurrency }} 15 | context: 16 | quotas: 17 | manila: 18 | share_networks: -1 19 | users: 20 | tenants: 2 21 | users_per_tenant: 1 22 | sla: 23 | max_avg_duration: {{ sla_max_avg_duration }} 24 | max_seconds_per_iteration: {{ sla_max_seconds }} 25 | failure_rate: 26 | max: {{ sla_max_failure }} 27 | -------------------------------------------------------------------------------- /rally/CeilometerResource/get_tenant_resources-cc.yml: 
-------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | CeilometerResource.get_tenant_resources: 6 | - 7 | args: {} 8 | context: 9 | ceilometer: 10 | counter_name: "cpu_util" 11 | counter_type: "gauge" 12 | counter_volume: 1.0 13 | counter_unit: "instance" 14 | users: 15 | tenants: 2 16 | users_per_tenant: 2 17 | runner: 18 | concurrency: {{concurrency}} 19 | times: {{times}} 20 | type: "constant" 21 | sla: 22 | max_avg_duration: {{sla_max_avg_duration}} 23 | max_seconds_per_iteration: {{sla_max_seconds}} 24 | failure_rate: 25 | max: {{sla_max_failure}} 26 | -------------------------------------------------------------------------------- /rally/glance/list-images-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | GlanceImages.list_images: 6 | - 7 | runner: 8 | type: "constant" 9 | times: {{times}} 10 | concurrency: {{concurrency}} 11 | context: 12 | users: 13 | tenants: 1 14 | users_per_tenant: 1 15 | images: 16 | image_url: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img" 17 | image_type: "qcow2" 18 | image_container: "bare" 19 | images_per_tenant: 4 20 | sla: 21 | max_avg_duration: {{sla_max_avg_duration}} 22 | max_seconds_per_iteration: {{sla_max_seconds}} 23 | failure_rate: 24 | max: {{sla_max_failure}} 25 | 26 | -------------------------------------------------------------------------------- /ocp_on_osp/vars/flavors.yaml: -------------------------------------------------------------------------------- 1 | flavors: 2 | - { name: 'm4.xlarge', ram: 16384, vcpus: 4, disk: 40 } 3 | - { name: 'm1.small', ram: 1740, vcpus: 1, disk: 71 } 4 | - { name: 'm5.xlarge', ram: 16384, vcpus: 4, disk: 96 } 5 | - { name: 'm5.4xlarge', ram: 65280, vcpus: 16, disk: 300 } 6 | - { name: 'm5.2xlarge', ram: 31488, vcpus: 8, disk: 128 } 7 | - { name: 'm5.large', ram: 7936, vcpus: 2, disk: 96 } 8 | - { name: 'ci.master', ram: 124672, vcpus: 16, disk: 220 } 9 | - { name: 'ci.worker', ram: 31488, vcpus: 8, disk: 100 } 10 | - { name: 'ci.infra', ram: 124672, vcpus: 24, disk: 100 } 11 | - { name: 'ci.workload', ram: 65280, vcpus: 16, disk: 300 } 12 | nvme_flavors: 13 | - { name: 'r5.4xlarge-pci', ram: 124672, vcpus: 16, disk: 128 } 14 | - { name: 'm5.10xlarge-pci', ram: 163584, vcpus: 40, disk: 256 } 15 | - { name: 'm5.4xlarge-pci', ram: 65280, vcpus: 16, disk: 200 } 16 | -------------------------------------------------------------------------------- /rally/CeilometerStats/create_meter_and_get_stats-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | CeilometerStats.create_meter_and_get_stats: 6 | - 7 | args: 8 | user_id: "user-id" 9 | resource_id: "resource-id" 10 | counter_volume: 1.0 11 | counter_unit: "" 12 | counter_type: "cumulative" 13 | context: 14 | users: 15 | tenants: 2 16 | users_per_tenant: 2 17 | runner: 18 | concurrency: {{concurrency}} 19 | times: {{times}} 20 | type: "constant" 21 | sla: 22 | max_avg_duration: {{sla_max_avg_duration}} 23 | 
max_seconds_per_iteration: {{sla_max_seconds}} 24 | failure_rate: 25 | max: {{sla_max_failure}} 26 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-metric-aggregation.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | {% set all_metrics = all_metrics or false %} 5 | {% set aggregation = aggregation or 'mean' %} 6 | {% set refresh = refresh or false %} 7 | --- 8 | BrowbeatGnocchi.metric_aggregation: 9 | - 10 | args: 11 | aggregation: {{aggregation}} 12 | refresh: {{refresh}} 13 | runner: 14 | concurrency: {{concurrency}} 15 | times: {{times}} 16 | type: "constant" 17 | context: 18 | browbeat_gnocchi_metric_list: 19 | all: {{all_metrics}} 20 | sla: 21 | max_avg_duration: {{sla_max_avg_duration}} 22 | max_seconds_per_iteration: {{sla_max_seconds}} 23 | failure_rate: 24 | max: {{sla_max_failure}} 25 | -------------------------------------------------------------------------------- /ansible/install/roles/rsyslog-templates/templates/05-outputs.conf.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True 2 | #### OUTPUTS #### 3 | {% if not rsyslog_forwarding or rsyslog_aggregator %} 4 | action( 5 | name="send-es-prod" 6 | type="omelasticsearch" 7 | server="{{rsyslog_elasticsearch_server}}" 8 | serverport="{{rsyslog_elasticsearch_port}}" 9 | template="com-redhat-rsyslog-hier" 10 | searchIndex="logstash-index-pattern" 11 | dynSearchIndex="on" 12 | searchType="rsyslog" 13 | bulkmode="on" 14 | queue.type="linkedlist" 15 | queue.size="5000" 16 | queue.dequeuebatchsize="600" 17 | action.resumeretrycount="-1") 18 | {% endif %} 19 | {% if rsyslog_forwarding and not rsyslog_aggregator %} 20 | action(type="omfwd" Target="{{rsyslog_aggregator_server}}" Port="{{rsyslog_aggregator_port}}" Protocol="tcp" Template="ViaQ_SyslogProtocol23Format") 21 | {% endif %} 22 | -------------------------------------------------------------------------------- /rally/authenticate/validate_heat-cc.yml: -------------------------------------------------------------------------------- 1 | {% set repetitions = repetitions or 2 %} 2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 3 | {% set sla_max_failure = sla_max_failure or 0 %} 4 | {% set sla_max_seconds = sla_max_seconds or 60 %} 5 | --- 6 | Authenticate.validate_heat: 7 | - 8 | args: 9 | repetitions: {{repetitions}} 10 | context: 11 | users: 12 | project_domain: "default" 13 | resource_management_workers: 30 14 | tenants: 1 15 | user_domain: "default" 16 | users_per_tenant: 8 17 | runner: 18 | concurrency: {{concurrency}} 19 | times: {{times}} 20 | type: "constant" 21 | sla: 22 | max_avg_duration: {{sla_max_avg_duration}} 23 | max_seconds_per_iteration: {{sla_max_seconds}} 24 | failure_rate: 25 | max: {{sla_max_failure}} 26 | -------------------------------------------------------------------------------- /rally/authenticate/validate_nova-cc.yml: -------------------------------------------------------------------------------- 1 | {% set repetitions = repetitions or 2 %} 2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 3 | {% set sla_max_failure = sla_max_failure or 0 %} 4 | {% set sla_max_seconds = sla_max_seconds or 60 %} 5 | --- 6 | Authenticate.validate_nova: 7 | - 8 | args: 9 | repetitions: {{repetitions}} 
10 | context: 11 | users: 12 | project_domain: "default" 13 | resource_management_workers: 30 14 | tenants: 1 15 | user_domain: "default" 16 | users_per_tenant: 8 17 | runner: 18 | concurrency: {{concurrency}} 19 | times: {{times}} 20 | type: "constant" 21 | sla: 22 | max_avg_duration: {{sla_max_avg_duration}} 23 | max_seconds_per_iteration: {{sla_max_seconds}} 24 | failure_rate: 25 | max: {{sla_max_failure}} 26 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_add_list_roles-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_and_list_roles: 6 | - 7 | args: 8 | create_role_kwargs: {} 9 | list_role_kwargs: {} 10 | context: 11 | users: 12 | project_domain: "default" 13 | resource_management_workers: 30 14 | tenants: 1 15 | user_domain: "default" 16 | users_per_tenant: 8 17 | runner: 18 | concurrency: {{concurrency}} 19 | times: {{times}} 20 | type: "constant" 21 | sla: 22 | max_avg_duration: {{sla_max_avg_duration}} 23 | max_seconds_per_iteration: {{sla_max_seconds}} 24 | failure_rate: 25 | max: {{sla_max_failure}} 26 | -------------------------------------------------------------------------------- /rally/rally-plugins/gnocchi/gnocchi-metric-get-measures.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | {% set all_metrics = all_metrics or false %} 5 | {% set aggregation = aggregation or 'mean' %} 6 | {% set refresh = refresh or false %} 7 | --- 8 | BrowbeatGnocchi.metric_get_measures: 9 | - 10 | args: 11 | aggregation: {{aggregation}} 12 | refresh: {{refresh}} 13 | runner: 14 | concurrency: {{concurrency}} 15 | times: {{times}} 16 | type: "constant" 17 | context: 18 | browbeat_gnocchi_metric_list: 19 | all: {{all_metrics}} 20 | sla: 21 | max_avg_duration: {{sla_max_avg_duration}} 22 | max_seconds_per_iteration: {{sla_max_seconds}} 23 | failure_rate: 24 | max: {{sla_max_failure}} 25 | -------------------------------------------------------------------------------- /rally/swift/create-container-and-object-then-delete-all.yaml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | SwiftObjects.create_container_and_object_then_delete_all: 6 | - 7 | args: 8 | objects_per_container: {{ objects_per_container }} 9 | object_size: {{ object_size }} 10 | runner: 11 | type: "constant" 12 | times: {{ times }} 13 | concurrency: {{ concurrency }} 14 | context: 15 | users: 16 | tenants: 1 17 | users_per_tenant: 1 18 | roles: 19 | - "admin" 20 | sla: 21 | max_avg_duration: {{ sla_max_avg_duration }} 22 | max_seconds_per_iteration: {{ sla_max_seconds }} 23 | failure_rate: 24 | max: {{ sla_max_failure }} 25 | -------------------------------------------------------------------------------- /rally/authenticate/validate_cinder-cc.yml: -------------------------------------------------------------------------------- 1 | {% set repetitions = repetitions or 2 %} 2 | {% set sla_max_avg_duration = 
sla_max_avg_duration or 60 %} 3 | {% set sla_max_failure = sla_max_failure or 0 %} 4 | {% set sla_max_seconds = sla_max_seconds or 60 %} 5 | --- 6 | Authenticate.validate_cinder: 7 | - 8 | args: 9 | repetitions: {{repetitions}} 10 | context: 11 | users: 12 | project_domain: "default" 13 | resource_management_workers: 30 14 | tenants: 1 15 | user_domain: "default" 16 | users_per_tenant: 8 17 | runner: 18 | concurrency: {{concurrency}} 19 | times: {{times}} 20 | type: "constant" 21 | sla: 22 | max_avg_duration: {{sla_max_avg_duration}} 23 | max_seconds_per_iteration: {{sla_max_seconds}} 24 | failure_rate: 25 | max: {{sla_max_failure}} 26 | -------------------------------------------------------------------------------- /rally/authenticate/validate_glance-cc.yml: -------------------------------------------------------------------------------- 1 | {% set repetitions = repetitions or 2 %} 2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 3 | {% set sla_max_failure = sla_max_failure or 0 %} 4 | {% set sla_max_seconds = sla_max_seconds or 60 %} 5 | --- 6 | Authenticate.validate_glance: 7 | - 8 | args: 9 | repetitions: {{repetitions}} 10 | context: 11 | users: 12 | project_domain: "default" 13 | resource_management_workers: 30 14 | tenants: 1 15 | user_domain: "default" 16 | users_per_tenant: 8 17 | runner: 18 | concurrency: {{concurrency}} 19 | times: {{times}} 20 | type: "constant" 21 | sla: 22 | max_avg_duration: {{sla_max_avg_duration}} 23 | max_seconds_per_iteration: {{sla_max_seconds}} 24 | failure_rate: 25 | max: {{sla_max_failure}} 26 | -------------------------------------------------------------------------------- /rally/authenticate/validate_monasca-cc.yml: -------------------------------------------------------------------------------- 1 | {% set repetitions = repetitions or 2 %} 2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 3 | {% set sla_max_failure = sla_max_failure or 0 %} 4 | {% set sla_max_seconds = sla_max_seconds or 60 %} 5 | --- 6 | Authenticate.validate_monasca: 7 | - 8 | args: 9 | repetitions: {{repetitions}} 10 | context: 11 | users: 12 | project_domain: "default" 13 | resource_management_workers: 30 14 | tenants: 1 15 | user_domain: "default" 16 | users_per_tenant: 8 17 | runner: 18 | concurrency: {{concurrency}} 19 | times: {{times}} 20 | type: "constant" 21 | sla: 22 | max_avg_duration: {{sla_max_avg_duration}} 23 | max_seconds_per_iteration: {{sla_max_seconds}} 24 | failure_rate: 25 | max: {{sla_max_failure}} 26 | -------------------------------------------------------------------------------- /rally/authenticate/validate_neutron-cc.yml: -------------------------------------------------------------------------------- 1 | {% set repetitions = repetitions or 2 %} 2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 3 | {% set sla_max_failure = sla_max_failure or 0 %} 4 | {% set sla_max_seconds = sla_max_seconds or 60 %} 5 | --- 6 | Authenticate.validate_neutron: 7 | - 8 | args: 9 | repetitions: {{repetitions}} 10 | context: 11 | users: 12 | project_domain: "default" 13 | resource_management_workers: 30 14 | tenants: 1 15 | user_domain: "default" 16 | users_per_tenant: 8 17 | runner: 18 | concurrency: {{concurrency}} 19 | times: {{times}} 20 | type: "constant" 21 | sla: 22 | max_avg_duration: {{sla_max_avg_duration}} 23 | max_seconds_per_iteration: {{sla_max_seconds}} 24 | failure_rate: 25 | max: {{sla_max_failure}} 26 | -------------------------------------------------------------------------------- 
/rally/swift/create-container-and-object-then-list-objects.yaml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | SwiftObjects.create_container_and_object_then_list_objects: 6 | - 7 | args: 8 | objects_per_container: {{ objects_per_container }} 9 | object_size: {{ object_size }} 10 | runner: 11 | type: "constant" 12 | times: {{ times }} 13 | concurrency: {{ concurrency }} 14 | context: 15 | users: 16 | tenants: 1 17 | users_per_tenant: 1 18 | roles: 19 | - "admin" 20 | sla: 21 | max_avg_duration: {{ sla_max_avg_duration }} 22 | max_seconds_per_iteration: {{ sla_max_seconds }} 23 | failure_rate: 24 | max: {{ sla_max_failure }} 25 | -------------------------------------------------------------------------------- /rally/authenticate/validate_ceilometer-cc.yml: -------------------------------------------------------------------------------- 1 | {% set repetitions = repetitions or 2 %} 2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 3 | {% set sla_max_failure = sla_max_failure or 0 %} 4 | {% set sla_max_seconds = sla_max_seconds or 60 %} 5 | --- 6 | Authenticate.validate_ceilometer: 7 | - 8 | args: 9 | repetitions: {{repetitions}} 10 | context: 11 | users: 12 | project_domain: "default" 13 | resource_management_workers: 30 14 | tenants: 1 15 | user_domain: "default" 16 | users_per_tenant: 8 17 | runner: 18 | concurrency: {{concurrency}} 19 | times: {{times}} 20 | type: "constant" 21 | sla: 22 | max_avg_duration: {{sla_max_avg_duration}} 23 | max_seconds_per_iteration: {{sla_max_seconds}} 24 | failure_rate: 25 | max: {{sla_max_failure}} 26 | -------------------------------------------------------------------------------- /rally/cinder/cinder-create-and-list-volume.yml: -------------------------------------------------------------------------------- 1 | {% set size = size or 1 %} 2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 3 | {% set sla_max_failure = sla_max_failure or 0 %} 4 | {% set sla_max_seconds = sla_max_seconds or 60 %} 5 | --- 6 | CinderVolumes.create_and_list_volume: 7 | - 8 | args: 9 | size: {{ size }} 10 | detailed: True 11 | runner: 12 | type: "constant" 13 | times: {{ times }} 14 | concurrency: {{ concurrency }} 15 | context: 16 | users: 17 | tenants: 10 18 | users_per_tenant: 20 19 | quotas: 20 | cinder: 21 | gigabytes: -1 22 | volumes: -1 23 | sla: 24 | max_avg_duration: {{ sla_max_avg_duration }} 25 | max_seconds_per_iteration: {{ sla_max_seconds }} 26 | failure_rate: 27 | max: {{ sla_max_failure }} 28 | -------------------------------------------------------------------------------- /ansible/install/roles/rsyslog-templates/templates/01-modules.conf.j2: -------------------------------------------------------------------------------- 1 | #### MODULES #### 2 | 3 | # Emit internal rsyslog counters 4 | module(load="impstats" format="cee" interval="60") 5 | 6 | {% if not rsyslog_aggregator %} 7 | module(load="imfile") 8 | {% endif %} 9 | 10 | # Read from systemd's journal 11 | module(load="imjournal" StateFile="imjournal.state" UsePidFromSystem="on" RateLimit.Burst="500000" RateLimit.Interval="1" IgnorePreviousMessages="on" PersistStateInterval="1000") 12 | 13 | {% if not rsyslog_forwarding or rsyslog_aggregator %} 14 | # ElasticSearch output module 15 | module(load="omelasticsearch") 16 | 17 | # Provides TCP syslog reception 18 | 
module(load="imptcp") 19 | input(type="imptcp" port="{{rsyslog_aggregator_port}}") 20 | {% endif %} 21 | 22 | # Parsing CEE JSON messages 23 | module(load="mmjsonparse") 24 | 25 | # Ensures we have UTF-8 encoded payloads 26 | module(load="mmutf8fix") 27 | -------------------------------------------------------------------------------- /ansible/browbeat/adjustment-haproxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Playbook to bump the number of max "defaults" (vs global) connections through haproxy 4 | # 5 | # Versions tested: Newton, Ocata 6 | # 7 | # Examples: 8 | # 9 | # ansible-playbook -i hosts browbeat/adjustment-haproxy.yml -e 'old_maxconn=4096 new_maxconn=8192' 10 | # 11 | 12 | - hosts: Controller 13 | remote_user: "{{ host_remote_user }}" 14 | gather_facts: false 15 | vars_files: 16 | - ../install/group_vars/all.yml 17 | vars: 18 | old_maxconn: 4096 19 | new_maxconn: 8192 20 | tasks: 21 | - name: Adjusting haproxy maxconn 22 | become: true 23 | replace: 24 | dest: /etc/haproxy/haproxy.cfg 25 | regexp: " maxconn {{old_maxconn}}" 26 | replace: " maxconn {{new_maxconn}}" 27 | backup: true 28 | 29 | - name: Reload haproxy 30 | systemd: 31 | name: haproxy 32 | state: reloaded 33 | -------------------------------------------------------------------------------- /rally/keystonebasic/create_and_update_user-cc.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %} 2 | {% set sla_max_failure = sla_max_failure or 0 %} 3 | {% set sla_max_seconds = sla_max_seconds or 60 %} 4 | --- 5 | KeystoneBasic.create_and_update_user: 6 | - 7 | args: 8 | create_user_kwargs: {} 9 | update_user_kwargs: 10 | enabled: false 11 | context: 12 | users: 13 | project_domain: "default" 14 | resource_management_workers: 30 15 | tenants: 1 16 | user_domain: "default" 17 | users_per_tenant: 8 18 | runner: 19 | concurrency: {{concurrency}} 20 | times: {{times}} 21 | type: "constant" 22 | sla: 23 | max_avg_duration: {{sla_max_avg_duration}} 24 | max_seconds_per_iteration: {{sla_max_seconds}} 25 | failure_rate: 26 | max: {{sla_max_failure}} 27 | -------------------------------------------------------------------------------- /rally/octavia/octavia-create-list-loadbalancers.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_failure = sla_max_failure or 0 %} 2 | --- 3 | Octavia.create_and_list_loadbalancers: 4 | - 5 | args: {} 6 | runner: 7 | type: "constant" 8 | times: {{ times }} 9 | concurrency: {{ concurrency }} 10 | context: 11 | users: 12 | tenants: 2 13 | users_per_tenant: 2 14 | roles: 15 | - load-balancer_member 16 | network: 17 | start_cidr: "10.0.0.0/16" 18 | networks_per_tenant: 1 19 | quotas: 20 | neutron: 21 | network: -1 22 | port: -1 23 | router: -1 24 | subnet: -1 25 | floatingip: -1 26 | nova: 27 | instances: -1 28 | cores: -1 29 | ram: -1 30 | sla: 31 | failure_rate: 32 | max: {{ sla_max_failure }} 33 | -------------------------------------------------------------------------------- /rally/octavia/octavia-create-show-loadbalancers.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_failure = sla_max_failure or 0 %} 2 | --- 3 | Octavia.create_and_show_loadbalancers: 4 | - 5 | args: {} 6 | runner: 7 | type: "constant" 8 | times: {{ times }} 9 | concurrency: {{ concurrency }} 10 | context: 11 | users: 12 | tenants: 2 13 | users_per_tenant: 2 14 | 
roles: 15 | - load-balancer_member 16 | network: 17 | start_cidr: "10.0.0.0/16" 18 | networks_per_tenant: 1 19 | quotas: 20 | neutron: 21 | network: -1 22 | port: -1 23 | router: -1 24 | subnet: -1 25 | floatingip: -1 26 | nova: 27 | instances: -1 28 | cores: -1 29 | ram: -1 30 | sla: 31 | failure_rate: 32 | max: {{ sla_max_failure }} 33 | -------------------------------------------------------------------------------- /ansible/browbeat/roles/cinder-workers/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Cinder handlers for browbeat adjustment 4 | # 5 | 6 | - name: unmanage cinder services 7 | become: true 8 | command: pcs resource unmanage {{ item }} 9 | with_items: 10 | - openstack-cinder-api 11 | ignore_errors: true 12 | when: pacemaker_controlled 13 | 14 | - name: restart cinder services 15 | become: true 16 | service: name={{ item }} state=restarted 17 | with_items: 18 | - openstack-cinder-api 19 | 20 | - name: manage cinder services 21 | become: true 22 | command: pcs resource manage {{ item }} 23 | with_items: 24 | - openstack-cinder-api 25 | ignore_errors: true 26 | when: pacemaker_controlled 27 | 28 | - name: cleanup cinder services 29 | become: true 30 | command: pcs resource cleanup {{ item }} 31 | with_items: 32 | - openstack-cinder-api 33 | ignore_errors: true 34 | when: pacemaker_controlled 35 | -------------------------------------------------------------------------------- /ansible/oooq/roles/grafana-dashboard-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Sets up Grafana dashboards for the system data. this must be run after 3 | # the overcloud setup because it checks the hosts file to determine what 4 | # hosts exist to be dasboarded 5 | 6 | - name: Setup Grafana Dashboards 7 | shell: 8 | "cd {{ ansible_env.HOME }}/browbeat/ansible; \ 9 | ansible-playbook -vvv -i hosts \ 10 | --extra-vars grafana_host={{ grafana_host_template }} \ 11 | --extra-vars grafana_username={{ grafana_username_template }} \ 12 | --extra-vars grafana_password={{ grafana_password_template }} \ 13 | --extra-vars dashboard_cloud_name={{ graphite_prefix_template }} \ 14 | install/grafana-dashboards.yml > {{ ansible_env.HOME }}/browbeat/results/dashboards.log" 15 | environment: 16 | ANSIBLE_SSH_ARGS: "-F {{ ansible_env.HOME }}/browbeat/ansible/ssh-config" 17 | when: "{{ grafana_enabled_template }}" 18 | -------------------------------------------------------------------------------- /rally/octavia/octavia-create-delete-loadbalancers.yml: -------------------------------------------------------------------------------- 1 | {% set sla_max_failure = sla_max_failure or 0 %} 2 | --- 3 | Octavia.create_and_delete_loadbalancers: 4 | - 5 | args: {} 6 | runner: 7 | type: "constant" 8 | times: {{ times }} 9 | concurrency: {{ concurrency }} 10 | context: 11 | users: 12 | tenants: 2 13 | users_per_tenant: 2 14 | roles: 15 | - load-balancer_member 16 | network: 17 | start_cidr: "10.0.0.0/16" 18 | networks_per_tenant: 1 19 | quotas: 20 | neutron: 21 | network: -1 22 | port: -1 23 | router: -1 24 | subnet: -1 25 | floatingip: -1 26 | nova: 27 | instances: -1 28 | cores: -1 29 | ram: -1 30 | sla: 31 | failure_rate: 32 | max: {{ sla_max_failure }} 33 | -------------------------------------------------------------------------------- /rally/octavia/octavia-create-update-loadbalancers.yml: -------------------------------------------------------------------------------- 1 | {% set 
sla_max_failure = sla_max_failure or 0 %}
2 | ---
3 | Octavia.create_and_update_loadbalancers:
4 |   -
5 |     args: {}
6 |     runner:
7 |       type: "constant"
8 |       times: {{ times }}
9 |       concurrency: {{ concurrency }}
10 |     context:
11 |       users:
12 |         tenants: 2
13 |         users_per_tenant: 2
14 |       roles:
15 |         - load-balancer_member
16 |       network:
17 |         start_cidr: "10.0.0.0/16"
18 |         networks_per_tenant: 1
19 |       quotas:
20 |         neutron:
21 |           network: -1
22 |           port: -1
23 |           router: -1
24 |           subnet: -1
25 |           floatingip: -1
26 |         nova:
27 |           instances: -1
28 |           cores: -1
29 |           ram: -1
30 |     sla:
31 |       failure_rate:
32 |         max: {{ sla_max_failure }}
33 | 
--------------------------------------------------------------------------------
/visualization/Neutron/visualization/NeutronNumberOfAPIWorkers.json:
--------------------------------------------------------------------------------
1 | {
2 |   "title": "Neutron Results: Number of api workers",
3 |   "visState": "{\"title\":\"Neutron Results: Number of api workers\",\"type\":\"pie\",\"params\":{\"shareYAxis\":true,\"addTooltip\":true,\"addLegend\":true,\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"software-metadata.software_details.openstack.config.neutron.openstack_neutron_api_workers\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
4 |   "uiStateJSON": "{}",
5 |   "description": "",
6 |   "version": 1,
7 |   "kibanaSavedObjectMeta": {
8 |     "searchSourceJSON": "{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"_type: result\",\"analyze_wildcard\":true}},\"filter\":[]}"
9 |   }
10 | }
11 | 
--------------------------------------------------------------------------------
/visualization/Performance-Dashboard/visualization/Times.json:
--------------------------------------------------------------------------------
1 | {
2 |   "title": "Browbeat Times",
3 |   "visState": "{\"title\":\"Times\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"scenario\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"4\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"rally_setup.kw.runner.times\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
4 |   "uiStateJSON": "{}",
5 |   "description": "",
6 |   "version": 1,
7 |   "kibanaSavedObjectMeta": {
8 |     "searchSourceJSON": "{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
9 |   }
10 | }
11 | 
--------------------------------------------------------------------------------
/rally/octavia/octavia-create-stats-show-loadbalancers.yml:
--------------------------------------------------------------------------------
1 | {% set sla_max_failure = sla_max_failure or 0 %}
2 | ---
3 | Octavia.create_and_stats_loadbalancers:
4 |   -
5 |     args: {}
6 |     runner:
7 |       type: "constant"
8 |       times: {{ times }}
9 |       concurrency: {{ concurrency }}
10 |     context:
11 |       users:
12 |         tenants: 2
13 |         users_per_tenant: 2
14 |       roles:
15 |         - load-balancer_member
16 |       network:
17 |         start_cidr: "10.0.0.0/16"
18 |         networks_per_tenant: 1
19 |       quotas:
20 |         neutron:
21 |           network: -1
22 |           port: -1
23 |           router: -1
24 |           subnet: -1
25 |           floatingip: -1
26 |         nova:
27 |           instances: -1
28 |           cores: -1
29 |           ram: -1
30 |     sla:
31 |       failure_rate:
32 |         max: {{ sla_max_failure }}
33 | 
--------------------------------------------------------------------------------
/ansible/logs/roles/httpd/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check that httpd is installed
3 |   stat:
4 |     path: /etc/httpd/conf/httpd.conf
5 |   register: httpd_config
6 | 
7 | - name: Set httpd log location (containerized)
8 |   set_fact:
9 |     httpd_logs: /var/log/containers/httpd
10 |   when: osp_version >= 12 and httpd_config.stat.exists
11 | 
12 | - name: Set httpd log location (non-containerized)
13 |   set_fact:
14 |     httpd_logs: /var/log/httpd
15 |   when: osp_version < 12 and httpd_config.stat.exists
16 | 
17 | - name: Check if log folder exists
18 |   stat:
19 |     path: "{{httpd_logs}}"
20 |   register: logs_path
21 | 
22 | - name: Copy logs to directory on host
23 |   synchronize:
24 |     src: "{{httpd_logs}}"
25 |     dest: /home/{{host_remote_user}}/{{ansible_hostname}}
26 |   delegate_to: "{{ inventory_hostname }}"
27 |   when: httpd_config.stat.exists and logs_path.stat.isdir is defined and logs_path.stat.isdir
28 | 
29 | 
--------------------------------------------------------------------------------
/rally/keystonebasic/create_tenant_with_users-cc.yml:
--------------------------------------------------------------------------------
1 | {% set users_per_tenant = users_per_tenant or 5 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | KeystoneBasic.create_tenant_with_users:
7 |   -
8 |     args:
9 |       users_per_tenant: {{users_per_tenant}}
10 |     context:
11 |       users:
12 |         project_domain: "default"
13 |         resource_management_workers: 30
14 |         tenants: 1
15 |         user_domain: "default"
16 |         users_per_tenant: 8
17 |     runner:
18 |       concurrency: {{concurrency}}
19 |       times: {{times}}
20 |       type: "constant"
21 |     sla:
22 |       max_avg_duration: {{sla_max_avg_duration}}
23 |       max_seconds_per_iteration: {{sla_max_seconds}}
24 |       failure_rate:
25 |         max: {{sla_max_failure}}
26 | 
--------------------------------------------------------------------------------
/rally/keystonebasic/create_user_set_enabled_and_delete-cc.yml:
--------------------------------------------------------------------------------
1 | {% set enabled_flag = enabled_flag or true %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | KeystoneBasic.create_user_set_enabled_and_delete:
7 |   -
8 |     args:
9 |       enabled: {{enabled_flag}}
10 |     context:
11 |       users:
12 |         project_domain: "default"
13 |         resource_management_workers: 30
14 |         tenants: 1
15 |         user_domain: "default"
16 |         users_per_tenant: 8
17 |     runner:
18 |       concurrency: {{concurrency}}
19 |       times: {{times}}
20 |       type: "constant"
21 |     sla:
22 |       max_avg_duration: {{sla_max_avg_duration}}
23 |       max_seconds_per_iteration: {{sla_max_seconds}}
24 |       failure_rate:
25 |         max: {{sla_max_failure}}
26 | 
--------------------------------------------------------------------------------
/rally/manila/create-share-and-extend.yaml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | ManilaShares.create_and_extend_share:
6 |   -
7 |     args:
8 |       share_type: {{share_type}}
9 |       share_proto: "{{ share_proto }}"
10 |       size: 1
11 |       new_size: 2
12 |     runner:
13 |       type: "constant"
14 |       times: {{ times }}
15 |       concurrency: {{ concurrency }}
16 |     context:
17 |       quotas:
18 |         manila:
19 |           shares: -1
20 |           gigabytes: -1
21 |       users:
22 |         tenants: 2
23 |         users_per_tenant: 1
24 |     sla:
25 |       max_avg_duration: {{ sla_max_avg_duration }}
26 |       max_seconds_per_iteration: {{ sla_max_seconds }}
27 |       failure_rate:
28 |         max: {{ sla_max_failure }}
29 | 
--------------------------------------------------------------------------------
/rally/manila/create-share-and-shrink.yaml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | ManilaShares.create_and_shrink_share:
6 |   -
7 |     args:
8 |       share_type: {{share_type}}
9 |       share_proto: "{{ share_proto }}"
10 |       size: 2
11 |       new_size: 1
12 |     runner:
13 |       type: "constant"
14 |       times: {{ times }}
15 |       concurrency: {{ concurrency }}
16 |     context:
17 |       quotas:
18 |         manila:
19 |           shares: -1
20 |           gigabytes: -1
21 |       users:
22 |         tenants: 2
23 |         users_per_tenant: 1
24 |     sla:
25 |       max_avg_duration: {{ sla_max_avg_duration }}
26 |       max_seconds_per_iteration: {{ sla_max_seconds }}
27 |       failure_rate:
28 |         max: {{ sla_max_failure }}
29 | 
--------------------------------------------------------------------------------
/rally/swift/list-objects-in-containers.yaml:
--------------------------------------------------------------------------------
1 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
2 | {% set sla_max_failure = sla_max_failure or 0 %}
3 | {% set sla_max_seconds = sla_max_seconds or 60 %}
4 | ---
5 | SwiftObjects.list_objects_in_containers:
6 |   -
7 |     runner:
8 |       type: "constant"
9 |       times: {{ times }}
10 |       concurrency: {{ concurrency }}
11 |     context:
12 |       users:
13 |         tenants: 1
14 |         users_per_tenant: 1
15 |       roles:
16 |         - "admin"
17 |       swift_objects:
18 |         containers_per_tenant: {{ containers_per_tenant }}
19 |         objects_per_container: {{ objects_per_container }}
20 |         object_size: {{ object_size }}
21 |     sla:
22 |       max_avg_duration: {{ sla_max_avg_duration }}
23 |       max_seconds_per_iteration: {{ sla_max_seconds }}
24 |       failure_rate:
25 |         max: {{ sla_max_failure }}
26 | 
--------------------------------------------------------------------------------
/ansible/oooq/roles/collectd-undercloud/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Template undercloud only hosts file
2 |   template:
3 |     "src=hosts.j2 \
4 |     dest={{ ansible_env.HOME }}/browbeat/ansible/hosts"
5 | 
6 | - name: Template ssh-config
7 |   template:
8 |     "src=ssh-config.j2 \
9 |     dest={{ ansible_env.HOME }}/browbeat/ansible/ssh-config"
10 | 
11 | - name: Install CollectD
12 |   shell:
13 |     "cd {{ ansible_env.HOME }}/browbeat/ansible; \
14 |     ansible-playbook -i hosts -c local \
15 |     --extra-vars graphite_host={{ graphite_host_template }} \
16 |     --extra-vars graphite_prefix={{ graphite_prefix_template }} \
17 |     install/collectd.yml \
18 |     > {{ ansible_env.HOME }}/browbeat/results/collecd_install.log"
19 |   register: collectd_install
20 |   until: collectd_install.rc == 0
21 |   retries: 2
22 |   delay: 60
23 |   environment:
24 |     ANSIBLE_SSH_ARGS: "-F {{ ansible_env.HOME }}/browbeat/ansible/ssh-config"
25 | 
--------------------------------------------------------------------------------
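A note on the Rally scenario files above: they are Jinja2 templates rather than plain YAML. The "{% set var = var or <default> %}" lines give the SLA knobs fallback values, while times, concurrency and scenario-specific settings such as containers_per_tenant have to be supplied when the template is rendered. The sketch below is illustrative only (it is not Browbeat's or Rally's actual rendering path) and simply shows how the swift scenario above expands once values are passed in:

# Illustrative sketch only: render one of the Rally scenario templates by hand
# to see how the Jinja2 defaults and the required values combine.
import jinja2
import yaml

with open("rally/swift/list-objects-in-containers.yaml") as f:
    template = jinja2.Template(f.read())

rendered = template.render(
    times=10,                  # total iterations
    concurrency=5,             # iterations run in parallel
    containers_per_tenant=2,   # swift_objects context settings
    objects_per_container=10,
    object_size=1024,
)  # sla_max_* are omitted, so the '{% set ... or ... %}' defaults apply

scenario = yaml.safe_load(rendered)
print(scenario["SwiftObjects.list_objects_in_containers"][0]["runner"])
# {'type': 'constant', 'times': 10, 'concurrency': 5}

--------------------------------------------------------------------------------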
/visualization/Performance-Dashboard/visualization/Concurrency.json:
--------------------------------------------------------------------------------
1 | {
2 |   "title": "Browbeat Concurrency",
3 |   "visState": "{\"title\":\"Times\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"scenario\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"4\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"rally_setup.kw.runner.concurrency\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
4 |   "uiStateJSON": "{}",
5 |   "description": "",
6 |   "version": 1,
7 |   "kibanaSavedObjectMeta": {
8 |     "searchSourceJSON": "{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
9 |   }
10 | }
11 | 
--------------------------------------------------------------------------------
/visualization/Performance-Dashboard/visualization/RallyScenario.json:
--------------------------------------------------------------------------------
1 | {
2 |   "title": "Browbeat Rally Scenario",
3 |   "visState": "{\"title\":\"Times\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"id\":\"1\",\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"scenario\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"4\",\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"rally_setup.kw.runner.times\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
4 |   "uiStateJSON": "{}",
5 |   "description": "",
6 |   "version": 1,
7 |   "kibanaSavedObjectMeta": {
8 |     "searchSourceJSON": "{\"index\":\"[browbeat-rally-]YYYY.MM.DD\",\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}},\"filter\":[]}"
9 |   }
10 | }
11 | 
--------------------------------------------------------------------------------
/browbeat/schema/rally.yml:
--------------------------------------------------------------------------------
1 | # This schema defines how a Rally workload is formatted
2 | name: Rally workload schema
3 | type: map
4 | mapping:
5 |   # Required items to be a Rally workload
6 |   concurrency:
7 |     type: seq
8 |     required: True
9 |     sequence:
10 |       - type: int
11 |   enabled:
12 |     type: bool
13 |     required: True
14 |   name:
15 |     type: str
16 |     required: True
17 |   times:
18 |     type: int
19 |     required: True
20 |   type:
21 |     type: str
22 |     required: True
23 |     enum: ["rally"]
24 |   rally_deployment:
25 |     type: str
26 |     required: False
27 |   scenarios:
28 |     type: seq
29 |     sequence:
30 |       - type: map
31 |         allowempty: True
32 |         mapping:
33 |           enabled:
34 |             type: bool
35 |             required: True
36 |           file:
37 |             type: str
38 |             required: True
39 |           name:
40 |             type: str
41 |             required: True
42 | 
--------------------------------------------------------------------------------
/ansible/install/roles/rally/files/browbeat-rally/browbeat_rally/db/models.py:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | #     http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 | import sqlalchemy as sa
13 | 
14 | from rally.common.db import models
15 | 
16 | 
17 | BASE = models.BASE
18 | 
19 | 
20 | class RallyLock(BASE, models.RallyBase):
21 |     __tablename__ = "rallylocks"
22 | 
23 |     lock_uuid = sa.Column(sa.String(36), primary_key=True, nullable=False)
24 | 
--------------------------------------------------------------------------------
/rally/rally-plugins/neutron/router_subnet_create_delete.yml:
--------------------------------------------------------------------------------
1 | {% set num_networks = num_networks or 1 %}
2 | {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
3 | {% set sla_max_failure = sla_max_failure or 0 %}
4 | {% set sla_max_seconds = sla_max_seconds or 60 %}
5 | ---
6 | BrowbeatPlugin.router_subnet_create_delete:
7 |   -
8 |     args:
9 |       network_create_args: {}
10 |       num_networks: {{num_networks}}
11 |     runner:
12 |       concurrency: {{concurrency}}
13 |       times: {{times}}
14 |       type: "constant"
15 |     context:
16 |       users:
17 |         tenants: 1
18 |         users_per_tenant: 8
19 |       quotas:
20 |         neutron:
21 |           network: -1
22 |           port: -1
23 |           router: -1
24 |           subnet: -1
25 |     sla:
26 |       max_avg_duration: {{sla_max_avg_duration}}
27 |       max_seconds_per_iteration: {{sla_max_seconds}}
28 |       failure_rate:
29 |         max: {{sla_max_failure}}
30 | 
--------------------------------------------------------------------------------
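The browbeat/schema/rally.yml schema shown earlier is written in pykwalify-style syntax and describes a single Rally workload entry from a Browbeat config: it requires name, enabled, type: rally, an integer times, a list of integer concurrency values, and a scenarios list whose items each carry name, enabled and file. A minimal, hypothetical sketch of validating such an entry follows; the workload content is invented purely to illustrate the required keys, and the pykwalify call assumes that library is the validator in use:

# Hypothetical sketch: check a Rally workload stanza against the
# browbeat/schema/rally.yml schema with pykwalify. The stanza below is an
# invented example carrying every key the schema marks as required.
import yaml
from pykwalify.core import Core

workload = yaml.safe_load("""
name: example-rally-workload
enabled: true
type: rally
times: 10
concurrency:
  - 8
  - 16
scenarios:
  - name: swift-list-objects
    enabled: true
    file: rally/swift/list-objects-in-containers.yaml
""")

# validate() raises a SchemaError if, for example, times were a string or an
# entry in concurrency were not an integer.
Core(source_data=workload,
     schema_files=["browbeat/schema/rally.yml"]).validate(raise_exception=True)

--------------------------------------------------------------------------------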