├── .gitattributes ├── .gitignore ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── README.md ├── Vagrantfile ├── ansible.cfg ├── bin ├── install-osx ├── install-ubuntu └── shells ├── bootstrap.sh ├── branch-validation.yml ├── coding.md ├── doc ├── dev-test.md ├── endpoints.md ├── ironic.md ├── novadocker.md ├── todo.md └── vagrant.md ├── envs ├── example │ ├── allinone-centos │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── hosts │ │ ├── playbooks │ │ └── vagrant.yml │ ├── allinone-rhel │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── hosts │ │ ├── playbooks │ │ └── vagrant.yml │ ├── allinone │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── hosts │ │ ├── playbooks │ │ └── vagrant.yml │ ├── ci-ceph-redhat │ │ ├── group_vars │ │ │ ├── all.yml │ │ │ ├── ceph_osds.yml │ │ │ ├── ceph_osds_ssd.yml │ │ │ └── compute.yml │ │ ├── heat_stack.yml │ │ └── hosts │ ├── ci-ceph-swift-rhel │ │ ├── group_vars │ │ │ ├── all.yml │ │ │ ├── ceph_osds_ssd.yml │ │ │ └── compute.yml │ │ ├── heat_stack.yml │ │ └── hosts │ ├── ci-ceph-swift-ubuntu │ │ ├── group_vars │ │ │ ├── all.yml │ │ │ ├── ceph_osds_ssd.yml │ │ │ └── compute.yml │ │ ├── heat_stack.yml │ │ └── hosts │ ├── ci-ceph-ubuntu │ │ ├── group_vars │ │ │ ├── all.yml │ │ │ ├── ceph_osds_ssd.yml │ │ │ └── compute.yml │ │ ├── heat_stack.yml │ │ └── hosts │ ├── ci-full-centos │ │ ├── group_vars │ │ │ ├── all.yml │ │ │ └── compute.yml │ │ ├── heat_stack.yml │ │ └── hosts │ ├── ci-full-rhel │ │ ├── group_vars │ │ │ ├── all.yml │ │ │ ├── ceph_osds.yml │ │ │ └── compute.yml │ │ ├── heat_stack.yml │ │ └── hosts │ ├── ci-full-ubuntu │ │ ├── group_vars │ │ │ ├── all.yml │ │ │ └── compute.yml │ │ ├── heat_stack.yml │ │ └── hosts │ ├── ci │ │ ├── group_vars │ │ │ ├── all.yml │ │ │ └── compute.yml │ │ └── hosts │ ├── common │ │ └── securitygroup.yaml │ ├── defaults-2.0.yml │ ├── ironic │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── hosts │ │ ├── playbooks │ │ └── vagrant.yml │ ├── jeofd │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── hosts │ │ ├── playbooks │ │ └── vagrant.yml │ ├── mirrors │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── hosts │ │ ├── playbooks │ │ └── vagrant.yml │ ├── novadocker │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── host_vars │ │ │ └── compute1.yml │ │ ├── hosts │ │ └── playbooks │ ├── standard │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── hosts │ │ ├── playbooks │ │ └── vagrant.yml │ ├── swift-rhel │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── hosts │ │ ├── playbooks │ │ │ ├── host_vars │ │ │ │ ├── allinone │ │ │ │ ├── compute1 │ │ │ │ ├── controller1 │ │ │ │ └── controller2 │ │ │ ├── predeploy.yml │ │ │ └── vars │ │ │ │ └── main.yml │ │ ├── ring_definition.yml │ │ └── vagrant.yml │ ├── swift │ │ ├── group_vars │ │ │ └── all.yml │ │ ├── hosts │ │ ├── ring_definition.yml │ │ └── vagrant.yml │ └── vagrant.yml └── vagrant │ └── cinder │ ├── group_vars │ └── all.yml │ ├── hosts │ └── playbooks ├── library ├── apache2_site ├── ceph_bcache.py ├── ceph_pool.py ├── cinder_volume_group.py ├── cinder_volume_type.py ├── iptables.py ├── keystone_federation_mapping.py ├── keystone_federation_protocol.py ├── keystone_identity_provider.py ├── keystone_service.py ├── keystone_service_provider.py ├── keystone_user.py ├── logrotate.py ├── lvol.py ├── modprobe.py ├── mongodb_replication.py ├── neutron_network.py ├── neutron_router.py ├── neutron_router_gateway.py ├── neutron_router_interface.py ├── os_floating_ip.py ├── os_network.py ├── os_nova_host_agg.py ├── os_nova_host_agg_host.py ├── ovs_bridge.py ├── rabbitmq_policy.py ├── rabbitmq_user.py ├── sefcontext.py ├── sensu_check.py 
├── sensu_check_dict.py ├── sensu_metrics_check.py ├── sensu_process_check.py ├── service.py ├── swift_disk.py ├── swift_ring.py ├── systemd_service.py └── upstart_service.py ├── playbooks ├── adjust-cinder-backend.yml ├── check_compute_services.yml ├── ci-allinone │ ├── tasks │ │ ├── create.yml │ │ ├── delete.yml │ │ ├── keypair.yml │ │ └── pre-deploy.yml │ ├── templates │ │ └── etc │ │ │ └── network │ │ │ ├── interfaces │ │ │ └── interfaces.d │ │ │ ├── br-ex.cfg │ │ │ ├── eth0-controllers.cfg │ │ │ └── eth0.cfg │ └── vars │ │ └── main.yml ├── ci-ceph-redhat │ └── tasks │ │ └── pre-deploy.yml ├── ci-ceph-swift-rhel │ ├── tasks │ │ └── pre-deploy.yml │ └── templates │ │ └── usr │ │ └── sbin │ │ └── lshw ├── ci-ceph-swift-ubuntu │ ├── tasks │ │ └── pre-deploy.yml │ └── templates │ │ ├── etc │ │ └── network │ │ │ ├── interfaces │ │ │ └── interfaces.d │ │ │ ├── br-ex.cfg │ │ │ ├── eth0-controllers.cfg │ │ │ └── eth0.cfg │ │ └── usr │ │ └── bin │ │ └── lshw ├── ci-full-centos │ └── tasks │ │ └── pre-deploy.yml ├── ci-full-rhel │ └── tasks │ │ └── pre-deploy.yml ├── ci-full-ubuntu │ ├── tasks │ │ └── pre-deploy.yml │ └── templates │ │ └── etc │ │ └── network │ │ ├── interfaces │ │ └── interfaces.d │ │ ├── br-ex.cfg │ │ ├── eth0-controllers.cfg │ │ └── eth0.cfg ├── cinder-upgrade.yml ├── db-backup.yml ├── deploy-nimble-cinder.yml ├── dist-upgrade.yml ├── fix-mysql-gcache.yml ├── fix-nova-user.yml ├── install_deb_package.yml ├── logging.yml ├── neutron-l3-noha-to-ha.yml ├── neutron-ml2-vlan-to-vxlan-migration.yml ├── neutron-ml2-vxlan-kernel-prerequisite.yml ├── neutron-openvswitch-to-ml2-vlan-migration.yml ├── neutron-openvswitch-to-ml2-vlan-migration │ └── files │ │ └── migrate_to_ml2.py ├── reboot_cluster.yml ├── remove_controller_services │ ├── tasks │ │ └── main.yml │ └── vars │ │ └── services.yml ├── restart_sensu_client.yml ├── rhn_unsubscribe.yml ├── run-openstack-ansible-security.yml ├── security-patches.yml ├── ssl-cert.yml ├── tests │ └── tasks │ │ ├── all.yml │ │ ├── aodh.yml │ │ ├── barbican.yml │ │ ├── ceilometer.yml │ │ ├── ceph.yml │ │ ├── cleanup.yml │ │ ├── controller.yml │ │ ├── integration.yml │ │ ├── ironic.yml │ │ ├── lb_stack.yml │ │ ├── lbaas.yml │ │ ├── main.yml │ │ ├── network.yml │ │ ├── rally.yml │ │ ├── rally │ │ ├── bbc-cloud-validate-ceph.yml │ │ ├── bbc-cloud-validate.yml │ │ └── run.sh │ │ ├── swift.yml │ │ └── workaround_network.yml ├── unit.yml ├── update-provider-admin-password.yml ├── upgrade_precise_for_docker.yml └── vagrant │ ├── host_vars │ ├── allinone │ ├── compute1 │ ├── controller1 │ └── controller2 │ ├── predeploy.yml │ └── vars │ └── main.yml ├── plugins ├── callbacks │ └── timestamp.py ├── connection │ └── sshbb.py ├── filters │ ├── sensu_filters.py │ └── ursula_filters.py ├── strategy │ ├── bb_free.py │ └── bb_linear.py └── vars │ └── default_vars.py ├── requirements.txt ├── roles ├── aodh │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ └── etc │ │ └── aodh │ │ ├── aodh.conf │ │ ├── api_paste.ini │ │ └── policy.json ├── apache │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ └── main.yml │ └── templates │ │ ├── etc │ │ └── httpd │ │ │ └── conf.modules.d │ │ │ └── 43-mod_proxy_uwsgi.conf │ │ └── httpd.conf ├── apt-repos ├── audit │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ 
│ └── etc │ │ ├── audisp │ │ └── plugins.d │ │ │ └── syslog.conf │ │ └── audit │ │ └── rules.d │ │ └── audit_default.rules ├── barbican │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ └── etc │ │ ├── barbican │ │ ├── api_audit_map.conf │ │ ├── barbican-api-paste.ini │ │ ├── barbican.conf │ │ └── policy.json │ │ ├── init │ │ └── barbican.conf │ │ └── uwsgi │ │ └── barbican-vassals-api.ini ├── branch-validation │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ ├── api.yml │ │ ├── check_instances_connectivity.yml │ │ ├── ha.yml │ │ ├── main.yml │ │ ├── node_interconnectivity.yml │ │ └── node_internet_connectivity.yml ├── ceilometer-common │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── ceilometer │ │ ├── api_paste.ini │ │ ├── ceilometer.conf │ │ ├── ceilometer_api_audit_map.conf │ │ ├── event_definitions.yaml │ │ ├── event_pipeline.yaml │ │ ├── pipeline.yaml │ │ └── policy.json ├── ceilometer-control │ ├── meta │ │ └── main.yml │ └── tasks │ │ ├── main.yml │ │ └── monitoring.yml ├── ceilometer-data │ ├── meta │ │ └── main.yml │ └── tasks │ │ ├── main.yml │ │ └── monitoring.yml ├── ceph-client │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── ceph-compute │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── ceph │ │ └── secret.xml ├── ceph-config │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── ceph │ │ └── ceph.conf ├── ceph-defaults │ └── defaults │ │ └── main.yml ├── ceph-monitor │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ ├── monitoring.yml │ │ └── restart_flags.yml │ └── templates │ │ └── etc │ │ └── bbg-ceph-utils │ │ └── ceph-utils.conf ├── ceph-osd │ ├── meta │ │ └── main.yml │ └── tasks │ │ ├── activate_fullssd.yml │ │ ├── bcache.yml │ │ ├── main.yml │ │ ├── pool_names.yml │ │ ├── standard.yml │ │ └── system_tuning.yml ├── ceph-update │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── cinder-common │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ └── main.yml │ └── templates │ │ └── etc │ │ ├── cinder │ │ ├── api-paste.ini │ │ ├── cinder.conf │ │ ├── cinder.encryption.conf │ │ ├── cinder_api_audit_map.conf │ │ ├── policy.json │ │ └── rootwrap.conf │ │ └── sudoers.d │ │ └── cinder ├── cinder-control │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ ├── monitoring.yml │ │ ├── nimble-monitoring.yml │ │ ├── v7k-monitoring.yml │ │ └── v7k_integration.yml │ └── templates │ │ └── etc │ │ ├── collectd │ │ └── plugins │ │ │ └── cinder.conf │ │ └── v7k │ │ └── ssh │ │ ├── cinder │ │ └── id_rsa │ │ └── monitoring │ │ └── id_rsa ├── cinder-data │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── ceph_integration.yml │ │ ├── lvm_integration.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ └── etc │ │ └── tgt │ │ ├── conf.d │ │ └── cinder_tgt.conf │ │ └── targets.conf ├── cleanup │ ├── handlers │ │ └── main.yml │ └── tasks │ │ ├── logging.yml │ │ └── main.yml ├── client │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── root │ │ └── stackrc │ │ └── usr │ │ └── local │ │ └── bin │ │ └── migrate_neutron_services 
├── collectd-client │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── collectd │ │ ├── collectd.conf │ │ └── plugins │ │ ├── amqp.conf │ │ ├── logfile.conf │ │ ├── sensors.conf │ │ └── system.conf ├── collectd-plugin │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── common │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── etc │ │ │ └── sysctl.d │ │ │ │ └── 10-disable-ipv6-default.conf │ │ └── usr │ │ │ └── local │ │ │ └── bin │ │ │ └── apt-get-update.sh │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── apt-update-cron.yml │ │ ├── audit-logging.yml │ │ ├── chrony.yml │ │ ├── disable-swap.yml │ │ ├── hwraid-ppc.yml │ │ ├── hwraid.yml │ │ ├── ipmi.yml │ │ ├── iptables.yml │ │ ├── kernel-tuning.yml │ │ ├── main.yml │ │ ├── monitoring.yml │ │ ├── networking.yml │ │ ├── ntpd.yml │ │ ├── password-policy.yml │ │ ├── python.yml │ │ ├── remove-default-users.yml │ │ ├── ruby.yml │ │ ├── serial-console.yml │ │ ├── serverspec.yml │ │ ├── ssh.yml │ │ ├── ssl.yml │ │ ├── system-file-permissions.yml │ │ ├── system-tools.yml │ │ ├── ucarp.yml │ │ └── ufw.yml │ └── templates │ │ ├── etc │ │ ├── apt │ │ │ ├── apt.conf.d │ │ │ │ └── 01proxy │ │ │ └── sources.list │ │ ├── chrony.conf │ │ ├── gemrc │ │ ├── hosts │ │ ├── init │ │ │ └── tty_console.conf │ │ ├── modprobe.d │ │ │ └── conntrack.conf │ │ ├── ntp.conf │ │ ├── pip.conf │ │ ├── pydistutils.cfg │ │ ├── security │ │ │ └── pwquality.conf │ │ ├── sensu │ │ │ └── stackrc │ │ ├── serverspec │ │ │ └── os_spec.rb │ │ ├── sshd_config │ │ ├── sysctl.d │ │ │ ├── 60-kernel-tuning.conf │ │ │ ├── 60-netfilter-tuning.conf │ │ │ └── 60-tcp-tuning.conf │ │ ├── timezone │ │ ├── ucarp │ │ │ └── vip-nnn.conf │ │ ├── update-motd.d │ │ │ └── 90-ursula-motd │ │ └── ursula-release │ │ ├── monitoring │ │ ├── config.json │ │ ├── sensu-client-cert.pem │ │ ├── sensu-client-key.pem │ │ ├── sensu-client-path │ │ └── sensu-sudoers │ │ ├── openstack.cacrt │ │ ├── openstack.crt │ │ ├── ssh-private-key │ │ └── usr │ │ └── local │ │ └── libexec │ │ ├── ucarp-vip-down │ │ └── ucarp-vip-up ├── current_symlink │ └── tasks │ │ └── main.yml ├── docker │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── endpoints │ └── defaults │ │ └── main.yml ├── glance │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── ceph_integration.yml │ │ ├── image-sync.yml │ │ ├── logging.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ └── etc │ │ ├── collectd │ │ └── plugins │ │ │ └── glance.conf │ │ ├── cron.d │ │ └── glance-image-sync │ │ ├── glance │ │ ├── .my.cnf │ │ ├── glance-api-paste.ini │ │ ├── glance-api.conf │ │ ├── glance-registry-paste.ini │ │ ├── glance-registry.conf │ │ ├── glance-swift-store.conf │ │ ├── glance_api_audit_map.conf │ │ └── policy.json │ │ └── rsyncd.conf ├── haproxy │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ ├── etc │ │ ├── collectd │ │ │ └── plugins │ │ │ │ └── haproxy.conf │ │ ├── default │ │ │ └── haproxy │ │ ├── haproxy │ │ │ ├── haproxy_openstack.cfg │ │ │ ├── haproxy_swift.cfg │ │ │ └── openstack.pem │ │ ├── init.d │ │ │ └── haproxy │ │ └── rsyslog.d │ │ │ └── haproxy.conf │ │ └── usr │ │ └── lib │ │ └── 
tmpfiles.d │ │ └── haproxy.conf ├── heat │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ └── etc │ │ └── heat │ │ ├── api-paste.ini │ │ ├── heat.conf │ │ ├── heat_api_audit_map.conf │ │ ├── heat_stack_domain.conf │ │ └── policy.json ├── horizon │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ └── etc │ │ ├── apache2 │ │ └── sites-available │ │ │ └── openstack_dashboard.conf │ │ ├── httpd │ │ └── conf.d │ │ │ └── openstack-dashboard.conf │ │ └── openstack-dashboard │ │ ├── __init__.py │ │ └── local_settings.py ├── inspec │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── inspec │ │ └── host-controls │ │ ├── attributes.yml │ │ ├── controls │ │ └── control.rb │ │ └── inspec.yml ├── iptables │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── network │ │ ├── if-up.d │ │ └── iptables │ │ └── iptables-firewall ├── ipv6ra │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── dnsmasq.d │ │ └── internal-ipv6-ra.conf ├── ironic-common │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ └── main.yml │ └── templates │ │ └── etc │ │ ├── ironic │ │ ├── ironic.conf │ │ ├── policy.json │ │ └── rootwrap.conf │ │ └── sudoers.d │ │ └── ironic ├── ironic-control │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── ironic-data │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ ├── apache2 │ │ └── sites-available │ │ │ └── ironic-ipxe.conf │ │ ├── default │ │ └── tftpd-hpa │ │ └── ironic │ │ ├── boot.ipxe │ │ └── map-file ├── keystone-defaults │ ├── defaults │ │ └── main.yml │ └── meta │ │ └── main.yml ├── keystone-setup │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ ├── federation.yml │ │ ├── ldap.yml │ │ └── main.yml ├── keystone │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── k2k-idp.yml │ │ ├── ldap.yml │ │ ├── logging.yml │ │ ├── main.yml │ │ ├── monitoring.yml │ │ ├── openidc.yml │ │ └── saml.yml │ └── templates │ │ ├── etc │ │ ├── apache2 │ │ │ ├── openidc │ │ │ │ └── metadata │ │ │ │ │ ├── template.client │ │ │ │ │ ├── template.conf │ │ │ │ │ └── template.provider │ │ │ └── sites-available │ │ │ │ └── keystone.conf │ │ ├── collectd │ │ │ └── plugins │ │ │ │ └── keystone.conf │ │ ├── cron.d │ │ │ └── drop-expired-keystone-tokens │ │ ├── init │ │ │ └── keystone.conf │ │ ├── keystone │ │ │ ├── domains │ │ │ │ └── keystone.domain.conf │ │ │ ├── keystone-paste.ini │ │ │ ├── keystone-saml.crt │ │ │ ├── keystone-saml.pem │ │ │ ├── keystone.conf │ │ │ ├── policy.json │ │ │ ├── sso_callback_template.html │ │ │ └── uwsgi │ │ │ │ ├── keystone-admin.ini │ │ │ │ └── keystone-main.ini │ │ └── shibboleth │ │ │ ├── attribute-map.xml │ │ │ ├── idp_metadata.xml │ │ │ ├── shibboleth2.xml │ │ │ ├── sp-cert.pem │ │ │ └── sp-key.pem │ │ └── usr │ │ └── lib │ │ └── tmpfiles.d │ │ └── openstack-keystone.conf ├── logging-config │ ├── handlers │ │ └── main.yml │ ├── tasks 
│ │ ├── filebeat.yml │ │ ├── logstash.yml │ │ └── main.yml │ └── templates │ │ └── etc │ │ ├── filebeat │ │ └── filebeat.d │ │ │ └── template.yml │ │ └── logstash-forwarder.d │ │ └── template.conf ├── logging │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── filebeat.yml │ │ ├── logstash.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ ├── etc │ │ ├── filebeat │ │ │ └── filebeat.yml │ │ ├── init │ │ │ └── logstash-forwarder.conf │ │ ├── logstash-forwarder.d │ │ │ └── main.conf │ │ └── rsyslog.d │ │ │ ├── 20-ufw.conf │ │ │ ├── 50-default.conf │ │ │ └── 60-remote-syslog.conf │ │ └── logging-forward.crt ├── magnum │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── magnum │ │ ├── api-paste.ini │ │ ├── magnum.conf │ │ └── policy.json ├── manage-disks │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── memcached │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ ├── etc │ │ └── sysconfig │ │ │ └── memcached │ │ └── memcached.conf ├── mongodb-arbiter │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── mongodb-common │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ └── etc │ │ └── mongodb │ │ └── mongod.conf ├── mongodb-server │ ├── meta │ │ └── main.yml │ └── tasks │ │ ├── auth_initialization.yml │ │ ├── main.yml │ │ └── primary.yml ├── monitoring-common │ ├── defaults │ │ └── main.yml │ └── handlers │ │ └── main.yml ├── neutron-common │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ └── main.yml │ └── templates │ │ ├── etc │ │ ├── modprobe.d │ │ │ └── blacklist-openvswitch.conf │ │ ├── neutron │ │ │ ├── api-paste.ini │ │ │ ├── dhcp_agent.ini │ │ │ ├── l3_agent.ini │ │ │ ├── metadata_agent.ini │ │ │ ├── neutron.conf │ │ │ ├── neutron_api_audit_map.conf │ │ │ ├── neutron_lbaas.conf │ │ │ ├── plugins │ │ │ │ ├── ml2 │ │ │ │ │ └── ml2_plugin.ini │ │ │ │ └── openvswitch │ │ │ │ │ └── ovs_neutron_plugin.ini │ │ │ ├── policy.json │ │ │ ├── rootwrap-ursula.d │ │ │ │ └── ursula.filters │ │ │ └── rootwrap.conf │ │ └── sudoers.d │ │ │ └── neutron │ │ ├── opt │ │ └── stack │ │ │ └── neutron │ │ │ └── fix-ovs-klm-version-detection.patch │ │ └── usr │ │ └── local │ │ └── bin │ │ ├── neutron-reset │ │ ├── neutron-restart-all │ │ ├── neutron-start-all │ │ └── neutron-stop-all ├── neutron-control │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ ├── check-neutron-l3-routers.sh │ │ └── etc │ │ └── collectd │ │ └── plugins │ │ └── neutron.conf ├── neutron-data-network │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── dnsmasq.yml │ │ ├── igmp-router.yml │ │ ├── ipchanged.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ ├── etc │ │ ├── dnsmasq.conf │ │ ├── init │ │ │ └── ipchanged.conf │ │ ├── ipchanged │ │ │ ├── add_floating_ip │ │ │ └── add_internal_floating_ip │ │ ├── neutron │ │ │ ├── lbaas_templates │ │ │ │ └── haproxy_base.j2 │ │ │ └── services │ │ │ │ └── loadbalancer │ │ │ │ └── haproxy │ │ │ │ └── lbaas_agent.ini │ │ └── xorp │ │ │ 
└── config.boot │ │ └── usr │ │ └── local │ │ ├── libexec │ │ ├── ucarp-vip-down │ │ └── ucarp-vip-up │ │ └── sbin │ │ └── ipchanged ├── neutron-data │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ └── etc │ │ ├── cron.daily │ │ └── cleanup-neutron-interfaces │ │ └── neutron │ │ └── plugins │ │ ├── ml2 │ │ └── ml2_plugin_dataplane.ini │ │ └── openvswitch │ │ └── ovs_neutron_plugin.ini ├── nova-common │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── patches │ │ │ └── availability_zones.py.patch │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ ├── main.yml │ │ └── selinux.yml │ └── templates │ │ └── etc │ │ ├── nova │ │ ├── api-paste.ini │ │ ├── nova.conf │ │ ├── nova_api_audit_map.conf │ │ ├── policy.json │ │ └── rootwrap.conf │ │ └── sudoers.d │ │ └── nova ├── nova-control │ ├── files │ │ └── nova-quota-sync │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ ├── monitoring.yml │ │ └── novnc.yml │ └── templates │ │ ├── etc │ │ ├── collectd │ │ │ └── plugins │ │ │ │ └── nova.conf │ │ ├── init │ │ │ └── nova-placement-api.conf │ │ └── nova │ │ │ └── uwsgi │ │ │ └── placement.ini │ │ └── usr │ │ └── lib │ │ └── tmpfiles.d │ │ └── openstack-nova-placement-api.conf ├── nova-data │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── docker.yml │ │ ├── libvirt.yml │ │ ├── main.yml │ │ ├── monitoring.yml │ │ ├── ssh.yml │ │ └── v7k.yml │ └── templates │ │ ├── etc │ │ ├── machine-id │ │ ├── modprobe.d │ │ │ └── kvm-nested.conf │ │ ├── multipath.conf │ │ └── nova │ │ │ └── nova.cinder_encryption.conf │ │ └── var │ │ └── lib │ │ └── nova │ │ ├── bin │ │ └── verify-ssh │ │ └── ssh │ │ ├── authorized_keys │ │ └── known_hosts ├── openstack-database │ └── tasks │ │ └── main.yml ├── openstack-distro │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── main.yml │ │ └── osp.yml ├── openstack-firewall │ └── tasks │ │ └── main.yml ├── openstack-meta │ └── defaults │ │ └── main.yml ├── openstack-network │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── rhel_neutron_external_interface.cfg │ │ └── ubuntu_neutron_external_interface.cfg ├── openstack-package │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── openstack-setup │ ├── meta │ │ └── main.yml │ └── tasks │ │ ├── cinder.yml │ │ ├── computes.yml │ │ ├── dispersion.yml │ │ ├── flavors.yml │ │ ├── images.yml │ │ ├── main.yml │ │ ├── networks.yml │ │ └── users.yml ├── openstack-source │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── constraints.txt │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── percona-arbiter │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ ├── default │ │ └── garbd │ │ └── init.d │ │ └── percona-xtradb-cluster-garbd-2.x ├── percona-backup │ ├── tasks │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ └── percona-xtrabackup.sh ├── percona-common │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── percona-server │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ ├── etc │ │ ├── collectd │ │ │ └── plugins │ │ │ │ └── mysql.conf │ │ ├── default │ │ │ └── mysql │ │ ├── my.cnf │ │ └── mysql │ │ │ └── conf.d │ │ │ ├── bind-inaddr-any.cnf │ │ │ ├── low_memory.cnf │ │ │ ├── 
replication.cnf │ │ │ ├── tuning.cnf │ │ │ └── utf8.cnf │ │ └── root │ │ └── .my.cnf ├── preflight-checks │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ ├── ceph.yml │ │ ├── check_items.yml │ │ ├── main.yml │ │ ├── network.yml │ │ ├── neutron.yml │ │ ├── subnet.yml │ │ └── v7k.yml ├── rabbitmq │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── cluster.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ ├── etc │ │ ├── collectd │ │ │ └── plugins │ │ │ │ └── rabbitmq.conf │ │ ├── default │ │ │ └── rabbitmq-server │ │ ├── rabbitmq │ │ │ ├── rabbitmq-env.conf │ │ │ └── rabbitmq.config │ │ └── security │ │ │ └── limits.d │ │ │ └── 10-rabbitmq.conf │ │ └── var │ │ └── lib │ │ └── rabbitmq │ │ └── erlang.cookie ├── repos │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── apt.yml │ │ ├── main.yml │ │ └── yum.yml ├── rhn-subscription │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── security_errata │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── detect_CVE-2015-0235 │ ├── handlers │ │ └── main.yml │ └── tasks │ │ ├── CVE-2015-0235.yml │ │ ├── CVE-2015-7547.yml │ │ └── main.yml ├── sensu-check │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── serverspec │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── serverspec │ │ ├── Rakefile │ │ └── spec │ │ └── spec_helper.rb ├── stop-services │ └── tasks │ │ └── main.yml ├── swift-account │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── swift │ │ └── account-server.conf ├── swift-common │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── etc │ │ │ └── rsyslog.d │ │ │ ├── 10-swift-udp.conf │ │ │ └── 49-swift.conf │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── logging.yml │ │ ├── main.yml │ │ └── monitoring.yml │ └── templates │ │ ├── check-swift-dispersion.sh │ │ ├── etc │ │ ├── rsyncd.conf │ │ ├── sudoers.d │ │ │ └── swiftops │ │ ├── swift │ │ │ ├── dispersion.conf │ │ │ └── swift.conf │ │ └── update-motd.d │ │ │ └── 99-swift-motd │ │ └── home │ │ └── swiftops │ │ └── .ssh │ │ ├── id_rsa │ │ └── id_rsa.pub ├── swift-container │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etc │ │ └── swift │ │ └── container-server.conf ├── swift-object │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── etc │ │ └── swift │ │ │ ├── drive-audit.conf │ │ │ ├── object-expirer.conf │ │ │ └── object-server.conf │ │ └── usr │ │ └── local │ │ └── bin │ │ └── swift-drive-auditor ├── swift-proxy │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── etc │ │ │ └── swift │ │ │ └── proxy-server.conf │ └── vars │ │ └── main.yml ├── swift-ring │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml └── v7k-defaults │ └── defaults │ └── main.yml ├── site.yml ├── test.sh ├── test ├── check-deps ├── cleanup ├── common ├── run └── setup ├── tox.ini ├── upgrade-db-cluster.yml ├── upgrade.yml ├── ursula.png └── vagrant.yml /.gitattributes: 
-------------------------------------------------------------------------------- 1 | roles/common/templates/etc/motd.tail ident 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | envs/example/test 2 | *.pyc 3 | *.vdi 4 | *.retry 5 | .vagrant 6 | .tox 7 | build 8 | *.DS_Store 9 | *~ 10 | *\#*\# 11 | ursula.log 12 | openstack 13 | *.swp 14 | *.swo 15 | .ssh_config 16 | .ssh_key 17 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:trusty 2 | 3 | RUN apt-get update && \ 4 | apt-get install -yqq libssl-dev build-essential libffi-dev libxml2-dev \ 5 | libxslt-dev python-dev python-pip git curl wget 6 | 7 | 8 | ADD . /ursula 9 | 10 | WORKDIR /ursula 11 | 12 | RUN pip install -U pip 13 | 14 | RUN pip install -r requirements.txt 15 | 16 | RUN mkdir /root/.ssh 17 | 18 | CMD test/setup && test/run deploy 19 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | roles_path = roles 3 | hash_behaviour = merge 4 | nocows = 1 5 | nocolor = 0 6 | transport = sshbb 7 | timeout = 120 8 | host_key_checking = True 9 | vars_plugins = plugins/vars 10 | connection_plugins = plugins/connection 11 | callback_plugins = plugins/callbacks 12 | filter_plugins = plugins/filters 13 | strategy_plugins = plugins/strategy 14 | var_defaults_file = ../defaults-2.0.yml 15 | log_path=ursula.log 16 | forks = 25 17 | gathering = smart 18 | sudo_flags = -HE 19 | ansible_managed = "This file is managed by ansible, don't make changes here - they will be overwritten." 20 | 21 | [ssh_connection] 22 | pipelining = false 23 | -------------------------------------------------------------------------------- /bin/install-osx: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ex 4 | 5 | BINDIR=$(dirname $0) 6 | 7 | install_pip() { 8 | which brew >/dev/null || \ 9 | die "This installation script requires a homebrew installation to proceed. Please see: http://mxcl.github.io/homebrew/" 10 | brew install python --with-brewed-openssl 11 | } 12 | 13 | pip_pkgs() { 14 | for pkg in $*; do 15 | pip freeze | grep ${pkg} || pip install ${pkg} 16 | done 17 | } 18 | 19 | virtualenv() { 20 | sudo pip install virtualenv 21 | } 22 | 23 | uname | grep Darwin || die "This is not an OS X system... Aborting." 24 | 25 | which pip >/dev/null || install_pip 26 | 27 | pip install -U -r $BINDIR/../requirements.txt 28 | 29 | echo -e "\nInstallation complete!\n" 30 | -------------------------------------------------------------------------------- /bin/install-ubuntu: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ex 4 | 5 | BINDIR=$(dirname $0) 6 | 7 | if ! grep Ubuntu /etc/lsb-release; then 8 | echo "This is not an Ubuntu system... Aborting." 
9 | exit 1 10 | fi 11 | 12 | apt-get update 13 | DEBIAN_FRONTEND=noninteractive apt-get install --yes python-pip python-dev libxml2-dev libxslt-dev libffi-dev 14 | apt-get remove --yes ansible || echo "ok" 15 | 16 | pip install -U -r $BINDIR/../requirements.txt 17 | 18 | echo -e "\nInstallation complete!\n" 19 | -------------------------------------------------------------------------------- /bin/shells: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require 'yaml' 4 | 5 | screenrc = "/tmp/ansible-screen-#{ENV['USER']}" 6 | File.delete screenrc if File.exists? screenrc 7 | 8 | ssh_args = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' 9 | 10 | File.open(screenrc, 'w') do |f| 11 | f.write "startup_message off\n" 12 | f.write "hardstatus alwayslastline \"%w\"\n" 13 | 14 | cur_group = nil 15 | File.read('envs/example/test/hosts').split("\n").each do |l| 16 | next if l.empty? 17 | if l =~ /\[(.*)\]/ 18 | cur_group = $1 19 | next 20 | end 21 | f.write "screen -t #{cur_group} sh -c \"ssh #{ssh_args} ubuntu@#{l}\"\n" 22 | end 23 | 24 | f.flush 25 | end 26 | 27 | exec "screen -c #{screenrc}" 28 | -------------------------------------------------------------------------------- /bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | apt-get -y update 4 | apt-get install -y curl 5 | apt-get install -y vim 6 | apt-get install -y wget 7 | apt-get install -y git 8 | apt-get install -y tmux 9 | apt-get install -y python-pip 10 | apt-get install -y python-dev 11 | apt-get install -y libxml2-dev 12 | apt-get install -y libxslt-dev 13 | apt-get install -y libffi-dev 14 | -------------------------------------------------------------------------------- /branch-validation.yml: -------------------------------------------------------------------------------- 1 | # Playbook for validating a branch for release tagging 2 | # interactive, may prompt runner to affirm things have completed 3 | --- 4 | - name: Run rally and validate 5 | hosts: localhost 6 | gather_facts: false 7 | tasks: 8 | - name: run rally 9 | pause: 10 | prompt: "Was rally validation run successful? 
(yes/no)" 11 | register: rally 12 | 13 | - name: assert rally successful 14 | assert: 15 | that: rally.user_input|bool 16 | 17 | - name: test instance connectivity 18 | hosts: controller[0] 19 | roles: 20 | - branch-validation 21 | -------------------------------------------------------------------------------- /envs/example/allinone-centos/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | allinone 3 | 4 | [network] 5 | allinone 6 | 7 | [db] 8 | allinone 9 | 10 | [mongo_db] 11 | allinone 12 | 13 | [compute] 14 | allinone 15 | 16 | [cinder_volume] 17 | allinone 18 | -------------------------------------------------------------------------------- /envs/example/allinone-centos/playbooks: -------------------------------------------------------------------------------- 1 | ../../../playbooks/vagrant -------------------------------------------------------------------------------- /envs/example/allinone-centos/vagrant.yml: -------------------------------------------------------------------------------- 1 | default: 2 | memory: 512 3 | cpus: 1 4 | box: centos/7 5 | box_url: https://atlas.hashicorp.com/centos/7 6 | 7 | vms: 8 | allinone: 9 | ip_address: 10 | - 172.16.0.100 11 | - 172.16.255.100 12 | - 192.168.255.100 13 | cpus: 2 14 | memory: 5500 15 | custom: 16 | - '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 17 | - '["modifyvm", :id, "--nicpromisc4", "allow-all"]' 18 | - '["modifyvm", :id, "--cableconnected1", "on"]' 19 | -------------------------------------------------------------------------------- /envs/example/allinone-rhel/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | allinone 3 | 4 | [network] 5 | allinone 6 | 7 | [db] 8 | allinone 9 | 10 | [mongo_db] 11 | allinone 12 | 13 | [compute] 14 | allinone 15 | 16 | [cinder_volume] 17 | allinone 18 | -------------------------------------------------------------------------------- /envs/example/allinone-rhel/playbooks: -------------------------------------------------------------------------------- 1 | ../../../playbooks/vagrant -------------------------------------------------------------------------------- /envs/example/allinone-rhel/vagrant.yml: -------------------------------------------------------------------------------- 1 | default: 2 | memory: 512 3 | cpus: 1 4 | box: rhel/7 5 | box_url: XXXXX 6 | 7 | vms: 8 | allinone: 9 | ip_address: 10 | - 172.16.0.100 11 | - 172.16.255.100 12 | - 192.168.255.100 13 | cpus: 2 14 | memory: 5500 15 | custom: 16 | - '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 17 | - '["modifyvm", :id, "--nicpromisc4", "allow-all"]' 18 | - '["modifyvm", :id, "--cableconnected1", "on"]' 19 | -------------------------------------------------------------------------------- /envs/example/allinone/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | allinone 3 | 4 | [network] 5 | allinone 6 | 7 | [db] 8 | allinone 9 | 10 | [mongo_db] 11 | allinone 12 | 13 | [compute] 14 | allinone 15 | 16 | [cinder_volume] 17 | allinone 18 | -------------------------------------------------------------------------------- /envs/example/allinone/playbooks: -------------------------------------------------------------------------------- 1 | ../../../playbooks/vagrant -------------------------------------------------------------------------------- /envs/example/allinone/vagrant.yml: -------------------------------------------------------------------------------- 
1 | default: 2 | memory: 512 3 | cpus: 1 4 | 5 | vms: 6 | allinone: 7 | ip_address: 8 | - 172.16.0.100 9 | - 172.16.255.100 10 | - 192.168.255.100 11 | cpus: 2 12 | memory: 6144 13 | custom: 14 | - '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 15 | - '["modifyvm", :id, "--nicpromisc4", "allow-all"]' 16 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-redhat/group_vars/ceph_osds.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rhn_subscription: 3 | pool_names_regex: "^Red Hat Satellite - Add-Ons for Providers" 4 | repos: 5 | enable: 6 | - rhel-7-server-rpms 7 | - rhel-7-server-rh-common-rpms 8 | - rhel-7-server-extras-rpms 9 | - rhel-7-server-optional-rpms 10 | - rhel-7-server-rhceph-2-tools-rpms 11 | - rhel-7-server-rhceph-2-osd-rpms 12 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-redhat/group_vars/ceph_osds_ssd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ceph: 3 | disks: 4 | - vdb 5 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-redhat/group_vars/compute.yml: -------------------------------------------------------------------------------- 1 | --- 2 | compute_ag: ci 3 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-redhat/hosts: -------------------------------------------------------------------------------- 1 | [db] 2 | controller-0 3 | controller-1 4 | 5 | [db_arbiter] 6 | compute-0 7 | 8 | [mongo_db] 9 | controller-0 10 | controller-1 11 | 12 | [mongo_arbiter] 13 | compute-0 14 | 15 | [controller] 16 | controller-0 17 | controller-1 18 | 19 | [compute] 20 | compute-0 21 | controller-0 22 | controller-1 23 | 24 | [network] 25 | controller-0 26 | controller-1 27 | 28 | [cinder_volume] 29 | controller-0 30 | controller-1 31 | 32 | [ceph_monitors] 33 | cpm1 34 | cpm2 35 | cpm3 36 | 37 | [ceph_osds:children] 38 | ceph_osds_ssd 39 | 40 | [ceph_osds_ssd] 41 | ceph1 42 | ceph2 43 | ceph3 44 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-swift-rhel/group_vars/ceph_osds_ssd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rhn_subscription: 3 | pool_names_regex: "^Red Hat Satellite - Add-Ons for Providers" 4 | repos: 5 | enable: 6 | - rhel-7-server-rpms 7 | - rhel-7-server-rh-common-rpms 8 | - rhel-7-server-extras-rpms 9 | - rhel-7-server-optional-rpms 10 | - rhel-7-server-rhceph-2-tools-rpms 11 | - rhel-7-server-rhceph-2-osd-rpms 12 | 13 | ceph: 14 | disks: 15 | - vdb 16 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-swift-rhel/group_vars/compute.yml: -------------------------------------------------------------------------------- 1 | --- 2 | compute_ag: ci 3 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-swift-rhel/hosts: -------------------------------------------------------------------------------- 1 | [db] 2 | controller-0 3 | controller-1 4 | 5 | [db_arbiter] 6 | compute-0 7 | 8 | [mongo_db] 9 | controller-0 10 | controller-1 11 | 12 | [mongo_arbiter] 13 | compute-0 14 | 15 | [controller] 16 | controller-0 17 | controller-1 18 | 19 | [compute] 20 | compute-0 21 | controller-0 22 | controller-1 23 | 24 | [network] 25 | controller-0 26 | 
controller-1 27 | 28 | [cinder_volume] 29 | controller-0 30 | controller-1 31 | 32 | [ceph_monitors] 33 | cpm-0 34 | cpm-1 35 | cpm-2 36 | 37 | [ceph_osds:children] 38 | ceph_osds_ssd 39 | 40 | [ceph_osds_ssd] 41 | ceph-0 42 | ceph-1 43 | ceph-2 44 | 45 | [swiftnode] 46 | swift-0 47 | swift-1 48 | swift-2 49 | 50 | [swiftnode_primary] 51 | swift-0 52 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-swift-ubuntu/group_vars/ceph_osds_ssd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ceph: 3 | disks: 4 | - vdb 5 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-swift-ubuntu/group_vars/compute.yml: -------------------------------------------------------------------------------- 1 | --- 2 | compute_ag: ci 3 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-swift-ubuntu/hosts: -------------------------------------------------------------------------------- 1 | [db] 2 | controller-0 3 | controller-1 4 | 5 | [db_arbiter] 6 | compute-0 7 | 8 | [mongo_db] 9 | controller-0 10 | controller-1 11 | 12 | [mongo_arbiter] 13 | compute-0 14 | 15 | [controller] 16 | controller-0 17 | controller-1 18 | 19 | [compute] 20 | compute-0 21 | controller-0 22 | controller-1 23 | 24 | [network] 25 | controller-0 26 | controller-1 27 | 28 | [cinder_volume] 29 | compute-0 30 | 31 | [ceph_monitors] 32 | ceph-0 33 | ceph-1 34 | ceph-2 35 | 36 | [ceph_osds:children] 37 | ceph_osds_ssd 38 | 39 | [ceph_osds_ssd] 40 | ceph-0 41 | ceph-1 42 | ceph-2 43 | 44 | [swiftnode] 45 | swift-0 46 | swift-1 47 | swift-2 48 | 49 | [swiftnode_primary] 50 | swift-0 51 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-ubuntu/group_vars/ceph_osds_ssd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ceph: 3 | disks: 4 | - vdb 5 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-ubuntu/group_vars/compute.yml: -------------------------------------------------------------------------------- 1 | --- 2 | compute_ag: ci 3 | -------------------------------------------------------------------------------- /envs/example/ci-ceph-ubuntu/hosts: -------------------------------------------------------------------------------- 1 | [db] 2 | controller-0 3 | controller-1 4 | 5 | [db_arbiter] 6 | compute-0 7 | 8 | [mongo_db] 9 | controller-0 10 | controller-1 11 | 12 | [mongo_arbiter] 13 | compute-0 14 | 15 | [controller] 16 | controller-0 17 | controller-1 18 | 19 | [compute] 20 | compute-0 21 | controller-0 22 | controller-1 23 | 24 | [network] 25 | controller-0 26 | controller-1 27 | 28 | [cinder_volume] 29 | controller-0 30 | controller-1 31 | 32 | [ceph_monitors] 33 | cpm1 34 | cpm2 35 | cpm3 36 | 37 | [ceph_osds:children] 38 | ceph_osds_ssd 39 | 40 | [ceph_osds_ssd] 41 | ceph1 42 | ceph2 43 | ceph3 44 | -------------------------------------------------------------------------------- /envs/example/ci-full-centos/group_vars/compute.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #compute_ag: ci 3 | -------------------------------------------------------------------------------- /envs/example/ci-full-centos/hosts: -------------------------------------------------------------------------------- 1 | 2 | [db] 3 | controller-0 4 | controller-1 
5 | 6 | #[db_arbiter] 7 | #compute-0 8 | 9 | [mongo_db] 10 | controller-0 11 | controller-1 12 | 13 | #[mongo_arbiter] 14 | #compute-0 15 | 16 | [controller] 17 | controller-0 18 | controller-1 19 | 20 | 21 | [compute] 22 | compute-0 23 | controller-0 24 | controller-1 25 | 26 | [network] 27 | controller-0 28 | controller-1 29 | 30 | [cinder_volume] 31 | compute-0 32 | -------------------------------------------------------------------------------- /envs/example/ci-full-rhel/group_vars/ceph_osds.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rhn_subscription: 3 | pool_names_regex: "^Red Hat Satellite - Add-Ons for Providers" 4 | repos: 5 | enable: 6 | - rhel-7-server-rpms 7 | - rhel-7-server-rh-common-rpms 8 | - rhel-7-server-extras-rpms 9 | - rhel-7-server-optional-rpms 10 | - rhel-7-server-rhceph-2-tools-rpms 11 | - rhel-7-server-rhceph-2-osd-rpms 12 | -------------------------------------------------------------------------------- /envs/example/ci-full-rhel/group_vars/compute.yml: -------------------------------------------------------------------------------- 1 | --- 2 | compute_ag: ci 3 | -------------------------------------------------------------------------------- /envs/example/ci-full-rhel/hosts: -------------------------------------------------------------------------------- 1 | 2 | [db] 3 | controller-0 4 | controller-1 5 | 6 | #[db_arbiter] 7 | #compute-0 8 | 9 | [mongo_db] 10 | controller-0 11 | controller-1 12 | 13 | #[mongo_arbiter] 14 | #compute-0 15 | 16 | [controller] 17 | controller-0 18 | controller-1 19 | 20 | 21 | [compute] 22 | compute-0 23 | controller-0 24 | controller-1 25 | 26 | [network] 27 | controller-0 28 | controller-1 29 | 30 | [cinder_volume] 31 | compute-0 32 | -------------------------------------------------------------------------------- /envs/example/ci-full-ubuntu/group_vars/compute.yml: -------------------------------------------------------------------------------- 1 | --- 2 | compute_ag: ci 3 | -------------------------------------------------------------------------------- /envs/example/ci-full-ubuntu/hosts: -------------------------------------------------------------------------------- 1 | [db] 2 | controller-0 3 | controller-1 4 | 5 | [db_arbiter] 6 | compute-0 7 | 8 | [mongo_db] 9 | controller-0 10 | controller-1 11 | 12 | [mongo_arbiter] 13 | compute-0 14 | 15 | [controller] 16 | controller-0 17 | controller-1 18 | 19 | [compute] 20 | compute-0 21 | controller-0 22 | controller-1 23 | 24 | [network] 25 | controller-0 26 | controller-1 27 | 28 | [cinder_volume] 29 | compute-0 30 | -------------------------------------------------------------------------------- /envs/example/ci/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | stack_env: example_ci 3 | floating_ip: 172.16.0.100 4 | 5 | xtradb: 6 | sst_auth_password: asdf 7 | 8 | percona: 9 | replication: False 10 | 11 | ceilometer: 12 | enabled: True 13 | logging: 14 | debug: True 15 | verbose: True 16 | 17 | keystone: 18 | ldap_domain: 19 | enabled: True 20 | domain: users 21 | 22 | serverspec: 23 | enabled: True 24 | -------------------------------------------------------------------------------- /envs/example/ci/group_vars/compute.yml: -------------------------------------------------------------------------------- 1 | --- 2 | compute_ag: ci 3 | -------------------------------------------------------------------------------- /envs/example/ci/hosts: 
-------------------------------------------------------------------------------- 1 | [db] 2 | db.example.com 3 | db2.example.com 4 | 5 | [db_arbiter] 6 | bar.example.com 7 | 8 | [mongo_db] 9 | db.example.com 10 | db2.example.com 11 | 12 | [mongo_arbiter] 13 | bar.example.com 14 | 15 | [controller] 16 | foo.example.com 17 | 18 | [compute] 19 | bar.example.com 20 | 21 | [network] 22 | baz.example.com 23 | -------------------------------------------------------------------------------- /envs/example/ironic/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | allinone 3 | 4 | [network] 5 | allinone 6 | 7 | [db] 8 | allinone 9 | 10 | [compute] 11 | allinone 12 | 13 | [cinder_volume] 14 | allinone 15 | 16 | [ironic] 17 | allinone 18 | 19 | [compute_ironic] 20 | allinone 21 | -------------------------------------------------------------------------------- /envs/example/ironic/playbooks: -------------------------------------------------------------------------------- 1 | ../../../playbooks/vagrant -------------------------------------------------------------------------------- /envs/example/ironic/vagrant.yml: -------------------------------------------------------------------------------- 1 | default: 2 | memory: 512 3 | cpus: 1 4 | 5 | vms: 6 | allinone: 7 | ip_address: 8 | - 172.16.0.100 9 | - 172.16.255.100 10 | - 192.168.255.100 11 | cpus: 2 12 | memory: 6144 13 | custom: 14 | - '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 15 | - '["modifyvm", :id, "--nicpromisc4", "allow-all"]' 16 | -------------------------------------------------------------------------------- /envs/example/jeofd/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | stack_env: example_jeofd 3 | floating_ip: 172.16.0.100 4 | 5 | xtradb: 6 | sst_auth_password: asdf 7 | 8 | cinder: 9 | enabled: False 10 | 11 | heat: 12 | enabled: False 13 | 14 | logging: 15 | enabled: False 16 | 17 | monitoring: 18 | enabled: False 19 | 20 | collectd: 21 | enabled: False 22 | 23 | haproxy: 24 | stats_group: root 25 | 26 | percona: 27 | replication: False 28 | -------------------------------------------------------------------------------- /envs/example/jeofd/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | allinone 3 | 4 | [network] 5 | allinone 6 | 7 | [db] 8 | allinone 9 | 10 | [compute] 11 | allinone 12 | 13 | [cinder_volume] 14 | allinone 15 | -------------------------------------------------------------------------------- /envs/example/jeofd/playbooks: -------------------------------------------------------------------------------- 1 | ../../../playbooks/vagrant -------------------------------------------------------------------------------- /envs/example/jeofd/vagrant.yml: -------------------------------------------------------------------------------- 1 | default: 2 | memory: 512 3 | cpus: 1 4 | 5 | vms: 6 | allinone: 7 | ip_address: 8 | - 172.16.0.100 9 | - 172.16.255.100 10 | - 192.168.255.100 11 | cpus: 2 12 | memory: 6144 13 | custom: 14 | - '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 15 | - '["modifyvm", :id, "--nicpromisc4", "allow-all"]' 16 | -------------------------------------------------------------------------------- /envs/example/mirrors/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | allinone 3 | 4 | [network] 5 | allinone 6 | 7 | [db] 8 | allinone 9 | 10 | [compute] 11 | 
allinone 12 | 13 | [cinder_volume] 14 | allinone 15 | -------------------------------------------------------------------------------- /envs/example/mirrors/playbooks: -------------------------------------------------------------------------------- 1 | ../../../playbooks/vagrant -------------------------------------------------------------------------------- /envs/example/mirrors/vagrant.yml: -------------------------------------------------------------------------------- 1 | default: 2 | memory: 512 3 | cpus: 1 4 | 5 | vms: 6 | allinone: 7 | ip_address: 8 | - 172.16.0.100 9 | - 172.16.255.100 10 | - 192.168.255.100 11 | cpus: 2 12 | memory: 6144 13 | custom: 14 | - '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 15 | - '["modifyvm", :id, "--nicpromisc4", "allow-all"]' 16 | -------------------------------------------------------------------------------- /envs/example/novadocker/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | stack_env: example_novadocker 3 | floating_ip: 172.16.0.100 4 | 5 | xtradb: 6 | sst_auth_password: asdf 7 | 8 | glance: 9 | container_formats: ami,ari,aki,bare,ovf,docker 10 | -------------------------------------------------------------------------------- /envs/example/novadocker/host_vars/compute1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nova: 3 | compute_driver: novadocker.virt.docker.DockerDriver 4 | -------------------------------------------------------------------------------- /envs/example/novadocker/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | controller1 3 | controller2 4 | 5 | [network] 6 | controller1 7 | controller2 8 | 9 | [db] 10 | controller1 11 | controller2 12 | 13 | [cinder_volume] 14 | controller1 15 | controller2 16 | 17 | [db_arbiter] 18 | compute1 19 | 20 | [compute] 21 | compute1 22 | -------------------------------------------------------------------------------- /envs/example/novadocker/playbooks: -------------------------------------------------------------------------------- 1 | ../../../playbooks/vagrant -------------------------------------------------------------------------------- /envs/example/standard/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | controller1 3 | controller2 4 | 5 | [network] 6 | controller1 7 | controller2 8 | 9 | [db] 10 | controller1 11 | controller2 12 | 13 | [mongo_db] 14 | controller1 15 | controller2 16 | 17 | [cinder_volume] 18 | controller1 19 | controller2 20 | 21 | [db_arbiter] 22 | compute1 23 | 24 | [mongo_arbiter] 25 | compute1 26 | 27 | [compute] 28 | compute1 29 | 30 | [ceph_monitors] 31 | # when expanding cluster, place new hosts BELOW existing hosts 32 | 33 | [ceph_osds:children] 34 | ceph_osds_ssd 35 | ceph_osds_hybrid 36 | 37 | [ceph_osds_ssd] 38 | # when expanding cluster, place new hosts BELOW existing hosts 39 | 40 | [ceph_osds_hybrid] 41 | # when expanding cluster, place new hosts BELOW existing hosts 42 | -------------------------------------------------------------------------------- /envs/example/standard/playbooks: -------------------------------------------------------------------------------- 1 | ../../../playbooks/vagrant -------------------------------------------------------------------------------- /envs/example/standard/vagrant.yml: -------------------------------------------------------------------------------- 1 | default: 2 | memory: 
512 3 | cpus: 1 4 | 5 | vms: 6 | controller1: 7 | ip_address: 8 | - 172.16.0.101 9 | - 192.168.255.101 10 | memory: 3072 11 | custom: '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 12 | controller2: 13 | ip_address: 14 | - 172.16.0.102 15 | - 192.168.255.102 16 | memory: 3072 17 | custom: '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 18 | compute1: 19 | ip_address: 20 | - 172.16.0.111 21 | - 192.168.255.111 22 | memory: 1536 23 | custom: '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 24 | -------------------------------------------------------------------------------- /envs/example/swift-rhel/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | controller1 3 | 4 | [compute] 5 | controller1 6 | 7 | [db] 8 | controller1 9 | 10 | [swiftnode] 11 | swiftnode1 12 | swiftnode2 13 | swiftnode3 14 | 15 | [swiftnode_primary] 16 | swiftnode1 17 | -------------------------------------------------------------------------------- /envs/example/swift-rhel/playbooks/host_vars/allinone: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /envs/example/swift-rhel/playbooks/host_vars/compute1: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | -------------------------------------------------------------------------------- /envs/example/swift-rhel/playbooks/host_vars/controller1: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /envs/example/swift-rhel/playbooks/host_vars/controller2: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /envs/example/swift-rhel/playbooks/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- -------------------------------------------------------------------------------- /envs/example/swift-rhel/ring_definition.yml: -------------------------------------------------------------------------------- 1 | --- 2 | part_power: 13 3 | replicas: 3 4 | min_part_hours: 1 5 | zones: 6 | z1: 7 | 172.16.0.111: 8 | disks: 9 | - blockdev: sdb1 10 | weight: 1000 11 | z2: 12 | 172.16.0.112: 13 | disks: 14 | - blockdev: sdb1 15 | weight: 1000 16 | z3: 17 | 172.16.0.113: 18 | disks: 19 | - blockdev: sdb1 20 | weight: 1000 21 | -------------------------------------------------------------------------------- /envs/example/swift/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | controller1 3 | 4 | [db] 5 | controller1 6 | 7 | [swiftnode] 8 | swiftnode1 9 | swiftnode2 10 | swiftnode3 11 | 12 | [swiftnode_primary] 13 | swiftnode1 14 | -------------------------------------------------------------------------------- /envs/example/swift/ring_definition.yml: -------------------------------------------------------------------------------- 1 | --- 2 | part_power: 13 3 | replicas: 3 4 | min_part_hours: 1 5 | zones: 6 | z1: 7 | 172.16.0.111: 8 | disks: 9 | - blockdev: vda1 10 | weight: 1000 11 | z2: 12 | 172.16.0.112: 13 | disks: 14 | - blockdev: vda1 15 | weight: 1000 16 | z3: 17 | 172.16.0.113: 18 | disks: 19 | - blockdev: vda1 20 | weight: 1000 21 | -------------------------------------------------------------------------------- 
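Aside (illustrative, not a file from this repository): the ring_definition.yml files above are presumably consumed by the swift-ring role together with the library/swift_ring.py module. Purely as a sketch of what the values mean, the part_power/replicas/min_part_hours settings and the per-zone disk entries correspond roughly to swift-ring-builder invocations like the ones below; the object.builder file name and the default object-server port 6000 are assumptions for the example, and the account and container rings would be built the same way on their own ports.

    # create the builder with part_power=13, replicas=3, min_part_hours=1
    swift-ring-builder object.builder create 13 3 1
    # add one device per zone entry: region 1, zone zN, host IP, port, block device, weight
    swift-ring-builder object.builder add r1z1-172.16.0.111:6000/vda1 1000
    swift-ring-builder object.builder add r1z2-172.16.0.112:6000/vda1 1000
    swift-ring-builder object.builder add r1z3-172.16.0.113:6000/vda1 1000
    # distribute partitions across the devices and write the ring file
    swift-ring-builder object.builder rebalance
--------------------------------------------------------------------------------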
/envs/vagrant/cinder/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | stack_env: vagrant 3 | floating_ip: 10.1.1.100 4 | 5 | xtradb: 6 | sst_auth_password: asdf 7 | 8 | percona: 9 | replication: False 10 | 11 | nova: 12 | scheduler_default_filters: AvailabilityZoneFilter,ComputeFilter 13 | 14 | cinder: 15 | enabled: True 16 | volume_type: device 17 | volume_device: /dev/sdb 18 | encryption: 19 | enabled: true 20 | fixed_key: 6a5c55db5e250f234b6af7807dafda77433dddcf372b6d04801a45f578a35aa7 21 | logging: 22 | debug: True 23 | verbose: True 24 | volume_types: [] 25 | encrypted_volume_types: 26 | - volume_type: encrypted-aes-512 27 | cipher: aes-xts-plain64 28 | key_size: 512 29 | provider: nova.volume.encryptors.luks.LuksEncryptor 30 | control_location: front-end 31 | -------------------------------------------------------------------------------- /envs/vagrant/cinder/hosts: -------------------------------------------------------------------------------- 1 | [controller] 2 | allinone 3 | 4 | [network] 5 | allinone 6 | 7 | [db] 8 | allinone 9 | 10 | [compute] 11 | allinone 12 | 13 | [cinder_volume] 14 | cinder 15 | -------------------------------------------------------------------------------- /envs/vagrant/cinder/playbooks: -------------------------------------------------------------------------------- 1 | ../../../playbooks/vagrant -------------------------------------------------------------------------------- /playbooks/check_compute_services.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check nova services 3 | command: /etc/sensu/plugins/check-nova-services.sh 4 | register: result 5 | until: result | succeeded 6 | retries: 5 7 | delay: 10 8 | delegate_to: "{{ groups['controller']|first }}" 9 | 10 | - name: check neutron agents 11 | command: /etc/sensu/plugins/check-neutron-agents.sh 12 | register: result 13 | until: result | succeeded 14 | retries: 5 15 | delay: 10 16 | 17 | - name: cluster health check on db_arbiter 18 | command: /etc/sensu/plugins/percona-cluster-size.rb -d /root/.my.cnf --expected 3 --criticality critical 19 | register: cstat 20 | until: cstat | succeeded 21 | retries: 5 22 | delay: 10 23 | delegate_to: "{{ groups['controller']|first }}" 24 | when: inventory_hostname in groups['db_arbiter'] 25 | -------------------------------------------------------------------------------- /playbooks/ci-allinone/tasks/delete.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: de-provision test instances 3 | hosts: localhost 4 | 5 | vars_files: 6 | - ../vars/main.yml 7 | vars: 8 | ansible_python_interpreter: "/usr/bin/env python" 9 | 10 | tasks: 11 | - name: delete test instances 12 | os_server: 13 | name: "{{ item }}" 14 | state: absent 15 | with_items: "{{ testenv_instance_names }}" 16 | 17 | - name: remove security group 18 | os_security_group: 19 | name: "{{ testenv_security_groups }}" 20 | state: absent 21 | when: testenv_security_groups_cleanup 22 | -------------------------------------------------------------------------------- /playbooks/ci-allinone/tasks/keypair.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: test for presence of local keypair 3 | stat: path={{ testenv_keypair_path }} 4 | register: testenv_keypair_local 5 | 6 | - name: delete remote keypair 7 | os_keypair: 8 | name: "{{ testenv_keypair_name }}" 9 | state: absent 10 | when: 
not testenv_keypair_local.stat.exists 11 | 12 | - name: create the keypair 13 | os_keypair: 14 | name: "{{ testenv_keypair_name }}" 15 | state: present 16 | register: testenv_keypair 17 | 18 | - name: persist the keypair 19 | copy: 20 | dest: "{{ testenv_keypair_path }}" 21 | content: "{{ testenv_keypair.key.private_key }}" 22 | mode: 0600 23 | when: testenv_keypair.changed 24 | -------------------------------------------------------------------------------- /playbooks/ci-allinone/templates/etc/network/interfaces: -------------------------------------------------------------------------------- 1 | # This file describes the network interfaces available on your system 2 | # and how to activate them. For more information, see interfaces(5). 3 | 4 | # The loopback network interface 5 | auto lo 6 | iface lo inet loopback 7 | 8 | # Source interfaces 9 | # Please check /etc/network/interfaces.d before changing this file 10 | # as interfaces may have been defined in /etc/network/interfaces.d 11 | # NOTE: the primary ethernet device is defined in 12 | # /etc/network/interfaces.d/eth0 13 | # See LP: #1262951 14 | source /etc/network/interfaces.d/*.cfg 15 | 16 | -------------------------------------------------------------------------------- /playbooks/ci-allinone/templates/etc/network/interfaces.d/br-ex.cfg: -------------------------------------------------------------------------------- 1 | auto br-ex 2 | iface br-ex inet static 3 | address 192.168.255.1 4 | netmask 255.255.255.0 5 | up sysctl net.ipv4.conf.$IFACE.forwarding=1 6 | -------------------------------------------------------------------------------- /playbooks/ci-allinone/templates/etc/network/interfaces.d/eth0-controllers.cfg: -------------------------------------------------------------------------------- 1 | auto eth0 2 | iface eth0 inet dhcp 3 | up sysctl net.ipv4.conf.$IFACE.forwarding=1 4 | up iptables -t nat -A POSTROUTING -o $IFACE -s 192.168.255.0/24 ! 
-d 192.168.255.0/24 -j MASQUERADE 5 | -------------------------------------------------------------------------------- /playbooks/ci-allinone/templates/etc/network/interfaces.d/eth0.cfg: -------------------------------------------------------------------------------- 1 | # The primary network interface 2 | auto eth0 3 | iface eth0 inet dhcp 4 | -------------------------------------------------------------------------------- /playbooks/ci-ceph-swift-rhel/templates/usr/sbin/lshw: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | hostname="{{ inventory_hostname }}" 4 | 5 | {% raw -%} 6 | # Note: If there is a need to run the original lshw command, 7 | # please uncomment the following code, 8 | # and add an entry for "/usr/bin/lshw.original" in "/etc/sudoers.d/swiftops" 9 | #if [ "$1" != "-C" -o "$2" != "disk" ]; then 10 | # if [ -f /usr/bin/lshw.original ]; then 11 | # /usr/bin/lshw.original $@ 12 | # exit 0 13 | # fi 14 | #fi 15 | 16 | # Output additional content for disks 17 | arr_label=(`lsblk -db |awk 'NR>1{print $1}'`) 18 | GB=1073741824 19 | arr_size=(`lsblk -db |awk -v gb=$GB 'NR>1{print $4/gb}'`) 20 | for((i=0;i<${#arr_label[@]};i++)); 21 | do 22 | cat << EOF 23 | *-disk:$i 24 | logical name: /dev/${arr_label[$i]} 25 | serial: ${hostname}-${arr_label[$i]} 26 | size: ${arr_size[$i]}GiB 27 | EOF 28 | done 29 | {% endraw %} 30 | -------------------------------------------------------------------------------- /playbooks/ci-ceph-swift-ubuntu/templates/etc/network/interfaces: -------------------------------------------------------------------------------- 1 | # This file describes the network interfaces available on your system 2 | # and how to activate them. For more information, see interfaces(5). 3 | 4 | # The loopback network interface 5 | auto lo 6 | iface lo inet loopback 7 | 8 | # Source interfaces 9 | # Please check /etc/network/interfaces.d before changing this file 10 | # as interfaces may have been defined in /etc/network/interfaces.d 11 | # NOTE: the primary ethernet device is defined in 12 | # /etc/network/interfaces.d/eth0 13 | # See LP: #1262951 14 | source /etc/network/interfaces.d/*.cfg 15 | 16 | -------------------------------------------------------------------------------- /playbooks/ci-ceph-swift-ubuntu/templates/etc/network/interfaces.d/br-ex.cfg: -------------------------------------------------------------------------------- 1 | auto br-ex 2 | iface br-ex inet static 3 | address 192.168.255.1 4 | netmask 255.255.255.0 5 | up sysctl net.ipv4.conf.$IFACE.forwarding=1 6 | -------------------------------------------------------------------------------- /playbooks/ci-ceph-swift-ubuntu/templates/etc/network/interfaces.d/eth0-controllers.cfg: -------------------------------------------------------------------------------- 1 | auto eth0 2 | iface eth0 inet dhcp 3 | up sysctl net.ipv4.conf.$IFACE.forwarding=1 4 | up iptables -t nat -A POSTROUTING -o $IFACE -s 192.168.255.0/24 ! 
-d 192.168.255.0/24 -j MASQUERADE 5 | -------------------------------------------------------------------------------- /playbooks/ci-ceph-swift-ubuntu/templates/etc/network/interfaces.d/eth0.cfg: -------------------------------------------------------------------------------- 1 | # The primary network interface 2 | auto eth0 3 | iface eth0 inet dhcp 4 | -------------------------------------------------------------------------------- /playbooks/ci-ceph-swift-ubuntu/templates/usr/bin/lshw: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | hostname="{{ inventory_hostname }}" 4 | 5 | {% raw -%} 6 | # Note: If there is a need to run the original lshw command, 7 | # please uncomment the following code, 8 | # and add an entry for "/usr/bin/lshw.original" in "/etc/sudoers.d/swiftops" 9 | #if [ "$1" != "-C" -o "$2" != "disk" ]; then 10 | # if [ -f /usr/bin/lshw.original ]; then 11 | # /usr/bin/lshw.original $@ 12 | # exit 0 13 | # fi 14 | #fi 15 | 16 | # Output additional content for disks 17 | arr_label=(`lsblk -db |awk 'NR>1{print $1}'`) 18 | GB=1073741824 19 | arr_size=(`lsblk -db |awk -v gb=$GB 'NR>1{print $4/gb}'`) 20 | for((i=0;i<${#arr_label[@]};i++)); 21 | do 22 | cat << EOF 23 | *-disk:$i 24 | logical name: /dev/${arr_label[$i]} 25 | serial: ${hostname}-${arr_label[$i]} 26 | size: ${arr_size[$i]}GiB 27 | EOF 28 | done 29 | {% endraw %} 30 | -------------------------------------------------------------------------------- /playbooks/ci-full-ubuntu/templates/etc/network/interfaces: -------------------------------------------------------------------------------- 1 | # This file describes the network interfaces available on your system 2 | # and how to activate them. For more information, see interfaces(5). 3 | 4 | # The loopback network interface 5 | auto lo 6 | iface lo inet loopback 7 | 8 | # Source interfaces 9 | # Please check /etc/network/interfaces.d before changing this file 10 | # as interfaces may have been defined in /etc/network/interfaces.d 11 | # NOTE: the primary ethernet device is defined in 12 | # /etc/network/interfaces.d/eth0 13 | # See LP: #1262951 14 | source /etc/network/interfaces.d/*.cfg 15 | 16 | -------------------------------------------------------------------------------- /playbooks/ci-full-ubuntu/templates/etc/network/interfaces.d/br-ex.cfg: -------------------------------------------------------------------------------- 1 | auto br-ex 2 | iface br-ex inet static 3 | address 192.168.255.1 4 | netmask 255.255.255.0 5 | up sysctl net.ipv4.conf.$IFACE.forwarding=1 6 | -------------------------------------------------------------------------------- /playbooks/ci-full-ubuntu/templates/etc/network/interfaces.d/eth0-controllers.cfg: -------------------------------------------------------------------------------- 1 | auto eth0 2 | iface eth0 inet dhcp 3 | up sysctl net.ipv4.conf.$IFACE.forwarding=1 4 | up iptables -t nat -A POSTROUTING -o $IFACE -s 192.168.255.0/24 ! 
-d 192.168.255.0/24 -j MASQUERADE 5 | -------------------------------------------------------------------------------- /playbooks/ci-full-ubuntu/templates/etc/network/interfaces.d/eth0.cfg: -------------------------------------------------------------------------------- 1 | # The primary network interface 2 | auto eth0 3 | iface eth0 inet dhcp 4 | -------------------------------------------------------------------------------- /playbooks/db-backup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: controller[0] 3 | tasks: 4 | - name: get time string 5 | shell: date "+%Y-%m-%d-%H:%M:%S" 6 | register: time_str 7 | 8 | - file: dest=/root/db-backups state=directory owner=root mode=0644 9 | 10 | - name: run the backup 11 | mysql_db: state=dump name=all target=/root/db-backups/{{ time_str.stdout }}.sql 12 | -------------------------------------------------------------------------------- /playbooks/dist-upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | serial: 10 4 | tasks: 5 | - apt: cache_valid_time=3600 update_cache=yes 6 | - apt: upgrade=dist 7 | -------------------------------------------------------------------------------- /playbooks/install_deb_package.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # simple playbook to install a deb package w/ apt 4 | # USAGE: pass an extra var w/ your ansible command 5 | # indicating the package. 6 | # 7 | # For example, this will install the tmux package: 8 | # ... -e package=tmux 9 | 10 | - name: install a deb package 11 | hosts: all:!vyatta-* 12 | any_errors_fatal: true 13 | gather_facts: false 14 | environment: "{{ env_vars|default({}) }}" 15 | 16 | tasks: 17 | - name: update apt index 18 | apt: update_cache=yes 19 | register: result 20 | until: result|succeeded 21 | retries: 5 22 | 23 | - name: install package 24 | apt: pkg={{ package }} 25 | register: result 26 | until: result|succeeded 27 | retries: 5 28 | -------------------------------------------------------------------------------- /playbooks/restart_sensu_client.yml: -------------------------------------------------------------------------------- 1 | - name: restart sensu client 2 | hosts: all 3 | tasks: 4 | - name: restart sensu client service 5 | service: name=sensu-client state=restarted enabled=yes 6 | 7 | -------------------------------------------------------------------------------- /playbooks/rhn_unsubscribe.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: unsubscribe rhel for all nodes 3 | hosts: all:!vyatta-* 4 | gather_facts: true 5 | environment: "{{ env_vars|default({}) }}" 6 | 7 | tasks: 8 | - name: unsubscribe from redhat subscription 9 | redhat_subscription: 10 | state: absent 11 | when: ansible_distribution == 'RedHat' 12 | register: unsubscribe 13 | until: unsubscribe|succeeded 14 | retries: 5 15 | -------------------------------------------------------------------------------- /playbooks/run-openstack-ansible-security.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: os-hardening for all hosts 3 | hosts: all:!vyatta-* 4 | any_errors_fatal: true 5 | roles: 6 | - role: ../../openstack-ansible-security 7 | -------------------------------------------------------------------------------- /playbooks/ssl-cert.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all:!vyatta-* 3 | roles: 4 | - ../roles/common 5 | 6 | - name: openstack controller services 7 | hosts: controller 8 | roles: 9 | - ../roles/haproxy 10 | 11 | - name: swift code and config 12 | hosts: swiftnode 13 | roles: 14 | - ../roles/haproxy 15 | -------------------------------------------------------------------------------- /playbooks/tests/tasks/ceilometer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: test ceilometer 3 | hosts: controller[0] 4 | tasks: 5 | - name: list ceilometer meters 6 | shell: . /root/stackrc; ceilometer meter-list 7 | 8 | - name: list ceilometer samples cpu_util 9 | shell: . /root/stackrc; ceilometer sample-list -m cpu_util 10 | 11 | - name: list ceilometer statistics 12 | shell: . /root/stackrc; ceilometer statistics -m cpu -p 10000000 13 | 14 | - name: list ceilometer resources 15 | shell: . /root/stackrc; ceilometer resource-list 16 | 17 | - name: list ceilometer event types 18 | shell: . /root/stackrc; ceilometer event-type-list 19 | -------------------------------------------------------------------------------- /playbooks/tests/tasks/ironic.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: test ironic api and conductor 3 | hosts: controller 4 | tasks: 5 | - name: ironic has a working api 6 | shell: . /root/stackrc; ironic node-list | grep UUID 7 | 8 | - name: ironic-conductor is running 9 | shell: service ironic-conductor status | grep running 10 | -------------------------------------------------------------------------------- /playbooks/tests/tasks/rally.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Running Rally against test cloud 3 | hosts: controller[0] 4 | tasks: 5 | - name: copy basic Rally test profile template into place 6 | copy: src=rally/bbc-cloud-validate.yml dest=./bbc-cloud-validate.yml mode=0444 7 | 8 | - name: copy ceph Rally test profile template into place 9 | copy: src=rally/bbc-cloud-validate-ceph.yml dest=./bbc-cloud-validate-ceph.yml mode=0444 10 | when: cinder.enabled|bool 11 | 12 | - name: run Rally script 13 | environment: 14 | DEBIAN_FRONTEND: noninteractive 15 | script: ./rally/run.sh -e "{{ cinder.enabled }}" -t "{{ build_tag }}" 16 | become: yes 17 | become_user: root 18 | 19 | - name: fetch Rally report 20 | fetch: src=rally_report.html dest={{ workspace }}/rally_report.html flat=yes 21 | -------------------------------------------------------------------------------- /playbooks/tests/tasks/swift.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: tests on a single controller for swift 3 | hosts: controller[0] 4 | tasks: 5 | - name: OpenStack service list should include swift 6 | shell: . /root/stackrc; openstack service list | grep swift 7 | 8 | - name: OpenStack endpoint list should include swift 9 | shell: . /root/stackrc; openstack endpoint list | grep swift 10 | 11 | - name: swift has a working api 12 | shell: . 
/root/stackrc; openstack object store account show | grep Containers 13 | -------------------------------------------------------------------------------- /playbooks/tests/tasks/workaround_network.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: shutdown interface in standby router 3 | shell: ROUTER_NS=$( ip netns show | grep qrouter- | awk '{print $1}' ); 4 | QR_INTERFACES=($(ip netns exec ${ROUTER_NS} ip a|grep ' qr'|awk '{print $2}'|awk -F "@" '{print $1}'|sed 's/\://g')); 5 | for i in "${QR_INTERFACES[@]}"; do ip netns exec ${ROUTER_NS} ip link set dev $i down ;done 6 | args: 7 | executable: /bin/bash 8 | register: result 9 | until: result|success 10 | retries: 6 11 | delay: 10 12 | -------------------------------------------------------------------------------- /playbooks/update-provider-admin-password.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: controller[0] 3 | gather_facts: no 4 | tasks: 5 | - name: update admin password in keystone 6 | shell: . /root/stackrc && keystone user-list | grep admin | awk '{print $2}' 7 | register: keystone_uuid 8 | - shell: . /root/stackrc && keystone user-password-update --pass {{ secrets.admin_password }} {{ keystone_uuid.stdout }} 9 | when: keystone_uuid.rc == 0 10 | - name: update stackrc 11 | template: src=../roles/client/templates/root/stackrc dest=/root/stackrc 12 | 13 | - hosts: all:!vyatta-* 14 | gather_facts: no 15 | user: root 16 | tasks: 17 | - name: update stackrc 18 | template: src=../roles/client/templates/root/stackrc dest=/root/stackrc 19 | - name: test stackrc 20 | shell: . /root/stackrc && keystone user-list 21 | -------------------------------------------------------------------------------- /playbooks/upgrade_precise_for_docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Precise requires a newer kernel in order to support docker. 3 | # See: http://docs.docker.io/installation/ubuntulinux/#ubuntu-precise-1204-lts-64-bit 4 | # This playbook only handles the upgrade of the kernel. The installation of Docker 5 | # happens in a standard Ursula run. 
6 | - hosts: all:!vyatta-* 7 | tasks: 8 | - apt: pkg={{item}} 9 | with_items: 10 | - linux-image-generic-lts-raring 11 | - linux-headers-generic-lts-raring 12 | 13 | - hosts: all:!vyatta-* 14 | tasks: 15 | - command: reboot 16 | -------------------------------------------------------------------------------- /playbooks/vagrant/host_vars/allinone: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /playbooks/vagrant/host_vars/compute1: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | -------------------------------------------------------------------------------- /playbooks/vagrant/host_vars/controller1: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /playbooks/vagrant/host_vars/controller2: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /playbooks/vagrant/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | tox 2 | pep8 3 | oslo.utils 4 | shade>=1.7.0 5 | ansible>=2.1.0.0,<3,<=2.1.3 6 | python-novaclient 7 | python-glanceclient 8 | python-cinderclient 9 | python-neutronclient 10 | python-keystoneclient 11 | python-openstackclient 12 | -e git://github.com/blueboxgroup/ursula-cli.git@master#egg=ursula-cli 13 | PyYAML==4.2b4 14 | -------------------------------------------------------------------------------- /roles/aodh/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart aodh services 3 | service: name={{ item.name }} state=restarted must_exist=false 4 | when: restart|default(True) 5 | with_items: 6 | - "{{ aodh.services.aodh_api }}" 7 | - "{{ aodh.services.aodh_evaluator }}" 8 | - "{{ aodh.services.aodh_listener }}" 9 | - "{{ aodh.services.aodh_notifier }}" 10 | -------------------------------------------------------------------------------- /roles/aodh/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for aodh 3 | logrotate: name=aodh path=/var/log/aodh/*.log 4 | args: 5 | options: 6 | - daily 7 | - missingok 8 | - rotate 7 9 | - compress 10 | - notifempty 11 | - delaycompress 12 | when: openstack_install_method != 'distro' 13 | 14 | -------------------------------------------------------------------------------- /roles/aodh/templates/etc/aodh/policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "context_is_admin": "role:admin", 3 | "segregation": "rule:context_is_admin", 4 | "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s or role:cloud_admin or role:project_admin", 5 | "default": "rule:admin_or_owner", 6 | 7 | "telemetry:get_alarm": "rule:admin_or_owner", 8 | "telemetry:get_alarms": "rule:admin_or_owner", 9 | "telemetry:query_alarm": "rule:admin_or_owner", 10 | 11 | "telemetry:create_alarm": "", 12 | "telemetry:change_alarm": "rule:admin_or_owner", 13 | "telemetry:delete_alarm": "rule:admin_or_owner", 14 | 
15 | "telemetry:get_alarm_state": "rule:admin_or_owner", 16 | "telemetry:change_alarm_state": "rule:admin_or_owner", 17 | 18 | "telemetry:alarm_history": "rule:admin_or_owner", 19 | "telemetry:query_alarm_history": "rule:admin_or_owner" 20 | } 21 | -------------------------------------------------------------------------------- /roles/apache/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apache_log_path: 3 | rhel: '/var/log/httpd' 4 | ubuntu: '/var/log/apache2' 5 | apache: 6 | package_name: 7 | ubuntu: apache2 8 | rhel: httpd 9 | service_name: 10 | ubuntu: apache2 11 | rhel: httpd 12 | modules: 13 | ubuntu: 14 | - libapache2-mod-wsgi 15 | - libapache2-mod-uwsgi 16 | - libapache2-mod-proxy-uwsgi 17 | rhel: 18 | - mod_wsgi 19 | - mod_proxy_uwsgi 20 | listen: [] 21 | logs: 22 | - paths: 23 | - "{{ apache_log_path[ursula_os] }}/access.log" 24 | fields: 25 | # type: 26 | tags: apache_access 27 | - paths: 28 | - "{{ apache_log_path[ursula_os] }}/error.log" 29 | fields: 30 | # type: 31 | tags: apache_error 32 | -------------------------------------------------------------------------------- /roles/apache/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart apache 3 | service: name="{{ apache.service_name[ursula_os] }}" state=restarted must_exist=false 4 | 5 | - name: reload apache 6 | service: name="{{ apache.service_name[ursula_os] }}" state=reloaded must_exist=false 7 | 8 | - name: stop apache 9 | service: name="{{ apache.service_name[ursula_os] }}" state=stopped 10 | -------------------------------------------------------------------------------- /roles/apache/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: logging-config 4 | service: apache 5 | logdata: "{{ apache.logs }}" 6 | when: logging.enabled|default('True')|bool 7 | - role: openstack-meta 8 | -------------------------------------------------------------------------------- /roles/apache/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for apache 3 | logrotate: 4 | name: apache2 5 | path: "{{ apache_log_path[ursula_os] }}/*.log" 6 | args: 7 | options: 8 | - daily 9 | - missingok 10 | - rotate 7 11 | - compress 12 | - delaycompress 13 | - notifempty 14 | - create 640 root adm 15 | - sharedscripts 16 | - postrotate if /etc/init.d/apache2 status > /dev/null ; then /etc/init.d/apache2 reload > /dev/null; fi; 17 | - endscript 18 | - prerotate if [ -d /etc/logrotate.d/httpd-prerotate ]; then run-parts /etc/logrotate.d/httpd-prerotate; fi; 19 | - endscript 20 | when: ursula_os == "ubuntu" 21 | 22 | -------------------------------------------------------------------------------- /roles/apache/templates/etc/httpd/conf.modules.d/43-mod_proxy_uwsgi.conf: -------------------------------------------------------------------------------- 1 | LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so 2 | -------------------------------------------------------------------------------- /roles/apt-repos: -------------------------------------------------------------------------------- 1 | repos -------------------------------------------------------------------------------- /roles/audit/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart auditd 3 | service: name=auditd 
state=restarted 4 | 5 | - name: generate audit.rules 6 | command: augenrules --load 7 | notify: restart auditd 8 | -------------------------------------------------------------------------------- /roles/audit/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure audisp syslog plugin 3 | template: 4 | src: etc/audisp/plugins.d/syslog.conf 5 | dest: /etc/audisp/plugins.d 6 | mode: 0640 7 | notify: restart auditd 8 | 9 | 10 | - name: configure audit rules 11 | template: 12 | src: etc/audit/rules.d/audit_default.rules 13 | dest: /etc/audit/rules.d 14 | mode: 0640 15 | notify: generate audit.rules 16 | -------------------------------------------------------------------------------- /roles/audit/templates/etc/audisp/plugins.d/syslog.conf: -------------------------------------------------------------------------------- 1 | # This file controls the configuration of the syslog plugin. 2 | # It simply takes events and writes them to syslog. The 3 | # arguments provided can be the default priority that you 4 | # want the events written with. And optionally, you can give 5 | # a second argument indicating the facility that you want events 6 | # logged to. Valid options are LOG_LOCAL0 through 7. 7 | 8 | {% if audit.enabled|bool %} 9 | active = yes 10 | {% else %} 11 | active = no 12 | {% endif %} 13 | direction = {{ audit.audisp.direction }} 14 | path = {{ audit.audisp.path }} 15 | type = {{ audit.audisp.type }} 16 | args = {{ audit.audisp.args }} 17 | format = {{ audit.audisp.format }} 18 | -------------------------------------------------------------------------------- /roles/audit/templates/etc/audit/rules.d/audit_default.rules: -------------------------------------------------------------------------------- 1 | {% for rule in audit.rules %} 2 | {{ rule }} 3 | {% endfor %} 4 | -------------------------------------------------------------------------------- /roles/barbican/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart barbican services 3 | service: 4 | name: "{{ openstack_meta.barbican.services.barbican_api[ursula_os].name }}" 5 | state: restarted 6 | must_exist: false 7 | run_once: True 8 | delegate_to: "{{ item }}" 9 | when: restart|default(True) 10 | with_items: play_hosts 11 | 12 | -------------------------------------------------------------------------------- /roles/barbican/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for barbican 3 | logrotate: name=barbican path=/var/log/barbican/*.log 4 | args: 5 | options: 6 | - daily 7 | - rotate 7 8 | - missingok 9 | - compress 10 | - minsize 100k 11 | - copytruncate 12 | when: openstack_install_method != 'distro' 13 | -------------------------------------------------------------------------------- /roles/barbican/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: barbican process check installation 3 | sensu_process_check: service=barbican 4 | notify: restart sensu-client 5 | 6 | - name: barbican-api check 7 | sensu_check: name=check-barbican-api plugin=check-os-api.rb 8 | args="--service barbican --criticality {{ barbican.monitoring.sensu_checks.check_barbican_api.criticality }}" 9 | only_on_ip="{{ undercloud_floating_ip | default(floating_ip) }}" 10 | notify: restart sensu-client 11 | 
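For orientation: several roles in this section (aodh, barbican, ceilometer, cinder) drive log rotation through the repo's custom logrotate module. Given the options passed by the barbican logging task above, that module would be expected to render an /etc/logrotate.d entry roughly like the following (a sketch in standard logrotate syntax, not the module's verified output):

    /var/log/barbican/*.log {
      daily
      rotate 7
      missingok
      compress
      minsize 100k
      copytruncate
    }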
-------------------------------------------------------------------------------- /roles/barbican/templates/etc/barbican/api_audit_map.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | # default target endpoint type 3 | # should match the endpoint type defined in service catalog 4 | target_endpoint_type = key-manager 5 | 6 | # map urls ending with specific text to a unique action 7 | # Don't need custom mapping for other resource operations 8 | # Note: action should match action names defined in CADF taxonomy 9 | [custom_actions] 10 | acl/get = read 11 | 12 | 13 | # path of api requests for CADF target typeURI 14 | # Just need to include top resource path to identify class of resources 15 | [path_keywords] 16 | secrets= 17 | containers= 18 | orders= 19 | cas=None 20 | quotas= 21 | project-quotas= 22 | 23 | 24 | # map endpoint type defined in service catalog to CADF typeURI 25 | [service_endpoints] 26 | key-manager = service/security/keymanager 27 | -------------------------------------------------------------------------------- /roles/barbican/templates/etc/init/barbican.conf: -------------------------------------------------------------------------------- 1 | description "uwsgi for barbican" 2 | 3 | 4 | start on runlevel [2345] 5 | stop on runlevel [!2345] 6 | 7 | respawn 8 | 9 | pre-start script 10 | if [ ! -d /run/uwsgi/barbican ]; then 11 | mkdir /run/uwsgi/barbican 12 | chown barbican /run/uwsgi/barbican 13 | chmod 775 /run/uwsgi/barbican 14 | fi 15 | end script 16 | 17 | post-stop script 18 | if [ -d /run/uwsgi/barbican ]; then 19 | rm -r /run/uwsgi/barbican 20 | fi 21 | end script 22 | 23 | exec {{ uwsgi_path }} --uid barbican --gid barbican --master --emperor /etc/barbican/uwsgi 24 | -------------------------------------------------------------------------------- /roles/barbican/templates/etc/uwsgi/barbican-vassals-api.ini: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | http-socket = 0.0.0.0:{{ endpoints.barbican.port.backend_api }} 3 | processes = {{ barbican.public_workers }} 4 | lazy = true 5 | vacuum = true 6 | no-default-app = true 7 | memory-report = true 8 | plugins = python 9 | paste = config:/etc/barbican/barbican-api-paste.ini 10 | add-header = Connection: close 11 | -------------------------------------------------------------------------------- /roles/branch-validation/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | temp_key_dest: /tmp/validate-key 3 | local_wait_for: false 4 | converged: false 5 | -------------------------------------------------------------------------------- /roles/branch-validation/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: endpoints 4 | -------------------------------------------------------------------------------- /roles/branch-validation/tasks/api.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check services 3 | shell: "curl -I --connect-timeout 2 --fail {{ endpoints[item].url.public }} 2>&1" 4 | with_items: 5 | - glance 6 | - keystone 7 | - neutron 8 | - nova 9 | register: curl 10 | delegate_to: localhost 11 | become: no 12 | -------------------------------------------------------------------------------- /roles/branch-validation/tasks/node_interconnectivity.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: run node_interconnectivity.yml 3 | debug: 4 | msg: Running node_interconnectivity.yml playbook 5 | 6 | - name: cross-node inter-instance connectivity test 7 | command: ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no 8 | -o IdentityFile={{ temp_key_dest }} 9 | cirros@{{ fip.floating_ip.floating_ip_address }} 10 | ping -c 5 {{ item.openstack.accessIPv4 }} 11 | become: no 12 | changed_when: false 13 | register: pings 14 | until: pings|success 15 | delay: 1 16 | retries: 5 17 | with_items: "{{ instances.results }}" 18 | delegate_to: localhost 19 | -------------------------------------------------------------------------------- /roles/ceilometer-common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart ceilometer services 3 | service: name={{ item.name }} state=restarted must_exist=false 4 | when: restart|default(True) 5 | failed_when: false 6 | with_items: 7 | - "{{ ceilometer.services.ceilometer_api }}" 8 | - "{{ ceilometer.services.ceilometer_collector }}" 9 | - "{{ ceilometer.services.ceilometer_notification }}" 10 | - "{{ ceilometer.services.ceilometer_polling }}" 11 | 12 | - name: restart ceilometer data services 13 | service: name={{ item.name }} state=restarted must_exist=false 14 | when: restart|default(True) 15 | failed_when: false 16 | with_items: 17 | - "{{ ceilometer.services.ceilometer_polling }}" 18 | - "{{ ceilometer.services.ceilometer_polling_compute }}" 19 | -------------------------------------------------------------------------------- /roles/ceilometer-common/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for ceilometer 3 | logrotate: name=ceilometer path=/var/log/ceilometer/*.log 4 | args: 5 | options: 6 | - daily 7 | - missingok 8 | - rotate 7 9 | - compress 10 | - notifempty 11 | - delaycompress 12 | when: openstack_install_method != 'distro' 13 | 14 | -------------------------------------------------------------------------------- /roles/ceilometer-common/templates/etc/ceilometer/ceilometer_api_audit_map.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | # default target endpoint type 3 | # should match the endpoint type defined in service catalog 4 | target_endpoint_type = None 5 | 6 | # possible end path of api requests 7 | [path_keywords] 8 | meters = meter_name 9 | resources = resource_id 10 | statistics = None 11 | samples = sample_id 12 | capabilities = None 13 | alarms = alarm_id 14 | history = None 15 | state = None 16 | event_types = event_type 17 | traits = event_type 18 | events = message_id 19 | 20 | # map endpoint type defined in service catalog to CADF typeURI 21 | [service_endpoints] 22 | metering = service/metering 23 | -------------------------------------------------------------------------------- /roles/ceilometer-common/templates/etc/ceilometer/event_pipeline.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - name: event_source 4 | events: 5 | - "*" 6 | sinks: 7 | - event_sink 8 | sinks: 9 | - name: event_sink 10 | transformers: 11 | triggers: 12 | publishers: 13 | - notifier:// 14 | -------------------------------------------------------------------------------- /roles/ceilometer-common/templates/etc/ceilometer/pipeline.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - name: meter_source 4 | interval: 60 5 | meters: 6 | - "cpu" 7 | - "network.services.lb.member" 8 | sinks: 9 | - meter_sink 10 | - name: cpu_source 11 | interval: 60 12 | meters: 13 | - "cpu" 14 | sinks: 15 | - cpu_sink 16 | sinks: 17 | - name: meter_sink 18 | transformers: 19 | publishers: 20 | - notifier:// 21 | - name: cpu_sink 22 | transformers: 23 | - name: "rate_of_change" 24 | parameters: 25 | target: 26 | name: "cpu_util" 27 | unit: "%" 28 | type: "gauge" 29 | scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" 30 | publishers: 31 | - notifier:// 32 | -------------------------------------------------------------------------------- /roles/ceilometer-common/templates/etc/ceilometer/policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "context_is_admin": "role:admin or role:cloud_admin", 3 | "segregation": "rule:context_is_admin", 4 | 5 | "telemetry:get_samples": "", 6 | "telemetry:get_sample": "", 7 | "telemetry:query_sample": "", 8 | "telemetry:create_samples": "", 9 | 10 | "telemetry:compute_statistics": "", 11 | "telemetry:get_meters": "", 12 | 13 | "telemetry:get_resource": "", 14 | "telemetry:get_resources": "", 15 | 16 | "telemetry:events:index": "", 17 | "telemetry:events:show": "" 18 | } 19 | -------------------------------------------------------------------------------- /roles/ceilometer-control/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: ceilometer-common 4 | - role: openstack-firewall 5 | rule_name: ceilometer 6 | rules_type_input: 7 | - { protocol: tcp, port: "{{ endpoints.ceilometer.port.haproxy_api }}" } 8 | -------------------------------------------------------------------------------- /roles/ceilometer-data/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: ceilometer-common 4 | -------------------------------------------------------------------------------- /roles/ceilometer-data/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ceilometer-polling process check 3 | sensu_process_check: service=ceilometer-polling 4 | notify: restart sensu-client 5 | 6 | - name: remove retired process checks 7 | sensu_process_check: 8 | service: ceilometer-agent-compute 9 | state: absent 10 | notify: restart sensu-client 11 | -------------------------------------------------------------------------------- /roles/ceph-client/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apt-repos 4 | repos: 5 | - repo: 'deb {{ apt_repos.cloud_archive.repo }} {{ ansible_distribution_release }}-updates/mitaka main' 6 | key_package: ubuntu-cloud-keyring 7 | when: ursula_os == 'ubuntu' 8 | - role: ceph-config 9 | - role: monitoring-common 10 | when: monitoring.enabled|default(True)|bool 11 | -------------------------------------------------------------------------------- /roles/ceph-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install ceph client 3 | package: name={{ item }}{{ ceph.pkg_version_connector[ursula_os] }}{{ ceph.client_version[ursula_os] }} 4 | state=present 5 | register: result_cephclient 6 | until: 
result_cephclient|succeeded 7 | retries: 5 8 | with_items: 9 | - librgw2 10 | - ceph-common 11 | 12 | # set flag for ceph client upgrade 13 | - name: set upgrade flag true 14 | set_fact: 15 | cephclient_upgraded: "{{ result_cephclient.changed }}" 16 | -------------------------------------------------------------------------------- /roles/ceph-compute/templates/etc/ceph/secret.xml: -------------------------------------------------------------------------------- 1 | <secret ephemeral='no' private='no'> 2 | <uuid>{{ cinder_uuid_file.content | b64decode }}</uuid> 3 | <usage type='ceph'> 4 | <name>client.cinder secret</name> 5 | </usage> 6 | </secret> 7 | -------------------------------------------------------------------------------- /roles/ceph-config/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: ceph-defaults 4 | - role: inspec 5 | install_inspec_controls: [ceph] 6 | tags: inspec 7 | when: 8 | - inspec.enabled|bool 9 | - inspec.controls.ceph.enabled|bool 10 | -------------------------------------------------------------------------------- /roles/ceph-config/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # these two tasks are used to remove the duplicated 3 | # logrotate config file which was created in a previous 4 | # version (on ubuntu). We won't need these 2 tasks 5 | # once they have run on all envs at least once 6 | - name: check if the old ceph logrotate config exists 7 | stat: path=/etc/logrotate.d/ceph.logrotate 8 | register: result 9 | 10 | - name: remove /etc/logrotate.d/ceph 11 | file: path=/etc/logrotate.d/ceph state=absent 12 | when: result.stat.exists 13 | -------------------------------------------------------------------------------- /roles/ceph-monitor/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: monitoring-common 4 | when: monitoring.enabled|default(True)|bool 5 | - role: logging-config 6 | when: logging.enabled|default(True)|bool 7 | service: ceph 8 | logdata: "{{ ceph.logs }}" 9 | - role: ceph-config 10 | - role: repos 11 | repos: 12 | - repo: 'deb {{ apt_repos.ceph.repo }} {{ ansible_lsb.codename }} main' 13 | key_url: '{{ apt_repos.ceph.key_url }}' 14 | when: ursula_os == 'ubuntu' 15 | # as redhat already has ceph repo added, we don't need to add it here 16 | -------------------------------------------------------------------------------- /roles/ceph-monitor/templates/etc/bbg-ceph-utils/ceph-utils.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | [default] 3 | # ceph pool size 4 | pool_default_size = {{ ceph.pool_default_size }} 5 | 6 | # how many pgs do we want to place on each osd 7 | target_pgs_per_osd = {{ ceph.target_pgs_per_osd }} 8 | 9 | # the number of PGs on each osd will never exceed this value 10 | max_pgs_per_osd = {{ ceph.max_pgs_per_osd }} 11 | 12 | # max number of PGs we can increase at one time 13 | mon_osd_max_split_count = {{ ceph.mon_osd_max_split_count }} 14 | 15 | # number of PGs to increase in one round 16 | step_length = 4 17 | 18 | # The number of active recovery requests per OSD at one time 19 | osd_recovery_max_active = {{ ceph.osd_recovery_max_active }} 20 | 21 | # The priority set for recovery operations 22 | osd_recovery_op_priority = {{ ceph.osd_recovery_op_priority }} 23 | -------------------------------------------------------------------------------- /roles/ceph-osd/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 |
dependencies: 3 | - role: monitoring-common 4 | when: monitoring.enabled|default(True)|bool 5 | - role: logging-config 6 | when: logging.enabled|default(True)|bool 7 | service: ceph 8 | logdata: "{{ ceph.logs }}" 9 | - role: ceph-config 10 | - role: repos 11 | repos: 12 | - repo: 'deb {{ apt_repos.ceph.repo }} {{ ansible_lsb.codename }} main' 13 | key_url: '{{ apt_repos.ceph.key_url }}' 14 | when: ursula_os == 'ubuntu' 15 | # as redhat already has ceph repo added, we don't need to add it here 16 | -------------------------------------------------------------------------------- /roles/ceph-osd/tasks/activate_fullssd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: prepare osd disks 3 | command: ceph-disk prepare /dev/{{ item.item }} 4 | with_items: "{{ hostvars[outer_item].ceph_partitions.results }}" 5 | when: item.rc != 0 6 | 7 | - name: activate osds 8 | command: ceph-disk activate /dev/{{ item.item }}1 9 | with_items: "{{ hostvars[outer_item].ceph_partitions.results }}" 10 | when: item.rc != 0 11 | -------------------------------------------------------------------------------- /roles/ceph-update/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: ceph-defaults 4 | -------------------------------------------------------------------------------- /roles/cinder-common/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for cinder 3 | logrotate: name=cinder path=/var/log/cinder/*.log 4 | args: 5 | options: 6 | - daily 7 | - missingok 8 | - rotate 7 9 | - compress 10 | - notifempty 11 | - delaycompress 12 | when: openstack_install_method != 'distro' 13 | 14 | -------------------------------------------------------------------------------- /roles/cinder-common/templates/etc/cinder/cinder.encryption.conf: -------------------------------------------------------------------------------- 1 | {% if cinder.encryption.fixed_key %} 2 | [key_manager] 3 | fixed_key = {{ cinder.encryption.fixed_key }} 4 | {% elif barbican.enabled|bool %} 5 | [key_manager] 6 | api_class = castellan.key_manager.barbican_key_manager.BarbicanKeyManager 7 | 8 | [barbican] 9 | barbican_endpoint = {{ endpoints.barbican.url.public }} 10 | auth_endpoint = {{ endpoints.keystone.url.public }}/{{ endpoints.keystonev3.version }} 11 | {% endif %} 12 | -------------------------------------------------------------------------------- /roles/cinder-common/templates/etc/cinder/cinder_api_audit_map.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | # default target endpoint type 3 | # should match the endpoint type defined in service catalog 4 | target_endpoint_type = None 5 | 6 | # map urls ending with specific text to a unique action 7 | [custom_actions] 8 | associate = update/associate 9 | disassociate = update/disassociate 10 | disassociate_all = update/disassociate_all 11 | associations = read/list/associations 12 | 13 | # possible end path of api requests 14 | [path_keywords] 15 | defaults = None 16 | detail = None 17 | limits = None 18 | os-quota-specs = project 19 | qos-specs = qos-spec 20 | snapshots = snapshot 21 | types = type 22 | volumes = volume 23 | 24 | # map endpoint type defined in service catalog to CADF typeURI 25 | [service_endpoints] 26 | volume = service/storage/block 27 | volumev2 = service/storage/block 28 | volumev3 = service/storage/block 
29 | -------------------------------------------------------------------------------- /roles/cinder-common/templates/etc/sudoers.d/cinder: -------------------------------------------------------------------------------- 1 | {% if openstack_install_method != 'distro' %} 2 | cinder ALL=(root) NOPASSWD: /usr/local/bin/cinder-rootwrap 3 | {% else %} 4 | cinder ALL=(root) NOPASSWD: /bin/cinder-rootwrap 5 | {% endif %} 6 | 7 | -------------------------------------------------------------------------------- /roles/cinder-control/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: cinder-common 4 | - role: openstack-database 5 | database_name: cinder 6 | - role: collectd-plugin 7 | when: collectd is defined and collectd.enabled|bool 8 | - role: openstack-firewall 9 | rule_name: cinder 10 | rules_type_input: 11 | - { protocol: tcp, port: "{{ endpoints.cinder.port.haproxy_api }}" } 12 | -------------------------------------------------------------------------------- /roles/cinder-control/tasks/nimble-monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nimble load check 3 | sensu_check: name=nimble-load plugin=nimble-check.py 4 | args='-w 5 -c 7 -n {{ sensu.nimble_san_ip }} load' use_sudo=true 5 | notify: restart sensu-client 6 | 7 | - name: nimble raid check 8 | sensu_check: name=nimble-raid plugin=nimble-check.py 9 | args='-n {{ sensu.nimble_san_ip }} raid' use_sudo=true 10 | notify: restart sensu-client 11 | 12 | - name: nimble array free check 13 | sensu_check: name=nimble-array-free plugin=nimble-check.py 14 | args='-c 10 -w 15 -n {{ sensu.nimble_san_ip }} array_free' use_sudo=true 15 | notify: restart sensu-client 16 | -------------------------------------------------------------------------------- /roles/cinder-control/tasks/v7k_integration.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create v7k sshkey directory 3 | file: dest=/etc/cinder/v7k/ssh/cinder state=directory owner=cinder 4 | group=cinder mode=0755 5 | 6 | - name: drop a cinder private key 7 | template: src=etc/v7k/ssh/cinder/id_rsa 8 | dest=/etc/cinder/v7k/ssh/cinder/id_rsa owner=cinder 9 | group=cinder mode=0600 10 | 11 | -------------------------------------------------------------------------------- /roles/cinder-control/templates/etc/collectd/plugins/cinder.conf: -------------------------------------------------------------------------------- 1 | 2 | Globals true 3 | 4 | 5 | 6 | ModulePath "/opt/ursula-monitoring/collectd/plugins/openstack" 7 | 8 | Import "cinder_plugin" 9 | 10 | 11 | Username "{{ monitoring.openstack.user.username }}" 12 | Password "{{ monitoring.openstack.user.password }}" 13 | TenantName "{{ monitoring.openstack.user.tenant }}" 14 | AuthURL "{{ endpoints.auth_uri }}" 15 | Verbose "False" 16 | {% if client.self_signed_cert %} 17 | 18 | CACert "/opt/stack/ssl/openstack.crt" 19 | {% endif %} 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /roles/cinder-control/templates/etc/v7k/ssh/cinder/id_rsa: -------------------------------------------------------------------------------- 1 | {{ v7k.users.cinder.ssh_pvt_key }} 2 | -------------------------------------------------------------------------------- /roles/cinder-control/templates/etc/v7k/ssh/monitoring/id_rsa: -------------------------------------------------------------------------------- 1 | {{ 
v7k.users.monitoring.ssh_pvt_key }} 2 | -------------------------------------------------------------------------------- /roles/cinder-data/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: cinder-common 4 | -------------------------------------------------------------------------------- /roles/cinder-data/templates/etc/tgt/conf.d/cinder_tgt.conf: -------------------------------------------------------------------------------- 1 | include {{ cinder.state_path }}/volumes/* 2 | -------------------------------------------------------------------------------- /roles/cinder-data/templates/etc/tgt/targets.conf: -------------------------------------------------------------------------------- 1 | include /etc/tgt/conf.d/cinder_tgt.conf 2 | -------------------------------------------------------------------------------- /roles/cleanup/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: refresh cert auths 3 | command: update-ca-certificates 4 | when: ansible_distribution == 'Ubuntu' 5 | 6 | - name: restart rsyslog 7 | service: name=rsyslog state=restarted 8 | -------------------------------------------------------------------------------- /roles/cleanup/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | - name: remove old log configs / files 2 | file: dest={{ item }} state=absent 3 | with_items: 4 | - /etc/nova/logging.conf 5 | - /etc/cinder/logging.conf 6 | - /etc/keystone/logging.conf 7 | - /var/log/keystone/error.log 8 | - /var/log/keystone/access.log 9 | 10 | - name: remove rsyslog configs 11 | file: dest=/etc/rsyslog.d/{{ item }} state=absent 12 | with_items: 13 | - follow.conf 14 | - forward.conf 15 | - udp.conf 16 | notify: restart rsyslog 17 | 18 | - name: remove rsyslog forward cert auth 19 | file: dest=/usr/local/share/ca-certificates/rsyslog-forward.crt state=absent 20 | notify: refresh cert auths 21 | -------------------------------------------------------------------------------- /roles/cleanup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | include: logging.yml 2 | tags: 3 | - cleanup 4 | - logging 5 | -------------------------------------------------------------------------------- /roles/client/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: update ca certs 3 | command: update-ca-certificates 4 | when: ansible_distribution == 'Ubuntu' 5 | -------------------------------------------------------------------------------- /roles/client/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: openstack-meta 4 | - role: endpoints 5 | -------------------------------------------------------------------------------- /roles/collectd-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collectd: 3 | client_name: "{{ ansible_hostname }}" 4 | plugin_conf_dir: /etc/collectd/plugins 5 | interval: 60 6 | timeout: 2 7 | threads: 5 8 | graphite_prefix: "stats." 
9 | plugins: 10 | amqp: 11 | enabled: True 12 | verbose: False 13 | host: 172.16.0.15 14 | port: 5672 15 | vhost: /graphite 16 | user: graphite 17 | pass: graphite 18 | exchange: metrics 19 | logfile: 20 | enabled: True 21 | file: /var/log/collectd.log 22 | level: info 23 | sensors: 24 | enabled: True 25 | skip: [] 26 | -------------------------------------------------------------------------------- /roles/collectd-client/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart collectd 3 | command: /etc/init.d/collectd restart 4 | -------------------------------------------------------------------------------- /roles/collectd-client/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apt-repos 4 | repos: 5 | - name: 'collectd package repo' 6 | repo: 'deb {{ apt_repos.collectd.repo }} {{ ansible_distribution_release }} main' 7 | key_url: '{{ apt_repos.collectd.key_url }}' 8 | when: ansible_distribution_version == "12.04" 9 | -------------------------------------------------------------------------------- /roles/collectd-client/templates/etc/collectd/collectd.conf: -------------------------------------------------------------------------------- 1 | Hostname {{ collectd.client_name|default(ansible_hostname) }} 2 | FQDNLookup false 3 | 4 | BaseDir "/var/lib/collectd" 5 | PluginDir "/usr/lib/collectd" 6 | 7 | {% if collectd.typesdb is defined %} 8 | TypesDB {% for path in collectd.typesdb %} "{{ path }} " {% endfor %} 9 | {% else %} 10 | TypesDB "/usr/share/collectd/types.db" "/etc/collectd/my_types.db" 11 | {% endif %} 12 | 13 | Interval {{ collectd.interval }} 14 | Timeout {{ collectd.timeout }} 15 | ReadThreads {{ collectd.threads }} 16 | Include "{{ collectd.plugin_conf_dir }}/*.conf" 17 | -------------------------------------------------------------------------------- /roles/collectd-client/templates/etc/collectd/plugins/amqp.conf: -------------------------------------------------------------------------------- 1 | LoadPlugin amqp 2 | 3 | 4 | 5 | Host "{{ collectd.plugins.amqp.host }}" 6 | Port "{{ collectd.plugins.amqp.port }}" 7 | VHost "{{ collectd.plugins.amqp.vhost }}" 8 | User "{{ collectd.plugins.amqp.user }}" 9 | Password "{{ collectd.plugins.amqp.pass }}" 10 | Exchange "{{ collectd.plugins.amqp.exchange }}" 11 | Format "Graphite" 12 | Persistent true 13 | GraphitePrefix "{{ collectd.graphite_prefix }}" 14 | GraphiteEscapeChar "-" 15 | 16 | 17 | -------------------------------------------------------------------------------- /roles/collectd-client/templates/etc/collectd/plugins/logfile.conf: -------------------------------------------------------------------------------- 1 | LoadPlugin "logfile" 2 | 3 | 4 | LogLevel "{{ collectd.plugins.logfile.level }}" 5 | File "{{ collectd.plugins.logfile.file }}" 6 | Timestamp true 7 | 8 | -------------------------------------------------------------------------------- /roles/collectd-client/templates/etc/collectd/plugins/sensors.conf: -------------------------------------------------------------------------------- 1 | 2 | {% for sensor in collectd.plugins.sensors.skip %} 3 | Sensor "{{ sensor }}" 4 | {% endfor %} 5 | IgnoreSelected true 6 | 7 | -------------------------------------------------------------------------------- /roles/collectd-plugin/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collectd_plugin: 3 | path: 
/opt/ursula-monitoring/collectd/plugins 4 | -------------------------------------------------------------------------------- /roles/collectd-plugin/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart collectd 3 | service: name=collectd state=restarted 4 | -------------------------------------------------------------------------------- /roles/collectd-plugin/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/collectd-plugin/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/common/files/etc/sysctl.d/10-disable-ipv6-default.conf: -------------------------------------------------------------------------------- 1 | # Disable IPv6 on all interfaces 2 | net.ipv6.conf.all.disable_ipv6 = 1 3 | net.ipv6.conf.default.disable_ipv6 = 1 4 | # Enable on loopback 5 | net.ipv6.conf.lo.disable_ipv6 = 0 6 | -------------------------------------------------------------------------------- /roles/common/files/usr/local/bin/apt-get-update.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # A simple wrapper around "apt-get update" that returns a suitable exit 4 | # code on failure. Useful for cron jobs. 5 | 6 | if ! { apt-get update 2>&1 || echo "E: update failed"; } | grep -q '^[WE]:'; then 7 | exit 0 8 | else 9 | exit 1 10 | fi 11 | -------------------------------------------------------------------------------- /roles/common/tasks/apt-update-cron.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: copy apt-get update wrapper script for cron job 3 | copy: 4 | src: usr/local/bin/apt-get-update.sh 5 | dest: /usr/local/bin/apt-get-update.sh 6 | owner: root 7 | group: root 8 | mode: 0755 9 | 10 | - name: cron job for hourly apt cache updates 11 | cron: 12 | name: "update apt cache hourly" 13 | cron_file: "apt-get-update" 14 | special_time: hourly 15 | job: "http_proxy={{ ( env_vars | default({})).http_proxy | default(None) }} /usr/local/bin/apt-get-update.sh && touch /var/lib/apt/update-cache-success" 16 | user: root 17 | -------------------------------------------------------------------------------- /roles/common/tasks/audit-logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up logrotate for audit.log 3 | logrotate: name=auditd path=/var/log/audit/audit.log 4 | args: 5 | options: 6 | - daily 7 | - rotate 7 8 | - missingok 9 | - compress 10 | - minsize 100k 11 | -------------------------------------------------------------------------------- /roles/common/tasks/disable-swap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: remove swaps 3 | lineinfile: dest=/etc/fstab regexp='\s+swap\s+' state=absent 4 | 5 | - name: disable unused swap devices 6 | shell: | 7 | awk '/^\// {print $1 "\t" $4}' /proc/swaps | \ 8 | while read SWAPDEV SWAPUSED; do 9 | if [ ${SWAPUSED} -eq 0 ]; then 10 | swapoff ${SWAPDEV} 11 | fi 12 | done 13 | -------------------------------------------------------------------------------- /roles/common/tasks/hwraid-ppc.yml: -------------------------------------------------------------------------------- 1
| --- 2 | - name: install raid utilities 3 | apt: pkg=Arcconf 4 | register: result 5 | until: result|succeeded 6 | retries: 5 7 | -------------------------------------------------------------------------------- /roles/common/tasks/hwraid.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install raid utilities 3 | package: name={{ item }} 4 | register: result 5 | until: result|succeeded 6 | retries: 5 7 | 8 | with_items: "{{ common.hwraid.clients }}" 9 | -------------------------------------------------------------------------------- /roles/common/tasks/ipmi.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # NOTE: this file is included by the prereboot tag; as such it should not include anything that will break older releases 4 | # 5 | - name: install ipmitool 6 | package: name=ipmitool 7 | register: result 8 | until: result|succeeded 9 | retries: 5 10 | 11 | - name: load kernel ipmi modules 12 | modprobe: name={{ item }} state={{ common.ipmi.state }} 13 | with_items: 14 | - ipmi_devintf 15 | - ipmi_si 16 | 17 | - include: serial-console.yml tty={{ common.ipmi.serial_console }} baud_rate={{ common.ipmi.baud_rate }} 18 | when: ansible_distribution == 'Ubuntu' 19 | -------------------------------------------------------------------------------- /roles/common/tasks/remove-default-users.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Task to perform some of the miscellaneous things 3 | # not done by openstack-ansible-security 4 | - name: remove default system accounts as prescribed by stig 5 | user: name={{ item }} state=absent 6 | with_items: "{{ common.compliance.rmusers }}" 7 | -------------------------------------------------------------------------------- /roles/common/tasks/ruby.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up gem sources if needed 3 | template: src=etc/gemrc dest=/root/.gemrc 4 | when: openstack.gem_sources is defined 5 | 6 | - name: remove gem sources if unused 7 | file: dest=/root/.gemrc state=absent 8 | when: openstack.gem_sources is not defined 9 | -------------------------------------------------------------------------------- /roles/common/tasks/serial-console.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # NOTE: this file is included by the prereboot tag; as such it should not include anything that will break older releases 4 | # 5 | - name: serial console init script 6 | template: src=etc/init/tty_console.conf dest=/etc/init/{{ tty }}.conf 7 | 8 | - name: start serial console 9 | service: name={{ tty }} state=started enabled=yes 10 | 11 | - name: append serial console kernel command line 12 | set_fact: serial_console_cmdline="{{ serial_console_cmdline|default('') }} console={{ tty }},{{ baud_rate|default('9600') }}n8" 13 | -------------------------------------------------------------------------------- /roles/common/tasks/serverspec.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure serverspec directory exists 3 | file: dest=/etc/serverspec/spec/localhost state=directory 4 | owner=root mode=0755 recurse=true 5 | 6 | - name: operating system serverspec 7 | template: src={{ item }} 8 | dest=/etc/serverspec/spec/localhost 9 | mode=0755 10 | with_fileglob: ../templates/etc/serverspec/* 11 |
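The serial-console.yml tasks above only ever append to the serial_console_cmdline fact, so each include adds one console= argument for its tty. A minimal sketch of the resulting value when ipmi.yml includes it, using illustrative settings (ttyS1 and 115200 are example values, not project defaults):

    # example group_vars (illustrative values only)
    common:
      ipmi:
        serial_console: ttyS1
        baud_rate: 115200
    # after serial-console.yml runs once, the accumulated fact is:
    #   serial_console_cmdline: " console=ttyS1,115200n8"
    # a further include for another tty would append a second console= entry.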
-------------------------------------------------------------------------------- /roles/common/tasks/system-file-permissions.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set permissions for faillog 3 | file: path=/var/log/faillog state=touch mode=600 4 | 5 | - name: Set permissions for shadow 6 | file: path=/etc/shadow state=file mode=600 7 | 8 | - name: Remove default games directory 9 | file: path=/usr/games state=absent 10 | -------------------------------------------------------------------------------- /roles/common/tasks/ucarp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure ucarp installed 3 | package: 4 | name: ucarp 5 | state: present 6 | 7 | - name: write ucarp config 8 | copy: 9 | content: "{{ item.contents }}" 10 | dest: "/etc/ucarp/vip-{{ item.vid }}.conf" 11 | owner: root 12 | group: root 13 | mode: 0644 14 | with_items: "{{ network.ucarp.config | default([]) }}" 15 | 16 | - name: install custom ucarp scripts 17 | template: 18 | src: "usr/local/libexec/{{ item }}" 19 | dest: "/usr/local/libexec/{{ item }}" 20 | owner: root 21 | group: root 22 | mode: 0700 23 | with_items: 24 | - ucarp-vip-up 25 | - ucarp-vip-down 26 | 27 | - name: enable ucarp 28 | command: "systemctl enable ucarp@{{ item.vid }}" 29 | with_items: "{{ network.ucarp.config | default([]) }}" 30 | -------------------------------------------------------------------------------- /roles/common/templates/etc/apt/apt.conf.d/01proxy: -------------------------------------------------------------------------------- 1 | Acquire::http { Proxy "{{ common.apt_cache }}"; }; 2 | -------------------------------------------------------------------------------- /roles/common/templates/etc/gemrc: -------------------------------------------------------------------------------- 1 | --- 2 | :backtrace: false 3 | :benchmark: false 4 | :bulk_threshold: 1000 5 | :sources: 6 | {% for gem_source in openstack.gem_sources %} 7 | - {{ gem_source }} 8 | {% endfor %} 9 | :update_sources: true 10 | :verbose: true 11 | -------------------------------------------------------------------------------- /roles/common/templates/etc/init/tty_console.conf: -------------------------------------------------------------------------------- 1 | # {{ tty }} - getty 2 | # 3 | # This service maintains a getty on {{ tty }} from the point the system is 4 | # started until it is shut down again. 
5 | 6 | start on stopped rc RUNLEVEL=[2345] and ( 7 | not-container or 8 | container CONTAINER=lxc or 9 | container CONTAINER=lxc-libvirt) 10 | 11 | stop on runlevel [!2345] 12 | 13 | respawn 14 | exec /sbin/getty -L {{ tty }} {{ baud_rate|default('9600') }} vt100 15 | 16 | -------------------------------------------------------------------------------- /roles/common/templates/etc/modprobe.d/conntrack.conf: -------------------------------------------------------------------------------- 1 | options nf_conntrack hashsize=190536 2 | -------------------------------------------------------------------------------- /roles/common/templates/etc/pip.conf: -------------------------------------------------------------------------------- 1 | [global] 2 | index-url = {{ openstack.pypi_mirror|default('') }} 3 | trusted-host = {{ openstack.pip_trusted|default('')}} 4 | -------------------------------------------------------------------------------- /roles/common/templates/etc/pydistutils.cfg: -------------------------------------------------------------------------------- 1 | [easy_install] 2 | index-url = {{ openstack.easy_install_mirror|default('') }} 3 | -------------------------------------------------------------------------------- /roles/common/templates/etc/sysctl.d/60-kernel-tuning.conf: -------------------------------------------------------------------------------- 1 | vm.swappiness = 10 2 | kernel.shmmax = 68719476736 3 | kernel.shmall = 4294967296 4 | kernel.msgmax = 65536 5 | kernel.msgmnb = 65536 6 | -------------------------------------------------------------------------------- /roles/common/templates/etc/sysctl.d/60-netfilter-tuning.conf: -------------------------------------------------------------------------------- 1 | # Make sure nf_conntrack has the "hashsize=190536" parameter passed to it 2 | net.netfilter.nf_conntrack_max = 1524288 3 | net.netfilter.nf_conntrack_tcp_timeout_time_wait = 5 4 | net.netfilter.nf_conntrack_tcp_timeout_fin_wait = 5 5 | -------------------------------------------------------------------------------- /roles/common/templates/etc/timezone: -------------------------------------------------------------------------------- 1 | Etc/UTC 2 | -------------------------------------------------------------------------------- /roles/common/templates/etc/ucarp/vip-nnn.conf: -------------------------------------------------------------------------------- 1 | BIND_INTERFACE="{{ ucarp.interface }}" 2 | SOURCE_ADDRESS="{{ ucarp.source_address }}" 3 | VIP_ADDRESS="{{ ucarp.vip_address }}" 4 | PASSWORD="{{ ucarp.password }}" 5 | 6 | UPSCRIPT="/usr/local/libexec/ucarp-vip-up" 7 | DOWNSCRIPT="/usr/local/libexec/ucarp-vip-down" 8 | OPTIONS="--shutdown" 9 | -------------------------------------------------------------------------------- /roles/common/templates/etc/update-motd.d/90-ursula-motd: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import yaml 3 | 4 | URSULA_RELEASE_PATH = '/etc/ursula-release' 5 | 6 | 7 | def print_ursula_release_info(): 8 | with open(URSULA_RELEASE_PATH, 'r') as fh: 9 | data = yaml.load(fh) 10 | print 11 | print 'Ursula Node Data:' 12 | for key, value in data['ursula'].items(): 13 | print '%s: %s' % (key.capitalize(), value) 14 | print 15 | 16 | 17 | def main(): 18 | try: 19 | print_ursula_release_info() 20 | except Exception as e: 21 | pass 22 | 23 | 24 | if __name__ == '__main__': 25 | main() 26 | -------------------------------------------------------------------------------- 
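The 90-ursula-motd script above simply loads the YAML written by the ursula-release template that follows and prints one capitalized line per key under "Ursula Node Data:". A rough sketch of a rendered file and the resulting MOTD fragment, using invented values (key order in the output is not guaranteed, and unquoted timestamps may be reformatted by the YAML loader):

    # /etc/ursula-release rendered with example values
    ---
    ursula:
      stack: example-stack
      release: abc1234
      deployed: "2017-01-01T00:00:00Z"
      groups: controller, compute
    # printed at login by 90-ursula-motd:
    #   Ursula Node Data:
    #   Stack: example-stack
    #   Release: abc1234
    #   Deployed: 2017-01-01T00:00:00Z
    #   Groups: controller, compute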
/roles/common/templates/etc/ursula-release: -------------------------------------------------------------------------------- 1 | --- 2 | ursula: 3 | stack: {{ stack_env }} 4 | release: {{ ursula_revision }} 5 | deployed: {{ ansible_date_time['iso8601'] }} 6 | groups: {{ group_names | join(', ') }} 7 | -------------------------------------------------------------------------------- /roles/common/templates/monitoring/sensu-client-cert.pem: -------------------------------------------------------------------------------- 1 | {{ monitoring.client_cert }} 2 | -------------------------------------------------------------------------------- /roles/common/templates/monitoring/sensu-client-key.pem: -------------------------------------------------------------------------------- 1 | {{ monitoring.client_key }} 2 | -------------------------------------------------------------------------------- /roles/common/templates/monitoring/sensu-client-path: -------------------------------------------------------------------------------- 1 | export PATH="{{ common.python.base_venv }}/bin:/opt/sensu/embedded/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/etc/sensu/plugins" -------------------------------------------------------------------------------- /roles/common/templates/openstack.cacrt: -------------------------------------------------------------------------------- 1 | {{ ssl.cacrt }} 2 | -------------------------------------------------------------------------------- /roles/common/templates/openstack.crt: -------------------------------------------------------------------------------- 1 | {{ ssl.crt }} 2 | -------------------------------------------------------------------------------- /roles/common/templates/ssh-private-key: -------------------------------------------------------------------------------- 1 | {{ item.private_key }} 2 | -------------------------------------------------------------------------------- /roles/common/templates/usr/local/libexec/ucarp-vip-down: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | exec 2>/dev/null 3 | 4 | /sbin/ip address del "$2"/32 dev "$1" label "$1":ucarp 5 | -------------------------------------------------------------------------------- /roles/common/templates/usr/local/libexec/ucarp-vip-up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | exec 2>/dev/null 3 | 4 | /sbin/ip address add "$2"/32 dev "$1" label "$1":ucarp 5 | -------------------------------------------------------------------------------- /roles/docker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | docker: 3 | version: 1.4.1 4 | -------------------------------------------------------------------------------- /roles/docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/docker/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apt-repos 4 | repos: 5 | - repo: 'deb {{ apt_repos.docker.repo }} docker main' 6 | key_url: '{{ apt_repos.docker.key_url }}' 7 | -------------------------------------------------------------------------------- /roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install docker 3 | apt: 
pkg=lxc-docker 4 | when: not docker.version 5 | register: result 6 | until: result|succeeded 7 | retries: 5 8 | 9 | - name: install docker 10 | apt: pkg=lxc-docker-{{ docker.version }} 11 | register: result 12 | until: result|succeeded 13 | retries: 5 14 | 15 | when: docker.version 16 | -------------------------------------------------------------------------------- /roles/glance/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart glance services 3 | service: name={{ item.name }} state=restarted must_exist=false 4 | when: restart|default(True) 5 | with_items: 6 | - "{{ glance.services.glance_api }}" 7 | - "{{ glance.services.glance_registry }}" 8 | 9 | # Restarting rsync 3.0.9-1ubuntu1 on 12.04.4 was failing: 10 | # bind () failed: Address already in use (address-family 2) 11 | # Adding retry logic to workaround 12 | - name: restart rsync 13 | service: name={{ glance.rsync.service }} state=restarted 14 | register: result 15 | until: result|success 16 | retries: 3 17 | delay: 10 18 | -------------------------------------------------------------------------------- /roles/glance/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for glance 3 | logrotate: name=glance path=/var/log/glance/*.log 4 | args: 5 | options: 6 | - daily 7 | - missingok 8 | - rotate 7 9 | - compress 10 | - notifempty 11 | - delaycompress 12 | when: openstack_install_method != 'distro' 13 | 14 | -------------------------------------------------------------------------------- /roles/glance/templates/etc/collectd/plugins/glance.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | 4 | Globals true 5 | 6 | 7 | 8 | ModulePath "/opt/ursula-monitoring/collectd/plugins/openstack" 9 | 10 | Import "glance_plugin" 11 | 12 | 13 | Username "{{ monitoring.openstack.user.username }}" 14 | Password "{{ monitoring.openstack.user.password }}" 15 | TenantName "{{ monitoring.openstack.user.tenant }}" 16 | AuthURL "{{ endpoints.auth_uri }}" 17 | Verbose "False" 18 | {% if client.self_signed_cert %} 19 | 20 | CACert "/opt/stack/ssl/openstack.crt" 21 | {% endif %} 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /roles/glance/templates/etc/glance/.my.cnf: -------------------------------------------------------------------------------- 1 | [client] 2 | user=glance 3 | password={{ secrets.db_password }} 4 | host={{ undercloud_floating_ip }} 5 | port=3307 6 | database=glance 7 | -------------------------------------------------------------------------------- /roles/glance/templates/etc/glance/glance-swift-store.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | [ref1] 4 | auth_version = 2 5 | # project and user domain id will be default unless auth_version = 3 6 | project_domain_id = default 7 | user_domain_id = default 8 | auth_address = {{ endpoints.keystone.url.internal }}/{{ endpoints.keystone.version }} 9 | user = service:swift 10 | key = {{ secrets.service_password }} 11 | -------------------------------------------------------------------------------- /roles/glance/templates/etc/glance/glance_api_audit_map.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | [DEFAULT] 4 | # default target endpoint type 5 | # should 
match the endpoint type defined in service catalog 6 | target_endpoint_type = None 7 | 8 | # possible end path of api requests 9 | [path_keywords] 10 | detail = None 11 | file = None 12 | images = image 13 | members = member 14 | tags = tag 15 | 16 | # map endpoint type defined in service catalog to CADF typeURI 17 | [service_endpoints] 18 | image = service/storage/image 19 | -------------------------------------------------------------------------------- /roles/glance/templates/etc/rsyncd.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | [glance] 4 | comment = "Glance Images" 5 | path = {{ glance.sync.dir }} 6 | use chroot = yes 7 | read only = yes 8 | list = yes 9 | uid = glance 10 | gid = glance 11 | hosts allow = {{ hostvars|ursula_controller_ips(groups)|join(', ') }} 12 | strict modes = yes 13 | ignore errors = no 14 | ignore nonreadable = yes 15 | timeout = 600 16 | refuse options = checksum dry-run 17 | -------------------------------------------------------------------------------- /roles/haproxy/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | haproxy: 3 | stats_group: monitor 4 | global: 5 | maxconn: 20480 6 | bufsize: 16384 7 | defaults: 8 | timeout_http_request: 30s 9 | timeout_http_keep_alive: 60s 10 | timeout_queue: 1m 11 | timeout_connect: 5s 12 | timeout_client: 300s 13 | timeout_server: 300s 14 | timeout_check: 10s 15 | check_interval: 2s 16 | stats_refresh: 10s 17 | mysql: 18 | maxconn: 4096 19 | timeout_connect: 5000ms 20 | timeout_client: 300s 21 | timeout_server: 300s 22 | retries: 3 23 | -------------------------------------------------------------------------------- /roles/haproxy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reload haproxy 3 | service: 4 | name: haproxy 5 | state: reloaded 6 | 7 | - name: restart rsyslog 8 | service: 9 | name: rsyslog 10 | state: restarted 11 | -------------------------------------------------------------------------------- /roles/haproxy/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: endpoints 4 | - role: apt-repos 5 | repos: 6 | - name: 'haproxy package repo' 7 | repo: 'deb {{ apt_repos.haproxy.repo }} {{ ansible_lsb.codename }} main' 8 | key_url: '{{ apt_repos.haproxy.key_url }}' 9 | when: ansible_distribution in ['Ubuntu'] 10 | - role: monitoring-common 11 | when: monitoring.enabled|default(True)|bool 12 | - role: collectd-plugin 13 | when: collectd is defined and collectd.enabled|bool 14 | -------------------------------------------------------------------------------- /roles/haproxy/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: haproxy check process 3 | sensu_check: name=haproxy_process plugin=check-procs.rb 4 | args="-p 'haproxy.*-f /etc/haproxy/haproxy.cfg' -w 5 -c 10 -W 1 -C 1" 5 | notify: restart sensu-client 6 | 7 | - name: haproxy check backends 8 | sensu_check: name=haproxy_servers plugin=check-haproxy.rb 9 | args="-c 100 -S /var/run/haproxy/stats.sock -A" 10 | use_sudo=true 11 | notify: restart sensu-client 12 | 13 | - name: haproxy metrics 14 | template: src=etc/collectd/plugins/haproxy.conf dest=/etc/collectd/plugins/haproxy.conf 15 | notify: restart collectd 16 | when: collectd is defined and collectd.enabled|bool 17 | tags: collectd 18 | 
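The haproxy defaults above are ordinary role variables, so a deployment can override individual timeouts from its group_vars instead of editing the role. A minimal sketch, assuming Ansible's hash_behaviour = merge so that nested keys are deep-merged (with the stock replace behaviour the whole haproxy dictionary would need to be restated); the values below are arbitrary examples:

    # group_vars/all.yml (illustrative override)
    haproxy:
      defaults:
        timeout_client: 600s
        timeout_server: 600s
      mysql:
        maxconn: 8192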
-------------------------------------------------------------------------------- /roles/haproxy/templates/etc/collectd/plugins/haproxy.conf: -------------------------------------------------------------------------------- 1 | 2 | Globals true 3 | 4 | 5 | 6 | ModulePath "/opt/ursula-monitoring/collectd/plugins/haproxy" 7 | 8 | Import "haproxy" 9 | 10 | 11 | Socket "/var/run/haproxy/stats.sock" 12 | ProxyMonitor "server" 13 | ProxyMonitor "backend" 14 | ProxyMonitor "frontend" 15 | 16 | 17 | -------------------------------------------------------------------------------- /roles/haproxy/templates/etc/default/haproxy: -------------------------------------------------------------------------------- 1 | # Defaults file for HAProxy 2 | # 3 | # This is sourced by both, the initscript and the systemd unit file, so do not 4 | # treat it as a shell script fragment. 5 | ENABLED=1 6 | 7 | PIDFILE=/var/run/haproxy/haproxy.pid 8 | 9 | # Change the config file location if needed 10 | CONFIG="/etc/haproxy/haproxy.cfg" 11 | 12 | # Add extra flags here, see haproxy(1) for a few options 13 | #EXTRAOPTS="-de -m 16" 14 | -------------------------------------------------------------------------------- /roles/haproxy/templates/etc/haproxy/openstack.pem: -------------------------------------------------------------------------------- 1 | {{ ssl.crt }} 2 | {{ ssl.key }} 3 | -------------------------------------------------------------------------------- /roles/haproxy/templates/etc/rsyslog.d/haproxy.conf: -------------------------------------------------------------------------------- 1 | # Create an additional socket in haproxy's chroot in order to allow logging via 2 | # /dev/log to chroot'ed HAProxy processes 3 | $AddUnixListenSocket /var/lib/haproxy/dev/log 4 | 5 | # Write HAProxy messages to async dedicated logfile 6 | if $programname startswith 'haproxy' then -/var/log/haproxy.log 7 | & stop 8 | 9 | -------------------------------------------------------------------------------- /roles/haproxy/templates/usr/lib/tmpfiles.d/haproxy.conf: -------------------------------------------------------------------------------- 1 | d /run/haproxy 755 root root 2 | -------------------------------------------------------------------------------- /roles/heat/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart heat services 3 | service: name={{ item.name }} state=restarted must_exist=false 4 | when: restart|default(True) 5 | with_items: 6 | - "{{ heat.services.heat_api }}" 7 | - "{{ heat.services.heat_api_cfn }}" 8 | - "{{ heat.services.heat_engine }}" 9 | -------------------------------------------------------------------------------- /roles/heat/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for heat 3 | logrotate: name=heat path=/var/log/heat/*.log 4 | args: 5 | options: 6 | - daily 7 | - missingok 8 | - rotate 7 9 | - compress 10 | - notifempty 11 | - delaycompress 12 | when: openstack_install_method != 'distro' 13 | 14 | -------------------------------------------------------------------------------- /roles/heat/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: heat process check installation 3 | sensu_process_check: service={{ item }} 4 | with_items: 5 | - heat-api 6 | - heat-api-cfn 7 | - heat-engine 8 | notify: restart sensu-client 9 | 10 | - name: heat-api check 11 | sensu_check: 
name=check-heat-api plugin=check-os-api.rb 12 | args="--service heat --criticality {{ heat.monitoring.sensu_checks.check_heat_api.criticality }}" 13 | only_on_ip="{{ undercloud_floating_ip | default(floating_ip) }}" 14 | notify: restart sensu-client 15 | 16 | - name: heat sla metrics 17 | sensu_metrics_check: name=heat-sla-metrics plugin=metrics-os-api.py 18 | args='-S heat --scheme {{ monitoring.graphite.cluster_prefix }}' 19 | only_on_ip="{{ undercloud_floating_ip | default(floating_ip) }}" 20 | notify: restart sensu-client 21 | -------------------------------------------------------------------------------- /roles/heat/templates/etc/heat/heat_stack_domain.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | 3 | stack_user_domain_id={{ heat_domain.stdout }} 4 | stack_domain_admin=heat_stack_admin 5 | stack_domain_admin_password={{ secrets.service_password }} 6 | -------------------------------------------------------------------------------- /roles/horizon/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # not really a "horizon service" but horizon is the outlier 3 | #- name: restart horizon services 4 | # service: name=apache2 state=restarted must_exist=false 5 | 6 | #- name: restart apache 7 | # service: name=apache2 state=restarted must_exist=false 8 | 9 | - name: compress horizon assets 10 | command: tools/with_venv.sh ./manage.py compress 11 | args: 12 | chdir: /opt/stack/horizon/ 13 | become: true 14 | become_user: www-data 15 | -------------------------------------------------------------------------------- /roles/horizon/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/horizon/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: apache2 process check 3 | sensu_process_check: 4 | service: "{{ ( ursula_os == 'ubuntu' )| ternary('apache2', 'httpd') }}" 5 | notify: restart sensu-client 6 | 7 | - name: dashboard port check 8 | sensu_check: name=check-dashboard-clear-port-redirects plugin=check-http.rb 9 | args='-u http://localhost/ -r -z {{ horizon.monitoring.sensu_checks.check_http.criticality }}' 10 | when: haproxy.enabled|bool 11 | notify: restart sensu-client 12 | -------------------------------------------------------------------------------- /roles/horizon/templates/etc/httpd/conf.d/openstack-dashboard.conf: -------------------------------------------------------------------------------- 1 | Listen 8080 2 | WSGIDaemonProcess dashboard 3 | WSGIProcessGroup dashboard 4 | WSGISocketPrefix run/wsgi 5 | 6 | 7 | 8 | WSGIScriptAlias / {{ horizon.rhel_lib_dir }}/openstack_dashboard/wsgi/django.wsgi 9 | Alias /static {{ horizon.rhel_lib_dir }}/static 10 | 11 | 12 | Options ExecCGI 13 | AuthType None 14 | Require all granted 15 | Satisfy Any 16 | AllowOverride None 17 | 18 | 19 | 20 | Options FollowSymLinks 21 | AuthType None 22 | Require all granted 23 | Satisfy Any 24 | AllowOverride None 25 | 26 | 27 | -------------------------------------------------------------------------------- /roles/horizon/templates/etc/openstack-dashboard/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/blueboxgroup/ursula/b70ccc4a6bda2830559b99991025ee275301c121/roles/horizon/templates/etc/openstack-dashboard/__init__.py -------------------------------------------------------------------------------- /roles/inspec/handlers/main.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blueboxgroup/ursula/b70ccc4a6bda2830559b99991025ee275301c121/roles/inspec/handlers/main.yml -------------------------------------------------------------------------------- /roles/inspec/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apt-repos 4 | repos: 5 | - repo: 'deb {{ apt_repos.chef.repo }} trusty main' 6 | key_url: '{{ apt_repos.chef.key_url }}' 7 | when: ursula_os == 'ubuntu' 8 | - role: repos 9 | repo: chef 10 | when: ursula_os == 'rhel' 11 | -------------------------------------------------------------------------------- /roles/inspec/templates/etc/inspec/host-controls/attributes.yml: -------------------------------------------------------------------------------- 1 | {% for profile_name,profile in inspec.controls.iteritems() %} 2 | {% if profile.enabled|default('False')|bool and profile.attributes is defined %} 3 | {% for name,value in profile.attributes.iteritems() %} 4 | {% if value is iterable and value is not string %} 5 | {{ name }}: 6 | {% for list_item in value %} 7 | - {{ list_item }} 8 | {% endfor %} 9 | {% else %} 10 | {{ name }}: {{ value }} 11 | {% endif %} 12 | {% endfor %} 13 | {% endif %} 14 | {% endfor %} 15 | -------------------------------------------------------------------------------- /roles/inspec/templates/etc/inspec/host-controls/controls/control.rb: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # license: Apache 2.0 3 | # title '{{ item }} controls' 4 | 5 | 6 | {% if inspec.controls[item].skip_controls|default([])|length > 0 %} 7 | include_controls "{{ inspec.profiles[inspec.controls[item].profile].name }}" do 8 | {% for control in inspec.controls[item].skip_controls %} 9 | skip_control "{{ control }}" 10 | {% endfor %} 11 | end 12 | {% elif inspec.controls[item].required_controls|default([])|length > 0 %} 13 | require_controls "{{ inspec.profiles[inspec.controls[item].profile].name }}" do 14 | {% for control in inspec.controls[item].required_controls %} 15 | control "{{ control }}" 16 | {% endfor %} 17 | end 18 | {% else %} 19 | include_controls "{{ inspec.profiles[inspec.controls[item].profile].name }}" do 20 | end 21 | {% endif %} 22 | -------------------------------------------------------------------------------- /roles/inspec/templates/etc/inspec/host-controls/inspec.yml: -------------------------------------------------------------------------------- 1 | name: Ursula 2 | title: Compliance Audit 3 | maintainer: Ursula Team 4 | copyright: IBM 5 | copyright_email: 6 | license: Apache 2.0 7 | summary: An InSpec Compliance Profile 8 | version: 0.1.0 9 | depends: 10 | - name: {{ inspec.profiles[inspec.controls[item].profile].name }} 11 | path: '/etc/inspec/profiles/{{ inspec.profiles[inspec.controls[item].profile].name }}-current' 12 | -------------------------------------------------------------------------------- /roles/iptables/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reload iptables 3 | shell: iptables-restore --noflush < 
/etc/network/iptables-firewall 4 | -------------------------------------------------------------------------------- /roles/iptables/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # firewall for public-facing controller nodes 3 | - name: iptables rules 4 | template: src=etc/network/iptables-firewall 5 | dest=/etc/network/iptables-firewall owner=root group=root mode=0644 6 | notify: reload iptables 7 | 8 | - name: iptables load on boot 9 | template: src=etc/network/if-up.d/iptables dest=/etc/network/if-up.d/iptables 10 | owner=root group=root mode=0755 11 | -------------------------------------------------------------------------------- /roles/iptables/templates/etc/network/if-up.d/iptables: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | iptables-restore < /etc/network/iptables-firewall 3 | -------------------------------------------------------------------------------- /roles/iptables/templates/etc/network/iptables-firewall: -------------------------------------------------------------------------------- 1 | # NOTE we are only using this to manage the mangle table 2 | # The filter table is managed by ufw-framework and the NAT table is managed by /etc/network/interfaces 3 | *mangle 4 | :PREROUTING ACCEPT [0:0] 5 | :INPUT ACCEPT [0:0] 6 | :FORWARD ACCEPT [0:0] 7 | :OUTPUT ACCEPT [0:0] 8 | :POSTROUTING ACCEPT [0:0] 9 | 10 | -A POSTROUTING -p udp --dport bootpc -j CHECKSUM --checksum-fill 11 | 12 | COMMIT 13 | -------------------------------------------------------------------------------- /roles/ipv6ra/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart-dnsmasq 3 | service: name=dnsmasq state=restarted 4 | -------------------------------------------------------------------------------- /roles/ipv6ra/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apt-repos 4 | repos: 5 | # IPv6 RA requires dnsmasq >= 2.64 6 | - repo: 'deb {{ apt_repos.bbg_openstack_ppa.repo }} precise main' 7 | key_url: '{{ apt_repos.bbg_openstack_ppa.key_url }}' 8 | when: ansible_distribution == "Ubuntu" and 9 | ansible_distribution_version == "12.04" 10 | -------------------------------------------------------------------------------- /roles/ipv6ra/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update dnsmasq to the ppa version 3 | package: name=dnsmasq state=latest 4 | register: result 5 | until: result|succeeded 6 | retries: 5 7 | 8 | - name: Configure dnsmasq 9 | template: src=etc/dnsmasq.d/internal-ipv6-ra.conf 10 | dest=/etc/dnsmasq.d/internal-ipv6-ra.conf owner=root group=root 11 | mode=0644 12 | notify: restart-dnsmasq 13 | 14 | - name: prevent dnsmasq from adding itself to /etc/resolv.conf via resolvconf 15 | lineinfile: dest=/etc/default/dnsmasq regexp="^DNSMASQ_EXCEPT=" 16 | line="DNSMASQ_EXCEPT=lo" 17 | notify: restart-dnsmasq 18 | when: ansible_distribution == 'Ubuntu' 19 | 20 | - name: Enable/start dnsmasq 21 | service: name=dnsmasq state=started enabled=yes 22 | -------------------------------------------------------------------------------- /roles/ipv6ra/templates/etc/dnsmasq.d/internal-ipv6-ra.conf: -------------------------------------------------------------------------------- 1 | dhcp-range=::,constructor:{{ hostvars[inventory_hostname][primary_interface].device
}},ra-stateless 2 | 3 | -------------------------------------------------------------------------------- /roles/ironic-common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart ironic services 3 | service: name={{ item }} state=restarted must_exist=false 4 | when: restart|default(True) 5 | with_items: 6 | - ironic-api 7 | - ironic-conductor 8 | 9 | - name: restart tftpd 10 | service: name=tftpd-hpa state=restarted 11 | -------------------------------------------------------------------------------- /roles/ironic-common/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for ironic 3 | logrotate: name=ironic path=/var/log/ironic/*.log 4 | args: 5 | options: 6 | - daily 7 | - missingok 8 | - rotate 7 9 | - compress 10 | - notifempty 11 | - delaycompress 12 | when: openstack_install_method != 'distro' 13 | 14 | -------------------------------------------------------------------------------- /roles/ironic-common/templates/etc/ironic/policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "admin_api": "role:admin or role:cloud_admin or role:service", 3 | "default": "rule:admin_api", 4 | "show_password": "!" 5 | } 6 | -------------------------------------------------------------------------------- /roles/ironic-common/templates/etc/sudoers.d/ironic: -------------------------------------------------------------------------------- 1 | {% if openstack_install_method != 'distro' %} 2 | ironic ALL=(root) NOPASSWD: /usr/local/bin/ironic-rootwrap 3 | {% else %} 4 | ironic ALL=(root) NOPASSWD: /bin/ironic-rootwrap 5 | {% endif %} 6 | 7 | -------------------------------------------------------------------------------- /roles/ironic-control/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/ironic-control/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/ironic-control/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: ironic-common 4 | - role: openstack-database 5 | database_name: ironic 6 | db_version_table: 'alembic_version' 7 | db_version_col: 'version_num' 8 | db_version: 'db versions' 9 | db_sync_cmd: 'ironic-dbsync' 10 | db_sync: 'upgrade' 11 | -------------------------------------------------------------------------------- /roles/ironic-data/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart apache 3 | service: name=apache2 state=restarted 4 | -------------------------------------------------------------------------------- /roles/ironic-data/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: ironic-common 4 | - role: apache 5 | -------------------------------------------------------------------------------- /roles/ironic-data/templates/etc/apache2/sites-available/ironic-ipxe.conf: -------------------------------------------------------------------------------- 1 | # NOTE(deva) This address may need to be changed to a different internal network 2 | # eg, the 
provisioning and cleaning network IPs 3 | Listen {{ endpoints.ironic.port.internal_ipxe }} 4 | 5 | 6 | 7 | DocumentRoot {{ ironic.httpboot_path }} 8 | 9 | AuthType None 10 | Require all granted 11 | Satisfy Any 12 | AllowOverride None 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /roles/ironic-data/templates/etc/default/tftpd-hpa: -------------------------------------------------------------------------------- 1 | # /etc/default/tftpd-hpa 2 | 3 | TFTP_USERNAME="tftp" 4 | TFTP_DIRECTORY="{{ ironic.tftpboot_path }}" 5 | TFTP_ADDRESS="{{ ironic.tftp_server }}:69" 6 | TFTP_OPTIONS="--map-file /{{ ironic.tftpboot_path }}/map-file" 7 | -------------------------------------------------------------------------------- /roles/ironic-data/templates/etc/ironic/boot.ipxe: -------------------------------------------------------------------------------- 1 | #!ipxe 2 | 3 | isset ${mac:hexhyp} && goto boot_system || 4 | chain ipxe.pxe 5 | 6 | # load the MAC-specific file or fail if it's not found 7 | :boot_system 8 | chain pxelinux.cfg/${mac:hexhyp} || goto inspector_ipa 9 | 10 | :inspector_ipa 11 | chain pxelinux.cfg/default || goto error_no_config 12 | 13 | :error_no_config 14 | echo PXE boot failed. No configuration found for MAC ${mac} 15 | echo Press any key to reboot... 16 | prompt --timeout 180 17 | reboot 18 | 19 | -------------------------------------------------------------------------------- /roles/ironic-data/templates/etc/ironic/map-file: -------------------------------------------------------------------------------- 1 | r ^([^/]) /tftpboot/\1 2 | r ^(/tftpboot/) /tftpboot/\2 3 | -------------------------------------------------------------------------------- /roles/keystone-defaults/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: openstack-meta 4 | -------------------------------------------------------------------------------- /roles/keystone-setup/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart keystone services 3 | service: 4 | name: "{{ openstack_meta.keystone.services.keystone_api[ursula_os].name }}" 5 | state: restarted 6 | must_exist: false 7 | when: restart|default(True) 8 | -------------------------------------------------------------------------------- /roles/keystone-setup/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: openstack-meta 4 | - role: keystone-defaults 5 | - role: endpoints 6 | -------------------------------------------------------------------------------- /roles/keystone/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Restart serially as to not take keystone completely offline 3 | - name: restart keystone services 4 | service: 5 | name: "{{ openstack_meta.keystone.services.keystone_api[ursula_os].name }}" 6 | state: restarted 7 | must_exist: false 8 | run_once: True 9 | delegate_to: "{{ item }}" 10 | when: restart|default(True) 11 | with_items: play_hosts 12 | 13 | - name: restart shibboleth 14 | service: name=shibd state=restarted 15 | when: restart|default(True) 16 | -------------------------------------------------------------------------------- /roles/keystone/tasks/ldap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: keystone domain 
specific conf dir 3 | file: dest=/etc/keystone/domains state=directory 4 | 5 | - name: keystone ldap domain conf file 6 | template: src="etc/keystone/domains/keystone.domain.conf" 7 | dest="/etc/keystone/domains/keystone.{{ keystone.ldap_domain.domain }}.conf" 8 | notify: restart keystone services 9 | -------------------------------------------------------------------------------- /roles/keystone/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for keystone 3 | logrotate: name=keystone path=/var/log/keystone/*.log 4 | args: 5 | options: 6 | - daily 7 | - rotate 7 8 | - missingok 9 | - compress 10 | - minsize 100k 11 | - copytruncate 12 | when: openstack_install_method != 'distro' 13 | 14 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/apache2/openidc/metadata/template.client: -------------------------------------------------------------------------------- 1 | { 2 | "client_id": "{{ item.client_id }}", 3 | "client_secret": "{{ item.client_secret }}" 4 | 5 | } 6 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/apache2/openidc/metadata/template.conf: -------------------------------------------------------------------------------- 1 | { 2 | "scope": "{{ item.scope }}", 3 | "response_type": "{{ item.response_type }}", 4 | "ssl_validate_server": {{ item.ssl_validate_server}} 5 | } 6 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/apache2/openidc/metadata/template.provider: -------------------------------------------------------------------------------- 1 | { 2 | "issuer": "{{ item.issuer }}", 3 | "authorization_endpoint": "{{ item.authorization_endpoint}}", 4 | "token_endpoint": "{{ item.token_endpoint }}", 5 | "introspection_endpoint": "{{ item.introspection_endpoint }}" 6 | } 7 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/collectd/plugins/keystone.conf: -------------------------------------------------------------------------------- 1 | 2 | Globals true 3 | 4 | 5 | 6 | ModulePath "/opt/ursula-monitoring/collectd/plugins/openstack" 7 | 8 | Import "keystone_plugin" 9 | 10 | 11 | Username "{{ monitoring.openstack.user.username }}" 12 | Password "{{ monitoring.openstack.user.password }}" 13 | TenantName "{{ monitoring.openstack.user.tenant }}" 14 | AuthURL "{{ endpoints.auth_uri }}" 15 | Verbose "False" 16 | {% if client.self_signed_cert %} 17 | 18 | CACert "/opt/stack/ssl/openstack.crt" 19 | {% endif %} 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/cron.d/drop-expired-keystone-tokens: -------------------------------------------------------------------------------- 1 | PATH=/sbin:/usr/sbin:/bin:{{ (ursula_os == 'rhel' ) | ternary('/usr/bin/', '/usr/local/bin/') }} 2 | 3 | 4 | # Drop the tokens every 5 minutes 5 | */5 * * * * root if ip a | grep {{ undercloud_floating_ip | default(floating_ip) }} > /dev/null 2>&1; then keystone-manage token_flush; fi 6 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/init/keystone.conf: -------------------------------------------------------------------------------- 1 | description "uwsgi for keystone" 2 | 3 | 4 | start on runlevel [2345] 5 | stop on runlevel [!2345] 6 | 7 
| respawn 8 | 9 | pre-start script 10 | if [ ! -d /run/uwsgi/keystone ]; then 11 | mkdir -p /run/uwsgi/keystone 12 | chown keystone /run/uwsgi/keystone 13 | chmod 775 /run/uwsgi/keystone 14 | fi 15 | end script 16 | 17 | post-stop script 18 | if [ -d /run/uwsgi/keystone ]; then 19 | rm -r /run/uwsgi/keystone 20 | fi 21 | end script 22 | 23 | exec {{ keystone_uwsgi_path }} --uid keystone --gid keystone --master --emperor /etc/keystone/uwsgi 24 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/keystone/keystone-saml.crt: -------------------------------------------------------------------------------- 1 | {{ keystone.federation.idp.k2k.saml_signing_cert }} 2 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/keystone/keystone-saml.pem: -------------------------------------------------------------------------------- 1 | {{ keystone.federation.idp.k2k.saml_signing_cert }} 2 | {{ keystone.federation.idp.k2k.saml_signing_key }} 3 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/keystone/sso_callback_template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Keystone WebSSO redirect 5 | 6 | 7 |
8 | Please wait... 9 |
10 | 11 | 15 |
16 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/shibboleth/attribute-map.xml: -------------------------------------------------------------------------------- 1 | 2 | {% for attribute in keystone.federation.sp.saml.providers[0].attributes -%} 3 | 4 | {% endfor %} 5 | 6 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/shibboleth/idp_metadata.xml: -------------------------------------------------------------------------------- 1 | {{ keystone.federation.sp.saml.providers[0].metadata_file_content }} 2 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/shibboleth/sp-cert.pem: -------------------------------------------------------------------------------- 1 | {{ keystone.federation.sp.saml.shibboleth_crt }} 2 | -------------------------------------------------------------------------------- /roles/keystone/templates/etc/shibboleth/sp-key.pem: -------------------------------------------------------------------------------- 1 | {{ keystone.federation.sp.saml.shibboleth_crt }} 2 | {{ keystone.federation.sp.saml.shibboleth_key }} 3 | -------------------------------------------------------------------------------- /roles/keystone/templates/usr/lib/tmpfiles.d/openstack-keystone.conf: -------------------------------------------------------------------------------- 1 | d /run/uwsgi/keystone 755 keystone keystone 2 | -------------------------------------------------------------------------------- /roles/logging-config/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart logstash-forwarder 3 | service: name=logstash-forwarder state=restarted must_exist=false 4 | failed_when: false 5 | 6 | - name: restart filebeat 7 | service: name=filebeat state=restarted must_exist=false 8 | failed_when: false 9 | -------------------------------------------------------------------------------- /roles/logging-config/tasks/filebeat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "install {{ service }} log template" 3 | template: 4 | src: etc/filebeat/filebeat.d/template.yml 5 | dest: "/etc/filebeat/filebeat.d/{{ service }}.yml" 6 | notify: restart filebeat 7 | 8 | - name: "remove {{ service }} log template for logstash-forwarder" 9 | file: 10 | path: "/etc/logstash-forwarder.d/{{ service }}.conf" 11 | state: absent 12 | notify: restart logstash-forwarder 13 | -------------------------------------------------------------------------------- /roles/logging-config/tasks/logstash.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: logstash-forwarder config directory 3 | file: dest=/etc/logstash-forwarder.d state=directory mode=0755 4 | 5 | - name: install log template 6 | template: src=etc/logstash-forwarder.d/template.conf dest=/etc/logstash-forwarder.d/{{ service }}.conf 7 | notify: restart logstash-forwarder 8 | 9 | -------------------------------------------------------------------------------- /roles/logging-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: logstash.yml 2 | when: logging.forward_type|default('filebeat') != 'filebeat' 3 | 4 | - include: filebeat.yml 5 | when: logging.forward_type|default('filebeat') == 'filebeat' 6 | 7 | 
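The logging-config tasks above, together with the two templates that follow, are driven entirely by the service and logdata variables supplied by whatever role or play includes them. A minimal sketch of such an invocation; the glance name, log path and field below are purely illustrative and not taken from this repository's callers:

    # meta/main.yml of a hypothetical consuming role
    dependencies:
      - role: logging-config
        service: glance
        logdata:
          - paths:
              - /var/log/glance/*.log
            fields:
              tags: glance,openstack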
-------------------------------------------------------------------------------- /roles/logging-config/templates/etc/filebeat/filebeat.d/template.yml: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | --- 3 | filebeat: 4 | {% if logdata | length > 0 %} 5 | prospectors: 6 | {% for log in logdata %} 7 | - fields_under_root: true 8 | paths: {{ log.paths | to_yaml }} 9 | {%- if log.fields is defined %}{% set _dummy = log.fields.update(logging.follow.global_fields) %}{% endif %} 10 | fields: {{ log.fields | default(logging.follow.global_fields) | to_yaml }} 11 | {% endfor %} 12 | {% endif %} 13 | 14 | -------------------------------------------------------------------------------- /roles/logging-config/templates/etc/logstash-forwarder.d/template.conf: -------------------------------------------------------------------------------- 1 | { 2 | "files": [ 3 | {% for item in logdata %} 4 | { 5 | "paths": 6 | {{ item.paths | to_nice_json }} 7 | , 8 | "fields": 9 | {% if item.fields and logging.follow.global_fields %} 10 | {# Merge the two field dicts together #} 11 | {% set _dummy = item.fields.update(logging.follow.global_fields) %} 12 | {{ item.fields | to_nice_json }} 13 | {% elif logging.follow.global_fields %} 14 | {{ logging.follow.global_fields | to_nice_json }} 15 | {% endif %} 16 | }{% if not loop.last %},{% endif %} 17 | 18 | {% endfor -%} 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /roles/logging/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: refresh cert auths 3 | command: update-ca-certificates 4 | when: ursula_os == 'ubuntu' 5 | 6 | - name: restart logstash-forwarder 7 | service: name=logstash-forwarder state=restarted must_exist=false 8 | 9 | - name: stop logstash-forwarder 10 | service: name=logstash-forwarder state=stopped must_exist=false 11 | 12 | - name: start logstash-forwarder 13 | service: name=logstash-forwarder state=started must_exist=false 14 | 15 | - name: restart rsyslog 16 | service: name=rsyslog state=restarted 17 | 18 | - name: restart filebeat 19 | service: name=filebeat state=restarted must_exist=false 20 | failed_when: false 21 | 22 | -------------------------------------------------------------------------------- /roles/logging/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: repos 4 | repos: 5 | - repo: 'deb {{ apt_repos.elastic.repo }} stable main' 6 | key_url: '{{ apt_repos.elastic.key_url }}' 7 | when: ursula_os == 'ubuntu' 8 | - role: sensu-check 9 | - role: repos 10 | repo: elastic 11 | when: ursula_os == 'rhel' 12 | -------------------------------------------------------------------------------- /roles/logging/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: logstash.yml 2 | when: logging.forward_type != 'filebeat' 3 | 4 | - include: filebeat.yml 5 | when: logging.forward_type == 'filebeat' 6 | 7 | - name: configure remote syslog forwarding 8 | template: 9 | src: etc/rsyslog.d/60-remote-syslog.conf 10 | dest: /etc/rsyslog.d 11 | mode: 0640 12 | notify: restart rsyslog 13 | when: logging.syslog_forwarding.enabled 14 | 15 | - include: monitoring.yml 16 | tags: 17 | - monitoring 18 | - sensu-checks 19 | when: monitoring.enabled|default('True')|bool 20 | 
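The logging main task list above only installs 60-remote-syslog.conf when logging.syslog_forwarding.enabled is true. The keys that template (shown further below) consumes can be sketched as follows; the host, port, selector, work directory and queue directive are placeholder values, not project defaults:

    # illustrative logging settings for remote syslog forwarding
    logging:
      forward_type: filebeat
      syslog_forwarding:
        enabled: true
        work_directory: /var/spool/rsyslog
        selector: "*.*"
        host: syslog.example.com
        port: 514
        config_vars:
          - name: ActionQueueType
            value: LinkedList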
-------------------------------------------------------------------------------- /roles/logging/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install logstash-forwarder process check 3 | sensu_process_check: service=logstash-forwarder 4 | when: logging.forward_type != 'filebeat' 5 | 6 | - name: remove filebeat process check 7 | sensu_process_check: service=filebeat state=absent 8 | when: logging.forward_type != 'filebeat' 9 | 10 | - name: install filebeat process check 11 | sensu_process_check: service=filebeat 12 | when: logging.forward_type == 'filebeat' 13 | 14 | - name: remove logstash-forwarder process check 15 | sensu_process_check: service=logstash-forwarder state=absent 16 | when: logging.forward_type == 'filebeat' 17 | -------------------------------------------------------------------------------- /roles/logging/templates/etc/filebeat/filebeat.yml: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | --- 3 | output: 4 | logstash: 5 | hosts: 6 | - "{{ logging.forward.host }}:{{ logging.forward.port }}" 7 | {% if logging.forward.tls.enabled|bool and logging.forward.tls.ca_cert %} 8 | ssl: 9 | certificate_authorities: 10 | - "/usr/local/share/ca-certificates/logging-forward.crt" 11 | {% endif %} 12 | 13 | filebeat: 14 | config_dir: "{{ logging.forward.config_dir }}" 15 | {% if logging.follow.logs | length > 0 %} 16 | prospectors: 17 | {% for log in logging.follow.logs %} 18 | - fields_under_root: true 19 | paths: {{ log.paths | to_yaml }} 20 | {%- if log.fields is defined %}{% set _dummy = log.fields.update(logging.follow.global_fields) %}{% endif %} 21 | fields: {{ log.fields | default(logging.follow.global_fields) | to_yaml }} 22 | {% endfor %} 23 | {% endif %} 24 | 25 | -------------------------------------------------------------------------------- /roles/logging/templates/etc/init/logstash-forwarder.conf: -------------------------------------------------------------------------------- 1 | description "Logstash Forwarding Agent" 2 | author "Myles Steinhauser" 3 | 4 | setuid logstash 5 | setgid adm 6 | 7 | respawn 8 | respawn limit 3 30 9 | 10 | start on runlevel [2345] 11 | stop on runlevel [!2345] 12 | 13 | chdir /var/lib/logstash 14 | 15 | {% if logging.version == '0.3.1' %} 16 | exec /opt/logstash-forwarder/bin/logstash-forwarder.sh -config /etc/logstash-forwarder.d 17 | {% else %} 18 | exec /opt/logstash-forwarder/bin/logstash-forwarder -config /etc/logstash-forwarder.d 19 | {% endif %} 20 | -------------------------------------------------------------------------------- /roles/logging/templates/etc/rsyslog.d/20-ufw.conf: -------------------------------------------------------------------------------- 1 | # Log kernel generated UFW log messages to file 2 | :msg,contains,"[UFW " /var/log/ufw.log 3 | 4 | # Uncomment the following to stop logging anything that matches the last rule. 
5 | # Doing this will stop logging kernel generated UFW log messages to the file 6 | # normally containing kern.* messages (eg, /var/log/kern.log) 7 | & ~ 8 | -------------------------------------------------------------------------------- /roles/logging/templates/etc/rsyslog.d/60-remote-syslog.conf: -------------------------------------------------------------------------------- 1 | $WorkDirectory {{ logging.syslog_forwarding.work_directory }} 2 | 3 | {% for action_var in logging.syslog_forwarding.config_vars %} 4 | ${{ action_var.name}} {{ action_var.value }} 5 | {% endfor %} 6 | 7 | {{ logging.syslog_forwarding.selector }} @@{{ logging.syslog_forwarding.host }}:{{ logging.syslog_forwarding.port }} 8 | -------------------------------------------------------------------------------- /roles/logging/templates/logging-forward.crt: -------------------------------------------------------------------------------- 1 | {{ logging.forward.tls.ca_cert }} 2 | -------------------------------------------------------------------------------- /roles/magnum/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart magnum services 3 | service: name={{ item }} state=restarted must_exist=false 4 | with_items: 5 | - magnum-api 6 | - magnum-conductor 7 | when: restart|default(True) 8 | -------------------------------------------------------------------------------- /roles/magnum/templates/etc/magnum/api-paste.ini: -------------------------------------------------------------------------------- 1 | [pipeline:main] 2 | pipeline = cors healthcheck request_id authtoken api_v1 3 | 4 | [app:api_v1] 5 | paste.app_factory = magnum.api.app:app_factory 6 | 7 | [filter:authtoken] 8 | acl_public_routes = /, /v1 9 | paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory 10 | 11 | [filter:request_id] 12 | paste.filter_factory = oslo_middleware:RequestId.factory 13 | 14 | [filter:cors] 15 | paste.filter_factory = oslo_middleware.cors:filter_factory 16 | oslo_config_project = magnum 17 | 18 | [filter:healthcheck] 19 | paste.filter_factory = oslo_middleware:Healthcheck.factory 20 | backends = disable_by_file 21 | disable_by_file_path = /etc/magnum/healthcheck_disable 22 | -------------------------------------------------------------------------------- /roles/manage-disks/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | manage_disks: 3 | enabled: False 4 | defaults: 5 | pesize: 4 6 | loopback: 7 | enabled: False 8 | file: /tmp/loopback 9 | device: /dev/loop2 10 | size: 1G 11 | volume_groups: [] 12 | # EXAMPLE: 13 | # - name: test 14 | # pvs: 15 | # - /dev/loop2 16 | logical_volumes: [] 17 | # EXAMPLE: 18 | # - name: test 19 | # volume_group: test 20 | # size: 500m 21 | # filesystem: ext4 22 | # filesystem_opts: 23 | # mount_point: /mnt/test 24 | # mount_opts: 25 | -------------------------------------------------------------------------------- /roles/manage-disks/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/manage-disks/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/memcached/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | memcached: 3 | gem: 4 | url: https://rubygems.org/downloads/memcached-1.8.0.gem 5 | name: memcached-1.8.0.gem 6 | bind_ip: '0.0.0.0' 7 | monitoring: 8 | sensu_checks: 9 | memcached_stats: 10 | criticality: 'critical' 11 | -------------------------------------------------------------------------------- /roles/memcached/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart memcached 3 | action: service name=memcached state=restarted enabled=yes 4 | -------------------------------------------------------------------------------- /roles/memcached/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: monitoring-common 4 | when: monitoring.enabled|default(True)|bool 5 | -------------------------------------------------------------------------------- /roles/memcached/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: memcached gem for sensu check 3 | gem: name=memcached user_install=no include_dependencies=yes 4 | executable=/opt/sensu/embedded/bin/gem 5 | environment: 6 | PATH: "/opt/sensu/embedded/bin:{{ ansible_env.PATH }}" 7 | register: result 8 | until: result|succeeded 9 | retries: 5 10 | 11 | - name: memcached stats check 12 | sensu_check: name=memcached-stats plugin=check-memcached-stats.rb 13 | args='--host {{ primary_ip }} --criticality {{ memcached.monitoring.sensu_checks.memcached_stats.criticality }}' 14 | notify: restart sensu-client 15 | 16 | - name: memcached graphite check 17 | sensu_check: name=memcached-graphite plugin=memcached-graphite.rb 18 | args='--host {{ primary_ip }} --scheme {{ monitoring.graphite.cluster_prefix }}' 19 | notify: restart sensu-client 20 | -------------------------------------------------------------------------------- /roles/memcached/templates/etc/sysconfig/memcached: -------------------------------------------------------------------------------- 1 | PORT="{{ memcached.port }}" 2 | USER="{{ memcached.user }}" 3 | MAXCONN="{{ memcached.max_connections }}" 4 | CACHESIZE="{{ memcached.memory }}" 5 | OPTIONS="-l {{ memcached.bind_ip }}" 6 | -------------------------------------------------------------------------------- /roles/mongodb-arbiter/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: mongodb-common 4 | -------------------------------------------------------------------------------- /roles/mongodb-arbiter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add mongodb arbiter to replica set 3 | mongodb_replication: 4 | login_host: "{{ mongodb.endpoint_addr }}" 5 | login_port: "{{ mongodb.port }}" 6 | login_user: "admin" 7 | login_password: "{{ mongodb.db_password }}" 8 | replica_set: "{{ mongodb.replica_name }}" 9 | host_name: "{{ primary_ip }}" 10 | host_port: "{{ mongodb.port }}" 11 | host_type: "arbiter" 12 | when: inventory_hostname in groups['mongo_arbiter'] 13 | no_log: true 14 | -------------------------------------------------------------------------------- /roles/mongodb-common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart mongodb service 3 | service: name=mongod state=restarted 4 | 
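For context on the memcached monitoring tasks above: check-memcached-stats.rb and memcached-graphite.rb are Ruby plugins shipped outside this repo, but the data they read ultimately comes from memcached's plain-text "stats" command. The standalone Python sketch below polls those stats directly; the host, port and the two keys printed at the end are illustrative assumptions, not what the Ruby plugins actually alert on.

import socket

def memcached_stats(host="127.0.0.1", port=11211, timeout=5):
    """Return the STAT name/value pairs reported by a memcached instance."""
    with socket.create_connection((host, port), timeout=timeout) as sock:
        sock.sendall(b"stats\r\n")
        data = b""
        while not data.endswith(b"END\r\n"):
            chunk = sock.recv(4096)
            if not chunk:           # connection closed early
                break
            data += chunk
    stats = {}
    for line in data.decode().splitlines():
        if line.startswith("STAT "):
            _, key, value = line.split(" ", 2)
            stats[key] = value
    return stats

if __name__ == "__main__":
    stats = memcached_stats()
    print("uptime:", stats.get("uptime"), "curr_connections:", stats.get("curr_connections"))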
-------------------------------------------------------------------------------- /roles/mongodb-common/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: endpoints 4 | - role: monitoring-common 5 | when: monitoring.enabled|default(True)|bool 6 | - role: logging-config 7 | when: logging.enabled|default(True)|bool 8 | service: mongodb 9 | logdata: "{{ mongodb.logs }}" 10 | - role: inspec 11 | install_inspec_controls: [mongodb] 12 | tags: inspec 13 | when: 14 | - inspec.enabled|bool 15 | - inspec.controls.mongodb.enabled|bool 16 | - role: apt-repos 17 | repos: 18 | - repo: 'deb {{ apt_repos.mongodb.repo }} {{ ansible_distribution_release }}/mongodb-org/3.0 multiverse' 19 | key_url: '{{ apt_repos.mongodb.key_url }}' 20 | key_id: '7F0CEB10' 21 | validate_certs: no 22 | -------------------------------------------------------------------------------- /roles/mongodb-common/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for mongodb 3 | logrotate: name=mongodb path=/var/log/mongodb/*.log 4 | args: 5 | options: 6 | - daily 7 | - missingok 8 | - rotate 7 9 | - compress 10 | - notifempty 11 | - delaycompress 12 | - copytruncate 13 | -------------------------------------------------------------------------------- /roles/mongodb-common/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: mongod process check 3 | sensu_process_check: service=mongod 4 | -------------------------------------------------------------------------------- /roles/mongodb-server/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: mongodb-common 4 | -------------------------------------------------------------------------------- /roles/mongodb-server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: primary.yml 3 | when: inventory_hostname in groups['mongo_db'][0] 4 | 5 | - name: add mongodb secondary servers to replica set 6 | mongodb_replication: 7 | login_host: "{{ mongodb.endpoint_addr }}" 8 | login_port: "{{ mongodb.port }}" 9 | login_user: "admin" 10 | login_password: "{{ mongodb.db_password }}" 11 | replica_set: "{{ mongodb.replica_name }}" 12 | host_name: "{{ primary_ip }}" 13 | host_port: "{{ mongodb.port }}" 14 | host_type: "replica" 15 | when: inventory_hostname not in groups['mongo_db'][0] and inventory_hostname in groups['mongo_db'] 16 | no_log: true 17 | -------------------------------------------------------------------------------- /roles/monitoring-common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart sensu-client 3 | service: name=sensu-client state=restarted sleep=2 4 | -------------------------------------------------------------------------------- /roles/neutron-common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart neutron services 3 | service: name={{ item.name }} state=restarted_if_running must_exist=false 4 | when: restart|default(True) 5 | failed_when: false 6 | with_items: 7 | - "{{ neutron.services.neutron_server }}" 8 | - "{{ neutron.services.neutron_dhcp_agent }}" 9 | - "{{ neutron.services.neutron_l3_agent }}" 10 | - "{{ 
neutron.services.neutron_openvswitch_agent }}" 11 | - "{{ neutron.services.neutron_linuxbridge_agent }}" 12 | - "{{ neutron.services.neutron_metadata_agent }}" 13 | - "{{ neutron.services.neutron_lbaasv2_agent }}" 14 | -------------------------------------------------------------------------------- /roles/neutron-common/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for neutron 3 | logrotate: name=neutron path=/var/log/neutron/*.log 4 | args: 5 | options: 6 | - daily 7 | - missingok 8 | - rotate 7 9 | - compress 10 | - notifempty 11 | - delaycompress 12 | when: openstack_install_method != 'distro' 13 | 14 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/etc/modprobe.d/blacklist-openvswitch.conf: -------------------------------------------------------------------------------- 1 | blacklist openvswitch 2 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/etc/neutron/dhcp_agent.ini: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | [DEFAULT] 4 | 5 | debug = {{ neutron.logging.debug }} 6 | 7 | enable_isolated_metadata = True 8 | 9 | state_path = {{ neutron.state_path }} 10 | 11 | # Interface driver 12 | {% if neutron.plugin == 'ml2' %} 13 | interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver 14 | {% endif %} 15 | 16 | # DHCP driver and extra configuration 17 | dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq 18 | 19 | dnsmasq_config_file = /etc/dnsmasq.conf 20 | dhcp_delete_namespaces = True 21 | resync_interval = 50 22 | 23 | [AGENT] 24 | {% if openstack_install_method == 'distro' %} 25 | root_helper = sudo /bin/neutron-rootwrap /etc/neutron/rootwrap.conf 26 | {% else %} 27 | root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf 28 | {% endif %} 29 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/etc/neutron/l3_agent.ini: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | debug = {{ neutron.logging.debug }} 3 | 4 | state_path = {{ neutron.state_path }} 5 | 6 | # Interface driver # 7 | {% if neutron.plugin == 'ml2' %} 8 | interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver 9 | {% endif %} 10 | 11 | {% if neutron.plugin == 'ml2' %} 12 | # External network bridge (leave blank) # 13 | external_network_bridge = 14 | {% endif %} 15 | 16 | router_delete_namespaces = True 17 | 18 | # Layer-3 high-availability (VRRP) 19 | send_arp_for_ha = 3 20 | agent_mode = legacy 21 | ha_confs_path = $state_path/ha_confs 22 | ha_keepalived_state_change_server_threads = {{ neutron.ha_keepalive_threads }} 23 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/etc/neutron/metadata_agent.ini: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | [DEFAULT] 4 | debug = {{ neutron.logging.debug }} 5 | 6 | # Workers 7 | metadata_workers = {{ neutron.metadata_workers }} 8 | 9 | nova_metadata_ip = {{ endpoints.main }} 10 | metadata_proxy_shared_secret = {{ secrets.metadata_proxy_shared_secret }} 11 | 12 | nova_metadata_port = 8775 13 | 14 | nova_metadata_protocol = http 15 | 16 | [cache] 17 | backend = oslo_cache.memcache_pool 18 | enabled = True 
19 | memcache_servers = {{ hostvars|ursula_memcache_hosts(groups, memcached.port) }} 20 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/etc/neutron/neutron_lbaas.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | 3 | [quotas] 4 | 5 | [service_providers] 6 | service_provider = {{ neutron.lbaas.service_provider }} 7 | 8 | [haproxy] 9 | jinja_config_template = /etc/neutron/lbaas_templates/haproxy.loadbalancer.j2 10 | 11 | [certificates] 12 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/etc/neutron/rootwrap-ursula.d/ursula.filters: -------------------------------------------------------------------------------- 1 | {% set basepath = openstack_source.virtualenv_base if 2 | openstack_install_method == 'source' else 3 | openstack_package.virtualenv_base %} 4 | ## Filters we add to deal with Ursula install paths 5 | 6 | [Filters] 7 | kill_ursulapaths: KillFilter, root, {{ basepath }}/neutron/bin/python, -9, -15 8 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/etc/sudoers.d/neutron: -------------------------------------------------------------------------------- 1 | neutron ALL=(root) NOPASSWD: /sbin/brctl 2 | neutron ALL=(root) NOPASSWD: /sbin/bridge 3 | neutron ALL=(root) NOPASSWD: /sbin/ip 4 | {% if openstack_install_method != 'distro' %} 5 | neutron ALL=(root) NOPASSWD: /usr/local/bin/neutron-rootwrap 6 | {% else %} 7 | neutron ALL=(root) NOPASSWD: /bin/neutron-rootwrap 8 | {% endif %} 9 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/opt/stack/neutron/fix-ovs-klm-version-detection.patch: -------------------------------------------------------------------------------- 1 | diff --git a/neutron/agent/linux/ovs_lib.py b/neutron/agent/linux/ovs_lib.py 2 | index 0976307..5dcc6c8 100644 3 | --- a/neutron/agent/linux/ovs_lib.py 4 | +++ b/neutron/agent/linux/ovs_lib.py 5 | @@ -390,6 +390,7 @@ def get_installed_ovs_klm_version(): 6 | if 'version: ' in line and not 'srcversion' in line: 7 | ver = re.findall("\d+\.\d+", line) 8 | return ver[0] 9 | + return '2.0' 10 | except Exception: 11 | LOG.exception(_("Unable to retrieve OVS kernel module version.")) 12 | 13 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/usr/local/bin/neutron-restart-all: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service {{ neutron.services.neutron_server.name }} restart 4 | service {{ neutron.services.neutron_linuxbridge_agent.name }} restart 5 | service {{ neutron.services.neutron_metadata_agent.name }} restart 6 | 7 | # neutron-dhcp-agent and neutron-l3-agent will be restarted 8 | # when neutron-linuxbridge-agent is restarted due to service deps 9 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/usr/local/bin/neutron-start-all: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service {{ neutron.services.neutron_server.name }} start 4 | service {{ neutron.services.neutron_linuxbridge_agent.name }} start 5 | service {{ neutron.services.neutron_metadata_agent.name }} start 6 | 7 | # neutron-dhcp-agent and neutron-l3-agent will be started 8 | # when 
neutron-linuxbridge-agent is started due to service deps 9 | -------------------------------------------------------------------------------- /roles/neutron-common/templates/usr/local/bin/neutron-stop-all: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service {{ neutron.services.neutron_server.name }} stop 4 | service {{ neutron.services.neutron_linuxbridge_agent.name }} stop 5 | service {{ neutron.services.neutron_metadata_agent.name }} stop 6 | 7 | # neutron-dhcp-agent and neutron-l3-agent will be stopped 8 | # when neutron-linuxbridge-agent is stopped due to service deps 9 | -------------------------------------------------------------------------------- /roles/neutron-control/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: openstack-database 4 | database_name: neutron 5 | - role: inspec 6 | install_inspec_controls: [neutron_control] 7 | tags: inspec 8 | when: 9 | - inspec.enabled|bool 10 | - inspec.controls.neutron_control.enabled|bool 11 | - role: neutron-common 12 | - role: sensu-check 13 | - role: collectd-plugin 14 | when: collectd is defined and collectd.enabled|bool 15 | - role: openstack-firewall 16 | rule_name: neutron 17 | rules_type_input: 18 | - { protocol: tcp, port: "{{ endpoints.neutron.port.haproxy_api }}" } 19 | -------------------------------------------------------------------------------- /roles/neutron-control/templates/check-neutron-l3-routers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # check neutron l3 agents on routers 4 | # only run on node with the floating ip 5 | 6 | if ifconfig | grep {{ undercloud_floating_ip | default(floating_ip) }} > /dev/null 2>&1; then 7 | /etc/sensu/plugins/check-neutron-l3-routers.py -r {{ sensu_checks.neutron.check_neutron_l3_routers.max_routers }} \ 8 | -d {{ sensu_checks.neutron.check_neutron_l3_routers.delay_seconds }} 9 | fi; 10 | -------------------------------------------------------------------------------- /roles/neutron-control/templates/etc/collectd/plugins/neutron.conf: -------------------------------------------------------------------------------- 1 | <LoadPlugin python> 2 | Globals true 3 | </LoadPlugin> 4 | 5 | <Plugin python> 6 | ModulePath "/opt/ursula-monitoring/collectd/plugins/openstack" 7 | 8 | Import "neutron_plugin" 9 | 10 | <Module neutron_plugin> 11 | Username "{{ monitoring.openstack.user.username }}" 12 | Password "{{ monitoring.openstack.user.password }}" 13 | TenantName "{{ monitoring.openstack.user.tenant }}" 14 | AuthURL "{{ endpoints.auth_uri }}" 15 | Verbose "False" 16 | {% if client.self_signed_cert %} 17 | 18 | CACert "/opt/stack/ssl/openstack.crt" 19 | {% endif %} 20 | </Module> 21 | 22 | </Plugin> 23 | -------------------------------------------------------------------------------- /roles/neutron-data-network/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ifup br-ex 3 | command: ifup br-ex 4 | failed_when: False 5 | 6 | - name: restart neutron lbaas agent 7 | service: 8 | name: "{{ neutron.services.neutron_lbaasv2_agent.name }}" 9 | state: restarted_if_running 10 | when: restart|default('True') and ((neutron.lbaas.enabled == "smart" and 11 | groups['controller'][0] not in groups['compute']) or 12 | neutron.lbaas.enabled|bool) 13 | 14 | - name: restart xorp 15 | service: name=xorp state=restarted sleep=10 16 | ignore_errors: True 17 |
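The check-neutron-l3-routers.sh wrapper above only runs the real Sensu plugin on the host that currently owns the floating IP, so the router check fires once per cluster rather than on every controller. Below is a standalone Python sketch of the same gating idea; it parses "ip -o addr show" instead of ifconfig, and the address, thresholds and plugin path are placeholders rather than values supplied by the role.

import subprocess

def host_owns_ip(address):
    """True if the given address is configured on any local interface."""
    out = subprocess.run(["ip", "-o", "addr", "show"],
                         capture_output=True, text=True, check=True).stdout
    return any(f" {address}/" in line for line in out.splitlines())

if __name__ == "__main__":
    floating_ip = "203.0.113.10"   # placeholder for undercloud_floating_ip
    if host_owns_ip(floating_ip):
        subprocess.run(["/etc/sensu/plugins/check-neutron-l3-routers.py",
                        "-r", "20", "-d", "30"], check=False)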
-------------------------------------------------------------------------------- /roles/neutron-data-network/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: neutron-common 4 | -------------------------------------------------------------------------------- /roles/neutron-data-network/tasks/dnsmasq.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - block: 3 | - name: prevent dnsmasq from adding itself to /etc/resolv.conf via resolvconf 4 | lineinfile: dest=/etc/default/dnsmasq regexp="^DNSMASQ_EXCEPT=" 5 | line="DNSMASQ_EXCEPT=lo" 6 | 7 | - name: delete localhost dnsmasq nameserver 8 | command: resolvconf -d lo.dnsmasq 9 | when: ursula_os == 'ubuntu' 10 | 11 | - name: neutron dnsmasq config 12 | template: src=etc/dnsmasq.conf dest=/etc/dnsmasq.conf mode=0644 13 | notify: 14 | - restart neutron services 15 | -------------------------------------------------------------------------------- /roles/neutron-data-network/tasks/igmp-router.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install XORP to provide IGMP router functionality 3 | package: name=xorp 4 | register: result 5 | until: result|succeeded 6 | retries: 5 7 | 8 | - name: configure xorp 9 | template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot 10 | notify: 11 | - restart xorp 12 | 13 | - name: set xorp defaults 14 | lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes 15 | notify: 16 | - restart xorp 17 | 18 | - meta: flush_handlers 19 | 20 | - name: start and enable xorp service 21 | service: name=xorp state=started enabled=yes 22 | retries: 2 23 | delay: 10 24 | -------------------------------------------------------------------------------- /roles/neutron-data-network/templates/etc/dnsmasq.conf: -------------------------------------------------------------------------------- 1 | {% for server in neutron.dhcp_dns_servers -%} 2 | server={{ server }} 3 | {% endfor -%} 4 | -------------------------------------------------------------------------------- /roles/neutron-data-network/templates/etc/init/ipchanged.conf: -------------------------------------------------------------------------------- 1 | start on runlevel [2345] 2 | stop on runlevel [016] 3 | 4 | respawn 5 | 6 | exec start-stop-daemon --start \ 7 | --exec /usr/local/sbin/ipchanged 8 | -------------------------------------------------------------------------------- /roles/neutron-data-network/templates/etc/ipchanged/add_internal_floating_ip: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Send gratuitous ARP to update other nodes' ARP cache 4 | arping -A -U -I {{ hostvars[inventory_hostname][primary_interface].device }} {{ undercloud_floating_ip }} -c 10 5 | -------------------------------------------------------------------------------- /roles/neutron-data-network/templates/etc/neutron/services/loadbalancer/haproxy/lbaas_agent.ini: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | interface_driver = {{ neutron.lbaas.interface_driver }} 3 | 4 | {% if ursula_os == 'rhel' %} 5 | user_group = nobody 6 | {% endif %} 7 | 8 | [haproxy] 9 | -------------------------------------------------------------------------------- /roles/neutron-data-network/templates/usr/local/libexec/ucarp-vip-down: -------------------------------------------------------------------------------- 1 |
#!/bin/sh 2 | exec 2>/dev/null 3 | 4 | /sbin/ip address del "$2"/32 dev "$1" label "$1":ucarp 5 | 6 | -------------------------------------------------------------------------------- /roles/neutron-data-network/templates/usr/local/libexec/ucarp-vip-up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | exec 2>/dev/null 3 | 4 | /sbin/ip address add "$2"/32 dev "$1" label "$1":ucarp 5 | 6 | -------------------------------------------------------------------------------- /roles/neutron-data/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | neutron: 3 | l2_population: False 4 | tunnel_types: [] 5 | -------------------------------------------------------------------------------- /roles/neutron-data/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apt-repos 4 | repos: 5 | - repo: 'deb {{ apt_repos.bbg_openstack_ppa.repo }} precise main' 6 | key_url: '{{ apt_repos.bbg_openstack_ppa.key_url }}' 7 | when: ansible_distribution_version == "12.04" 8 | - role: sensu-check 9 | - role: inspec 10 | install_inspec_controls: [neutron_data] 11 | tags: inspec 12 | when: 13 | - inspec.enabled|bool 14 | - inspec.controls.neutron_data.enabled|bool 15 | - role: neutron-common 16 | - role: openstack-firewall 17 | rule_name: VXLAN 18 | rules_type_input: 19 | - { protocol: udp, port: 4789 } 20 | -------------------------------------------------------------------------------- /roles/neutron-data/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | -------------------------------------------------------------------------------- /roles/neutron-data/templates/etc/cron.daily/cleanup-neutron-interfaces: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Remove logs for Neutron TAP interfaces 4 | find /var/log/upstart/ -name network-interface-tap\*.log -mtime +1 -delete 5 | -------------------------------------------------------------------------------- /roles/neutron-data/templates/etc/neutron/plugins/ml2/ml2_plugin_dataplane.ini: -------------------------------------------------------------------------------- 1 | [vxlan] 2 | local_ip = {{ hostvars[inventory_hostname][neutron.vxlan_interface|default(primary_interface)]['ipv4']['address'] }} 3 | {% if not neutron.l2_population %} 4 | # Set TTL on VXLAN datagrams to 1 to confine to local broadcast domain 5 | ttl = {{ neutron.vxlan.ttl }} 6 | vxlan_group = {{ neutron.vxlan.group_prefix }} 7 | {% endif %} 8 | 9 | [linux_bridge] 10 | physical_interface_mappings = {{ neutron.bridge_mappings }} 11 | -------------------------------------------------------------------------------- /roles/nova-common/files/patches/availability_zones.py.patch: -------------------------------------------------------------------------------- 1 | diff --git a/nova/availability_zones.py b/nova/availability_zones.py 2 | index d627fa0..68b4475 100644 3 | --- a/nova/availability_zones.py 4 | +++ b/nova/availability_zones.py 5 | @@ -56,7 +56,7 @@ def _reset_cache(): 6 | 7 | 8 | def _make_cache_key(host): 9 | - return "azcache-%s" % host 10 | + return "azcache-%s" % host.encode('utf-8') 11 | 12 | 13 | def set_availability_zones(context, services): 14 | -------------------------------------------------------------------------------- /roles/nova-common/handlers/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # Perform restarts serially as to not take a subservice completely offline. 3 | # Only really matters for nova-api, fix this up later. 4 | - name: restart nova services 5 | service: 6 | name: "{{ item[1].name }}" 7 | state: restarted 8 | must_exist: false 9 | failed_when: false 10 | run_once: True 11 | delegate_to: "{{ item[0] }}" 12 | when: restart|default(True) 13 | with_nested: 14 | - "{{ play_hosts }}" 15 | - ["{{ nova.services.nova_api }}", 16 | "{{ nova.services.nova_conductor }}", 17 | "{{ nova.services.nova_compute }}", 18 | "{{ nova.services.nova_consoleauth }}", 19 | "{{ nova.services.nova_novncproxy }}", 20 | "{{ nova.services.nova_placement_api }}", 21 | "{{ nova.services.nova_scheduler }}"] 22 | -------------------------------------------------------------------------------- /roles/nova-common/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for nova 3 | logrotate: name=nova path=/var/log/nova/*.log 4 | args: 5 | options: 6 | - daily 7 | - missingok 8 | - rotate 7 9 | - compress 10 | - notifempty 11 | - delaycompress 12 | when: openstack_install_method != 'distro' 13 | 14 | -------------------------------------------------------------------------------- /roles/nova-common/templates/etc/sudoers.d/nova: -------------------------------------------------------------------------------- 1 | {% if openstack_install_method != 'distro' %} 2 | nova ALL=(root) NOPASSWD: /usr/local/bin/nova-rootwrap 3 | {% else %} 4 | nova ALL=(root) NOPASSWD: /bin/nova-rootwrap 5 | {% endif %} 6 | -------------------------------------------------------------------------------- /roles/nova-control/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: inspec 4 | install_inspec_controls: [nova_control] 5 | tags: inspec 6 | when: 7 | - inspec.enabled|bool 8 | - inspec.controls.nova_control.enabled|bool 9 | - role: nova-common 10 | - role: openstack-database 11 | database_name: nova 12 | - role: openstack-database 13 | database_name: nova_api 14 | - role: collectd-plugin 15 | when: collectd is defined and collectd.enabled|bool 16 | - role: openstack-firewall 17 | rule_name: nova 18 | rules_type_input: 19 | - { protocol: tcp, port: "{{ endpoints.nova.port.haproxy_api }}" } 20 | - { protocol: tcp, port: "{{ endpoints.novnc.port.haproxy_api }}" } 21 | - { protocol: tcp, port: "{{ endpoints.placement.port.haproxy_api }}" } 22 | 23 | -------------------------------------------------------------------------------- /roles/nova-control/templates/etc/init/nova-placement-api.conf: -------------------------------------------------------------------------------- 1 | description "uwsgi for nova" 2 | 3 | 4 | start on runlevel [2345] 5 | stop on runlevel [!2345] 6 | 7 | respawn 8 | 9 | pre-start script 10 | if [ ! 
-d /run/uwsgi/nova ]; then 11 | mkdir -p /run/uwsgi/nova 12 | chown nova /run/uwsgi/nova 13 | chmod 775 /run/uwsgi/nova 14 | fi 15 | end script 16 | 17 | post-stop script 18 | if [ -d /run/uwsgi/nova ]; then 19 | rm -r /run/uwsgi/nova 20 | fi 21 | end script 22 | 23 | exec {{ nova_uwsgi_path }} --uid nova --gid nova --master --emperor /etc/nova/uwsgi 24 | -------------------------------------------------------------------------------- /roles/nova-control/templates/etc/nova/uwsgi/placement.ini: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | master = true 3 | processes = {{ nova.placement.workers }} 4 | chmod-socket = 666 5 | 6 | {% if openstack_install_method != 'distro' %} 7 | home = {{ nova.placement.uwsgi.home[openstack_install_method] }} 8 | {% endif %} 9 | pidfile = /run/uwsgi/nova/nova-placement-api.pid 10 | logto = /var/log/nova/nova-placement-api.log 11 | logfile-chmod = 644 12 | 13 | {% if nova.placement.uwsgi.method == 'socket' %} 14 | socket = /run/uwsgi/nova/nova-placement-api.socket 15 | {% else %} 16 | http-socket = 0.0.0.0:{{ nova.placement.uwsgi.http_port }} 17 | {% endif %} 18 | 19 | name = placement 20 | uid = nova 21 | gid = nova 22 | 23 | plugins = python 24 | 25 | {% if openstack_install_method == 'distro' %} 26 | wsgi-file = /bin/nova-placement-api 27 | {% else %} 28 | wsgi-file = /usr/local/bin/nova-placement-api 29 | {% endif %} 30 | -------------------------------------------------------------------------------- /roles/nova-control/templates/usr/lib/tmpfiles.d/openstack-nova-placement-api.conf: -------------------------------------------------------------------------------- 1 | d /run/uwsgi/nova 755 nova nova 2 | -------------------------------------------------------------------------------- /roles/nova-data/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart libvirt-bin 3 | service: 4 | name: "{{ nova.services.libvirt.name }}" 5 | state: restarted 6 | 7 | - name: novadocker rootwrap 8 | command: rsync -avh {{ nova.driver.docker.dest }}/etc/nova/rootwrap.d/ /etc/nova/rootwrap.d/ 9 | 10 | - name: restart nova compute 11 | service: name={{ item.name }} state=restarted must_exist=false 12 | with_items: 13 | - "{{ nova.services.nova_compute }}" 14 | 15 | - name: restart multipath service 16 | service: 17 | name: "{{ v7k.multipath[ursula_os].service_name }}" 18 | state: restarted 19 | 20 | 21 | -------------------------------------------------------------------------------- /roles/nova-data/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: inspec 4 | install_inspec_controls: 5 | - nova_data 6 | tags: inspec 7 | when: 8 | - inspec.enabled|bool 9 | - inspec.controls.nova_data.enabled|bool 10 | - role: inspec 11 | install_inspec_controls: 12 | - kvm 13 | tags: inspec 14 | when: 15 | - inspec.enabled|bool 16 | - inspec.controls.kvm.enabled|bool 17 | - role: nova-common 18 | - role: docker 19 | when: nova.compute_driver == "novadocker.virt.docker.DockerDriver" 20 | -------------------------------------------------------------------------------- /roles/nova-data/tasks/v7k.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install device-mapper-multipath packages 3 | package: name="{{ v7k.multipath[ursula_os].package_name }}" 4 | register: result 5 | until: result|succeeded 6 | retries: 5 7 | 8 | - name: generate 
multipath.conf 9 | template: src=etc/multipath.conf dest=/etc/multipath.conf 10 | mode=0640 11 | owner=root group=root 12 | notify: 13 | - restart multipath service 14 | 15 | - name: start multipath 16 | service: 17 | name: "{{ v7k.multipath[ursula_os].service_name }}" 18 | state: started 19 | enabled: True 20 | 21 | -------------------------------------------------------------------------------- /roles/nova-data/templates/etc/machine-id: -------------------------------------------------------------------------------- 1 | {{ machine_id.stdout }} 2 | -------------------------------------------------------------------------------- /roles/nova-data/templates/etc/modprobe.d/kvm-nested.conf: -------------------------------------------------------------------------------- 1 | options kvm_intel nested=1 2 | options kvm_amd nested=1 3 | -------------------------------------------------------------------------------- /roles/nova-data/templates/etc/multipath.conf: -------------------------------------------------------------------------------- 1 | defaults { 2 | user_friendly_names no 3 | } 4 | -------------------------------------------------------------------------------- /roles/nova-data/templates/etc/nova/nova.cinder_encryption.conf: -------------------------------------------------------------------------------- 1 | {% if cinder.encryption.fixed_key %} 2 | [key_manager] 3 | fixed_key = {{ cinder.encryption.fixed_key }} 4 | {% elif barbican.enabled|bool %} 5 | [key_manager] 6 | api_class = castellan.key_manager.barbican_key_manager.BarbicanKeyManager 7 | 8 | [barbican] 9 | barbican_endpoint = {{ endpoints.barbican.url.public }} 10 | auth_endpoint = {{ endpoints.keystone.url.public }}/{{ endpoints.keystonev3.version }} 11 | {% endif %} 12 | -------------------------------------------------------------------------------- /roles/nova-data/templates/var/lib/nova/bin/verify-ssh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ips="{% for host in groups['compute'] -%}{{ hostvars[host][primary_interface]['ipv4']['address'] }} {% endfor -%}" 3 | 4 | for ip in $ips; do 5 | echo testing ssh to $ip ... 
6 | ssh $ip uptime 7 | done 8 | -------------------------------------------------------------------------------- /roles/nova-data/templates/var/lib/nova/ssh/authorized_keys: -------------------------------------------------------------------------------- 1 | {% for host in groups['compute'] %} 2 | {{ hostvars[host].nova_user.ssh_public_key }} 3 | {% endfor %} 4 | -------------------------------------------------------------------------------- /roles/nova-data/templates/var/lib/nova/ssh/known_hosts: -------------------------------------------------------------------------------- 1 | {% for host in groups['compute'] %} 2 | {{ hostvars[host].ansible_nodename }},{{ hostvars[host][primary_interface].ipv4.address }} ssh-rsa {{ hostvars[host].ansible_ssh_host_key_rsa_public }} 3 | {% endfor %} 4 | -------------------------------------------------------------------------------- /roles/openstack-database/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: database users 3 | run_once: true 4 | mysql_user: name={{ database_name }} host=% 5 | password={{ secrets.db_password }} 6 | priv={{ database_name }}.*:ALL 7 | state=present 8 | 9 | - name: remove local database users (since absence of anonymous user will allow access as wildcard) 10 | run_once: true 11 | mysql_user: name={{ database_name }} host=localhost state=absent 12 | 13 | - name: "create database {{ database_name }}" 14 | run_once: true 15 | mysql_db: db={{ database_name }} state=present 16 | register: database_create 17 | -------------------------------------------------------------------------------- /roles/openstack-distro/defaults/main.yml: -------------------------------------------------------------------------------- 1 | openstack_distro: 2 | project_packages: "{{ project_packages|default([]) }}" 3 | dependent_packages: "{{ dependent_packages|default([]) }}" 4 | python_post_dependencies: "{{ python_post_dependencies|default([]) }}" 5 | pip_extra_args: "" 6 | -------------------------------------------------------------------------------- /roles/openstack-distro/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: osp.yml 2 | when: openstack_distro_type == 'osp' 3 | -------------------------------------------------------------------------------- /roles/openstack-network/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: endpoints 4 | - role: openstack-meta 5 | -------------------------------------------------------------------------------- /roles/openstack-network/templates/rhel_neutron_external_interface.cfg: -------------------------------------------------------------------------------- 1 | DEVICE="{{ neutron_external_interface }}" 2 | BOOTPROTO="static" 3 | {% set count = 0 %} 4 | {% for subnet in neutron.subnets %} 5 | {% if subnet.name == 'external' %} 6 | IPADDR{{ count }}="192.168.255.1" 7 | NETMASK{{ count }}="255.255.255.0" 8 | {% set count = count + 1 %} 9 | {% endif %} 10 | {% endfor %} 11 | ONBOOT="yes" 12 | TYPE="Bridge" 13 | NM_CONTROLLED="no" 14 | 15 | -------------------------------------------------------------------------------- /roles/openstack-network/templates/ubuntu_neutron_external_interface.cfg: -------------------------------------------------------------------------------- 1 | allow-hotplug {{ neutron_external_interface }} 2 | iface {{ neutron_external_interface }} inet static 3 | {% for subnet in 
neutron.subnets %} 4 | {% if subnet.name == 'external' %} 5 | address {{ subnet.cidr|ipaddr('net')|ipaddr('1')|ipaddr('address') }} 6 | netmask {{ subnet.cidr|ipaddr('net')|ipaddr('netmask') }} 7 | {% endif %} 8 | {% endfor %} 9 | up sysctl net.ipv4.conf.all.forwarding=1 10 | -------------------------------------------------------------------------------- /roles/openstack-package/defaults/main.yml: -------------------------------------------------------------------------------- 1 | openstack_package_version: 11.0-bbc73 2 | openstack_package: 3 | package_name: 'openstack-{{ project_name }}-{{ openstack_package_version }}' 4 | rootwrap: "{{ rootwrap|default(False)|bool }}" 5 | virtualenv_base: /opt/bbc/openstack-{{ openstack_package_version }} 6 | -------------------------------------------------------------------------------- /roles/openstack-package/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apt-repos 4 | repos: 5 | - repo: 'deb {{ apt_repos.blueboxcloud_giftwrap.repo }} {{ ansible_distribution_release }} main' 6 | key_url: '{{ apt_repos.blueboxcloud_giftwrap.key_url }}' 7 | when: ursula_os == 'ubuntu' 8 | -------------------------------------------------------------------------------- /roles/openstack-setup/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: endpoints 4 | - role: openstack-meta 5 | - role: v7k-defaults 6 | -------------------------------------------------------------------------------- /roles/openstack-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: users.yml 3 | when: openstack_setup.add_users|bool or openstack_setup.add_users is undefined 4 | 5 | - include: images.yml 6 | when: openstack_setup.add_images|bool or openstack_setup.add_images is undefined 7 | 8 | - include: networks.yml 9 | when: openstack_setup.add_networks|bool or openstack_setup.add_networks is undefined 10 | 11 | - include: cinder.yml 12 | when: cinder.enabled|default('True')|bool 13 | tags: cinder 14 | 15 | - include: flavors.yml 16 | 17 | - include: computes.yml 18 | tags: hostagg 19 | 20 | - include: dispersion.yml 21 | when: (services_setup.changed or upgrade|default('False')|bool) and 22 | swift.enabled|default('False')|bool 23 | 24 | -------------------------------------------------------------------------------- /roles/openstack-source/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | openstack_source: 3 | git_mirror: "{{ git_mirror|default('https://github.com/openstack') }}" 4 | pypi_mirror: "{{ pypi_mirror|default(None) }}" 5 | project_rev: "{{ project_rev|default('master') }}" 6 | virtualenv_base: /opt/bbc/openstack-2016.2-newton 7 | git_update: no 8 | rootwrap: "{{ rootwrap|default(False)|bool }}" 9 | system_dependencies: "{{ system_dependencies|default({'ubuntu':[], 'rhel':[]}) }}" 10 | python_dependencies: "{{ python_dependencies|default([]) }}" 11 | python_post_dependencies: "{{ python_post_dependencies|default([]) }}" 12 | pip_extra_args: "" 13 | -------------------------------------------------------------------------------- /roles/openstack-source/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: update ca-certs 3 | command: "update-ca-certificates" 4 | when: ansible_distribution == 'Ubuntu' 5 | 
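The ubuntu_neutron_external_interface.cfg template above derives the interface address and netmask for the 'external' subnet from its CIDR with Ansible's ipaddr filter chain. The short Python sketch below shows what those two expressions evaluate to using only the standard library; the CIDR is an invented example, chosen to line up with the values hard-coded in the RHEL variant of the template.

import ipaddress

cidr = "192.168.255.0/24"        # stand-in for subnet.cidr where subnet.name == 'external'
net = ipaddress.ip_network(cidr, strict=False)

address = str(net[1])            # ~ cidr | ipaddr('net') | ipaddr('1') | ipaddr('address')
netmask = str(net.netmask)       # ~ cidr | ipaddr('net') | ipaddr('netmask')

print("address", address)        # address 192.168.255.1
print("netmask", netmask)        # netmask 255.255.255.0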
-------------------------------------------------------------------------------- /roles/percona-arbiter/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: percona-common 4 | -------------------------------------------------------------------------------- /roles/percona-arbiter/templates/etc/default/garbd: -------------------------------------------------------------------------------- 1 | {% macro garbd_hosts() -%} 2 | {% for host in groups['db'] -%} {{ hostvars[host][primary_interface]['ipv4']['address'] -}}:4567 {% endfor -%} 3 | {% endmacro -%} 4 | GALERA_NODES="{{ garbd_hosts() }}" 5 | GALERA_GROUP="mstack_db_cluster" 6 | LOG_FILE="/var/log/garbd.log" 7 | -------------------------------------------------------------------------------- /roles/percona-backup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create backup directory 3 | file: path=/backup/percona state=directory mode=0755 4 | 5 | - name: add percona-xtrabackup.sh to cron.daily 6 | template: src=percona-xtrabackup.sh dest=/etc/cron.daily/percona-xtrabackup owner=root group=root mode=0755 7 | 8 | - include: monitoring.yml 9 | tags: 10 | - monitoring 11 | when: monitoring.enabled|default('True')|bool 12 | -------------------------------------------------------------------------------- /roles/percona-backup/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: percona xtradb backup 3 | sensu_check: 4 | name: check-percona-xtradb-backup 5 | plugin: check-percona-xtrabackup.py 6 | interval: 43200 7 | occurrences: 1 8 | -------------------------------------------------------------------------------- /roles/percona-common/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apt-repos 4 | repos: 5 | - repo: 'deb {{ apt_repos.percona.repo }} {{ ansible_distribution_release }} main' 6 | key_url: '{{ apt_repos.percona.key_url }}' 7 | when: ansible_distribution in ['Ubuntu'] 8 | - role: repos 9 | repo: percona 10 | -------------------------------------------------------------------------------- /roles/percona-common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - set_fact: 3 | garbd_service: garbd 4 | when: ursula_os == 'ubuntu' 5 | - set_fact: 6 | garbd_service: garb 7 | when: ursula_os == 'rhel' 8 | 9 | - name: install python-pycurl 10 | package: 11 | name: python-pycurl 12 | register: result 13 | until: result|succeeded 14 | retries: 5 15 | 16 | # this is needed so that mysql when run by 17 | # systemd can execute the wsrep commands 18 | - name: set selinux permissive for mysql 19 | selinux_permissive: 20 | name: mysqld_t 21 | permissive: true 22 | when: ursula_os == 'rhel' 23 | -------------------------------------------------------------------------------- /roles/percona-server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | percona: 3 | replication: True 4 | monitoring: 5 | sensu_checks: 6 | percona_cluster_size: 7 | criticality: 'critical' 8 | logs: 9 | - paths: 10 | - /var/log/mysql/*.err 11 | fields: 12 | type: mysql 13 | tags: mysql 14 | -------------------------------------------------------------------------------- /roles/percona-server/handlers/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: restart mysql server 3 | service: name=mysql state=restarted 4 | 5 | - name: restart mysql server if upgrade 6 | service: 7 | name: mysql 8 | state: restarted 9 | when: upgrade | default('False') | bool 10 | -------------------------------------------------------------------------------- /roles/percona-server/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: monitoring-common 4 | when: monitoring.enabled|default(True)|bool 5 | - role: sensu-check 6 | - role: inspec 7 | install_inspec_controls: [mysql] 8 | tags: inspec 9 | when: 10 | - inspec.enabled|bool 11 | - inspec.controls.mysql.enabled|bool 12 | - role: logging-config 13 | when: logging.enabled|default(True)|bool 14 | service: mysql 15 | logdata: "{{ percona.logs }}" 16 | - role: collectd-plugin 17 | when: collectd is defined and collectd.enabled|bool 18 | -------------------------------------------------------------------------------- /roles/percona-server/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up log rotation for mysql 3 | logrotate: name=mysql path=/var/log/mysql/*.err 4 | args: 5 | options: 6 | - daily 7 | - rotate 7 8 | - missingok 9 | - compress 10 | - postrotate 11 | - test -x /usr/bin/mysqladmin && /usr/bin/mysqladmin ping > /dev/null && /usr/bin/mysqladmin flush-logs > /dev/null 12 | - endscript 13 | - minsize 100k 14 | -------------------------------------------------------------------------------- /roles/percona-server/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check percona cluster size 3 | sensu_check_dict: 4 | name: "percona-cluster-size" 5 | check: "{{ sensu_checks.percona.check_cluster_size }}" 6 | notify: restart sensu-client missing ok 7 | 8 | - name: check mysql process 9 | sensu_check_dict: 10 | name: "check-mysql-process" 11 | check: "{{ sensu_checks.percona.check_mysql_process }}" 12 | notify: restart sensu-client missing ok 13 | 14 | - name: mysql metrics 15 | template: src=etc/collectd/plugins/mysql.conf dest=/etc/collectd/plugins/mysql.conf 16 | notify: restart collectd 17 | when: collectd is defined and collectd.enabled|bool 18 | tags: collectd -------------------------------------------------------------------------------- /roles/percona-server/templates/etc/collectd/plugins/mysql.conf: -------------------------------------------------------------------------------- 1 | <LoadPlugin python> 2 | Globals true 3 | </LoadPlugin> 4 | 5 | <Plugin python> 6 | ModulePath "{{ collectd_plugin.path }}/mysql" 7 | Import "mysql" 8 | <Module mysql> 9 | Host "localhost" 10 | Port 3306 11 | User "root" 12 | Password "{{ secrets.db_password }}" 13 | Verbose false 14 | </Module> 15 | </Plugin> 16 | -------------------------------------------------------------------------------- /roles/percona-server/templates/etc/default/mysql: -------------------------------------------------------------------------------- 1 | startup_timeout=900 2 | stop_timeout=300 3 | UMASK=0644 4 | -------------------------------------------------------------------------------- /roles/percona-server/templates/etc/my.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | 3 | general-log = 0 4 | general-log-file = /var/log/mysql/mysql.log 5 | log-error = /var/log/mysql/mysql.err 6 | log-warnings = 10 7 | datadir = /var/lib/mysql 8 | user = mysql 9 |
bind-address = 0.0.0.0 10 | #workaround mysql bug #84940 11 | innodb-stats-persistent = 0 12 | innodb-stats-transient-sample-pages = 20 13 | innodb-stats-auto-recalc = 0 14 | 15 | 16 | !includedir /etc/mysql/conf.d 17 | -------------------------------------------------------------------------------- /roles/percona-server/templates/etc/mysql/conf.d/bind-inaddr-any.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | bind-address = 0.0.0.0 3 | -------------------------------------------------------------------------------- /roles/percona-server/templates/etc/mysql/conf.d/low_memory.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | 3 | innodb_buffer_pool_size=5M 4 | innodb_log_buffer_size=256K 5 | query_cache_size=0 6 | max_connections=10 7 | key_buffer_size=8 8 | thread_cache_size=0 9 | host_cache_size=0 10 | innodb_ft_cache_size=1600000 11 | innodb_ft_total_cache_size=32000000 12 | 13 | # per thread or per operation settings 14 | thread_stack=131072 15 | sort_buffer_size=32K 16 | read_buffer_size=8200 17 | read_rnd_buffer_size=8200 18 | max_heap_table_size=16K 19 | tmp_table_size=1K 20 | bulk_insert_buffer_size=0 21 | join_buffer_size=128 22 | net_buffer_length=1K 23 | innodb_sort_buffer_size=64K 24 | 25 | #settings that relate to the binary log (if enabled) 26 | binlog_cache_size=4K 27 | binlog_stmt_cache_size=4K 28 | -------------------------------------------------------------------------------- /roles/percona-server/templates/etc/mysql/conf.d/tuning.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | 3 | max_connections = 1000 4 | innodb_log_file_size=50331648 5 | lock_wait_timeout = 1800 6 | -------------------------------------------------------------------------------- /roles/percona-server/templates/etc/mysql/conf.d/utf8.cnf: -------------------------------------------------------------------------------- 1 | [client] 2 | default-character-set=utf8 3 | 4 | [mysql] 5 | default-character-set=utf8 6 | 7 | [mysqld] 8 | collation-server = utf8_unicode_ci 9 | character-set-server = utf8 10 | -------------------------------------------------------------------------------- /roles/percona-server/templates/root/.my.cnf: -------------------------------------------------------------------------------- 1 | [client] 2 | user=root 3 | password={{ mysql.root_password }} 4 | -------------------------------------------------------------------------------- /roles/preflight-checks/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | preflight_checks: 3 | ceph_minimum_version: 10 4 | -------------------------------------------------------------------------------- /roles/preflight-checks/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: endpoints 4 | - role: v7k-defaults 5 | -------------------------------------------------------------------------------- /roles/preflight-checks/tasks/neutron.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: network.yml param={{ item }} 3 | with_items: "{{ neutron.networks }}" 4 | 5 | - include: subnet.yml param={{ item }} 6 | with_items: "{{ neutron.subnets }}" 7 | -------------------------------------------------------------------------------- /roles/rabbitmq/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | rabbitmq: 3 | cluster: False 4 | erlang_cookie: 6IMgelGs3Ygu 5 | user: openstack 6 | nodename: 'rabbit@{{ ansible_hostname }}' 7 | ip: '0.0.0.0' 8 | port: 5672 9 | management_port: 15672 10 | admin_cli_url: 'http://localhost:15672/cli/rabbitmqadmin' 11 | numqueues_warning_multiplier: 60 12 | numqueues_critical_multiplier: 120 13 | nofile: 10240 14 | version: 15 | rabbitmq_server: '3.6.15*' 16 | esl_erlang: '1:19*' 17 | monitoring: 18 | sensu_checks: 19 | rabbitmq_cluster: 20 | criticality: 'critical' 21 | timeout: 5 22 | systemd: 23 | custom: | 24 | [Service] 25 | LimitNOFILE=300000 26 | logs: 27 | - paths: 28 | - /var/log/rabbitmq/*.log 29 | fields: 30 | type: rabbitmq 31 | tags: rabbitmq 32 | -------------------------------------------------------------------------------- /roles/rabbitmq/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart rabbitmq 3 | service: name=rabbitmq-server state=restarted enabled=yes 4 | 5 | - name: login as rabbitmq to update limits 6 | command: sudo -i -u rabbitmq 7 | register: rabbitmq_login 8 | failed_when: rabbitmq_login.rc != 1 9 | -------------------------------------------------------------------------------- /roles/rabbitmq/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apt-repos 4 | repos: 5 | - repo: 'deb {{ apt_repos.erlang.repo }} {{ ansible_lsb.codename }} contrib' 6 | key_url: '{{ apt_repos.erlang.key_url }}' 7 | - repo: 'deb {{ apt_repos.rabbitmq.repo }} trusty main' 8 | key_url: '{{ apt_repos.rabbitmq.key_url }}' 9 | when: ursula_os == 'ubuntu' 10 | - role: monitoring-common 11 | when: monitoring.enabled|default(True)|bool 12 | - role: logging-config 13 | when: logging.enabled|default(True)|bool 14 | service: rabbitmq 15 | logdata: "{{ rabbitmq.logs }}" 16 | - role: inspec 17 | install_inspec_controls: [rabbitmq] 18 | tags: inspec 19 | when: 20 | - inspec.enabled|bool 21 | - inspec.controls.rabbitmq.enabled|bool 22 | - role: collectd-plugin 23 | when: collectd is defined and collectd.enabled|bool 24 | -------------------------------------------------------------------------------- /roles/rabbitmq/templates/etc/collectd/plugins/rabbitmq.conf: -------------------------------------------------------------------------------- 1 | LoadPlugin python 2 | 3 | TypesDB "{{ collectd_plugin.path }}/rabbitmq/config/types.db.custom" 4 | 5 | <Plugin python> 6 | ModulePath "{{ collectd_plugin.path }}/rabbitmq/" 7 | LogTraces true 8 | Interactive false 9 | Import rabbitmq 10 | <Module rabbitmq> 11 | Username "{{ rabbitmq.user }}" 12 | Password "{{ secrets.rabbit_password }}" 13 | Realm "RabbitMQ Management" 14 | Host "localhost" 15 | Port "15672" 16 | <Ignore "queue"> 17 | Regex "amq-gen-.*" 18 | Regex "tmp-.*" 19 | </Ignore> 20 | </Module> 21 | </Plugin> 22 | -------------------------------------------------------------------------------- /roles/rabbitmq/templates/etc/default/rabbitmq-server: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # This file is sourced by /etc/init.d/rabbitmq-server. Its primary 3 | # reason for existing is to allow adjustment of system limits for the 4 | # rabbitmq-server process. 5 | # 6 | # Maximum number of open file handles. This will need to be increased 7 | # to handle many simultaneous connections. Refer to the system 8 | # documentation for ulimit (in man bash) for more information.
9 | # 10 | #ulimit -n 1024 11 | ulimit -n {{ rabbitmq.nofile }} 12 | -------------------------------------------------------------------------------- /roles/rabbitmq/templates/etc/rabbitmq/rabbitmq-env.conf: -------------------------------------------------------------------------------- 1 | NODE_PORT={{ rabbitmq.port }} 2 | NODE_IP_ADDRESS={{ rabbitmq.ip }} 3 | NODENAME={{ rabbitmq.nodename }} 4 | -------------------------------------------------------------------------------- /roles/rabbitmq/templates/etc/rabbitmq/rabbitmq.config: -------------------------------------------------------------------------------- 1 | {% macro rabbitmq_hosts() -%} 2 | {% for host in groups['controller'] -%} 3 | {% if loop.last -%} 4 | '{{ rabbitmq.user }}@{{ hostvars[host]['ansible_hostname'] }}' 5 | {%- else -%} 6 | '{{ rabbitmq.user }}@{{ hostvars[host]['ansible_hostname'] }}', 7 | {%- endif -%} 8 | {% endfor -%} 9 | {% endmacro -%} 10 | 11 | [ 12 | {kernel, [ 13 | {inet_dist_listen_min, 65535}, 14 | {inet_dist_listen_max, 65535} 15 | ]}, 16 | {% if rabbitmq.cluster|bool -%} 17 | {rabbit, [ 18 | {cluster_nodes, {[{{ rabbitmq_hosts() }}],disc }}, 19 | {% else -%} 20 | {rabbit, [ 21 | {cluster_nodes, ["{{ rabbitmq.user }}@{{ ansible_hostname }}"]}, 22 | {% endif -%} 23 | {default_user, <<"{{ rabbitmq.user }}">>}, 24 | {default_pass, <<"{{ secrets.rabbit_password }}">>} 25 | ]} 26 | 27 | ]. 28 | -------------------------------------------------------------------------------- /roles/rabbitmq/templates/etc/security/limits.d/10-rabbitmq.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | rabbitmq hard nofile {{ rabbitmq.nofile }} 3 | rabbitmq soft nofile {{ rabbitmq.nofile }} 4 | -------------------------------------------------------------------------------- /roles/rabbitmq/templates/var/lib/rabbitmq/erlang.cookie: -------------------------------------------------------------------------------- 1 | {{ rabbitmq.erlang_cookie }} 2 | -------------------------------------------------------------------------------- /roles/repos/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: apt.yml 2 | when: ursula_os == "ubuntu" 3 | 4 | - include: yum.yml 5 | when: ursula_os == "rhel" 6 | 7 | -------------------------------------------------------------------------------- /roles/rhn-subscription/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rhn_subscription: 3 | username: ~ 4 | password: ~ 5 | pool_names_regex: "^Red Hat Satellite - Add-Ons for Providers" 6 | repos: 7 | refresh: false 8 | enable: 9 | - rhel-7-server-rpms 10 | - rhel-7-server-rh-common-rpms 11 | - rhel-7-server-extras-rpms 12 | - rhel-7-server-optional-rpms 13 | - rhel-7-server-openstack-10-rpms 14 | - rhel-7-server-openstack-10-tools-rpms 15 | - rhel-7-server-openstack-10-optools-rpms 16 | - rhel-7-server-openstack-10-tools-debug-rpms 17 | - rhel-7-server-rhceph-2-mon-rpms 18 | - rhel-7-server-rhceph-2-tools-rpms 19 | disable: 20 | - rhel* 21 | -------------------------------------------------------------------------------- /roles/security_errata/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | security: 3 | always_update: 4 | apt: 5 | - bash 6 | - openssl 7 | - libssl1.0.0 8 | - libc6 9 | - openssh-server 10 | - openssh-client 11 | - chrony 12 | - curl 13 | - libcurl3 14 | - libcurl3-gnutls 15 | 
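The security.always_update.apt list above only names the packages; a minimal sketch of how a role would typically apply such a list (illustrative only, not this role's actual tasks/main.yml) might be:

- name: always update packages with frequent security errata
  apt:
    name: "{{ item }}"
    state: latest
    update_cache: yes
  with_items: "{{ security.always_update.apt }}"
  when: ursula_os == 'ubuntu'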
-------------------------------------------------------------------------------- /roles/security_errata/files/detect_CVE-2015-0235: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blueboxgroup/ursula/b70ccc4a6bda2830559b99991025ee275301c121/roles/security_errata/files/detect_CVE-2015-0235 -------------------------------------------------------------------------------- /roles/security_errata/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reload haproxy 3 | service: name=haproxy state=reloaded must_exist=false 4 | register: hareload 5 | failed_when: hareload|failed and ("service not found" not in hareload.msg) 6 | -------------------------------------------------------------------------------- /roles/sensu-check/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart sensu-client missing ok 3 | service: name=sensu-client state=restarted must_exist=false 4 | -------------------------------------------------------------------------------- /roles/sensu-check/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: sensu checks directory 3 | file: dest=/etc/sensu/conf.d/checks 4 | state=directory mode=0755 5 | -------------------------------------------------------------------------------- /roles/serverspec/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | serverspec: 3 | enabled: true 4 | interval: 3600 5 | gems: 6 | - name: rspec 7 | version: 3.5.0 8 | - name: rspec-core 9 | version: 3.5.3 10 | - name: net-ssh 11 | version: 2.9.2 12 | - name: specinfra 13 | version: 2.50.4 14 | - name: serverspec 15 | version: 2.29.1 16 | - name: serverspec-extended-types 17 | version: 0.0.3 18 | -------------------------------------------------------------------------------- /roles/serverspec/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: monitoring-common 4 | -------------------------------------------------------------------------------- /roles/serverspec/templates/etc/serverspec/Rakefile: -------------------------------------------------------------------------------- 1 | require 'rake' 2 | require 'rspec/core/rake_task' 3 | 4 | task :spec => 'spec:all' 5 | task :default => :spec 6 | 7 | namespace :spec do 8 | targets = [] 9 | Dir.glob('./spec/*').each do |dir| 10 | next unless File.directory?(dir) 11 | target = File.basename(dir) 12 | target = "_#{target}" if target == "default" 13 | targets << target 14 | end 15 | 16 | task :all => targets 17 | task :default => :all 18 | 19 | targets.each do |target| 20 | original_target = target == "_default" ? 
target[1..-1] : target 21 | desc "Run serverspec tests to #{original_target}" 22 | RSpec::Core::RakeTask.new(target.to_sym) do |t| 23 | ENV['TARGET_HOST'] = original_target 24 | t.pattern = "spec/#{original_target}/*_spec.rb" 25 | end 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /roles/serverspec/templates/etc/serverspec/spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require 'serverspec' 2 | require 'serverspec_extended_types' 3 | 4 | set :backend, :exec 5 | -------------------------------------------------------------------------------- /roles/stop-services/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: stop services 3 | service: 4 | name: "{{ item }}" 5 | state: stopped 6 | must_exist: "{{ must_exist|default(omit) }}" 7 | with_items: "{{ services|default([]) }}" 8 | -------------------------------------------------------------------------------- /roles/swift-account/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | swift_account: 3 | ip: "{{ primary_ip }}" 4 | port: 6002 5 | workers: "{{ swift.cluster.no_workers }}" 6 | replicators: "{{ swift.cluster.no_replicators }}" 7 | auditors: "{{ swift.cluster.no_auditors }}" 8 | reapers: 1 9 | -------------------------------------------------------------------------------- /roles/swift-account/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart swift-account-services 3 | service: 4 | name: "{{ item.name }}" 5 | state: restarted 6 | with_items: 7 | - "{{ swift.services.swift_account }}" 8 | - "{{ swift.services.swift_account_auditor }}" 9 | - "{{ swift.services.swift_account_reaper }}" 10 | - "{{ swift.services.swift_account_replicator }}" 11 | when: start_account|bool 12 | -------------------------------------------------------------------------------- /roles/swift-account/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: swift-common 4 | -------------------------------------------------------------------------------- /roles/swift-account/templates/etc/swift/account-server.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | bind_ip = {{ swift_account.ip }} 3 | bind_port = {{ swift_account.port }} 4 | workers = {{ swift_account.workers }} 5 | 6 | log_udp_host = 127.0.0.1 7 | log_udp_port = 514 8 | 9 | {% if swift.logging.debug|bool %} 10 | log_level = DEBUG 11 | {% else %} 12 | log_level = INFO 13 | {% endif %} 14 | 15 | [pipeline:main] 16 | pipeline = healthcheck recon account-server 17 | 18 | [filter:healthcheck] 19 | use = egg:swift#healthcheck 20 | 21 | [filter:recon] 22 | use = egg:swift#recon 23 | 24 | [app:account-server] 25 | use = egg:swift#account 26 | 27 | [account-replicator] 28 | concurrency = {{ swift_account.replicators }} 29 | 30 | [account-auditor] 31 | concurrency = {{ swift_account.auditors }} 32 | 33 | [account-reaper] 34 | concurrency = {{ swift_account.reapers }} 35 | -------------------------------------------------------------------------------- /roles/swift-common/files/etc/rsyslog.d/10-swift-udp.conf: -------------------------------------------------------------------------------- 1 | $ModLoad imudp # needs to be done just once 2 | $UDPServerAddress 127.0.0.1 3 | 
$UDPServerRun 514 4 | -------------------------------------------------------------------------------- /roles/swift-common/files/etc/rsyslog.d/49-swift.conf: -------------------------------------------------------------------------------- 1 | # Use the following to have separate log files for each of the main servers: 2 | # account-server, container-server, object-server, proxy-server. Note: 3 | # object-updater's output will be stored in object.log. 4 | if $programname contains 'swift' then /var/log/swift/swift.log 5 | if $programname contains 'account' then /var/log/swift/account.log 6 | if $programname contains 'container' then /var/log/swift/container.log 7 | if $programname contains 'object' then /var/log/swift/object.log 8 | if $programname contains 'proxy' then /var/log/swift/proxy.log 9 | 10 | # Use the following to discard logs that don't match any of the above to avoid 11 | # them filling up /var/log/syslog. 12 | local0.* ~ 13 | -------------------------------------------------------------------------------- /roles/swift-common/templates/check-swift-dispersion.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # check swift dispersion 4 | # only run on node with the floating ip 5 | 6 | if ifconfig | grep {{ swift_undercloud_floating_ip | default( swift_floating_ip ) }} > /dev/null 2>&1; then 7 | sudo /etc/sensu/plugins/check-swift-dispersion.py -c 98 -d 99 -o 98 -n 99 8 | fi; 9 | -------------------------------------------------------------------------------- /roles/swift-common/templates/etc/rsyncd.conf: -------------------------------------------------------------------------------- 1 | uid = swift 2 | gid = swift 3 | log file = /var/log/rsyncd.log 4 | address = {{ primary_ip }} 5 | 6 | [account] 7 | max connections = {{ swift.rsync.account.max_connections }} 8 | path = /srv/node/ 9 | read only = false 10 | lock file = /var/lock/account.lock 11 | 12 | [container] 13 | max connections = {{ swift.rsync.container.max_connections }} 14 | path = /srv/node/ 15 | read only = false 16 | lock file = /var/lock/container.lock 17 | 18 | [object] 19 | max connections = {{ swift.rsync.object.max_connections }} 20 | path = /srv/node/ 21 | read only = false 22 | lock file = /var/lock/object.lock 23 | -------------------------------------------------------------------------------- /roles/swift-common/templates/etc/sudoers.d/swiftops: -------------------------------------------------------------------------------- 1 | swiftops ALL= NOPASSWD: {{ ( ursula_os == 'rhel' ) | ternary('/','/usr/local/') }}bin/swifttool 2 | swiftops ALL= NOPASSWD: {{ ( ursula_os == 'rhel' ) | ternary('/sbin/','/usr/bin/') }}lshw 3 | swiftops ALL= NOPASSWD: {{ ( ursula_os == 'rhel' ) | ternary('/','/usr/local/') }}bin/swift-init 4 | -------------------------------------------------------------------------------- /roles/swift-common/templates/etc/swift/dispersion.conf: -------------------------------------------------------------------------------- 1 | [dispersion] 2 | auth_url = {{ endpoints.auth_uri }} 3 | auth_user = service:swift 4 | auth_key = {{ secrets.service_password }} 5 | auth_version = 2.0 6 | -------------------------------------------------------------------------------- /roles/swift-common/templates/etc/swift/swift.conf: -------------------------------------------------------------------------------- 1 | [swift-hash] 2 | swift_hash_path_prefix = {{ swift.hash_path_prefix }} 3 | 
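The swift-common templates above reference several keys under the swift variable (swift.rsync.*.max_connections, swift.logging.debug, swift.hash_path_prefix); a minimal sketch of the group_vars shape those templates expect, with purely illustrative placeholder values, would be:

swift:
  hash_path_prefix: changeme   # illustrative placeholder; real environments set their own secret prefix
  logging:
    debug: false
  rsync:
    account:
      max_connections: 2
    container:
      max_connections: 2
    object:
      max_connections: 2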
-------------------------------------------------------------------------------- /roles/swift-common/templates/etc/update-motd.d/99-swift-motd: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo 4 | echo "WARNING Swift node -- do not replace disks without consulting Swift specific procedures" 5 | echo 6 | -------------------------------------------------------------------------------- /roles/swift-common/templates/home/swiftops/.ssh/id_rsa: -------------------------------------------------------------------------------- 1 | {{ swift_common.private_key }} 2 | -------------------------------------------------------------------------------- /roles/swift-common/templates/home/swiftops/.ssh/id_rsa.pub: -------------------------------------------------------------------------------- 1 | {{ swift_common.public_key }} 2 | -------------------------------------------------------------------------------- /roles/swift-container/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | swift_container: 3 | ip: "{{ primary_ip }}" 4 | port: 6001 5 | workers: "{{ swift.cluster.no_workers }}" 6 | replicators: "{{ swift.cluster.no_replicators }}" 7 | auditors: "{{ swift.cluster.no_auditors }}" 8 | updaters: "{{ swift.cluster.no_updaters }}" 9 | -------------------------------------------------------------------------------- /roles/swift-container/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart-swift-container-services 3 | service: 4 | name: "{{ item.name }}" 5 | state: restarted 6 | with_items: 7 | - "{{ swift.services.swift_container }}" 8 | - "{{ swift.services.swift_container_auditor }}" 9 | - "{{ swift.services.swift_container_replicator }}" 10 | - "{{ swift.services.swift_container_sync }}" 11 | - "{{ swift.services.swift_container_updater }}" 12 | when: start_container|bool 13 | -------------------------------------------------------------------------------- /roles/swift-container/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: swift-common 4 | -------------------------------------------------------------------------------- /roles/swift-object/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | swift_object: 3 | ip: "{{ primary_ip }}" 4 | port: 6000 5 | workers: "{{ swift.cluster.no_workers }}" 6 | replicators: "{{ swift.cluster.no_replicators }}" 7 | auditors: "{{ swift.cluster.no_auditors }}" 8 | updaters: "{{ swift.cluster.no_updaters }}" 9 | -------------------------------------------------------------------------------- /roles/swift-object/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart swift-object service 3 | service: name="{{ swift.services.swift_object.name }}" state=restarted 4 | when: start_object|bool 5 | 6 | - name: restart swift-object-expirer service 7 | service: name="{{ swift.services.swift_object_expirer.name }}" state=restarted 8 | when: start_object|bool 9 | 10 | - name: restart swift-object-services 11 | service: 12 | name: "{{ item.name }}" 13 | state: restarted 14 | with_items: 15 | - "{{ swift.services.swift_object }}" 16 | - "{{ swift.services.swift_object_expirer }}" 17 | - "{{ swift.services.swift_object_auditor }}" 18 | - "{{ swift.services.swift_object_replicator 
}}" 19 | - "{{ swift.services.swift_object_updater }}" 20 | when: start_object|bool 21 | -------------------------------------------------------------------------------- /roles/swift-object/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: swift-common 4 | -------------------------------------------------------------------------------- /roles/swift-object/templates/etc/swift/drive-audit.conf: -------------------------------------------------------------------------------- 1 | [drive-audit] 2 | log_facility = LOG_LOCAL0 3 | {% if swift.logging.debug|bool %} 4 | log_level = DEBUG 5 | {% else %} 6 | log_level = INFO 7 | {% endif %} 8 | device_dir = /srv/node 9 | minutes = 60 10 | error_limit = 1 11 | log_file_pattern = /var/log/kern* 12 | -------------------------------------------------------------------------------- /roles/swift-object/templates/etc/swift/object-expirer.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | log_name = object-expirer 3 | log_udp_host = 127.0.0.1 4 | log_udp_port = 514 5 | 6 | {% if swift.logging.debug|bool %} 7 | log_level = DEBUG 8 | {% else %} 9 | log_level = INFO 10 | {% endif %} 11 | 12 | [object-expirer] 13 | 14 | [pipeline:main] 15 | pipeline = catch_errors cache proxy-server 16 | 17 | [app:proxy-server] 18 | use = egg:swift#proxy 19 | 20 | [filter:cache] 21 | use = egg:swift#memcache 22 | memcache_servers = {{ hostvars|ursula_memcache_hosts(groups, memcached.port, controller_name='swiftnode') }} 23 | 24 | [filter:catch_errors] 25 | use = egg:swift#catch_errors 26 | -------------------------------------------------------------------------------- /roles/swift-object/templates/usr/local/bin/swift-drive-auditor: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | /usr/local/bin/swift-drive-audit /etc/swift/drive-audit.conf 4 | -------------------------------------------------------------------------------- /roles/swift-proxy/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | swift_proxy: 3 | ip: "{{ primary_ip }}" 4 | port: "{{ endpoints.swift.port.proxy_api }}" 5 | workers: "{{ swift.cluster.no_workers }}" 6 | operator_roles: Member,_member_,admin,cloud_admin,project_admin 7 | reseller_admin_role: cloud_admin 8 | 9 | -------------------------------------------------------------------------------- /roles/swift-proxy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart swift-proxy service 3 | service: name="{{ swift.services.swift_proxy.name }}" state=restarted 4 | when: start_proxy|bool 5 | 6 | - name: restart haproxy 7 | service: name=haproxy state=restarted 8 | 9 | -------------------------------------------------------------------------------- /roles/swift-proxy/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: swift-common 4 | - role: memcached 5 | - role: endpoints 6 | - role: openstack-firewall 7 | rule_name: swift 8 | rules_type_input: 9 | - { protocol: tcp, port: "{{ endpoints.swift.port.haproxy_api }}" } 10 | -------------------------------------------------------------------------------- /roles/swift-proxy/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | memcached: 3 | bind_ip: "{{ 
hostvars[inventory_hostname][primary_interface]['ipv4']['address'] }}" 4 | -------------------------------------------------------------------------------- /roles/swift-ring/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | swift_ring: 3 | ring_definition_file: "{{ ursula_env_path }}/ring_definition.yml" 4 | mode: "bootstrap" 5 | iterations: 10 6 | outdir: "/etc/swift" 7 | -------------------------------------------------------------------------------- /roles/swift-ring/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: setup swift rings osp - bootstrap 3 | command: /bin/swifttool -u swiftops -i /home/swiftops/.ssh/id_rsa 4 | --config {{ swift_ring.outdir }}/ring_definition.yml --outdir {{ swift_ring.outdir }} bootstrap 5 | become: True 6 | become_user: swiftops 7 | when: openstack_distro_type == 'osp' 8 | 9 | - name: setup swift rings - bootstrap 10 | command: /usr/local/bin/swifttool -u swiftops -i /home/swiftops/.ssh/id_rsa 11 | --config {{ swift_ring.outdir }}/ring_definition.yml --outdir {{ swift_ring.outdir }} bootstrap 12 | become: True 13 | become_user: swiftops 14 | when: openstack_distro_type != 'osp' 15 | -------------------------------------------------------------------------------- /roles/swift-ring/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: swift-common 3 | -------------------------------------------------------------------------------- /roles/swift-ring/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: drop our ring configuration 3 | copy: src={{ swift_ring.ring_definition_file }} owner=swiftops group=swiftops 4 | dest={{ swift_ring.outdir }}/ring_definition.yml mode=644 backup=yes 5 | notify: 6 | - setup swift rings - bootstrap 7 | - setup swift rings osp - bootstrap 8 | 9 | - name: flush handlers 10 | meta: flush_handlers 11 | -------------------------------------------------------------------------------- /roles/v7k-defaults/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | v7k: 3 | enabled: False 4 | ssh_port: 22 5 | multipath: 6 | enabled: False 7 | ubuntu: 8 | package_name: multipath-tools 9 | service_name: multipath-tools 10 | rhel: 11 | package_name: device-mapper-multipath 12 | service_name: multipathd 13 | -------------------------------------------------------------------------------- /test/check-deps: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | source $(dirname $0)/common 4 | 5 | which nova >/dev/null || die "nova client must be present" 6 | 7 | which ansible-playbook >/dev/null || die "ansible-playbook must be present" 8 | 9 | [ -e ${STACK_RC} ] || die "${STACK_RC} must be present" 10 | -------------------------------------------------------------------------------- /test/cleanup: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | source $(dirname $0)/common 4 | 5 | stack_exist=`openstack stack list | grep " ${testenv_heat_stack_name} " | wc -l | tr -d '[[:space:]]'` 6 | if [[ $stack_exist > 0 ]] ; then 7 | echo "destroying heat stack: ${testenv_heat_stack_name}" 8 | openstack stack delete --yes ${testenv_heat_stack_name} 9 | 10 | # wait for some time to ensure all resources under heat stack are cleaned up 
11 | wait_seconds=30 12 | echo "wait for ${wait_seconds} seconds to ensure all resources under heat stack are cleaned up" 13 | sleep ${wait_seconds} 14 | else 15 | echo "heat stack ${testenv_heat_stack_name} doesn't exist." 16 | fi 17 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | minversion = 1.6 3 | skipsdist = True 4 | 5 | [testenv] 6 | install_command = pip install -U {opts} {packages} 7 | setenv = 8 | VIRTUAL_ENV={envdir} 9 | deps = -r{toxinidir}/requirements.txt 10 | commands = 11 | ursula --ursula-test envs/example/ci site.yml 12 | -------------------------------------------------------------------------------- /ursula.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blueboxgroup/ursula/b70ccc4a6bda2830559b99991025ee275301c121/ursula.png -------------------------------------------------------------------------------- /vagrant.yml: -------------------------------------------------------------------------------- 1 | default: 2 | memory: 512 3 | cpus: 1 4 | 5 | vms: 6 | allinone: 7 | ip_address: 8 | - 172.16.0.100 9 | - 172.16.255.100 10 | - 192.168.255.100 11 | cpus: 2 12 | memory: 6144 13 | custom: 14 | - '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 15 | - '["modifyvm", :id, "--nicpromisc4", "allow-all"]' 16 | controller1: 17 | ip_address: 18 | - 172.16.0.101 19 | - 192.168.255.101 20 | memory: 3072 21 | custom: '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 22 | controller2: 23 | ip_address: 24 | - 172.16.0.102 25 | - 192.168.255.102 26 | memory: 3072 27 | custom: '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 28 | compute1: 29 | ip_address: 30 | - 172.16.0.111 31 | - 192.168.255.111 32 | memory: 1536 33 | custom: '["modifyvm", :id, "--nicpromisc3", "allow-all"]' 34 | --------------------------------------------------------------------------------
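The vms hash in vagrant.yml above is the per-VM override table (defaults come from the top-level default block), presumably consumed by the repository's Vagrantfile; as an illustrative sketch only, an additional compute node would follow the same shape (the name, addresses, and sizing below are hypothetical):

  compute2:
    ip_address:
      - 172.16.0.112
      - 192.168.255.112
    memory: 1536
    custom: '["modifyvm", :id, "--nicpromisc3", "allow-all"]'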