├── roles
│   ├── neutron
│   │   ├── files
│   │   │   ├── dnsmasq-neutron.conf
│   │   │   ├── dhcp_agent.ini
│   │   │   └── l3_agent.ini
│   │   ├── tasks
│   │   │   ├── main.yml
│   │   │   ├── neutron.yml
│   │   │   └── controller.yml
│   │   └── templates
│   │       ├── metadata_agent.ini.j2
│   │       ├── lbaas_agent.ini.j2
│   │       ├── neutron_lbaas.conf.j2
│   │       ├── ml2_conf.ini.j2
│   │       ├── linuxbridge_agent.ini.j2
│   │       ├── neutrondb.sql.j2
│   │       └── neutron.conf.j2
│   ├── prepare
│   │   ├── files
│   │   │   └── limits-openstack.conf
│   │   ├── tasks
│   │   │   └── main.yml
│   │   └── templates
│   │       └── services.xml.j2
│   ├── nova
│   │   ├── tasks
│   │   │   ├── main.yml
│   │   │   ├── nova.yml
│   │   │   └── controller.yml
│   │   ├── templates
│   │   │   ├── novadb.sql.j2
│   │   │   └── nova.conf.j2
│   │   └── files
│   │       └── 00-nova-placement-api.conf
│   ├── swift
│   │   ├── tasks
│   │   │   ├── main.yml
│   │   │   ├── swift.yml
│   │   │   └── controller.yml
│   │   └── templates
│   │       ├── swift.conf.j2
│   │       ├── object-expirer.conf.j2
│   │       ├── rsyncd.conf.j2
│   │       ├── account-server.conf.j2
│   │       ├── container-server.conf.j2
│   │       ├── object-server.conf.j2
│   │       └── proxy-server.conf.j2
│   ├── cinder
│   │   ├── tasks
│   │   │   ├── main.yml
│   │   │   ├── cinder.yml
│   │   │   └── controller.yml
│   │   └── templates
│   │       ├── cinderdb.sql.j2
│   │       ├── cinder.conf.j2
│   │       └── lvm.conf.j2
│   ├── database
│   │   ├── templates
│   │   │   ├── memcached.j2
│   │   │   └── my.cnf.j2
│   │   ├── files
│   │   │   ├── server.cnf
│   │   │   └── mongod.conf
│   │   └── tasks
│   │       └── main.yml
│   ├── ceilometer
│   │   ├── tasks
│   │   │   ├── main.yml
│   │   │   ├── ceilometer-nova.yml
│   │   │   ├── aodh.yml
│   │   │   └── controller.yml
│   │   └── templates
│   │       ├── aodhdb.sql.j2
│   │       ├── ceilometer.conf.j2
│   │       ├── aodh.conf.j2
│   │       └── pipeline.yaml.j2
│   ├── tests
│   │   ├── files
│   │   │   └── cirros.yaml
│   │   └── tasks
│   │       └── main.yml
│   ├── ha
│   │   ├── templates
│   │   │   └── keepalived.conf.j2
│   │   └── tasks
│   │       └── main.yml
│   ├── heat
│   │   ├── templates
│   │   │   ├── heatdb.sql.j2
│   │   │   └── heat.conf.j2
│   │   └── tasks
│   │       └── main.yml
│   ├── glance
│   │   ├── templates
│   │   │   ├── glancedb.sql.j2
│   │   │   ├── glance-registry.conf.j2
│   │   │   └── glance-api.conf.j2
│   │   └── tasks
│   │       └── main.yml
│   ├── keystone
│   │   ├── templates
│   │   │   ├── keystonedb.sql.j2
│   │   │   └── keystone.conf.j2
│   │   ├── files
│   │   │   └── wsgi-keystone.conf
│   │   └── tasks
│   │       └── main.yml
│   └── horizon
│       ├── tasks
│       │   └── main.yml
│       └── templates
│           └── local_settings.j2
├── hosts
├── LICENSE
├── site.yml
├── group_vars
│   └── all
├── README.md
└── ansible.cfg
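
The roles above are tied together by site.yml and run against the inventory in hosts. Given the ansible_check_mode guards used throughout the tasks, a check-mode pass followed by a real run would typically look like this (the exact invocation is an assumption, not taken from the repository):

    # dry run; tasks guarded by "when: not ansible_check_mode" are skipped
    ansible-playbook -i hosts site.yml --check

    # actual deployment
    ansible-playbook -i hosts site.yml
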
/roles/neutron/files/dnsmasq-neutron.conf:
--------------------------------------------------------------------------------
1 | dhcp-option-force=26,1450
2 |
--------------------------------------------------------------------------------
/roles/prepare/files/limits-openstack.conf:
--------------------------------------------------------------------------------
1 | * hard nofile 8192
2 | * soft nofile 4098
3 |
--------------------------------------------------------------------------------
/roles/nova/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: controller.yml
2 | when: controller is defined
3 |
4 | - include: nova.yml
5 | when: controller is not defined
6 |
--------------------------------------------------------------------------------
/roles/swift/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: controller.yml
2 | when: controller is defined
3 |
4 | - include: swift.yml
5 | when: controller is not defined
6 |
--------------------------------------------------------------------------------
/roles/cinder/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: controller.yml
2 | when: controller is defined
3 |
4 | - include: cinder.yml
5 | when: controller is not defined
6 |
--------------------------------------------------------------------------------
/roles/neutron/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: controller.yml
2 | when: controller is defined
3 |
4 | - include: neutron.yml
5 | when: controller is not defined
6 |
--------------------------------------------------------------------------------
/roles/database/templates/memcached.j2:
--------------------------------------------------------------------------------
1 | PORT="11211"
2 | USER="memcached"
3 | MAXCONN="1024"
4 | CACHESIZE="64"
5 | OPTIONS="-l {{ ansible_default_ipv4['address'] }}"
6 |
--------------------------------------------------------------------------------
/roles/neutron/templates/metadata_agent.ini.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | nova_metadata_ip = {{ groups['controller'][0] }}
3 | metadata_proxy_shared_secret = {{ metadata_secret }}
4 |
--------------------------------------------------------------------------------
/roles/neutron/templates/lbaas_agent.ini.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
3 |
4 | [haproxy]
5 | user_group = haproxy
6 |
--------------------------------------------------------------------------------
/roles/neutron/templates/neutron_lbaas.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | [service_providers]
4 | service_provider = LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
5 |
--------------------------------------------------------------------------------
/roles/ceilometer/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: controller.yml
2 | when: controller is defined
3 |
4 | - include: aodh.yml
5 | when: controller is defined
6 | tags: aodh
7 |
8 | - include: ceilometer-nova.yml
9 | when: controller is not defined
10 |
--------------------------------------------------------------------------------
/roles/swift/templates/swift.conf.j2:
--------------------------------------------------------------------------------
1 | [swift-hash]
2 | swift_hash_path_suffix = {{ swift_hash_path_suffix }}
3 | swift_hash_path_prefix = {{ swift_hash_path_prefix }}
4 |
5 | [storage-policy:0]
6 | name = Policy-0
7 | default = yes
8 |
9 | [swift-constraints]
10 |
--------------------------------------------------------------------------------
/roles/tests/files/cirros.yaml:
--------------------------------------------------------------------------------
1 | heat_template_version: 2014-10-16
2 | description: A simple server.
3 | resources:
4 | server:
5 | type: OS::Nova::Server
6 | properties:
7 | flavor: m1.nano
8 | image: cirros
9 | networks:
10 | - network: private
11 | user_data_format: RAW
12 | user_data: |
13 | #!/bin/sh
14 |
--------------------------------------------------------------------------------
/roles/neutron/templates/ml2_conf.ini.j2:
--------------------------------------------------------------------------------
1 | [ml2]
2 | type_drivers = flat,vlan,vxlan
3 | tenant_network_types = vxlan
4 | mechanism_drivers = linuxbridge,l2population
5 | extension_drivers = port_security
6 |
7 | [ml2_type_flat]
8 | flat_networks = {{ provider_network }}
9 |
10 | [ml2_type_vxlan]
11 | vni_ranges = 1:1000
12 |
13 | [securitygroup]
14 | enable_ipset = True
15 |
--------------------------------------------------------------------------------
/roles/ha/templates/keepalived.conf.j2:
--------------------------------------------------------------------------------
1 | vrrp_instance internal_vip_01 {
2 | state BACKUP
3 | nopreempt
4 | interface {{ ansible_default_ipv4['alias'] }}
5 | virtual_router_id 01
6 | priority {{ groups['controller'].index(inventory_hostname) + 1 }}
7 | advert_int 1
8 | virtual_ipaddress {
9 | {{ internal_vip_address }} dev {{ ansible_default_ipv4['alias'] }}
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
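A sketch of how this template renders on the second of two controllers (interface name and VIP value are assumed): every node starts as BACKUP with nopreempt, and priority is simply the node's position in the controller group plus one.

    vrrp_instance internal_vip_01 {
        state BACKUP
        nopreempt
        interface eth0                  # ansible_default_ipv4['alias'] of that host (assumed name)
        virtual_router_id 01
        priority 2                      # index 1 in groups['controller'] + 1
        advert_int 1
        virtual_ipaddress {
            10.0.0.100 dev eth0         # internal_vip_address (hypothetical value)
        }
    }
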
/roles/heat/templates/heatdb.sql.j2:
--------------------------------------------------------------------------------
1 | {% if destroy_data %}
2 | DROP DATABASE IF EXISTS heat;
3 | {% endif %}
4 | CREATE DATABASE IF NOT EXISTS heat;
5 |
6 | GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'localhost' IDENTIFIED BY '{{ heatdb_password }}';
7 | GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'{{ ansible_hostname }}' IDENTIFIED BY '{{ heatdb_password }}';
8 | GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'%' IDENTIFIED BY '{{ heatdb_password }}';
9 |
--------------------------------------------------------------------------------
/roles/ceilometer/templates/aodhdb.sql.j2:
--------------------------------------------------------------------------------
1 | {% if destroy_data %}
2 | DROP DATABASE IF EXISTS aodh;
3 | {% endif %}
4 | CREATE DATABASE IF NOT EXISTS aodh;
5 |
6 | GRANT ALL PRIVILEGES ON aodh.* TO 'aodh'@'localhost' IDENTIFIED BY '{{ aodhdb_password }}';
7 | GRANT ALL PRIVILEGES ON aodh.* TO 'aodh'@'{{ ansible_hostname }}' IDENTIFIED BY '{{ aodhdb_password }}';
8 | GRANT ALL PRIVILEGES ON aodh.* TO 'aodh'@'%' IDENTIFIED BY '{{ aodhdb_password }}';
9 |
--------------------------------------------------------------------------------
/roles/cinder/templates/cinderdb.sql.j2:
--------------------------------------------------------------------------------
1 | {% if destroy_data %}
2 | DROP DATABASE IF EXISTS cinder;
3 | {% endif %}
4 | CREATE DATABASE IF NOT EXISTS cinder;
5 |
6 | GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '{{ cinderdb_password }}';
7 | GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'{{ ansible_hostname }}' IDENTIFIED BY '{{ cinderdb_password }}';
8 | GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '{{ cinderdb_password }}';
9 |
--------------------------------------------------------------------------------
/roles/glance/templates/glancedb.sql.j2:
--------------------------------------------------------------------------------
1 | {% if destroy_data %}
2 | DROP DATABASE IF EXISTS glance;
3 | {% endif %}
4 | CREATE DATABASE IF NOT EXISTS glance;
5 |
6 | GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '{{ glancedb_password }}';
7 | GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'{{ ansible_hostname }}' IDENTIFIED BY '{{ glancedb_password }}';
8 | GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '{{ glancedb_password }}';
9 |
--------------------------------------------------------------------------------
/roles/neutron/templates/linuxbridge_agent.ini.j2:
--------------------------------------------------------------------------------
1 | [linux_bridge]
2 | physical_interface_mappings = {{ provider_network }}:{{ public_interface_name }}
3 |
4 | [vxlan]
5 | enable_vxlan = True
6 | local_ip = {{ ansible_default_ipv4['address'] }}
7 | l2_population = True
8 |
9 | [agent]
10 | prevent_arp_spoofing = True
11 |
12 | [securitygroup]
13 | enable_security_group = True
14 | firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
15 |
--------------------------------------------------------------------------------
/roles/neutron/templates/neutrondb.sql.j2:
--------------------------------------------------------------------------------
1 | {% if destroy_data %}
2 | DROP DATABASE IF EXISTS neutron;
3 | {% endif %}
4 | CREATE DATABASE IF NOT EXISTS neutron;
5 |
6 | GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '{{ neutrondb_password }}';
7 | GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'{{ ansible_hostname }}' IDENTIFIED BY '{{ neutrondb_password }}';
8 | GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '{{ neutrondb_password }}';
9 |
--------------------------------------------------------------------------------
/roles/keystone/templates/keystonedb.sql.j2:
--------------------------------------------------------------------------------
1 | {% if destroy_data %}
2 | DROP DATABASE IF EXISTS keystone;
3 | {% endif %}
4 | CREATE DATABASE IF NOT EXISTS keystone;
5 |
6 | GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '{{ keystonedb_password }}';
7 | GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'{{ ansible_hostname }}' IDENTIFIED BY '{{ keystonedb_password }}';
8 | GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '{{ keystonedb_password }}';
9 |
--------------------------------------------------------------------------------
/roles/swift/templates/object-expirer.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | [object-expirer]
4 | # auto_create_account_prefix = .
5 |
6 | [pipeline:main]
7 | pipeline = catch_errors cache proxy-server
8 |
9 | [app:proxy-server]
10 | use = egg:swift#proxy
11 |
12 | [filter:cache]
13 | use = egg:swift#memcache
14 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
15 |
16 | [filter:catch_errors]
17 | use = egg:swift#catch_errors
18 |
--------------------------------------------------------------------------------
/roles/nova/templates/novadb.sql.j2:
--------------------------------------------------------------------------------
1 | {% set nova_db = ['nova', 'nova_api', 'nova_cell0'] %}
2 | {% for db in nova_db %}
3 | {% if destroy_data %}
4 | DROP DATABASE IF EXISTS {{ db }} ;
5 | {% endif %}
6 | CREATE DATABASE IF NOT EXISTS {{ db }} ;
7 | GRANT ALL PRIVILEGES ON {{ db }}.* TO 'nova'@'localhost' IDENTIFIED BY '{{ novadb_password }}';
8 | GRANT ALL PRIVILEGES ON {{ db }}.* TO 'nova'@'{{ ansible_hostname }}' IDENTIFIED BY '{{ novadb_password }}';
9 | GRANT ALL PRIVILEGES ON {{ db }}.* TO 'nova'@'%' IDENTIFIED BY '{{ novadb_password }}';
10 | {% endfor %}
11 |
--------------------------------------------------------------------------------
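For reference, the loop above emits one DROP/CREATE/GRANT block per database. With destroy_data enabled, a hypothetical controller hostname of ctl01, and the password placeholder left unrendered, the first iteration expands to roughly:

    DROP DATABASE IF EXISTS nova ;
    CREATE DATABASE IF NOT EXISTS nova ;
    GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '{{ novadb_password }}';
    GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'ctl01' IDENTIFIED BY '{{ novadb_password }}';
    GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '{{ novadb_password }}';

followed by the same block for nova_api and nova_cell0.
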
/roles/ha/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: packages
3 | package: name=keepalived state=latest
4 |
5 | - name: install configuration
6 | template: src=keepalived.conf.j2 dest=/etc/keepalived/keepalived.conf
7 |
8 | - name: start services
9 | tags: service
10 | service: name=keepalived state=restarted enabled=yes
11 | when: not ansible_check_mode
12 |
13 | - name: verify vip is available
14 | debug: msg="{{ ansible_hostname }} has {{ ansible_all_ipv4_addresses }}"
15 | when: internal_vip_address in ansible_all_ipv4_addresses
16 |
17 |
--------------------------------------------------------------------------------
/roles/nova/tasks/nova.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: packages
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-nova-compute
6 | - sysfsutils
7 |
8 | - name: install nova configuration
9 | tags: config
10 | template: src={{ item }}.j2 dest=/etc/nova/{{ item }}
11 | with_items:
12 | - nova.conf
13 |
14 | - name: start services
15 | tags: service
16 | service: name={{ item }} state=restarted enabled=yes
17 | when: not ansible_check_mode
18 | with_items:
19 | - libvirtd
20 | - openstack-nova-compute
21 |
--------------------------------------------------------------------------------
/roles/keystone/templates/keystone.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | [token]
4 | provider = fernet
5 |
6 | [fernet_tokens]
7 | max_active_keys = {{ (groups['controller'] | length) + 1 }}
8 |
9 | [database]
10 | connection = mysql+pymysql://keystone:{{ keystonedb_password }}@{{ internal_vip_address|default(groups['controller'][0]) }}/keystone
11 |
12 | [cache]
13 | backend = oslo_cache.memcache_pool
14 | enabled = True
15 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
16 |
--------------------------------------------------------------------------------
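To illustrate the computed values above: with, say, three controllers the template yields max_active_keys = 4 and a three-entry memcached_servers list, for example (addresses are hypothetical):

    [fernet_tokens]
    max_active_keys = 4

    [cache]
    memcached_servers = 192.168.10.11:11211,192.168.10.12:11211,192.168.10.13:11211
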
/roles/horizon/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: packages
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-dashboard
6 | - openstack-neutron-lbaas-ui
7 |
8 | - name: install horizon configuration
9 | tags: config
10 | template: src={{ item }}.j2 dest=/etc/openstack-dashboard/{{ item }}
11 | with_items:
12 | - local_settings
13 |
14 | - name: start services
15 | tags: service
16 | service: name={{ item }} state=restarted enabled=yes
17 | when: not ansible_check_mode
18 | with_items:
19 | - memcached
20 | - httpd
21 |
--------------------------------------------------------------------------------
/roles/swift/templates/rsyncd.conf.j2:
--------------------------------------------------------------------------------
1 | uid = swift
2 | gid = swift
3 | log file = /var/log/rsyncd.log
4 | pid file = /var/run/rsyncd.pid
5 | address = {{ ansible_default_ipv4['address'] }}
6 |
7 | [account]
8 | max connections = 2
9 | path = {{ swift_storage_dir }}
10 | read only = false
11 | lock file = /var/lock/account.lock
12 |
13 | [container]
14 | max connections = 2
15 | path = {{ swift_storage_dir }}
16 | read only = false
17 | lock file = /var/lock/container.lock
18 |
19 | [object]
20 | max connections = 2
21 | path = /srv/node/
22 | read only = false
23 | lock file = /var/lock/object.lock
24 |
--------------------------------------------------------------------------------
/roles/swift/templates/account-server.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | bind_ip = {{ ansible_default_ipv4['address'] }}
3 | bind_port = 6002
4 | swift_dir = /etc/swift
5 | devices = {{ swift_storage_dir }}
6 |
7 | [pipeline:main]
8 | pipeline = healthcheck recon account-server
9 |
10 | [app:account-server]
11 | use = egg:swift#account
12 |
13 | [filter:healthcheck]
14 | use = egg:swift#healthcheck
15 |
16 | [filter:recon]
17 | use = egg:swift#recon
18 | recon_cache_path = /var/cache/swift
19 |
20 | [filter:xprofile]
21 | use = egg:swift#xprofile
22 |
23 | [account-auditor]
24 |
25 | [account-replicator]
26 |
27 | [account-reaper]
28 |
29 |
--------------------------------------------------------------------------------
/roles/ceilometer/tasks/ceilometer-nova.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: package
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-ceilometer-compute
6 | - python-ceilometerclient
7 | - python-pecan
8 |
9 | - name: install configuration
10 | tags: config
11 | template: src={{ item }}.j2 dest=/etc/ceilometer/{{ item }}
12 | with_items:
13 | - ceilometer.conf
14 | - pipeline.yaml
15 |
16 | - name: start services
17 | tags: service
18 | service: name={{ item }} state=restarted enabled=yes
19 | when: not ansible_check_mode
20 | with_items:
21 | - openstack-ceilometer-compute
22 |
--------------------------------------------------------------------------------
/roles/swift/templates/container-server.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | bind_ip = {{ ansible_default_ipv4['address'] }}
3 | bind_port = 6001
4 | user = swift
5 | swift_dir = /etc/swift
6 | devices = {{ swift_storage_dir }}
7 |
8 | [pipeline:main]
9 | pipeline = healthcheck recon container-server
10 |
11 | [app:container-server]
12 | use = egg:swift#container
13 |
14 | [filter:healthcheck]
15 | use = egg:swift#healthcheck
16 |
17 | [filter:recon]
18 | use = egg:swift#recon
19 | recon_cache_path = /var/cache/swift
20 |
21 | [filter:xprofile]
22 | use = egg:swift#xprofile
23 |
24 | [container-replicator]
25 |
26 | [container-updater]
27 |
28 | [container-auditor]
29 |
30 | [container-sync]
31 |
32 |
--------------------------------------------------------------------------------
/roles/swift/templates/object-server.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | bind_ip = {{ ansible_default_ipv4['address'] }}
3 | bind_port = 6000
4 | user = swift
5 | swift_dir = /etc/swift
6 | devices = {{ swift_storage_dir }}
7 |
8 | [pipeline:main]
9 | pipeline = healthcheck recon object-server
10 |
11 | [app:object-server]
12 | use = egg:swift#object
13 |
14 | [filter:healthcheck]
15 | use = egg:swift#healthcheck
16 |
17 | [filter:recon]
18 | use = egg:swift#recon
19 | recon_cache_path = /var/cache/swift
20 | recon_lock_path = /var/lock
21 |
22 | [filter:xprofile]
23 | use = egg:swift#xprofile
24 |
25 | [object-replicator]
26 |
27 | [object-reconstructor]
28 |
29 | [object-updater]
30 |
31 | [object-auditor]
32 |
33 |
--------------------------------------------------------------------------------
/roles/cinder/tasks/cinder.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: package
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - lvm2
6 | - openstack-cinder
7 | - targetcli
8 | - python-oslo-policy
9 | - python-keystonemiddleware
10 |
11 | - name: create volume
12 | command: pvcreate -y -ff {{ cinder_dev }}
13 |
14 | - name: create volume group
15 | command: vgcreate cinder-volumes {{ cinder_dev }}
16 |
17 | - name: install cinder configuration
18 | tags: config
19 | template: src={{ item }}.j2 dest=/etc/cinder/{{ item }}
20 | with_items:
21 | - cinder.conf
22 |
23 | - name: start services
24 | tags: service
25 | service: name={{ item }} state=restarted enabled=yes
26 | when: not ansible_check_mode
27 | with_items:
28 | - lvm2-lvmetad
29 | - openstack-cinder-volume
30 | - target
31 |
--------------------------------------------------------------------------------
/hosts:
--------------------------------------------------------------------------------
1 | # Please fill in the hosts for the [controller] and [compute] groups below.
2 | # The same single host may be supplied for both groups.
3 | [controller]
4 |
5 | [compute]
6 |
7 | # please do not edit or remove the groups below
8 | [database:children]
9 | controller
10 |
11 | [keystone:children]
12 | controller
13 |
14 | [glance:children]
15 | controller
16 |
17 | [cinder-controller:children]
18 | controller
19 |
20 | [cinder:children]
21 | compute
22 |
23 | [nova-controller:children]
24 | controller
25 |
26 | [nova:children]
27 | compute
28 |
29 | [neutron-controller:children]
30 | controller
31 |
32 | [neutron:children]
33 | compute
34 |
35 | [heat:children]
36 | controller
37 |
38 | [swift-controller:children]
39 | controller
40 |
41 | [swift:children]
42 | compute
43 |
44 | [ceilometer-controller:children]
45 | controller
46 |
47 | [ceilometer-nova:children]
48 | nova
49 |
50 | [horizon:children]
51 | controller
52 |
--------------------------------------------------------------------------------
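For illustration, a filled-in inventory could look like the following (hostnames are hypothetical; as the comments note, a single host may appear in both groups):

    [controller]
    ctl01
    ctl02
    ctl03

    [compute]
    cmp01
    cmp02
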
/roles/neutron/tasks/neutron.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: packages
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-neutron
6 | - openstack-neutron-linuxbridge
7 | - openstack-neutron-lbaas
8 | - ebtables
9 | - ipset
10 | - haproxy
11 |
12 | - name: install configurations
13 | tags: config
14 | template: src={{ item }}.j2 dest=/etc/neutron/{{ item }}
15 | with_items:
16 | - neutron.conf
17 | - lbaas_agent.ini
18 |
19 | - name: install configurations
20 | tags: config
21 | template: src=linuxbridge_agent.ini.j2 dest=/etc/neutron/plugins/ml2/linuxbridge_agent.ini
22 |
23 | - name: start services
24 | tags: service
25 | service: name={{ item }} state=restarted enabled=yes
26 | when: not ansible_check_mode
27 | with_items:
28 | - openstack-nova-compute
29 | - neutron-linuxbridge-agent
30 | - neutron-lbaasv2-agent
31 |
--------------------------------------------------------------------------------
/roles/database/files/server.cnf:
--------------------------------------------------------------------------------
1 | # this is read by the standalone daemon and embedded servers
2 | [server]
3 |
4 | # this is only for the mysqld standalone daemon
5 | [mysqld]
6 | bind-address = 0.0.0.0
7 | default-storage-engine = innodb
8 | innodb_file_per_table
9 | collation-server = utf8_general_ci
10 | init-connect = 'SET NAMES utf8'
11 | character-set-server = utf8
12 | max_connections = 500
13 | open_files_limit = 4096
14 |
15 | # this is only for embedded server
16 | [embedded]
17 |
18 | # This group is only read by MariaDB-5.5 servers.
19 | # If you use the same .cnf file for MariaDB of different versions,
20 | # use this group for options that older servers don't understand
21 | [mysqld-5.5]
22 |
23 | # These two groups are only read by MariaDB servers, not by MySQL.
24 | # If you use the same .cnf file for MySQL and MariaDB,
25 | # you can put MariaDB-only options here
26 | [mariadb]
27 |
28 | [mariadb-5.5]
29 |
--------------------------------------------------------------------------------
/roles/prepare/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install {{ openstack_release }}
2 | package: name=centos-release-openstack-{{ openstack_release }} state=latest
3 |
4 | - name: install support files for {{ openstack_release }}
5 | package: name={{ item }} state=latest
6 | with_items:
7 | - python-openstackclient
8 | - openstack-selinux
9 |
10 | - name: setup limits
11 | copy: src=limits-openstack.conf dest=/etc/security/limits.d/30-openstack.conf
12 |
13 | - name: generate environment
14 | lineinfile: line="export {{ item.key }}={{ item.value }}" dest=/root/openstack-admin.rc state=present create=yes
15 | tags: config
16 | with_dict: "{{ openstack_env }}"
17 |
18 | - name: make workdir
19 | tags: config
20 | local_action: file dest={{ inventory_dir }}/workdir state=directory
21 | run_once: true
22 |
23 | - name: generate services.xml
24 | tags:
25 | - config
26 | - clinit
27 | local_action: template src="services.xml.j2" dest={{ inventory_dir }}/workdir/services.xml
28 | run_once: true
29 |
--------------------------------------------------------------------------------
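The openstack_env dictionary consumed by the "generate environment" task is presumably defined in group_vars/all. A minimal sketch of the kind of values that end up in /root/openstack-admin.rc might be (the variable names and values here are assumptions, not taken from the repository):

    openstack_env:
      OS_PROJECT_DOMAIN_NAME: default
      OS_USER_DOMAIN_NAME: default
      OS_PROJECT_NAME: admin
      OS_USERNAME: admin
      OS_PASSWORD: "{{ admin_password }}"
      OS_AUTH_URL: "http://{{ internal_vip_address }}:35357/v3"
      OS_IDENTITY_API_VERSION: "3"
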
/roles/nova/files/00-nova-placement-api.conf:
--------------------------------------------------------------------------------
1 | Listen 8778
2 |
3 | <VirtualHost *:8778>
4 |   WSGIProcessGroup nova-placement-api
5 |   WSGIApplicationGroup %{GLOBAL}
6 |   WSGIPassAuthorization On
7 |   WSGIDaemonProcess nova-placement-api processes=3 threads=1 user=nova group=nova
8 |   WSGIScriptAlias / /usr/bin/nova-placement-api
9 |   <IfVersion >= 2.4>
10 |     ErrorLogFormat "%M"
11 |   </IfVersion>
12 |   ErrorLog /var/log/nova/nova-placement-api.log
13 |   #SSLEngine On
14 |   #SSLCertificateFile ...
15 |   #SSLCertificateKeyFile ...
16 | </VirtualHost>
17 |
18 | Alias /nova-placement-api /usr/bin/nova-placement-api
19 | <Location /nova-placement-api>
20 |   SetHandler wsgi-script
21 |   Options +ExecCGI
22 |   WSGIProcessGroup nova-placement-api
23 |   WSGIApplicationGroup %{GLOBAL}
24 |   WSGIPassAuthorization On
25 | </Location>
26 |
27 | <Directory /usr/bin>
28 |   <IfVersion >= 2.4>
29 |     Require all granted
30 |   </IfVersion>
31 |   <IfVersion < 2.4>
32 |     Order allow,deny
33 |     Allow from all
34 |   </IfVersion>
35 | </Directory>
36 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 Serge Sergeev
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/roles/glance/templates/glance-registry.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | transport_url = rabbit://{% for host in groups['controller'] %}openstack:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
4 |
5 | #bind_host = {{ ansible_default_ipv4['address'] }}
6 | registry_host = {{ internal_vip_address|default(groups['controller'][0]) }}
7 |
8 | [database]
9 | connection = mysql+pymysql://glance:{{ glancedb_password }}@{{ internal_vip_address|default(groups['controller'][0]) }}/glance
10 |
11 | [keystone_authtoken]
12 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000
13 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
14 | auth_type = password
15 | project_domain_name = default
16 | user_domain_name = default
17 | project_name = service
18 | username = glance
19 | password = {{ glance_password }}
20 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
21 |
22 | [paste_deploy]
23 | flavor = keystone
24 |
25 | [oslo_messaging_notifications]
26 | {% if groups['ceilometer-controller']|count > 0 %}
27 | driver = messagingv2
28 | {% else %}
29 | driver = noop
30 | {% endif %}
31 |
--------------------------------------------------------------------------------
/roles/ceilometer/templates/ceilometer.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | transport_url = rabbit://{% for host in groups['controller'] %}openstack:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
4 |
5 | [database]
6 | connection = mongodb://ceilometer:{{ ceilometerdb_password }}@{{ internal_vip_address|default(groups['controller'][0]) }}:27017/ceilometer
7 |
8 | [keystone_authtoken]
9 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000
10 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
11 | auth_type = password
12 | project_domain_name = default
13 | user_domain_name = default
14 | project_name = service
15 | username = ceilometer
16 | password = {{ ceilometer_password }}
17 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
18 |
19 | [service_credentials]
20 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000
21 | auth_type = password
22 | project_domain_name = default
23 | user_domain_name = default
24 | username = ceilometer
25 | password = {{ ceilometer_password }}
26 | project_name = service
27 | interface = internalURL
28 | region_name = RegionOne
29 |
--------------------------------------------------------------------------------
/roles/glance/templates/glance-api.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | transport_url = rabbit://{% for host in groups['controller'] %}openstack:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
4 |
5 | #bind_host = {{ ansible_default_ipv4['address'] }}
6 | registry_host = {{ internal_vip_address|default(groups['controller'][0]) }}
7 |
8 | [database]
9 | connection = mysql+pymysql://glance:{{ glancedb_password }}@{{ internal_vip_address|default(groups['controller'][0]) }}/glance
10 |
11 | [glance_store]
12 | stores = file,http
13 | default_store = file
14 | filesystem_store_datadir = /var/lib/glance/images/
15 |
16 | [keystone_authtoken]
17 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000
18 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
19 | auth_type = password
20 | project_domain_name = default
21 | user_domain_name = default
22 | project_name = service
23 | username = glance
24 | password = {{ glance_password }}
25 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
26 |
27 | [paste_deploy]
28 | flavor = keystone
29 |
30 | [oslo_messaging_notifications]
31 | {% if groups['ceilometer-controller']|count > 0 %}
32 | driver = messagingv2
33 | {% else %}
34 | driver = noop
35 | {% endif %}
36 |
--------------------------------------------------------------------------------
/roles/database/templates/my.cnf.j2:
--------------------------------------------------------------------------------
1 | [client]
2 | default-character-set=utf8
3 |
4 | [mysql]
5 | default-character-set=utf8
6 |
7 | [mysqld]
8 | #bind-address= api_interface_address
9 | #port= mariadb_port
10 |
11 | log-bin=mysql-bin
12 | binlog_format=ROW
13 | default-storage-engine=innodb
14 | innodb_autoinc_lock_mode=2
15 |
16 | collation-server = utf8_general_ci
17 | init-connect = 'SET NAMES utf8'
18 | character-set-server = utf8
19 |
20 | datadir=/var/lib/mysql/
21 |
22 | max_connections=10000
23 |
24 | key_buffer_size = '64M'
25 | max_heap_table_size = '64M'
26 | tmp_table_size = '64M'
27 | {% set dynamic_pool_size_mb = (hostvars[inventory_hostname]['ansible_memtotal_mb'] * 0.1) | round | int %}
28 | {% if dynamic_pool_size_mb < 8192 %}
29 | innodb_buffer_pool_size = '{{ dynamic_pool_size_mb }}M'
30 | {% else %}
31 | innodb_buffer_pool_size = '8192M'
32 | {% endif %}
33 |
34 | wsrep_on=ON
35 |
36 | wsrep_cluster_address=gcomm://{% if (groups['database'] | length) > 1 %}{% for host in groups['database'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
37 |
38 | wsrep_provider=/usr/lib64/galera/libgalera_smm.so
39 | wsrep_cluster_name="openstack"
40 | wsrep_node_name={{ ansible_hostname }}
41 | wsrep_sst_method=xtrabackup-v2
42 | #wsrep_sst_auth=mariadb:{{ mariadb_password }}
43 | wsrep_sst_auth=root
44 | wsrep_slave_threads=4
45 | #wsrep_notify_cmd=/usr/local/bin/wsrep-notify.sh
46 |
--------------------------------------------------------------------------------
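As a worked example of the buffer-pool sizing above: a node with 16384 MB of RAM gives dynamic_pool_size_mb = round(16384 * 0.1) = 1638, so the template renders

    innodb_buffer_pool_size = '1638M'

and only nodes with roughly 80 GB of RAM or more fall into the fixed 8192M branch.
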
/roles/ceilometer/templates/aodh.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | transport_url = rabbit://{% for host in groups['controller'] %}openstack:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
4 |
5 |
6 | [database]
7 | connection = mysql+pymysql://aodh:{{ aodhdb_password }}@{{ internal_vip_address|default(groups['controller'][0]) }}/aodh
8 |
9 | [keystone_authtoken]
10 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000
11 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
12 | auth_type = password
13 | project_domain_name = default
14 | user_domain_name = default
15 | project_name = service
16 | username = aodh
17 | password = {{ aodh_password }}
18 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
19 |
20 |
21 | [service_credentials]
22 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000/v3
23 | auth_type = password
24 | project_domain_name = default
25 | user_domain_name = default
26 | project_name = service
27 | username = aodh
28 | password = {{ aodh_password }}
29 | interface = internalURL
30 | region_name = RegionOne
31 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
32 |
--------------------------------------------------------------------------------
/roles/cinder/templates/cinder.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | transport_url = rabbit://{% for host in groups['controller'] %}openstack:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
4 |
5 | auth_strategy = keystone
6 | my_ip = {{ ansible_default_ipv4['address'] }}
7 | enabled_backends = lvm
8 | glance_api_servers = http://{{ internal_vip_address|default(groups['controller'][0]) }}:9292
9 |
10 |
11 | [lvm]
12 | volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
13 | volume_group = cinder-volumes
14 | iscsi_protocol = iscsi
15 | iscsi_helper = lioadm
16 | volume_clear=none
17 | volume_clear_size=0
18 |
19 | [database]
20 | connection = mysql+pymysql://cinder:{{ cinderdb_password }}@{{ internal_vip_address|default(groups['controller'][0]) }}/cinder
21 |
22 | [keystone_authtoken]
23 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000
24 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
25 | auth_type = password
26 | project_domain_name = default
27 | user_domain_name = default
28 | project_name = service
29 | username = cinder
30 | password = {{ cinder_password }}
31 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
32 |
33 | [oslo_concurrency]
34 | lock_path = /var/lib/cinder/tmp
35 |
36 | [oslo_messaging_notifications]
37 | {% if groups['ceilometer-controller']|count > 0 %}
38 | driver = messagingv2
39 | {% else %}
40 | driver = noop
41 | {% endif %}
42 |
--------------------------------------------------------------------------------
/roles/keystone/files/wsgi-keystone.conf:
--------------------------------------------------------------------------------
1 | Listen 5000
2 | Listen 35357
3 |
4 | <VirtualHost *:5000>
5 |     WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
6 |     WSGIProcessGroup keystone-public
7 |     WSGIScriptAlias / /usr/bin/keystone-wsgi-public
8 |     WSGIApplicationGroup %{GLOBAL}
9 |     WSGIPassAuthorization On
10 |     <IfVersion >= 2.4>
11 |       ErrorLogFormat "%{cu}t %M"
12 |     </IfVersion>
13 |     ErrorLog /var/log/httpd/keystone-error.log
14 |     CustomLog /var/log/httpd/keystone-access.log combined
15 |
16 |     <Directory /usr/bin>
17 |         <IfVersion >= 2.4>
18 |             Require all granted
19 |         </IfVersion>
20 |         <IfVersion < 2.4>
21 |             Order allow,deny
22 |             Allow from all
23 |         </IfVersion>
24 |     </Directory>
25 | </VirtualHost>
26 |
27 | <VirtualHost *:35357>
28 |     WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
29 |     WSGIProcessGroup keystone-admin
30 |     WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
31 |     WSGIApplicationGroup %{GLOBAL}
32 |     WSGIPassAuthorization On
33 |     <IfVersion >= 2.4>
34 |       ErrorLogFormat "%{cu}t %M"
35 |     </IfVersion>
36 |     ErrorLog /var/log/httpd/keystone-error.log
37 |     CustomLog /var/log/httpd/keystone-access.log combined
38 |
39 |     <Directory /usr/bin>
40 |         <IfVersion >= 2.4>
41 |             Require all granted
42 |         </IfVersion>
43 |         <IfVersion < 2.4>
44 |             Order allow,deny
45 |             Allow from all
46 |         </IfVersion>
47 |     </Directory>
48 | </VirtualHost>
49 |
--------------------------------------------------------------------------------
/roles/neutron/templates/neutron.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | transport_url = rabbit://{% for host in groups['controller'] %}openstack:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
4 |
5 | core_plugin = ml2
6 | service_plugins = router,neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2
7 | allow_overlapping_ips = True
8 | notify_nova_on_port_status_changes = True
9 | notify_nova_on_port_data_changes = True
10 | nova_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:8774/v2
11 |
12 | [keystone_authtoken]
13 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000
14 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
15 | auth_type = password
16 | project_domain_name = default
17 | user_domain_name = default
18 | project_name = service
19 | username = neutron
20 | password = {{ neutron_password }}
21 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
22 |
23 | [database]
24 | connection = mysql+pymysql://neutron:{{ neutrondb_password }}@{{ internal_vip_address|default(groups['controller'][0]) }}/neutron
25 |
26 | [nova]
27 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
28 | auth_type = password
29 | project_domain_name = default
30 | user_domain_name = default
31 | region_name = RegionOne
32 | project_name = service
33 | username = nova
34 | password = {{ nova_password }}
35 |
36 | [oslo_concurrency]
37 | lock_path = /var/lib/neutron/tmp
38 |
39 | [oslo_messaging_notifications]
40 | driver = messagingv2
41 |
--------------------------------------------------------------------------------
/roles/ceilometer/tasks/aodh.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: packages
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-aodh-api
6 | - openstack-aodh-evaluator
7 | - openstack-aodh-notifier
8 | - openstack-aodh-listener
9 | - openstack-aodh-expirer
10 | - python-ceilometerclient
11 |
12 | - name: install configuration
13 | template: src={{ item }}.j2 dest=/etc/aodh/{{ item }}
14 | with_items:
15 | - aodh.conf
16 |
17 | - block:
18 | - name: copy db create template
19 | template: src=aodhdb.sql.j2 dest=/tmp/aodhdb.sql
20 |
21 | - name: create database
22 | shell: mysql -u root < /tmp/aodhdb.sql
23 |
24 | - name: remove sql
25 | file: path=/tmp/aodhdb.sql state=absent
26 |
27 | - name: list users
28 | command: "openstack user list"
29 | register: ulist
30 |
31 | - block:
32 | - command: "openstack user create --domain default --password {{ aodh_password }} aodh"
33 | - command: "openstack role add --project service --user aodh admin"
34 | - command: "openstack service create --name aodh --description 'OpenStack Telemetry Alarming service' alarming"
35 | - command: "openstack endpoint create --region RegionOne alarming {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:8042"
36 | with_items:
37 | - internal
38 | - public
39 | - admin
40 | when: not ansible_check_mode and ulist.stdout.find("aodh") < 0
41 |
42 | - name: run aodh db_sync
43 | command: aodh-dbsync
44 | become: true
45 | become_user: aodh
46 | when: not ansible_check_mode
47 | run_once: true
48 |
49 | - name: start services
50 | tags: service
51 | service: name={{ item }} state=restarted enabled=yes
52 | when: not ansible_check_mode
53 | with_items:
54 | - openstack-aodh-api
55 | - openstack-aodh-evaluator
56 | - openstack-aodh-notifier
57 | - openstack-aodh-listener
58 |
--------------------------------------------------------------------------------
/roles/heat/templates/heat.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | transport_url = rabbit://{% for host in groups['controller'] %}openstack:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
4 |
5 | heat_metadata_server_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:8000
6 | heat_waitcondition_server_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:8000/v1/waitcondition
7 | stack_domain_admin = heat_domain_admin
8 | stack_domain_admin_password = {{ heat_domain_password }}
9 | stack_user_domain_name = heat
10 | deferred_auth_method = trusts
11 | trusts_delegated_role = heat_stack_owner
12 |
13 | [clients_keystone]
14 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
15 |
16 | [keystone_authtoken]
17 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000
18 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
19 | auth_type = password
20 | project_domain_name = default
21 | user_domain_name = default
22 | project_name = service
23 | username = heat
24 | password = {{ heat_password }}
25 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
26 |
27 | [cache]
28 | backend = oslo_cache.memcache_pool
29 | enabled = True
30 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
31 |
32 | [trustee]
33 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
34 | auth_plugin = password
35 | user_domain_name = default
36 | username = heat
37 | password = {{ heat_password }}
38 |
39 | [database]
40 | connection = mysql+pymysql://heat:{{ heatdb_password }}@{{ internal_vip_address|default(groups['controller'][0]) }}/heat
41 |
42 | [ec2authtoken]
43 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000/v3
44 |
45 | [oslo_messaging_notifications]
46 | {% if groups['ceilometer-controller']|count > 0 %}
47 | driver = messagingv2
48 | {% else %}
49 | driver = noop
50 | {% endif %}
51 |
--------------------------------------------------------------------------------
/roles/ceilometer/tasks/controller.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: package
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-ceilometer-api
6 | - openstack-ceilometer-collector
7 | - openstack-ceilometer-notification
8 | - openstack-ceilometer-central
9 | - openstack-ceilometer-alarm
10 | - python-ceilometerclient
11 |
12 | - name: install configuration
13 | tags: config
14 | template: src={{ item }}.j2 dest=/etc/ceilometer/{{ item }}
15 | with_items:
16 | - ceilometer.conf
17 | - pipeline.yaml
18 |
19 | - block:
20 | - name: database
21 | tags: dbs
22 | shell: "mongo --host {{ internal_vip_address|default(groups['controller'][0]) }} --eval \"db = db.getSiblingDB('ceilometer');db.getUser('ceilometer')\""
23 | register: ulist
24 |
25 | - name: create database
26 | tags: dbs
27 | command: "mongo --host {{ internal_vip_address|default(groups['controller'][0]) }} --eval \"db = db.getSiblingDB('ceilometer'); db.createUser({user: 'ceilometer', pwd: '{{ ceilometerdb_password }}', roles: [ 'readWrite', 'dbAdmin' ]})\""
28 | when: not ansible_check_mode and ulist.stdout.find("null") >= 0
29 |
30 | - name: list users
31 | command: "openstack user list"
32 | register: ulist
33 |
34 | - block:
35 | - command: "openstack user create --domain default --password {{ ceilometer_password }} ceilometer"
36 | - command: "openstack role add --project service --user ceilometer admin"
37 | - command: "openstack service create --name ceilometer --description 'OpenStack Telemetry service' metering"
38 | - command: "openstack endpoint create --region RegionOne metering {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:8777"
39 | with_items:
40 | - internal
41 | - public
42 | - admin
43 | when: not ansible_check_mode and ulist.stdout.find("ceilometer") < 0
44 | run_once: true
45 |
46 | - name: start services
47 | tags: service
48 | service: name={{ item }} state=restarted enabled=yes
49 | when: not ansible_check_mode
50 | with_items:
51 | - openstack-ceilometer-api
52 | - openstack-ceilometer-notification
53 | - openstack-ceilometer-central
54 | - openstack-ceilometer-collector
55 |
--------------------------------------------------------------------------------
/roles/tests/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - block:
2 | #nova
3 | - name: check for existing nano flavor
4 | shell: openstack flavor list | awk '/m1.nano/{print "m1.nano"}'
5 | register: flav
6 |
7 | - name: create nano flavor
8 | command: openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
9 | when: flav.stdout.find('m1.nano') < 0
10 |
11 | - name: launch test instance
12 | tags: test_nova
13 | command: nova boot --image cirros --flavor m1.nano --nic net-name=private --poll ansible_test
14 |
15 | - name: check instance status
16 | tags: test_nova
17 | shell: nova show ansible_test | awk '/status/{print $4}'
18 | register: result
19 | failed_when: result.stdout.find('ACTIVE') < 0
20 |
21 | - name: destroy instance
22 | tags: test_nova
23 | command: nova delete ansible_test
24 |
25 | #heat
26 | - name: copy stack template
27 | tags: test_stack
28 | copy: src=cirros.yaml dest=/tmp/cirros.yaml
29 |
30 | - name: create test stack
31 | command: openstack stack create --wait -t /tmp/cirros.yaml ansible_test
32 |
33 | - name: check stack status
34 | tags: test_stack
35 | shell: openstack stack show ansible_test | awk '/status /{print $4}'
36 | register: result
37 | failed_when: result.stdout.find('CREATE_COMPLETE') < 0
38 |
39 | - name: delete stack
40 | tags: test_stack
41 | command: openstack stack delete --wait -y ansible_test
42 |
43 | - name: delete nano flavor
44 | command: openstack flavor delete m1.nano
45 |
46 | #swift
47 | - name: create test container
48 | tags: test_swift
49 | command: swift post ansible_test
50 |
51 | - name: create test file
52 | tags: test_swift
53 | command: dd if=/dev/urandom of=ansible_swift_test bs=1024 count=2000
54 |
55 | - name: upload test file to test container
56 | tags: test_swift
57 | command: swift upload ansible_test ansible_swift_test
58 |
59 | - name: remove test file
60 | tags: test_swift
61 | file: name=ansible_swift_test state=absent
62 |
63 | - name: download the file from test container
64 | tags: test_swift
65 | command: swift download ansible_test ansible_swift_test
66 |
67 | - name: remove test file
68 | tags: test_swift
69 | file: name=ansible_swift_test state=absent
70 |
71 | - name: delete test file from test container
72 | tags: test_swift
73 | command: swift delete ansible_test ansible_swift_test
74 |
75 | - name: delete test container
76 | tags: test_swift
77 | command: swift delete ansible_test
78 | run_once: true
79 | when: not ansible_check_mode
80 |
--------------------------------------------------------------------------------
/roles/glance/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: packages
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-glance
6 | - python-glance
7 | - python-glanceclient
8 |
9 | - name: install glance configuration
10 | template: src={{ item }}.j2 dest=/etc/glance/{{ item }}
11 | with_items:
12 | - glance-api.conf
13 | - glance-registry.conf
14 |
15 | - block:
16 | - name: copy db create template
17 | template: src=glancedb.sql.j2 dest=/tmp/glancedb.sql
18 |
19 | - name: create database
20 | shell: mysql -u root < /tmp/glancedb.sql
21 |
22 | - name: remove sql
23 | file: path=/tmp/glancedb.sql state=absent
24 |
25 | - name: list services
26 | command: "openstack service list"
27 | register: ulist
28 |
29 | - block:
30 | - command: "openstack user create --domain default --password {{ glance_password }} glance"
31 | - command: "openstack role add --project service --user glance admin"
32 | - command: "openstack service create --name glance --description 'OpenStack Image service' image"
33 | - command: "openstack endpoint create --region RegionOne image {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:9292"
34 | with_items:
35 | - internal
36 | - public
37 | - admin
38 | when: not ansible_check_mode and ulist.stdout.find("glance") < 0
39 |
40 | - name: run glance-db sync
41 | command: glance-manage db_sync
42 | become: true
43 | become_user: glance
44 | when: not ansible_check_mode
45 | run_once: true
46 |
47 | - name: start services
48 | tags: service
49 | service: name={{ item }} state=restarted enabled=yes
50 | when: not ansible_check_mode
51 | with_items:
52 | - openstack-glance-registry
53 | - openstack-glance-api
54 |
55 | - wait_for: port=9292
56 |
57 | - block:
58 | - name: get images list
59 | command: "openstack image list"
60 | register: imlist
61 | when: not ansible_check_mode
62 |
63 | - block:
64 | - name: get cirros image
65 | get_url: url=http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img dest=/tmp/cirros-0.3.4-x86_64-disk.img
66 |
67 | - name: upload the image
68 | command: "openstack image create 'cirros' --file /tmp/cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public"
69 | when: not ansible_check_mode and imlist.stdout.find("cirros") < 0
70 | run_once: true
71 |
72 | - name: test glance api
73 | tags: test
74 | shell: glance --os-image-url http://{{ ansible_default_ipv4['address'] }}:9292 image-list | grep '^| ID'
75 |
--------------------------------------------------------------------------------
/roles/cinder/tasks/controller.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: package
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-cinder
6 | - targetcli
7 | - python-keystone
8 |
9 | - name: install cinder configuration
10 | tags: config
11 | template: src={{ item }}.j2 dest=/etc/cinder/{{ item }}
12 | with_items:
13 | - cinder.conf
14 |
15 | - block:
16 | - name: copy db create template
17 | template: src=cinderdb.sql.j2 dest=/tmp/cinderdb.sql
18 |
19 | - name: create database
20 | shell: mysql -u root < /tmp/cinderdb.sql
21 |
22 | - name: remove sql
23 | file: path=/tmp/cinderdb.sql state=absent
24 |
25 | - name: list users
26 | command: "openstack user list"
27 | register: ulist
28 |
29 | - block:
30 | - command: "openstack user create --domain default --password {{ cinder_password }} cinder"
31 | - command: "openstack role add --project service --user cinder admin"
32 | - command: "openstack service create --name cinder --description 'OpenStack Block Storage service' volume"
33 | - command: "openstack service create --name cinderv2 --description 'OpenStack Block Storage service' volumev2"
34 | - command: "openstack service create --name cinderv3 --description 'OpenStack Block Storage service' volumev3"
35 | - command: "openstack endpoint create --region RegionOne volume {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:8776/v1/%(project_id)s"
36 | with_items:
37 | - internal
38 | - public
39 | - admin
40 | - command: "openstack endpoint create --region RegionOne volumev2 {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:8776/v2/%(project_id)s"
41 | with_items:
42 | - internal
43 | - public
44 | - admin
45 | - command: "openstack endpoint create --region RegionOne volumev3 {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:8776/v3/%(project_id)s"
46 | with_items:
47 | - internal
48 | - public
49 | - admin
50 | when: not ansible_check_mode and ulist.stdout.find("cinder") < 0
51 |
52 | - name: run cinder db sync
53 | command: cinder-manage db sync
54 | become: true
55 | become_user: cinder
56 | when: not ansible_check_mode
57 | run_once: true
58 |
59 | - name: start services
60 | tags: service
61 | service: name={{ item }} state=restarted enabled=yes
62 | when: not ansible_check_mode
63 | with_items:
64 | - openstack-cinder-api
65 | - openstack-cinder-scheduler
66 |
67 | - wait_for: port=8776
68 |
69 | - name: test cinder api
70 | tags: test
71 | shell: cinder list | grep '^| ID'
72 | run_once: true
73 |
--------------------------------------------------------------------------------
/roles/keystone/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install required packages
2 | tags: packages
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-keystone
6 | - httpd
7 | - mod_wsgi
8 | - python-memcached
9 | when: not ansible_check_mode
10 |
11 | - name: install keystone configuration
12 | tags: config
13 | template: src=keystone.conf.j2 dest=/etc/keystone/keystone.conf
14 | when: not ansible_check_mode
15 |
16 | - name: install httpd config
17 | tags: config
18 | copy: src=wsgi-keystone.conf dest=/etc/httpd/conf.d/wsgi-keystone.conf
19 |
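   | # set up the fernet token and credential key repositories under /etc/keystone, owned by the keystone user/group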
20 | - name: initialize fernet key repositories
21 | command: "{{ item }}"
22 | with_items:
23 | - "keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone"
24 | - "keystone-manage credential_setup --keystone-user keystone --keystone-group keystone"
25 |
26 | - name: start httpd
27 | tags: service
28 | service: name=httpd state=restarted enabled=yes
29 | when: not ansible_check_mode
30 |
31 | - block:
32 | - name: copy db create template
33 | template: src=keystonedb.sql.j2 dest=/tmp/keystonedb.sql
34 |
35 | - name: create database
36 |     shell: mysql -u root < /tmp/keystonedb.sql
37 |
38 | - name: remove sql
39 | file: path=/tmp/keystonedb.sql state=absent
40 |
41 | - name: run keystone-db sync
42 | command: keystone-manage db_sync
43 | become: true
44 | become_user: keystone
45 | when: not ansible_check_mode
46 |
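   |   # keystone-manage bootstrap creates the default domain, the admin project/user/role and the identity endpoints in one idempotent step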
47 | - name: bootstrap the identity service
48 | shell:
49 | cmd: |
50 | keystone-manage bootstrap --bootstrap-password {{ admin_password }} \
51 | --bootstrap-admin-url http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357/v3/ \
52 | --bootstrap-internal-url http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000/v3/ \
53 | --bootstrap-public-url http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000/v3/ \
54 | --bootstrap-region-id RegionOne
55 |
56 | - name: list projects
57 | command: "openstack project list"
58 | register: plist
59 | when: not ansible_check_mode
60 |
61 | - block:
62 | - command: "openstack project create --domain default --description 'Service Project' service"
63 | - command: "openstack project create --domain default --description 'Demo Project' demo"
64 | - command: "openstack user create --domain default --password {{ demo_password }} demo"
65 | - command: "openstack role create user"
66 | - command: "openstack role add --project demo --user demo user"
67 | when: not ansible_check_mode and plist.stdout.find("service") < 0
68 |
69 | run_once: true
70 |
71 | - name: test keystone token
72 | tags: test
73 | shell: openstack --os-auth-url http://{{ ansible_default_ipv4['address'] }}:35357/v3 token issue | grep '^| id'
74 |
--------------------------------------------------------------------------------
/roles/ceilometer/templates/pipeline.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | sources:
3 | - name: meter_source
4 | interval: 60
5 | meters:
6 | - "*"
7 | sinks:
8 | - meter_sink
9 | - name: cpu_source
10 | interval: 60
11 | meters:
12 | - "cpu"
13 | sinks:
14 | - cpu_sink
15 | - cpu_delta_sink
16 | - name: disk_source
17 | interval: 60
18 | meters:
19 | - "disk.read.bytes"
20 | - "disk.read.requests"
21 | - "disk.write.bytes"
22 | - "disk.write.requests"
23 | - "disk.device.read.bytes"
24 | - "disk.device.read.requests"
25 | - "disk.device.write.bytes"
26 | - "disk.device.write.requests"
27 | sinks:
28 | - disk_sink
29 | - name: network_source
30 | interval: 60
31 | meters:
32 | - "network.incoming.bytes"
33 | - "network.incoming.packets"
34 | - "network.outgoing.bytes"
35 | - "network.outgoing.packets"
36 | sinks:
37 | - network_sink
38 | sinks:
39 | - name: meter_sink
40 | transformers:
41 | publishers:
42 | - notifier://
43 | - name: cpu_sink
44 | transformers:
45 | - name: "rate_of_change"
46 | parameters:
47 | target:
48 | name: "cpu_util"
49 | unit: "%"
50 | type: "gauge"
51 | scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
52 | publishers:
53 | - notifier://
54 | - name: cpu_delta_sink
55 | transformers:
56 | - name: "delta"
57 | parameters:
58 | target:
59 | name: "cpu.delta"
60 | growth_only: True
61 | publishers:
62 | - notifier://
63 | - name: disk_sink
64 | transformers:
65 | - name: "rate_of_change"
66 | parameters:
67 | source:
68 | map_from:
69 | name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
70 | unit: "(B|request)"
71 | target:
72 | map_to:
73 | name: "\\1.\\2.\\3.rate"
74 | unit: "\\1/s"
75 | type: "gauge"
76 | publishers:
77 | - notifier://
78 | - name: network_sink
79 | transformers:
80 | - name: "rate_of_change"
81 | parameters:
82 | source:
83 | map_from:
84 | name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
85 | unit: "(B|packet)"
86 | target:
87 | map_to:
88 | name: "network.\\1.\\2.rate"
89 | unit: "\\1/s"
90 | type: "gauge"
91 | publishers:
92 | - notifier://
93 |
--------------------------------------------------------------------------------
/roles/heat/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: packages
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-heat-api
6 | - openstack-heat-api-cfn
7 | - openstack-heat-engine
8 | - openstack-barbican
9 | - python-barbicanclient
10 | - python-croniter
11 | - python-designateclient
12 | - python-manilaclient
13 | - python-zaqarclient
14 | - python-openstack-mistral
15 | - openstack-magnum-api
16 | - python2-magnumclient
17 |
18 | - name: install configuration
19 | template: src={{ item }}.j2 dest=/etc/heat/{{ item }}
20 | with_items:
21 | - heat.conf
22 |
23 | - block:
24 | - name: copy db create template
25 | template: src=heatdb.sql.j2 dest=/tmp/heatdb.sql
26 |
27 | - name: create database
28 |     shell: mysql -u root < /tmp/heatdb.sql
29 |
30 | - name: remove sql
31 | file: path=/tmp/heatdb.sql state=absent
32 |
33 | - name: list users
34 | command: "openstack user list"
35 | register: ulist
36 |
37 | - block:
38 | - command: "openstack user create --domain default --password {{ heat_password }} heat"
39 | - command: "openstack role add --project service --user heat admin"
40 | - command: "openstack service create --name heat --description 'OpenStack Orchestration service' orchestration"
41 | - command: "openstack service create --name heat-cfn --description 'OpenStack Orchestration service' cloudformation"
42 | - command: "openstack endpoint create --region RegionOne orchestration {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:8004/v1/%(tenant_id)s"
43 | with_items:
44 | - internal
45 | - public
46 | - admin
47 | - command: "openstack endpoint create --region RegionOne cloudformation {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:8000/v1"
48 | with_items:
49 | - internal
50 | - public
51 | - admin
52 | - command: "openstack domain create --description 'Stack projects and users' heat"
53 | - command: "openstack user create --domain heat --password {{ heat_domain_password }} heat_domain_admin"
54 | - command: "openstack role add --domain heat --user-domain heat --user heat_domain_admin admin"
55 | - command: "openstack role create heat_stack_owner"
56 | - command: "openstack role add --project demo --user demo heat_stack_owner"
57 | - command: "openstack role create heat_stack_user"
58 | when: not ansible_check_mode and ulist.stdout.find("heat") < 0
59 |
60 | - name: run heat db_sync
61 | command: heat-manage db_sync
62 | become: true
63 | become_user: heat
64 | when: not ansible_check_mode
65 | run_once: true
66 |
67 | - name: start services
68 | tags: service
69 | service: name={{ item }} state=restarted enabled=yes
70 | when: not ansible_check_mode
71 | with_items:
72 | - openstack-heat-api
73 | - openstack-heat-api-cfn
74 | - openstack-heat-engine
75 |
--------------------------------------------------------------------------------
/roles/swift/tasks/swift.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: package
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-swift-account
6 | - openstack-swift-container
7 | - openstack-swift-object
8 | - rsync
9 | - xfsprogs
10 |
11 | - name: install rsync configuration
12 | tags: config
13 | template: src={{ item }}.j2 dest=/etc/{{ item }}
14 | with_items:
15 | - rsyncd.conf
16 |
17 | - name: start services
18 | tags: service
19 | service: name={{ item }} state=restarted enabled=yes
20 | with_items:
21 | - rsyncd
22 |
23 | - name: install swift configuration
24 | tags: config
25 | template: src={{ item }}.j2 dest=/etc/swift/{{ item }}
26 | with_items:
27 | - account-server.conf
28 | - container-server.conf
29 | - object-server.conf
30 | - swift.conf
31 |
32 | - name: install directories
33 | tags: config
34 | file: dest={{ swift_storage_dir }} owner=swift group=swift state=directory
35 | when: not ansible_check_mode
36 |
37 | - name: install directories
38 | tags: config
39 | file: dest=/var/cache/swift owner=root group=swift state=directory mode=0775
40 | when: not ansible_check_mode
41 |
42 | - name: prepare storage
43 | file: dest={{ swift_storage_dir }}/{{ item|basename }} state=directory owner=swift
44 | with_items: "{{ swift_storage_devices }}"
45 | when: item.find('/dev') < 0 and not ansible_check_mode
46 |
47 | - name: create filefs
48 | command: truncate -s {{ swift_device_size }} {{ item }}
49 | with_items: "{{ swift_storage_devices }}"
50 | when: item.find('/dev') < 0 and not ansible_check_mode
51 |
52 | - name: create fs
53 | command: mkfs.xfs -f {{ item }}
54 | with_items: "{{ swift_storage_devices }}"
55 | when: not ansible_check_mode
56 |
57 | - name: mount
58 | mount: name={{ swift_storage_dir }}/{{ item|basename }} src={{ item }} fstype=xfs opts="noatime,nodiratime,nobarrier,logbufs=8" state=mounted
59 | with_items: "{{ swift_storage_devices }}"
60 | when: not ansible_check_mode
61 |
62 | - name: fix permissions
63 | file: dest={{ swift_storage_dir }} owner=swift group=swift mode=0775 recurse=yes
64 | when: not ansible_check_mode
65 |
66 | - name: copy ring config
67 | tags: copy
68 | copy: src={{ inventory_dir }}/workdir/{{ item }} dest=/etc/swift/{{ item }}
69 | with_items:
70 | - account.ring.gz
71 | - container.ring.gz
72 | - object.ring.gz
73 |
74 | - name: start services
75 | tags: service
76 | service: name={{ item }} state=restarted enabled=yes
77 | when: not ansible_check_mode
78 | with_items:
79 | - rsyslog
80 | - openstack-swift-account
81 | - openstack-swift-account-auditor
82 | - openstack-swift-account-reaper
83 | - openstack-swift-account-replicator
84 | - openstack-swift-container
85 | - openstack-swift-container-auditor
86 | - openstack-swift-container-replicator
87 | - openstack-swift-container-updater
88 | - openstack-swift-object
89 | - openstack-swift-object-auditor
90 | - openstack-swift-object-replicator
91 | - openstack-swift-object-updater
92 |
--------------------------------------------------------------------------------
/roles/nova/tasks/controller.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: packages
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-nova-api
6 | - openstack-nova-conductor
7 | - openstack-nova-console
8 | - openstack-nova-novncproxy
9 | - openstack-nova-scheduler
10 | - openstack-nova-placement-api
11 |
12 | - name: install nova configuration
13 | tags: config
14 | template: src={{ item }}.j2 dest=/etc/nova/{{ item }}
15 | with_items:
16 | - nova.conf
17 |
18 | - name: install fixed httpd configuration
19 | tags: config
20 | copy: src=00-nova-placement-api.conf dest=/etc/httpd/conf.d/00-nova-placement-api.conf
21 |
22 | - name: restart httpd
23 | service: name=httpd state=restarted enabled=yes
24 | when: not ansible_check_mode
25 |
26 | - name: copy db create template
27 | template: src=novadb.sql.j2 dest=/tmp/novadb.sql
28 |
29 | - block:
30 | - name: create database
31 |     shell: mysql -u root < /tmp/novadb.sql
32 |
33 | - name: remove sql
34 | file: path=/tmp/novadb.sql state=absent
35 |
36 | - name: list users
37 | command: "openstack user list"
38 | register: ulist
39 |
40 | - block:
41 | - command: "openstack user create --domain default --password {{ nova_password }} nova"
42 | - command: "openstack role add --project service --user nova admin"
43 | - command: "openstack service create --name nova --description 'OpenStack Compute' compute"
44 | - command: "openstack endpoint create --region RegionOne compute {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:8774/v2.1"
45 | with_items:
46 | - internal
47 | - public
48 | - admin
49 | - command: "{{ item }}"
50 | with_items:
51 | - "openstack user create --domain default --password {{ nova_placement_password }} placement"
52 | - "openstack role add --project service --user placement admin"
53 | - command: "openstack service create --name placement --description 'Placement API' placement"
54 | - command: "openstack endpoint create --region RegionOne placement {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:8778"
55 | with_items:
56 | - internal
57 | - public
58 | - admin
59 | when: not ansible_check_mode and ulist.stdout.find("nova") < 0
60 |
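   |   # in order: sync the nova_api schema, register cell0, create the initial cell1 and sync the main nova schema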
61 | - name: run nova db sync
62 | command: nova-manage {{ item }}
63 | become: true
64 | become_user: nova
65 | with_items:
66 | - "api_db sync"
67 | - "cell_v2 map_cell0"
68 | - "cell_v2 create_cell --name=cell1 --verbose"
69 | - "db sync"
70 | when: not ansible_check_mode
71 | run_once: true
72 |
73 | - name: start services
74 | tags: service
75 | service: name={{ item }} state=restarted enabled=yes
76 | when: not ansible_check_mode
77 | with_items:
78 | - openstack-nova-api
79 | - openstack-nova-consoleauth
80 | - openstack-nova-scheduler
81 | - openstack-nova-conductor
82 | - openstack-nova-novncproxy
83 |
84 | - name: verify operation
85 | shell: nova-manage cell_v2 list_cells | grep '^| cell0'
86 |
--------------------------------------------------------------------------------
/roles/swift/templates/proxy-server.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | bind_port = 8080
3 | swift_dir = /etc/swift
4 | user = swift
5 |
6 | [pipeline:main]
7 | pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging ceilometer proxy-server
8 |
9 | [app:proxy-server]
10 | use = egg:swift#proxy
11 | account_autocreate = true
12 |
13 | [filter:tempauth]
14 | use = egg:swift#tempauth
15 | user_admin_admin = admin .admin .reseller_admin
16 | user_test_tester = testing .admin
17 | user_test2_tester2 = testing2 .admin
18 | user_test_tester3 = testing3
19 | user_test5_tester5 = testing5 service
20 |
21 | [filter:authtoken]
22 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000
23 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
24 | paste.filter_factory = keystonemiddleware.auth_token:filter_factory
25 | auth_type = password
26 | project_domain_name = default
27 | user_domain_name = default
28 | project_name = service
29 | username = swift
30 | password = {{ swift_password }}
31 | delay_auth_decision = True
32 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
33 |
34 | {% if groups['ceilometer-controller']|count > 0 %}
35 | [filter:ceilometer]
36 | paste.filter_factory = ceilometermiddleware.swift:filter_factory
37 | control_exchange = swift
38 | url = rabbit://{% for host in groups['controller'] %}openstack:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
39 |
40 | driver = messagingv2
41 | topic = notifications
42 | log_level = WARN
43 | {% endif %}
44 |
45 | [filter:keystoneauth]
46 | use = egg:swift#keystoneauth
47 | operator_roles = admin, swiftoperator, user, ResellerAdmin
48 |
49 | [filter:healthcheck]
50 | use = egg:swift#healthcheck
51 |
52 | [filter:cache]
53 | use = egg:swift#memcache
54 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
55 |
56 | [filter:ratelimit]
57 | use = egg:swift#ratelimit
58 |
59 | [filter:domain_remap]
60 | use = egg:swift#domain_remap
61 |
62 | [filter:catch_errors]
63 | use = egg:swift#catch_errors
64 |
65 | [filter:cname_lookup]
66 | use = egg:swift#cname_lookup
67 |
68 | [filter:staticweb]
69 | use = egg:swift#staticweb
70 |
71 | [filter:tempurl]
72 | use = egg:swift#tempurl
73 |
74 | [filter:formpost]
75 | use = egg:swift#formpost
76 |
77 | [filter:name_check]
78 | use = egg:swift#name_check
79 |
80 | [filter:list-endpoints]
81 | use = egg:swift#list_endpoints
82 |
83 | [filter:proxy-logging]
84 | use = egg:swift#proxy_logging
85 |
86 | [filter:bulk]
87 | use = egg:swift#bulk
88 |
89 | [filter:slo]
90 | use = egg:swift#slo
91 |
92 | [filter:dlo]
93 | use = egg:swift#dlo
94 |
95 | [filter:container-quotas]
96 | use = egg:swift#container_quotas
97 |
98 | [filter:account-quotas]
99 | use = egg:swift#account_quotas
100 |
101 | [filter:gatekeeper]
102 | use = egg:swift#gatekeeper
103 |
104 | [filter:container_sync]
105 | use = egg:swift#container_sync
106 |
107 | [filter:xprofile]
108 | use = egg:swift#xprofile
109 |
110 | [filter:versioned_writes]
111 | use = egg:swift#versioned_writes
112 |
--------------------------------------------------------------------------------
/roles/nova/templates/nova.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | transport_url = rabbit://{% for host in groups['controller'] %}openstack:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
4 |
5 | my_ip = {{ ansible_default_ipv4['address'] }}
6 | network_api_class = nova.network.neutronv2.api.API
7 | security_group_api = neutron
8 | linuxnet_interface_driver = nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
9 | firewall_driver = nova.virt.firewall.NoopFirewallDriver
10 | enabled_apis=osapi_compute,metadata
11 | instance_usage_audit = True
12 | instance_usage_audit_period = hour
13 | notify_on_state_change = vm_and_task_state
14 | use_neutron = True
15 | public_interface={{ public_interface_name }}
16 |
17 | {% if virt_type|default('kvm') == 'lxc' %}
18 | use_cow_images=False
19 | {% endif %}
20 |
21 | [database]
22 | connection = mysql+pymysql://nova:{{ novadb_password }}@{{ internal_vip_address|default(groups['controller'][0]) }}/nova
23 |
24 | [api_database]
25 | connection = mysql+pymysql://nova:{{ novadb_password }}@{{ internal_vip_address|default(groups['controller'][0]) }}/nova_api
26 |
27 | [cache]
28 | backend = oslo_cache.memcache_pool
29 | enabled = True
30 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
31 |
32 | [glance]
33 | api_servers = http://{{ internal_vip_address|default(groups['controller'][0]) }}:9292
34 |
35 | [keystone_authtoken]
36 | auth_uri = http://{{ internal_vip_address|default(groups['controller'][0]) }}:5000
37 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
38 | auth_type = password
39 | project_domain_name = default
40 | user_domain_name = default
41 | project_name = service
42 | username = nova
43 | password = {{ nova_password }}
44 | memcached_servers = {% for host in groups['controller'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211{% if not loop.last %},{% endif %}{% endfor %}
45 |
46 |
47 | [libvirt]
48 | virt_type = {{ virt_type|default('kvm') }}
49 | hw_machine_type = x86_64=pc-i440fx-rhel7.2.0
50 |
51 | [neutron]
52 | url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:9696
53 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357
54 | auth_strategy = keystone
55 | auth_type = password
56 | project_domain_name = default
57 | user_domain_name = default
58 | region_name = RegionOne
59 | project_name = service
60 | admin_tenant_name = service
61 | username = neutron
62 | password = {{ neutron_password }}
63 | {% if ansible_hostname in groups['nova-controller'] %}
64 | service_metadata_proxy = True
65 | metadata_proxy_shared_secret = {{ metadata_secret }}
66 | {% endif %}
67 |
68 | [oslo_messaging_notifications]
69 | driver = messagingv2
70 |
71 | [vnc]
72 | {% if ansible_hostname in groups['nova'] %}
73 | enabled = True
74 | {% endif %}
75 | vncserver_listen = $my_ip
76 | vncserver_proxyclient_address = $my_ip
77 | novncproxy_base_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:6080/vnc_auto.html
78 |
79 | [placement]
80 | os_region_name = RegionOne
81 | project_domain_name = Default
82 | project_name = service
83 | auth_type = password
84 | user_domain_name = Default
85 | auth_url = http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357/v3
86 | username = placement
87 | password = {{ nova_placement_password }}
88 |
89 | [scheduler]
90 | discover_hosts_in_cells_interval = 120
91 |
--------------------------------------------------------------------------------
/site.yml:
--------------------------------------------------------------------------------
1 | - name: prepare
2 | hosts: all
3 | tags: prepare
4 | roles:
5 | - prepare
6 | environment: "{{ openstack_env }}"
7 |
8 | - name: ha
9 | hosts: controller
10 | tags: ha
11 | roles:
12 |     - { role: ha, when: groups['controller']|count > 1 }
13 | environment: "{{ openstack_env }}"
14 |
15 | - name: install database
16 | hosts: database
17 | tags: database
18 | roles:
19 | - database
20 | environment: "{{ openstack_env }}"
21 |
22 | - name: install keystone identity service
23 | hosts: keystone
24 | tags: keystone
25 | roles:
26 | - keystone
27 | environment: "{{ openstack_env }}"
28 |
29 | - name: install glance image service
30 | hosts: glance
31 | tags: glance
32 | roles:
33 | - glance
34 | environment: "{{ openstack_env }}"
35 |
36 | - name: install cinder controller service
37 | hosts: cinder-controller
38 | tags: cinder-controller
39 | roles:
40 | - { role: cinder, controller: true }
41 | environment: "{{ openstack_env }}"
42 |
43 | - name: install cinder storage service
44 | hosts: cinder
45 | tags: cinder
46 | roles:
47 | - cinder
48 | environment: "{{ openstack_env }}"
49 |
50 | - name: install nova controller service
51 | hosts: nova-controller
52 | tags: nova-controller
53 | roles:
54 | - { role: nova, controller: true }
55 | environment: "{{ openstack_env }}"
56 |
57 | - name: install nova compute service
58 | hosts: nova
59 | tags: nova
60 | roles:
61 | - nova
62 | environment: "{{ openstack_env }}"
63 |
64 | - name: discover compute hosts
65 | hosts: nova-controller
66 | tags: nova-controller
67 | tasks:
68 | - command: nova-manage cell_v2 discover_hosts --verbose
69 | become_user: nova
70 | become: true
71 | run_once: true
72 | when: not ansible_check_mode
73 | environment: "{{ openstack_env }}"
74 |
75 | - name: install ceilometer controller service
76 | hosts: ceilometer-controller
77 | tags: ceilometer-controller
78 | roles:
79 | - { role: ceilometer, controller: true }
80 | environment: "{{ openstack_env }}"
81 |
82 | - name: install ceilometer service
83 | hosts: ceilometer-nova
84 | tags: ceilometer-nova
85 | roles:
86 | - ceilometer
87 | environment: "{{ openstack_env }}"
88 |
89 | - name: install neutron controller service
90 | hosts: neutron-controller
91 | tags: neutron-controller
92 | roles:
93 | - { role: neutron, controller: true }
94 | environment: "{{ openstack_env }}"
95 |
96 | - name: install neutron service
97 | hosts: neutron
98 | tags: neutron
99 | roles:
100 | - neutron
101 | environment: "{{ openstack_env }}"
102 |
103 | - name: install heat orchestration service
104 | hosts: heat
105 | tags: heat
106 | roles:
107 | - heat
108 | environment: "{{ openstack_env }}"
109 |
110 | - name: install swift controller storage
111 | hosts: swift-controller
112 | tags: swift-controller
113 | roles:
114 | - { role: swift, controller: true }
115 | environment: "{{ openstack_env }}"
116 |
117 | - name: install swift object storage
118 | hosts: swift
119 | tags: swift
120 | roles:
121 | - swift
122 | environment: "{{ openstack_env }}"
123 |
124 | - name: install horizon dashboard
125 | hosts: horizon
126 | tags: horizon
127 | roles:
128 | - horizon
129 | environment: "{{ openstack_env }}"
130 |
131 | - name: run tests
132 | hosts: controller
133 | tags: test
134 | roles:
135 | - tests
136 | environment: "{{ openstack_env }}"
137 |
--------------------------------------------------------------------------------
/group_vars/all:
--------------------------------------------------------------------------------
1 | openstack_release: pike
2 |
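   | # when True, the database role wipes any existing mariadb and rabbitmq data before deploying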
3 | destroy_data: True
4 | root_ssh_key: ''
5 |
6 | # openstack admin and demo passwords
7 | admin_password: admin
8 | demo_password: demo
9 |
10 | # root password for mariadb database
11 | mariadb_password: maria
12 |
13 | # rabbitmq password
14 | rabbitmq_password: openstack
15 |
16 | # keystone database password
17 | keystonedb_password: keystone
18 |
19 | # glance database password
20 | glancedb_password: glance
21 | glance_password: glance
22 |
23 | # cinder database password
24 | cinderdb_password: cinder
25 | cinder_password: cinder
26 |
27 | # required configuration, please provide an empty partition device
28 | # see also http://docs.openstack.org/ocata/install-guide-rdo/cinder.html
29 | cinder_dev: /dev/sda3
30 |
31 | # nova database password
32 | novadb_password: nova
33 | nova_password: nova
34 | nova_placement_password: nova_place
35 |
36 | # virt type, kvm is the default and the playbook test role is expected to run with kvm.
37 | # can also be specified as a host variable
38 | #virt_type: kvm
39 |
40 | # neutron database password
41 | neutrondb_password: neutron
42 | neutron_password: neutron
43 |
44 | # heat database password
45 | heatdb_password: heat
46 | heat_domain_password: heat
47 | heat_password: heat
48 |
49 | # metadata proxy shared secret
50 | metadata_secret: top_secret
51 |
52 | # aodh database password
53 | aodhdb_password: aodh
54 | aodh_domain_password: aodh
55 | aodh_password: aodh
56 |
57 | # ceilometer database password
58 | ceilometerdb_password: ceilometer
59 | ceilometer_password: ceilometer
60 |
61 | # swift settings
62 | swift_password: swift
63 | swift_hash_path_suffix: 39fe2de19b
64 | swift_hash_path_prefix: 7a2cac670f
65 |
66 | swift_storage_dir: /srv/node
67 | # important note:
68 | # either a file or a device can be used; if a device (path starting with '/dev') is given, it must not be mounted
69 | swift_storage_devices:
70 | - /srv/swiftfs1.img
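   | #- /dev/sdb1   # hypothetical example: an unmounted spare device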
71 |
72 | swift_device_size: 1G
73 | swift_replica_count: 1
74 |
75 | # provider network settings
76 | provider_network: public
77 | provider_nameserver: 8.8.8.8
78 |
79 | # the VIP address for highly available configurations. Required when more than one controller node is specified
80 | #internal_vip_address: 192.168.168.234
81 |
82 | # required configuration, please supply values appropriate for your provider (public) network
83 | # see also http://docs.openstack.org/ocata/install-guide-rdo/launch-instance-networks-public.html
84 |
85 | public_interface_name: eth1
86 | provider_allocation_pool: start=192.168.24.190,end=192.168.24.209
87 | provider_gateway: 192.168.24.254
88 | provider_network_cidr: 192.168.24.0/24
89 |
90 | #provider_allocation_pool: start=192.168.168.190,end=192.168.168.199
91 | #provider_gateway: 192.168.168.1
92 | #provider_network_cidr: 192.168.168.0/24
93 |
94 | selfservice_network: private
95 | selfservice_nameserver: 8.8.8.8
96 | selfservice_network_cidr: 192.168.167.0/24
97 | selfservice_gateway: 192.168.167.1
98 |
99 | # openstack environment, please do not change unless you have read the playbook and understand the consequences.
100 | openstack_env:
101 | OS_IDENTITY_API_VERSION: 3
102 | OS_URL: http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357/v3
103 | OS_AUTH_URL: http://{{ internal_vip_address|default(groups['controller'][0]) }}:35357/v3
104 | OS_PROJECT_DOMAIN_NAME: default
105 | OS_USER_DOMAIN_NAME: default
106 | OS_PROJECT_NAME: admin
107 | OS_TENANT_NAME: admin
108 | OS_USERNAME: admin
109 | OS_PASSWORD: "{{ admin_password }}"
110 | OS_IMAGE_API_VERSION: 2
111 | OS_AUTH_VERSION: 3
112 | OS_REGION_NAME: RegionOne
113 |
--------------------------------------------------------------------------------
/roles/swift/tasks/controller.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: package
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-swift-proxy
6 | - python-swiftclient
7 | - python-keystoneclient
8 | - python-keystonemiddleware
9 | - memcached
10 | - python-ceilometermiddleware
11 |
12 | - name: install swift configuration
13 | tags: config
14 | template: src={{ item }}.j2 dest=/etc/swift/{{ item }}
15 | with_items:
16 | - proxy-server.conf
17 | - swift.conf
18 | - object-expirer.conf
19 |
20 | - name: list users
21 | command: "openstack user list"
22 | register: ulist
23 |
24 | - block:
25 | - command: "openstack user create --domain default --password {{ swift_password }} swift"
26 | - command: "openstack role add --project service --user swift admin"
27 | - command: "openstack service create --name swift --description 'OpenStack Object Storage service' object-store"
28 | - command: "openstack role create ResellerAdmin"
29 | - command: "openstack role add --project service --user ceilometer ResellerAdmin"
30 | - command: "openstack endpoint create --region RegionOne object-store {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:8080/v1/AUTH_%(tenant_id)s"
31 | with_items:
32 | - internal
33 | - public
34 | - admin
35 | when: not ansible_check_mode and ulist.stdout.find("swift") < 0
36 |
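   | # builder arguments are <part_power> <replicas> <min_part_hours>: 2^10 partitions, swift_replica_count replicas, at least 1 hour between moves of a partition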
37 | - name: create the base account.builder
38 | command: swift-ring-builder account.builder create 10 {{ swift_replica_count }} 1 chdir=/etc/swift
39 |
40 | - name: create the base container.builder
41 | command: swift-ring-builder container.builder create 10 {{ swift_replica_count }} 1 chdir=/etc/swift
42 |
43 | - name: create the base object.builder
44 | command: swift-ring-builder object.builder create 10 {{ swift_replica_count }} 1 chdir=/etc/swift
45 |
46 | - name: add each storage node to the ring - account.builder
47 | command: swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ hostvars[item[0]].ansible_default_ipv4['address'] }} --port 6002 --device {{ item[1]|basename }} --weight 100 chdir=/etc/swift
48 | with_nested:
49 | - "{{ groups['swift'] }}"
50 | - "{{ swift_storage_devices }}"
51 | - name: add each storage node to the ring - container.builder
52 | command: swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ hostvars[item[0]].ansible_default_ipv4['address'] }} --port 6001 --device {{ item[1]|basename }} --weight 100 chdir=/etc/swift
53 | with_nested:
54 | - "{{ groups['swift'] }}"
55 | - "{{ swift_storage_devices }}"
56 | - name: add each storage node to the ring - object.builder
57 | command: swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ hostvars[item[0]].ansible_default_ipv4['address'] }} --port 6000 --device {{ item[1]|basename }} --weight 100 chdir=/etc/swift
58 | with_nested:
59 | - "{{ groups['swift'] }}"
60 | - "{{ swift_storage_devices }}"
61 |
62 | - name: rebalance rings
63 | command: swift-ring-builder {{ item }} rebalance chdir=/etc/swift
64 | with_items:
65 | - account.builder
66 | - container.builder
67 | - object.builder
68 |
69 | - name: fetch files
70 | tags: fetch
71 | fetch: src=/etc/swift/{{ item }} dest={{ inventory_dir }}/workdir/{{ item }} flat=yes
72 | with_items:
73 | - account.ring.gz
74 | - container.ring.gz
75 | - object.ring.gz
76 | #- name: debugging
77 | # tags: feat
78 | # debug: msg="{{ hostvars[item].ansible_default_ipv4['address'] }}"
79 | # with_items: "{{ groups['swift'] }}"
80 |
81 | - name: start services
82 | tags: service
83 | service: name={{ item }} state=restarted enabled=yes
84 | when: not ansible_check_mode
85 | with_items:
86 | - rsyslog
87 | - openstack-swift-proxy
88 | - memcached
89 |
--------------------------------------------------------------------------------
/roles/neutron/tasks/controller.yml:
--------------------------------------------------------------------------------
1 | - name: install packages
2 | tags: packages
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - openstack-neutron
6 | - openstack-neutron-ml2
7 | - openstack-neutron-linuxbridge
8 | - openstack-neutron-lbaas
9 | - python-neutronclient
10 | - ebtables
11 | - ipset
12 |
13 | - name: install configurations
14 | tags: config
15 | template: src=neutron.conf.j2 dest=/etc/neutron/neutron.conf
16 | - template: src=ml2_conf.ini.j2 dest=/etc/neutron/plugins/ml2/ml2_conf.ini
17 | - name: install linuxbridge agent configuration
18 | tags: config
19 | template: src=linuxbridge_agent.ini.j2 dest=/etc/neutron/plugins/ml2/linuxbridge_agent.ini
20 | - copy: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
21 | - copy: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
22 | - template: src=neutron_lbaas.conf.j2 dest=/etc/neutron/neutron_lbaas.conf
23 | - template: src=metadata_agent.ini.j2 dest=/etc/neutron/metadata_agent.ini
24 | - file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
25 | when: not ansible_check_mode
26 | - copy: src=dnsmasq-neutron.conf dest=/etc/neutron/dnsmasq-neutron.conf
27 |
28 | - block:
29 | - name: copy db create template
30 | template: src=neutrondb.sql.j2 dest=/tmp/neutrondb.sql
31 |
32 | - name: create database
33 |     shell: mysql -u root < /tmp/neutrondb.sql
34 |
35 | - name: remove sql
36 | file: path=/tmp/neutrondb.sql state=absent
37 |
38 | - name: list users
39 | command: "openstack user list"
40 | register: ulist
41 |
42 | - block:
43 | - command: "openstack user create --domain default --password {{ neutron_password }} neutron"
44 | - command: "openstack role add --project service --user neutron admin"
45 | - command: "openstack service create --name neutron --description 'OpenStack Networking' network"
46 | - command: "openstack endpoint create --region RegionOne network {{ item }} http://{{ internal_vip_address|default(groups['controller'][0]) }}:9696"
47 | with_items:
48 | - internal
49 | - public
50 | - admin
51 | when: not ansible_check_mode and ulist.stdout.find("neutron") < 0
52 |
53 | - name: bootstrap database
54 | command: neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
55 | become: true
56 | become_user: neutron
57 | when: not ansible_check_mode
58 | run_once: true
59 |
60 | - name: start services
61 | tags: service
62 | service: name={{ item }} state=restarted enabled=yes
63 | when: not ansible_check_mode
64 | with_items:
65 | - openstack-nova-api
66 | - neutron-server
67 | - neutron-linuxbridge-agent
68 | - neutron-dhcp-agent
69 | - neutron-metadata-agent
70 | - neutron-l3-agent
71 |
72 | - block:
73 | - name: list networks
74 | command: "neutron net-list"
75 | register: ulist
76 |
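   |   # create the initial provider (flat, external) and self-service networks plus a router between them, once per deployment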
77 | - block:
78 | - command: "neutron net-create --shared --provider:physical_network {{ provider_network }} --provider:network_type flat {{ provider_network }} --router:external True"
79 | - command: "neutron subnet-create --name {{ provider_network }} --allocation-pool {{ provider_allocation_pool }} --dns-nameserver {{ provider_nameserver }} --gateway {{ provider_gateway }} {{ provider_network }} {{ provider_network_cidr }}"
80 | - command: "neutron net-create {{ selfservice_network }}"
81 | - command: "neutron subnet-create --name {{ selfservice_network }} --dns-nameserver {{ selfservice_nameserver }} --gateway {{ selfservice_gateway }} {{ selfservice_network }} {{ selfservice_network_cidr }}"
82 | - command: "neutron router-create router"
83 | - command: "neutron router-interface-add router {{ selfservice_network }}"
84 | - command: "neutron router-gateway-set router {{ provider_network }}"
85 | tags: net
86 | become: true
87 | become_user: neutron
88 | when: not ansible_check_mode and ulist.stdout.find("private") < 0
89 | run_once: true
90 |
--------------------------------------------------------------------------------
/roles/database/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install mysql packages
2 | tags: packages
3 | package: name={{ item }} state=latest
4 | with_items:
5 | - mariadb
6 | - mariadb-server-galera
7 | - MySQL-python
8 | - percona-xtrabackup
9 |
10 | - name: install mariadb config
11 | tags: config
12 | template: src=my.cnf.j2 dest=/etc/my.cnf
13 |
14 | - name: destroy mariadb data
15 | shell: service mariadb stop; rm -rf /var/lib/mysql/{*,.sst}
16 | when: destroy_data
17 | args:
18 | warn: false
19 |
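   | # bootstrap a new galera cluster on the first database host only; the remaining nodes join when mariadb is restarted below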
20 | - name: start new galera cluster
21 | tags: service
22 | command: galera_new_cluster
23 | when: not ansible_check_mode and groups['database'].index(inventory_hostname) == 0 and groups['database']|count > 1
24 |
25 | - name: start mariadb
26 | tags: service
27 | service: name=mariadb state=restarted enabled=yes
28 | when: not ansible_check_mode and ( groups['database'].index(inventory_hostname) > 0 or groups['database']|count < 2 )
29 |
30 | - name: test mariadb
31 | tags: test
32 | shell: mysql -u root -e "SHOW STATUS LIKE 'wsrep_cluster_size'" -Ns | cut -f2
33 | register: wsrep_cluster_size
34 |
35 | - name: check cluster size
36 | tags: test
37 | debug: msg="mariadb cluster size is {{ wsrep_cluster_size.stdout }}"
38 |   failed_when: groups['database']|count != wsrep_cluster_size.stdout|int
39 | when: not ansible_check_mode
40 |
41 | # rabbitmq
42 | - name: install rabbitmq packages
43 | tags: packages
44 | package: name=rabbitmq-server state=latest
45 |
46 | - name: destroy rabbitmq data
47 | shell: service rabbitmq-server stop; rm -rf /var/lib/rabbitmq/{*,.er*}
48 | when: destroy_data
49 | args:
50 | warn: false
51 |
52 | - name: start rabbitmq
53 | tags: service
54 | service: name=rabbitmq-server state=restarted enabled=yes
55 | when: not ansible_check_mode
56 |
57 | - name: get rabbitmq users
58 | command: rabbitmqctl list_users
59 | register: rabbitmq_users
60 |
61 | - name: add rabbitmq user
62 | command: rabbitmqctl add_user openstack {{ rabbitmq_password }}
63 | when: not ansible_check_mode and rabbitmq_users.stdout.find('openstack') == -1
64 |
65 | - name: set rabbitmq permissions
66 | command: rabbitmqctl set_permissions openstack ".*" ".*" ".*"
67 |
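   | # multi-node only: share the erlang cookie from the first database host, join the remaining nodes as RAM nodes and enable queue mirroring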
68 | - block:
69 | - name: stop rabbitmq
70 | service: name=rabbitmq-server state=stopped enabled=yes
71 | when: not ansible_check_mode
72 |
73 | - name: fetch cookie from {{ groups['database'][0] }}
74 | fetch: src=/var/lib/rabbitmq/.erlang.cookie dest={{ inventory_dir }}/workdir/.erlang.cookie flat=yes
75 | when: groups['database'].index(inventory_hostname) == 0
76 |
77 | - name: put cookie on cluster nodes
78 | copy: dest=/var/lib/rabbitmq/.erlang.cookie src={{ inventory_dir }}/workdir/.erlang.cookie owner=rabbitmq group=rabbitmq mode=400
79 | when: groups['database'].index(inventory_hostname) > 0
80 |
81 | - name: start rabbitmq
82 | service: name=rabbitmq-server state=started enabled=yes
83 | when: not ansible_check_mode
84 |
85 | - name: create rabbitmq cluster
86 | command: rabbitmqctl {{ item }}
87 | with_items:
88 | - stop_app
89 | - join_cluster --ram rabbit@{{ groups['database'][0] }}
90 | - start_app
91 | when: groups['database'].index(inventory_hostname) > 0
92 |
93 | - name: ensure ha policy
94 | command: "rabbitmqctl set_policy ha-all '^(?!amq\\.).*' '{\"ha-mode\": \"all\"}'"
95 | when: groups['database'].index(inventory_hostname) == 0
96 |
97 | when: groups['database']|count > 1
98 |
99 | - name: check rabbitmq cluster
100 | tags: test
101 | shell: rabbitmqctl cluster_status | grep running_nodes | grep {{ item }}
102 | with_items: "{{ groups['database'] }}"
103 |
104 | # memcached
105 | - name: install memcached packages
106 | tags: packages
107 | package: name=memcached state=latest
108 |
109 | - name: configure memcached
110 | template: src=memcached.j2 dest=/etc/sysconfig/memcached
111 |
112 | - name: start memcached
113 | tags: service
114 | service: name=memcached state=restarted enabled=yes
115 | when: not ansible_check_mode
116 |
117 | - name: install mongodb packages
118 | tags: packages
119 | package: name={{ item }} state=latest
120 | with_items:
121 | - mongodb
122 | - mongodb-server
123 |
124 | - name: install mongod config
125 | tags: config
126 | copy: src=mongod.conf dest=/etc/mongod.conf
127 |
128 | - name: start mongodb
129 | service: name=mongod state=restarted enabled=yes
130 | when: not ansible_check_mode
131 |
--------------------------------------------------------------------------------
/roles/neutron/files/dhcp_agent.ini:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
4 | dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
5 | enable_isolated_metadata = True
6 |
7 | verbose = True
8 |
9 | dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
10 |
11 | # Show debugging output in log (sets DEBUG log level output)
12 | # debug = False
13 |
14 | # The DHCP agent will resync its state with Neutron to recover from any
15 | # transient notification or rpc errors. The interval is number of
16 | # seconds between attempts.
17 | # resync_interval = 5
18 |
19 | # The DHCP agent requires an interface driver be set. Choose the one that best
20 | # matches your plugin.
21 | # interface_driver =
22 |
23 | # Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
24 | # BigSwitch/Floodlight)
25 | # interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
26 |
27 | # Name of Open vSwitch bridge to use
28 | # ovs_integration_bridge = br-int
29 |
30 | # Use veth for an OVS interface or not.
31 | # Support kernels with limited namespace support
32 | # (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
33 | # ovs_use_veth = False
34 |
35 | # Example of interface_driver option for LinuxBridge
36 | # interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
37 |
38 | # The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
39 | # no additional setup of the DHCP server.
40 | # dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
41 |
42 | # Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
43 | # iproute2 package that supports namespaces). This option is deprecated and
44 | # will be removed in a future release, at which point the old behavior of
45 | # use_namespaces = True will be enforced.
46 | # use_namespaces = True
47 |
48 | # In some cases the neutron router is not present to provide the metadata
49 | # IP but the DHCP server can be used to provide this info. Setting this
50 | # value will force the DHCP server to append specific host routes to the
51 | # DHCP request. If this option is set, then the metadata service will be
52 | # activated for all the networks.
53 | # force_metadata = False
54 |
55 | # The DHCP server can assist with providing metadata support on isolated
56 | # networks. Setting this value to True will cause the DHCP server to append
57 | # specific host routes to the DHCP request. The metadata service will only
58 | # be activated when the subnet does not contain any router port. The guest
59 | # instance must be configured to request host routes via DHCP (Option 121).
60 | # This option doesn't have any effect when force_metadata is set to True.
61 | # enable_isolated_metadata = False
62 |
63 | # Allows for serving metadata requests coming from a dedicated metadata
64 | # access network whose cidr is 169.254.169.254/16 (or larger prefix), and
65 | # is connected to a Neutron router from which the VMs send metadata
66 | # request. In this case DHCP Option 121 will not be injected in VMs, as
67 | # they will be able to reach 169.254.169.254 through a router.
68 | # This option requires enable_isolated_metadata = True
69 | # enable_metadata_network = False
70 |
71 | # Number of threads to use during sync process. Should not exceed connection
72 | # pool size configured on server.
73 | # num_sync_threads = 4
74 |
75 | # Location to store DHCP server config files
76 | # dhcp_confs = $state_path/dhcp
77 |
78 | # Domain to use for building the hostnames. This option will be deprecated in
79 | # a future release. It is being replaced by dns_domain in neutron.conf
80 | # dhcp_domain = openstacklocal
81 |
82 | # Override the default dnsmasq settings with this file
83 | # dnsmasq_config_file =
84 |
85 | # Comma-separated list of DNS servers which will be used by dnsmasq
86 | # as forwarders.
87 | # dnsmasq_dns_servers =
88 |
89 | # Base log dir for dnsmasq logging. The log contains DHCP and DNS log
90 | # information and is useful for debugging issues with either DHCP or DNS.
91 | # If this section is null, disable dnsmasq log.
92 | # dnsmasq_base_log_dir =
93 |
94 | # Limit number of leases to prevent a denial-of-service.
95 | # dnsmasq_lease_max = 16777216
96 |
97 | # Location to DHCP lease relay UNIX domain socket
98 | # dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
99 |
100 | # Use broadcast in DHCP replies
101 | # dhcp_broadcast_reply = False
102 |
103 | # dhcp_delete_namespaces, which is True by default, can be set to False if
104 | # namespaces can't be deleted cleanly on the host running the DHCP agent.
105 | # Disable this if you hit the issue in
106 | # https://bugs.launchpad.net/neutron/+bug/1052535 or if
107 | # you are sure that your version of iproute suffers from the problem.
108 | # This should not be a problem any more. Refer to bug:
109 | # https://bugs.launchpad.net/neutron/+bug/1418079
110 | # This option is deprecated and will be removed in the M release
111 | # dhcp_delete_namespaces = True
112 |
113 | # Timeout for ovs-vsctl commands.
114 | # If the timeout expires, ovs commands will fail with ALARMCLOCK error.
115 | # ovs_vsctl_timeout = 10
116 |
117 | [AGENT]
118 | # Log agent heartbeats from this DHCP agent
119 | # log_agent_heartbeats = False
120 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Ansible Playbook: OpenStack
2 | 
3 | An Ansible playbook to deploy OpenStack components to a cluster.
4 | # Overview
5 | Initially, the playbook was written primarily as a way to learn OpenStack deployment in a nutshell.
6 | 
7 | As the project successfully passed its test environment, the goal changed to filling the gap between [install from source](https://github.com/openstack/openstack-ansible) and
8 | [docker deployment](https://github.com/openstack/kolla-ansible), i.e. to create a deployment on bare-metal hosts from the official package repositories, without containers, and therefore to eliminate the additional level of complexity they bring.
9 | 
10 | In its current state, the playbook is able to deploy a fully functional OpenStack cluster (see below).
11 | It is also possible to deploy everything on a single (VM) host.
12 |
13 | You are welcome to read the playbook and to send back pull requests and suggestions :)
14 |
15 | #### Basic high-availability features implemented for controller/infrastructure services:
16 | * MariaDB galera cluster
17 | * RabbitMQ cluster
18 | * Memcached service
19 | * VIP cluster address managed by keepalived
20 |
21 | #### So if more than one controller node is configured, seamless failover is expected for:
22 | * keystone
23 | * glance
24 | * cinder controller
25 | * nova controller
26 | * neutron controller
27 | * heat
28 | * horizon
29 |
30 | # Description
31 | #### The playbook is able to set up the core services described in the [official guide](https://docs.openstack.org/install-guide/openstack-services.html#):
32 | * [**keystone**](https://docs.openstack.org/keystone/latest/)
33 | * [**glance**](https://docs.openstack.org/glance/latest/)
34 | * [**cinder**](https://docs.openstack.org/cinder/latest/)
35 | * [**nova**](https://docs.openstack.org/nova/latest/)
36 | * [**neutron**](https://docs.openstack.org/neutron/latest/)
37 | * [**heat**](https://docs.openstack.org/heat/latest/)
38 | * [**swift**](https://docs.openstack.org/swift/latest/)
39 | * [**ceilometer**](https://docs.openstack.org/ceilometer/latest/)
40 | * [**horizon**](https://docs.openstack.org/horizon/latest/)
41 |
42 | The configuration is _very_ simple:
43 |
44 | It’s only required to place hostname(s) into the **controller** and **compute** groups in the [hosts](hosts) file and to carefully fill in the required
45 | [group_vars](group_vars/all) parameters.
46 |
47 | The playbook keeps the configuration files inside the role directories. If you need to add or change any parameter, you can edit
48 | the configuration file found in the **roles/_service_/[files|templates]** directory.
49 |
50 | Besides the cluster (or single host) setup, the playbook also generates a cluster manager configuration file located at **workdir/services.xml**.
51 | Please visit the [clinit manager home page](https://github.com/sergevs/clinit) and see the
52 | [manual](https://github.com/sergevs/clinit/wiki). The rpm package can be downloaded from [clinit-1.0-ssv1.el6.noarch.rpm](https://github.com/sergevs/clinit/releases/download/1.0/clinit-1.0-ssv1.el6.noarch.rpm).
53 | After the clinit package is installed, you’ll be able to stop, start and see the status of services on the cluster.
54 |
55 | # Configuration
56 | Service configuration is performed using the hosts and variables files.
57 | #### Hosts file:
58 | An empty file is supplied with the playbook. Please examine [hosts](hosts) and supply appropriate host names.
59 | **You must not remove any existing group**. Leave a group empty if you don't need the services it configures. The same hostname can be placed in any host group.
60 | For instance, if you want to set up everything on one host, just put the same hostname into each host group, as shown in the sketch below.
61 | So far, only the **controller** and **compute** groups are well tested and supported.
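   | 
   | As an illustration only, a minimal single-host sketch could look like this (the hostname is a placeholder; the authoritative group list is the one already present in the supplied [hosts](hosts) file):
   | 
   |     [controller]
   |     openstack1.example.com
   | 
   |     [compute]
   |     openstack1.example.com
   | 
   |     # ...the same hostname repeated in the remaining groups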
62 |
63 | #### Variables file:
64 | Please examine [group_vars/all](group_vars/all) and supply configuration appropriate for your network environment and disk storage.
65 |
66 | # Usage
67 | ## Prepare, verify the repository configuration and perform a basic check:
68 |
69 | ansible-playbook -i hosts -t prepare site.yml
70 | ansible-playbook -i hosts -t check site.yml
71 |
72 | ## Deployment:
73 |
74 | ansible-playbook -i hosts site.yml
75 |
76 | If you have installed clinit, after deployment you can also run:
77 |
78 | clinit -S workdir/services.xml status
79 | clinit -S workdir/services.xml tree
80 |
81 | #### Tags used in the playbook:
82 | * **package** : install rpm packages
83 | * **config** : deploy configuration files, useful if you just want to change the configuration on the hosts.
84 | * **test** : run test actions
85 | 
86 | Also, most host groups have a tag with a similar name.
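   | 
   | For example, to push only configuration file changes to the hosts (an illustrative invocation using the **config** tag):
   | 
   |     ansible-playbook -i hosts -t config site.yml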
87 |
88 | # Requirements
89 | * [Ansible >= 2.2.0.0](http://www.ansible.com) is required.
90 | * OpenStack version: **liberty**, **mitaka**, **ocata**, **pike** - please use the appropriate branch; **pike** is currently on the master branch.
91 | * **remote_user = root** must be configured for Ansible.
92 |
93 | # Target host(s) requirements
94 | * At least 8 GB RAM, 4 CPU cores and a 10 GB HDD (5 GB for the root FS + a 5 GB cinder partition) are required for a minimal single-host test installation.
95 | * OS version: RedHat/CentOS 7.4 with current updates (**updates are important**).
96 | * The repositories required for OpenStack have to be properly configured.
97 | * SSH key passwordless authentication must be configured for the root account.
98 | * **SELinux** must be disabled.
99 | * **requiretty** should be switched off in the **/etc/sudoers** file.
100 | * Two interfaces must be present: one for the private network and one for the **provider (public)** network.
101 | * At least one spare partition must be available for the **cinder** (block storage) service.
102 |
103 | ## License
104 |
105 | [MIT](LICENSE)
106 |
--------------------------------------------------------------------------------
/roles/neutron/files/l3_agent.ini:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
4 | external_network_bridge =
5 |
6 | verbose = True
7 | # Show debugging output in log (sets DEBUG log level output)
8 | # debug = False
9 |
10 | # L3 requires that an interface driver be set. Choose the one that best
11 | # matches your plugin.
12 | # interface_driver =
13 |
14 | # Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
15 | # that supports L3 agent
16 | # interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
17 |
18 | # Use veth for an OVS interface or not.
19 | # Support kernels with limited namespace support
20 | # (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
21 | # ovs_use_veth = False
22 |
23 | # Example of interface_driver option for LinuxBridge
24 | # interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
25 |
26 | # Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
27 | # iproute2 package that supports namespaces). This option is deprecated and
28 | # will be removed in a future release, at which point the old behavior of
29 | # use_namespaces = True will be enforced.
30 | # use_namespaces = True
31 |
32 | # If use_namespaces is set as False then the agent can only configure one router.
33 |
34 | # This is done by setting the specific router_id.
35 | # router_id =
36 |
37 | # When external_network_bridge is set, each L3 agent can be associated
38 | # with no more than one external network. This value should be set to the UUID
39 | # of that external network. To allow L3 agent support multiple external
40 | # networks, both the external_network_bridge and gateway_external_network_id
41 | # must be left empty.
42 | # gateway_external_network_id =
43 |
44 | # With IPv6, the network used for the external gateway does not need
45 | # to have an associated subnet, since the automatically assigned
46 | # link-local address (LLA) can be used. However, an IPv6 gateway address
47 | # is needed for use as the next-hop for the default route. If no IPv6
48 | # gateway address is configured here, (and only then) the neutron router
49 | # will be configured to get its default route from router advertisements (RAs)
50 | # from the upstream router; in which case the upstream router must also be
51 | # configured to send these RAs.
52 | # The ipv6_gateway, when configured, should be the LLA of the interface
53 | # on the upstream router. If a next-hop using a global unique address (GUA)
54 | # is desired, it needs to be done via a subnet allocated to the network
55 | # and not through this parameter.
56 | # ipv6_gateway =
57 |
58 | # (StrOpt) Driver used for ipv6 prefix delegation. This needs to be
59 | # an entry point defined in the neutron.agent.linux.pd_drivers namespace. See
60 | # setup.cfg for entry points included with the neutron source.
61 | # prefix_delegation_driver = dibbler
62 |
63 | # Indicates that this L3 agent should also handle routers that do not have
64 | # an external network gateway configured. This option should be True only
65 | # for a single agent in a Neutron deployment, and may be False for all agents
66 | # if all routers must have an external network gateway
67 | # handle_internal_only_routers = True
68 |
69 | # Name of bridge used for external network traffic. This should be set to
70 | # empty value for the linux bridge. when this parameter is set, each L3 agent
71 | # can be associated with no more than one external network.
72 | # This option is deprecated and will be removed in the M release.
73 | # external_network_bridge = br-ex
74 |
75 | # TCP Port used by Neutron metadata server
76 | # metadata_port = 9697
77 |
78 | # Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
79 | # to disable this feature.
80 | # send_arp_for_ha = 3
81 |
82 | # seconds between re-sync routers' data if needed
83 | # periodic_interval = 40
84 |
85 | # seconds to start to sync routers' data after
86 | # starting agent
87 | # periodic_fuzzy_delay = 5
88 |
89 | # enable_metadata_proxy, which is true by default, can be set to False
90 | # if the Nova metadata server is not available
91 | # enable_metadata_proxy = True
92 |
93 | # Iptables mangle mark used to mark metadata valid requests
94 | # metadata_access_mark = 0x1
95 |
96 | # Iptables mangle mark used to mark ingress from external network
97 | # external_ingress_mark = 0x2
98 |
99 | # router_delete_namespaces, which is True by default, can be set to False if
100 | # namespaces can't be deleted cleanly on the host running the L3 agent.
101 | # Disable this if you hit the issue in
102 | # https://bugs.launchpad.net/neutron/+bug/1052535 or if
103 | # you are sure that your version of iproute suffers from the problem.
104 | # If True, namespaces will be deleted when a router is destroyed.
105 | # This should not be a problem any more. Refer to bug:
106 | # https://bugs.launchpad.net/neutron/+bug/1418079
107 | # This option is deprecated and will be removed in the M release
108 | # router_delete_namespaces = True
109 |
110 | # Timeout for ovs-vsctl commands.
111 | # If the timeout expires, ovs commands will fail with ALARMCLOCK error.
112 | # ovs_vsctl_timeout = 10
113 |
114 | # The working mode for the agent. Allowed values are:
115 | # - legacy: this preserves the existing behavior where the L3 agent is
116 | # deployed on a centralized networking node to provide L3 services
117 | # like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
118 | # - dvr: this mode enables DVR functionality, and must be used for an L3
119 | # agent that runs on a compute host.
120 | # - dvr_snat: this enables centralized SNAT support in conjunction with
121 | # DVR. This mode must be used for an L3 agent running on a centralized
122 | # node (or in single-host deployments, e.g. devstack).
123 | # agent_mode = legacy
124 |
125 | # Location to store keepalived and all HA configurations
126 | # ha_confs_path = $state_path/ha_confs
127 |
128 | # VRRP authentication type AH/PASS
129 | # ha_vrrp_auth_type = PASS
130 |
131 | # VRRP authentication password
132 | # ha_vrrp_auth_password =
133 |
134 | # The advertisement interval in seconds
135 | # ha_vrrp_advert_int = 2
136 |
137 | [AGENT]
138 | # Log agent heartbeats from this L3 agent
139 | # log_agent_heartbeats = False
140 |
--------------------------------------------------------------------------------
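Deploying a file like the l3_agent.ini above is mostly a matter of copying it into place and restarting the agent whenever it changes. A minimal sketch of such a task, assuming the conventional /etc/neutron/l3_agent.ini destination and a neutron-l3-agent service name (both assumptions, not taken from this repository's neutron role):

```yaml
# Hypothetical sketch -- destination path, ownership and service name are assumptions.
- hosts: neutron
  tasks:
    - name: Install the L3 agent configuration
      copy:
        src: l3_agent.ini
        dest: /etc/neutron/l3_agent.ini
        owner: neutron
        group: neutron
        mode: "0640"
      notify: restart neutron-l3-agent

  handlers:
    - name: restart neutron-l3-agent
      service:
        name: neutron-l3-agent
        state: restarted
```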
/roles/database/files/mongod.conf:
--------------------------------------------------------------------------------
1 | ##
2 | ### Basic Defaults
3 | ##
4 |
5 | # Comma separated list of ip addresses to listen on (all local ips by default)
6 | bind_ip = 0.0.0.0
7 |
8 | # Specify port number (27017 by default)
9 | #port = 27017
10 |
11 | # Fork server process (false by default)
12 | fork = true
13 |
14 | # Full path to pidfile (if not set, no pidfile is created)
15 | pidfilepath = /var/run/mongodb/mongod.pid
16 |
17 | # Log file to send writes to instead of stdout - has to be a file, not a directory
18 | logpath = /var/log/mongodb/mongod.log
19 |
20 | # Alternative directory for UNIX domain sockets (defaults to /tmp)
21 | unixSocketPrefix = /var/run/mongodb
22 |
23 | # Directory for datafiles (defaults to /data/db/)
24 | dbpath = /var/lib/mongodb
25 |
26 | # Enable/Disable journaling (journaling is on by default for 64 bit)
27 | #journal = true
28 | #nojournal = true
29 |
30 |
31 |
32 | ##
33 | ### General options
34 | ##
35 |
36 | # Be more verbose (include multiple times for more verbosity e.g. -vvvvv) (v by default)
37 | #verbose = v
38 |
39 | # Max number of simultaneous connections (1000000 by default)
40 | #maxConns = 1000000
41 |
42 | # Log to system's syslog facility instead of file or stdout (false by default)
43 | #syslog = true
44 |
45 | # Syslog facility used for mongodb syslog messages (user by default)
46 | #syslogFacility = user
47 |
48 | # Append to logpath instead of over-writing (false by default)
49 | #logappend = true
50 |
51 | # Desired format for timestamps in log messages (One of ctime, iso8601-utc or iso8601-local) (iso8601-local by default)
52 | #timeStampFormat = arg
53 |
54 | # Private key for cluster authentication
55 | #keyFile = arg
56 |
57 | # Set a configurable parameter
58 | #setParameter = arg
59 |
60 | # Enable http interface (false by default)
61 | #httpinterface = true
62 |
63 | # Authentication mode used for cluster authentication. Alternatives are (keyFile|sendKeyFile|sendX509|x509) (keyFile by default)
64 | #clusterAuthMode = arg
65 |
66 | # Disable listening on unix sockets (false by default)
67 | #nounixsocket = true
68 |
69 | # Run with/without security (without by default)
70 | #auth = true
71 | #noauth = true
72 |
73 | # Enable IPv6 support (disabled by default)
74 | #ipv6 = true
75 |
76 | # Allow JSONP access via http (has security implications) (false by default)
77 | #jsonp = true
78 |
79 | # Turn on simple rest api (false by default)
80 | #rest = true
81 |
82 | # Threshold in ms for treating an operation as slow in the profiler and console log (100 by default)
83 | #slowms = 100
84 |
85 | # 0=off 1=slow, 2=all (0 by default)
86 | #profile = 0
87 |
88 | # Periodically show cpu and iowait utilization (false by default)
89 | #cpu = true
90 |
91 | # Print some diagnostic system information (false by default)
92 | #sysinfo = true
93 |
94 | # Each database will be stored in a separate directory (false by default)
95 | #directoryperdb = true
96 |
97 | # Don't retry any index builds that were interrupted by shutdown (false by default)
98 | #noIndexBuildRetry = true
99 |
100 | # Disable data file preallocation - will often hurt performance (false by default)
101 | #noprealloc = true
102 |
103 | # .ns file size (in MB) for new databases (16 MB by default)
104 | #nssize = 16
105 |
106 | # Limits each database to a certain number of files (8 default)
107 | #quota
108 |
109 | # Number of files allowed per db, implies --quota (8 by default)
110 | #quotaFiles = 8
111 |
112 | # Use a smaller default file size (false by default)
113 | smallfiles = true
114 |
115 | # Seconds between disk syncs (0=never, but not recommended) (60 by default)
116 | #syncdelay = 60
117 |
118 | # Upgrade db if needed (false by default)
119 | #upgrade = true
120 |
121 | # Run repair on all dbs (false by default)
122 | #repair = true
123 |
124 | # Root directory for repair files (defaults to dbpath)
125 | #repairpath = arg
126 |
127 | # Disable scripting engine (false by default)
128 | #noscripting = true
129 |
130 | # Do not allow table scans (false by default)
131 | #notablescan = true
132 |
133 | # Journal diagnostic options (0 by default)
134 | #journalOptions = 0
135 |
136 | # How often to group/batch commit (ms) (100 or 30 by default)
137 | #journalCommitInterval = 100
138 |
139 |
140 |
141 | ##
142 | ### Replication options
143 | ##
144 |
145 | # Size to use (in MB) for replication op log (default 5% of disk space - i.e. large is good)
146 | #oplogSize = arg
147 |
148 |
149 |
150 | ##
151 | ### Master/slave options (old; use replica sets instead)
152 | ##
153 |
154 | # Master mode
155 | #master = true
156 |
157 | # Slave mode
158 | #slave = true
159 |
160 | # When slave: specify master as <server:port>
161 | #source = arg
162 |
163 | # When slave: specify a single database to replicate
164 | #only = arg
165 |
166 | # Specify delay (in seconds) to be used when applying master ops to slave
167 | #slavedelay = arg
168 |
169 | # Automatically resync if slave data is stale
170 | #autoresync = true
171 |
172 |
173 |
174 | ##
175 | ### Replica set options
176 | ##
177 |
178 | # Arg is <setname>[/<optionalseedhostlist>]
179 | #replSet = arg
180 |
181 | # Specify index prefetching behavior (if secondary) [none|_id_only|all] (all by default)
182 | #replIndexPrefetch = all
183 |
184 |
185 |
186 | ##
187 | ### Sharding options
188 | ##
189 |
190 | # Declare this is a config db of a cluster (default port 27019; default dir /data/configdb) (false by default)
191 | #configsvr = true
192 |
193 | # Declare this is a shard db of a cluster (default port 27018) (false by default)
194 | #shardsvr = true
195 |
196 |
197 |
198 | ##
199 | ### SSL options
200 | ##
201 |
202 | # Use ssl on configured ports
203 | #sslOnNormalPorts = true
204 |
205 | # Set the SSL operation mode (disabled|allowSSL|preferSSL|requireSSL)
206 | # sslMode = arg
207 |
208 | # PEM file for ssl
209 | #sslPEMKeyFile = arg
210 |
211 | # PEM file password
212 | #sslPEMKeyPassword = arg
213 |
214 | # Key file for internal SSL authentication
215 | #sslClusterFile = arg
216 |
217 | # Internal authentication key file password
218 | #sslClusterPassword = arg
219 |
220 | # Certificate Authority file for SSL
221 | #sslCAFile = arg
222 |
223 | # Certificate Revocation List file for SSL
224 | #sslCRLFile = arg
225 |
226 | # Allow client to connect without presenting a certificate
227 | #sslWeakCertificateValidation = true
228 |
229 | # Allow server certificates to provide non-matching hostnames
230 | #sslAllowInvalidHostnames = true
231 |
232 | # Allow connections to servers with invalid certificates
233 | #sslAllowInvalidCertificates = true
234 |
235 | # Activate FIPS 140-2 mode at startup
236 | #sslFIPSMode = true
237 |
238 |
--------------------------------------------------------------------------------
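The uncommented settings in the mongod.conf above are bind_ip, fork, pidfilepath, logpath, unixSocketPrefix, dbpath and smallfiles; everything else is left at its documented default. A hedged sketch of pushing the file out with Ansible, assuming a Red Hat style host where the package is mongodb-server and the service is mongod (names not taken from this repository's database role):

```yaml
# Hypothetical sketch -- package, destination and service names are assumptions.
- hosts: controller
  tasks:
    - name: Install the MongoDB server package
      yum:
        name: mongodb-server
        state: present

    - name: Install mongod.conf
      copy:
        src: mongod.conf
        dest: /etc/mongod.conf
      notify: restart mongod

    - name: Ensure mongod is enabled and running
      service:
        name: mongod
        state: started
        enabled: yes

  handlers:
    - name: restart mongod
      service:
        name: mongod
        state: restarted
```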
/roles/horizon/templates/local_settings.j2:
--------------------------------------------------------------------------------
1 | import os
2 | from django.utils.translation import ugettext_lazy as _
3 | from openstack_dashboard import exceptions
4 | from openstack_dashboard.settings import HORIZON_CONFIG
5 | DEBUG = False
6 | TEMPLATE_DEBUG = DEBUG
7 | WEBROOT = '/dashboard/'
8 | ALLOWED_HOSTS = ['*', 'localhost']
9 | OPENSTACK_API_VERSIONS = {
10 | "identity": 3,
11 | "image": 2,
12 | "volume": 2,
13 | }
14 | OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
15 | LOCAL_PATH = '/tmp'
16 | SECRET_KEY='338e8547026b098129d1'
17 | SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
18 | CACHES = {
19 | 'default': {
20 | 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
21 | 'LOCATION': [{% for host in groups['controller'] %}'{{ hostvars[host]['ansible_default_ipv4']['address'] }}:11211'{% if not loop.last %},{% endif %}{% endfor %}],
22 | }
23 | }
24 | EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
25 | OPENSTACK_HOST = "{{ internal_vip_address|default(groups['controller'][0]) }}"
26 | OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
27 | OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
28 | OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
29 | OPENSTACK_KEYSTONE_BACKEND = {
30 | 'name': 'native',
31 | 'can_edit_user': True,
32 | 'can_edit_group': True,
33 | 'can_edit_project': True,
34 | 'can_edit_domain': True,
35 | 'can_edit_role': True,
36 | }
37 | OPENSTACK_HYPERVISOR_FEATURES = {
38 | 'can_set_mount_point': False,
39 | 'can_set_password': False,
40 | 'requires_keypair': False,
41 | }
42 | OPENSTACK_CINDER_FEATURES = {
43 | 'enable_backup': False,
44 | }
45 | OPENSTACK_NEUTRON_NETWORK = {
46 | 'enable_router': True,
47 | 'enable_quotas': True,
48 | 'enable_ipv6': True,
49 | 'enable_distributed_router': False,
50 | 'enable_ha_router': False,
51 | 'enable_lb': True,
52 | 'enable_firewall': True,
53 | 'enable_vpn': True,
54 | 'enable_fip_topology_check': True,
55 | # Neutron can be configured with a default Subnet Pool to be used for IPv4
56 | # subnet-allocation. Specify the label you wish to display in the Address
57 | # pool selector on the create subnet step if you want to use this feature.
58 | 'default_ipv4_subnet_pool_label': None,
59 | # Neutron can be configured with a default Subnet Pool to be used for IPv6
60 | # subnet-allocation. Specify the label you wish to display in the Address
61 | # pool selector on the create subnet step if you want to use this feature.
62 | # You must set this to enable IPv6 Prefix Delegation in a PD-capable
63 | # environment.
64 | 'default_ipv6_subnet_pool_label': None,
65 | # The profile_support option is used to detect if an external router can be
66 | # configured via the dashboard. When using specific plugins the
67 | # profile_support can be turned on if needed.
68 | 'profile_support': None,
69 | #'profile_support': 'cisco',
70 | # Set which provider network types are supported. Only the network types
71 | # in this list will be available to choose from when creating a network.
72 | # Network types include local, flat, vlan, gre, and vxlan.
73 | 'supported_provider_types': ['*'],
74 | # Set which VNIC types are supported for port binding. Only the VNIC
75 | # types in this list will be available to choose from when creating a
76 | # port.
77 | # VNIC types include 'normal', 'macvtap' and 'direct'.
78 | # Set to empty list or None to disable VNIC type selection.
79 | 'supported_vnic_types': ['*']
80 | }
81 | IMAGE_CUSTOM_PROPERTY_TITLES = {
82 | "architecture": _("Architecture"),
83 | "kernel_id": _("Kernel ID"),
84 | "ramdisk_id": _("Ramdisk ID"),
85 | "image_state": _("Euca2ools state"),
86 | "project_id": _("Project ID"),
87 | "image_type": _("Image Type"),
88 | }
89 | IMAGE_RESERVED_CUSTOM_PROPERTIES = []
90 | API_RESULT_LIMIT = 1000
91 | API_RESULT_PAGE_SIZE = 20
92 | SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
93 | DROPDOWN_MAX_ITEMS = 30
94 | TIME_ZONE = "UTC"
95 | POLICY_FILES_PATH = '/etc/openstack-dashboard'
96 | POLICY_FILES_PATH = '/etc/openstack-dashboard'
97 | LOGGING = {
98 | 'version': 1,
99 | # When set to True this will disable all logging except
100 | # for loggers specified in this configuration dictionary. Note that
101 | # if nothing is specified here and disable_existing_loggers is True,
102 | # django.db.backends will still log unless it is disabled explicitly.
103 | 'disable_existing_loggers': False,
104 | 'handlers': {
105 | 'null': {
106 | 'level': 'DEBUG',
107 | 'class': 'django.utils.log.NullHandler',
108 | },
109 | 'console': {
110 | # Set the level to "DEBUG" for verbose output logging.
111 | 'level': 'INFO',
112 | 'class': 'logging.StreamHandler',
113 | },
114 | },
115 | 'loggers': {
116 | # Logging from django.db.backends is VERY verbose, send to null
117 | # by default.
118 | 'django.db.backends': {
119 | 'handlers': ['null'],
120 | 'propagate': False,
121 | },
122 | 'requests': {
123 | 'handlers': ['null'],
124 | 'propagate': False,
125 | },
126 | 'horizon': {
127 | 'handlers': ['console'],
128 | 'level': 'DEBUG',
129 | 'propagate': False,
130 | },
131 | 'openstack_dashboard': {
132 | 'handlers': ['console'],
133 | 'level': 'DEBUG',
134 | 'propagate': False,
135 | },
136 | 'novaclient': {
137 | 'handlers': ['console'],
138 | 'level': 'DEBUG',
139 | 'propagate': False,
140 | },
141 | 'cinderclient': {
142 | 'handlers': ['console'],
143 | 'level': 'DEBUG',
144 | 'propagate': False,
145 | },
146 | 'keystoneclient': {
147 | 'handlers': ['console'],
148 | 'level': 'DEBUG',
149 | 'propagate': False,
150 | },
151 | 'glanceclient': {
152 | 'handlers': ['console'],
153 | 'level': 'DEBUG',
154 | 'propagate': False,
155 | },
156 | 'neutronclient': {
157 | 'handlers': ['console'],
158 | 'level': 'DEBUG',
159 | 'propagate': False,
160 | },
161 | 'heatclient': {
162 | 'handlers': ['console'],
163 | 'level': 'DEBUG',
164 | 'propagate': False,
165 | },
166 | 'ceilometerclient': {
167 | 'handlers': ['console'],
168 | 'level': 'DEBUG',
169 | 'propagate': False,
170 | },
171 | 'troveclient': {
172 | 'handlers': ['console'],
173 | 'level': 'DEBUG',
174 | 'propagate': False,
175 | },
176 | 'swiftclient': {
177 | 'handlers': ['console'],
178 | 'level': 'DEBUG',
179 | 'propagate': False,
180 | },
181 | 'openstack_auth': {
182 | 'handlers': ['console'],
183 | 'level': 'DEBUG',
184 | 'propagate': False,
185 | },
186 | 'nose.plugins.manager': {
187 | 'handlers': ['console'],
188 | 'level': 'DEBUG',
189 | 'propagate': False,
190 | },
191 | 'django': {
192 | 'handlers': ['console'],
193 | 'level': 'DEBUG',
194 | 'propagate': False,
195 | },
196 | 'iso8601': {
197 | 'handlers': ['null'],
198 | 'propagate': False,
199 | },
200 | 'scss': {
201 | 'handlers': ['null'],
202 | 'propagate': False,
203 | },
204 | }
205 | }
206 | SECURITY_GROUP_RULES = {
207 | 'all_tcp': {
208 | 'name': _('All TCP'),
209 | 'ip_protocol': 'tcp',
210 | 'from_port': '1',
211 | 'to_port': '65535',
212 | },
213 | 'all_udp': {
214 | 'name': _('All UDP'),
215 | 'ip_protocol': 'udp',
216 | 'from_port': '1',
217 | 'to_port': '65535',
218 | },
219 | 'all_icmp': {
220 | 'name': _('All ICMP'),
221 | 'ip_protocol': 'icmp',
222 | 'from_port': '-1',
223 | 'to_port': '-1',
224 | },
225 | 'ssh': {
226 | 'name': 'SSH',
227 | 'ip_protocol': 'tcp',
228 | 'from_port': '22',
229 | 'to_port': '22',
230 | },
231 | 'smtp': {
232 | 'name': 'SMTP',
233 | 'ip_protocol': 'tcp',
234 | 'from_port': '25',
235 | 'to_port': '25',
236 | },
237 | 'dns': {
238 | 'name': 'DNS',
239 | 'ip_protocol': 'tcp',
240 | 'from_port': '53',
241 | 'to_port': '53',
242 | },
243 | 'http': {
244 | 'name': 'HTTP',
245 | 'ip_protocol': 'tcp',
246 | 'from_port': '80',
247 | 'to_port': '80',
248 | },
249 | 'pop3': {
250 | 'name': 'POP3',
251 | 'ip_protocol': 'tcp',
252 | 'from_port': '110',
253 | 'to_port': '110',
254 | },
255 | 'imap': {
256 | 'name': 'IMAP',
257 | 'ip_protocol': 'tcp',
258 | 'from_port': '143',
259 | 'to_port': '143',
260 | },
261 | 'ldap': {
262 | 'name': 'LDAP',
263 | 'ip_protocol': 'tcp',
264 | 'from_port': '389',
265 | 'to_port': '389',
266 | },
267 | 'https': {
268 | 'name': 'HTTPS',
269 | 'ip_protocol': 'tcp',
270 | 'from_port': '443',
271 | 'to_port': '443',
272 | },
273 | 'smtps': {
274 | 'name': 'SMTPS',
275 | 'ip_protocol': 'tcp',
276 | 'from_port': '465',
277 | 'to_port': '465',
278 | },
279 | 'imaps': {
280 | 'name': 'IMAPS',
281 | 'ip_protocol': 'tcp',
282 | 'from_port': '993',
283 | 'to_port': '993',
284 | },
285 | 'pop3s': {
286 | 'name': 'POP3S',
287 | 'ip_protocol': 'tcp',
288 | 'from_port': '995',
289 | 'to_port': '995',
290 | },
291 | 'ms_sql': {
292 | 'name': 'MS SQL',
293 | 'ip_protocol': 'tcp',
294 | 'from_port': '1433',
295 | 'to_port': '1433',
296 | },
297 | 'mysql': {
298 | 'name': 'MYSQL',
299 | 'ip_protocol': 'tcp',
300 | 'from_port': '3306',
301 | 'to_port': '3306',
302 | },
303 | 'rdp': {
304 | 'name': 'RDP',
305 | 'ip_protocol': 'tcp',
306 | 'from_port': '3389',
307 | 'to_port': '3389',
308 | },
309 | }
310 | REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES']
311 |
--------------------------------------------------------------------------------
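In the local_settings.j2 above, the only templated pieces are the memcached CACHES location and OPENSTACK_HOST. With, say, two controllers whose default IPv4 addresses are 10.0.0.11 and 10.0.0.12 (hypothetical values), LOCATION renders to ['10.0.0.11:11211', '10.0.0.12:11211'], and OPENSTACK_HOST falls back to the first controller's inventory name whenever internal_vip_address is undefined. A hedged sketch of rendering the template into place, assuming the stock /etc/openstack-dashboard/local_settings path and an Apache-served dashboard (assumptions, not taken from this repository's horizon role):

```yaml
# Hypothetical sketch -- destination path and web server service name are assumptions.
- hosts: controller
  tasks:
    - name: Render the Horizon settings file
      template:
        src: local_settings.j2
        dest: /etc/openstack-dashboard/local_settings
      notify: restart httpd

  handlers:
    - name: restart httpd
      service:
        name: httpd
        state: restarted
```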
/ansible.cfg:
--------------------------------------------------------------------------------
1 | # config file for ansible -- http://ansible.com/
2 | # ==============================================
3 |
4 | # nearly all parameters can be overridden in ansible-playbook
5 | # or with command line flags. ansible will read ANSIBLE_CONFIG,
6 | # ansible.cfg in the current working directory, .ansible.cfg in
7 | # the home directory or /etc/ansible/ansible.cfg, whichever it
8 | # finds first
9 |
10 | [defaults]
11 |
12 | # some basic default values...
13 |
14 | #inventory = /etc/ansible/hosts
15 | #library = /usr/share/my_modules/
16 | #remote_tmp = $HOME/.ansible/tmp
17 | #local_tmp = $HOME/.ansible/tmp
18 | #forks = 5
19 | #poll_interval = 15
20 | #sudo_user = root
21 | #ask_sudo_pass = True
22 | #ask_pass = True
23 | #transport = smart
24 | #remote_port = 22
25 | #module_lang = C
26 | #module_set_locale = True
27 |
28 | # plays will gather facts by default, which contain information about
29 | # the remote system.
30 | #
31 | # smart - gather by default, but don't regather if already gathered
32 | # implicit - gather by default, turn off with gather_facts: False
33 | # explicit - do not gather by default, must say gather_facts: True
34 | #gathering = implicit
35 |
36 | # by default retrieve all facts subsets
37 | # all - gather all subsets
38 | # network - gather min and network facts
39 | # hardware - gather hardware facts (longest facts to retrieve)
40 | # virtual - gather min and virtual facts
41 | # facter - import facts from facter
42 | # ohai - import facts from ohai
43 | # You can combine them using comma (ex: network,virtual)
44 | # You can negate them using ! (ex: !hardware,!facter,!ohai)
45 | # A minimal set of facts is always gathered.
46 | #gather_subset = all
47 |
48 | # additional paths to search for roles in, colon separated
49 | #roles_path = /etc/ansible/roles
50 |
51 | # uncomment this to disable SSH key host checking
52 | #host_key_checking = False
53 |
54 | # change the default callback
55 | #stdout_callback = skippy
56 | # enable additional callbacks
57 | #callback_whitelist = timer, mail
58 |
59 | # Determine whether includes in tasks and handlers are "static" by
60 | # default. As of 2.0, includes are dynamic by default. Setting these
61 | # values to True will make includes behave more like they did in the
62 | # 1.x versions.
63 | #task_includes_static = True
64 | #handler_includes_static = True
65 |
66 | # change this for alternative sudo implementations
67 | #sudo_exe = sudo
68 |
69 | # What flags to pass to sudo
70 | # WARNING: leaving out the defaults might create unexpected behaviours
71 | #sudo_flags = -H -S -n
72 |
73 | # SSH timeout
74 | #timeout = 10
75 |
76 | # default user to use for playbooks if user is not specified
77 | # (/usr/bin/ansible will use current user as default)
78 | remote_user = root
79 |
80 | # logging is off by default unless this path is defined
81 | # if so defined, consider logrotate
82 | #log_path = /var/log/ansible.log
83 |
84 | # default module name for /usr/bin/ansible
85 | #module_name = command
86 |
87 | # use this shell for commands executed under sudo
88 | # you may need to change this to bin/bash in rare instances
89 | # if sudo is constrained
90 | #executable = /bin/sh
91 |
92 | # if inventory variables overlap, does the higher precedence one win
93 | # or are hash values merged together? The default is 'replace' but
94 | # this can also be set to 'merge'.
95 | #hash_behaviour = replace
96 |
97 | # by default, variables from roles will be visible in the global variable
98 | # scope. To prevent this, the following option can be enabled, and only
99 | # tasks and handlers within the role will see the variables there
100 | #private_role_vars = yes
101 |
102 | # list any Jinja2 extensions to enable here:
103 | #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
104 |
105 | # if set, always use this private key file for authentication, same as
106 | # if passing --private-key to ansible or ansible-playbook
107 | #private_key_file = /path/to/file
108 |
109 | # If set, configures the path to the Vault password file as an alternative to
110 | # specifying --vault-password-file on the command line.
111 | #vault_password_file = /path/to/vault_password_file
112 |
113 | # format of the string {{ ansible_managed }} available within Jinja2
114 | # templates; it indicates to users editing rendered template files that they will be replaced,
115 | # substituting {file}, {host} and {uid} and strftime codes with proper values.
116 | #ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
117 | # This short version is better used in templates as it won't flag the file as changed every run.
118 | #ansible_managed = Ansible managed: {file} on {host}
119 |
120 | # by default, ansible-playbook will display "Skipping [host]" if it determines a task
121 | # should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
122 | # messages. NOTE: the task header will still be shown regardless of whether or not the
123 | # task is skipped.
124 | #display_skipped_hosts = True
125 |
126 | # by default, if a task in a playbook does not include a name: field then
127 | # ansible-playbook will construct a header that includes the task's action but
128 | # not the task's args. This is a security feature because ansible cannot know
129 | # if the *module* considers an argument to be no_log at the time that the
130 | # header is printed. If your environment doesn't have a problem securing
131 | # stdout from ansible-playbook (or you have manually specified no_log in your
132 | # playbook on all of the tasks where you have secret information) then you can
133 | # safely set this to True to get more informative messages.
134 | #display_args_to_stdout = False
135 |
136 | # by default (as of 1.3), Ansible will raise errors when attempting to dereference
137 | # Jinja2 variables that are not set in templates or action lines. Uncomment this line
138 | # to revert the behavior to pre-1.3.
139 | #error_on_undefined_vars = False
140 |
141 | # by default (as of 1.6), Ansible may display warnings based on the configuration of the
142 | # system running ansible itself. This may include warnings about 3rd party packages or
143 | # other conditions that should be resolved if possible.
144 | # to disable these warnings, set the following value to False:
145 | #system_warnings = True
146 |
147 | # by default (as of 1.4), Ansible may display deprecation warnings for language
148 | # features that should no longer be used and will be removed in future versions.
149 | # to disable these warnings, set the following value to False:
150 | #deprecation_warnings = True
151 |
152 | # (as of 1.8), Ansible can optionally warn when usage of the shell and
153 | # command module appear to be simplified by using a default Ansible module
154 | # instead. These warnings can be silenced by adjusting the following
155 | # setting or adding warn=yes or warn=no to the end of the command line
156 | # parameter string. This will for example suggest using the git module
157 | # instead of shelling out to the git command.
158 | # command_warnings = False
159 |
160 |
161 | # set plugin path directories here, separate with colons
162 | #action_plugins = /usr/share/ansible/plugins/action
163 | #callback_plugins = /usr/share/ansible/plugins/callback
164 | #connection_plugins = /usr/share/ansible/plugins/connection
165 | #lookup_plugins = /usr/share/ansible/plugins/lookup
166 | #vars_plugins = /usr/share/ansible/plugins/vars
167 | #filter_plugins = /usr/share/ansible/plugins/filter
168 | #test_plugins = /usr/share/ansible/plugins/test
169 | #strategy_plugins = /usr/share/ansible/plugins/strategy
170 |
171 | # by default callbacks are not loaded for /bin/ansible, enable this if you
172 | # want, for example, a notification or logging callback to also apply to
173 | # /bin/ansible runs
174 | #bin_ansible_callbacks = False
175 |
176 |
177 | # don't like cows? that's unfortunate.
178 | # set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
179 | #nocows = 1
180 |
181 | # set which cowsay stencil you'd like to use by default. When set to 'random',
182 | # a random stencil will be selected for each task. The selection will be filtered
183 | # against the `cow_whitelist` option below.
184 | #cow_selection = default
185 | #cow_selection = random
186 |
187 | # when using the 'random' option for cowsay, stencils will be restricted to this list.
188 | # it should be formatted as a comma-separated list with no spaces between names.
189 | # NOTE: line continuations here are for formatting purposes only, as the INI parser
190 | # in python does not support them.
191 | #cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
192 | # hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
193 | # stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
194 |
195 | # don't like colors either?
196 | # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
197 | #nocolor = 1
198 |
199 | # if set to a persistent type (not 'memory', for example 'redis') fact values
200 | # from previous runs in Ansible will be stored. This may be useful when
201 | # wanting to use, for example, IP information from one group of servers
202 | # without having to talk to them in the same playbook run to get their
203 | # current IP information.
204 | #fact_caching = memory
205 |
206 |
207 | # retry files
208 | # When a playbook fails by default a .retry file will be created in ~/
209 | # You can disable this feature by setting retry_files_enabled to False
210 | # and you can change the location of the files by setting retry_files_save_path
211 |
212 | #retry_files_enabled = False
213 | #retry_files_save_path = ~/.ansible-retry
214 |
215 | # squash actions
216 | # Ansible can optimise actions that call modules with list parameters
217 | # when looping. Instead of calling the module once per with_ item, the
218 | # module is called once with all items at once. Currently this only works
219 | # under limited circumstances, and only with parameters named 'name'.
220 | #squash_actions = apk,apt,dnf,package,pacman,pkgng,yum,zypper
221 |
222 | # prevents logging of task data, off by default
223 | #no_log = False
224 |
225 | # prevents logging of tasks, but only on the targets, data is still logged on the master/controller
226 | #no_target_syslog = False
227 |
228 | # controls whether Ansible will raise an error or warning if a task has no
229 | # choice but to create world readable temporary files to execute a module on
230 | # the remote machine. This option is False by default for security. Users may
231 | # turn this on to have behaviour more like Ansible prior to 2.1.x. See
232 | # https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
233 | # for more secure ways to fix this than enabling this option.
234 | allow_world_readable_tmpfiles = True
235 |
236 | # controls the compression level of variables sent to
237 | # worker processes. At the default of 0, no compression
238 | # is used. This value must be an integer from 0 to 9.
239 | #var_compression_level = 9
240 |
241 | # controls what compression method is used for new-style ansible modules when
242 | # they are sent to the remote system. The compression types depend on having
243 | # support compiled into both the controller's python and the client's python.
244 | # The names should match with the python Zipfile compression types:
245 | # * ZIP_STORED (no compression. available everywhere)
246 | # * ZIP_DEFLATED (uses zlib, the default)
247 | # These values may be set per host via the ansible_module_compression inventory
248 | # variable
249 | #module_compression = 'ZIP_DEFLATED'
250 |
251 | # This controls the cutoff point (in bytes) on --diff for files
252 | # set to 0 for unlimited (RAM may suffer!).
253 | #max_diff_size = 1048576
254 |
255 | [privilege_escalation]
256 | #become=True
257 | #become_method=sudo
258 | #become_user=root
259 | #become_ask_pass=False
260 |
261 | [paramiko_connection]
262 |
263 | # uncomment this line to cause the paramiko connection plugin to not record new host
264 | # keys encountered. Increases performance on new host additions. Setting works independently of the
265 | # host key checking setting above.
266 | #record_host_keys=False
267 |
268 | # by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
269 | # line to disable this behaviour.
270 | #pty=False
271 |
272 | [ssh_connection]
273 |
274 | # ssh arguments to use
275 | # Leaving off ControlPersist will result in poor performance, so use
276 | # paramiko on older platforms rather than removing it
277 | #ssh_args = -o ControlMaster=auto -o ControlPersist=60s
278 |
279 | # The path to use for the ControlPath sockets. This defaults to
280 | # "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with
281 | # very long hostnames or very long path names (caused by long user names or
282 | # deeply nested home directories) this can exceed the character limit on
283 | # file socket names (108 characters for most platforms). In that case, you
284 | # may wish to shorten the string below.
285 | #
286 | # Example:
287 | # control_path = %(directory)s/%%h-%%r
288 | #control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r
289 |
290 | # Enabling pipelining reduces the number of SSH operations required to
291 | # execute a module on the remote server. This can result in a significant
292 | # performance improvement when enabled, however when using "sudo:" you must
293 | # first disable 'requiretty' in /etc/sudoers
294 | #
295 | # By default, this option is disabled to preserve compatibility with
296 | # sudoers configurations that have requiretty (the default on many distros).
297 | #
298 | pipelining = True
299 |
300 | # if True, make ansible use scp if the connection type is ssh
301 | # (default is sftp)
302 | #scp_if_ssh = True
303 |
304 | # if False, sftp will not use batch mode to transfer files. This may cause some
305 | # types of file transfer failures impossible to catch however, and should
306 | # only be disabled if your sftp version has problems with batch mode
307 | #sftp_batch_mode = False
308 |
309 | [accelerate]
310 | #accelerate_port = 5099
311 | #accelerate_timeout = 30
312 | #accelerate_connect_timeout = 5.0
313 |
314 | # The daemon timeout is measured in minutes. This time is measured
315 | # from the last activity to the accelerate daemon.
316 | #accelerate_daemon_timeout = 30
317 |
318 | # If set to yes, accelerate_multi_key will allow multiple
319 | # private keys to be uploaded to it, though each user must
320 | # have access to the system via SSH to add a new key. The default
321 | # is "no".
322 | #accelerate_multi_key = yes
323 |
324 | [selinux]
325 | # file systems that require special treatment when dealing with security context
326 | # the default behaviour that copies the existing context or uses the user default
327 | # needs to be changed to use the file system dependent context.
328 | #special_context_filesystems=nfs,vboxsf,fuse,ramfs
329 |
330 | # Set this to yes to allow libvirt_lxc connections to work without SELinux.
331 | #libvirt_lxc_noseclabel = yes
332 |
333 | [colors]
334 | #highlight = white
335 | #verbose = blue
336 | #warn = bright purple
337 | #error = red
338 | #debug = dark gray
339 | #deprecate = purple
340 | #skip = cyan
341 | #unreachable = red
342 | #ok = green
343 | #changed = yellow
344 | #diff_add = green
345 | #diff_remove = red
346 | #diff_lines = cyan
347 |
--------------------------------------------------------------------------------
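The ansible.cfg above leaves almost everything at its documented default; the effective changes are remote_user = root, allow_world_readable_tmpfiles = True and pipelining = True. As the pipelining comment notes, pipelined execution breaks under sudo while requiretty is set, so deployments that later switch away from connecting as root usually drop that sudoers default first. A hedged, illustrative sketch of doing so (not part of this repository; the exact Defaults line varies by distribution):

```yaml
# Hypothetical sketch -- illustrates the requiretty prerequisite mentioned in the
# pipelining comment; adjust the regexp to your distribution's sudoers layout.
- hosts: all
  tasks:
    - name: Remove the requiretty default so SSH pipelining works under sudo
      lineinfile:
        dest: /etc/sudoers
        regexp: '^Defaults\s+requiretty'
        state: absent
        validate: 'visudo -cf %s'
```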
/roles/prepare/templates/services.xml.j2:
--------------------------------------------------------------------------------
[The XML markup in this template was lost during extraction -- only the Jinja2
control structures survived -- so the body is summarised here instead of being
reproduced line by line. The template loops over the inventory groups
controller, glance, nova-controller, nova, neutron-controller, neutron,
cinder-controller, cinder, swift-controller, swift, heat, ceilometer-controller
and ceilometer-nova, emitting XML service entries for each host in each group.]
--------------------------------------------------------------------------------
/roles/cinder/templates/lvm.conf.j2:
--------------------------------------------------------------------------------
1 | config {
2 | # If enabled, any LVM2 configuration mismatch is reported.
3 | # This implies checking that the configuration key is understood
4 | # by LVM2 and that the value of the key is of a proper type.
5 | # If disabled, any configuration mismatch is ignored and default
6 | # value is used instead without any warning (a message about the
7 | # configuration key not being found is issued in verbose mode only).
8 | checks = 1
9 | # If enabled, any configuration mismatch aborts the LVM2 process.
10 | abort_on_errors = 0
11 | # Directory where LVM looks for configuration profiles.
12 | profile_dir = "/etc/lvm/profile"
13 | }
14 | devices {
15 | # Where do you want your volume groups to appear ?
16 | dir = "/dev"
17 | # An array of directories that contain the device nodes you wish
18 | # to use with LVM2.
19 | scan = [ "/dev" ]
20 | # If set, the cache of block device nodes with all associated symlinks
21 | # will be constructed out of the existing udev database content.
22 | # This avoids using and opening any inapplicable non-block devices or
23 | # subdirectories found in the device directory. This setting is applied
24 | # to udev-managed device directory only, other directories will be scanned
25 | # fully. LVM2 needs to be compiled with udev support for this setting to
26 | # take effect. N.B. Any device node or symlink not managed by udev in
27 | # udev directory will be ignored with this setting on.
28 | obtain_device_list_from_udev = 1
29 | # If several entries in the scanned directories correspond to the
30 | # same block device and the tools need to display a name for device,
31 | # all the pathnames are matched against each item in the following
32 | # list of regular expressions in turn and the first match is used.
33 | # By default no preferred names are defined.
34 | # preferred_names = [ ]
35 | # Try to avoid using undescriptive /dev/dm-N names, if present.
36 | preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
37 | # In case no preferred name matches or if preferred_names are not
38 | # defined at all, builtin rules are used to determine the preference.
39 | #
40 | # The first builtin rule checks path prefixes and it gives preference
41 | # based on this ordering (where "dev" depends on devices/dev setting):
42 | # /dev/mapper > /dev/disk > /dev/dm-* > /dev/block
43 | #
44 | # If the ordering above cannot be applied, the path with fewer slashes
45 | # gets preference then.
46 | #
47 | # If the number of slashes is the same, a symlink gets preference.
48 | #
49 | # Finally, if all the rules mentioned above are not applicable,
50 | # lexicographical order is used over paths and the smallest one
51 | # of all gets preference.
52 | # A filter that tells LVM2 to only use a restricted set of devices.
53 | # The filter consists of an array of regular expressions. These
54 | # expressions can be delimited by a character of your choice, and
55 | # prefixed with either an 'a' (for accept) or 'r' (for reject).
56 | # The first expression found to match a device name determines if
57 | # the device will be accepted or rejected (ignored). Devices that
58 | # don't match any patterns are accepted.
59 | # Be careful if there are symbolic links or multiple filesystem
60 | # entries for the same device as each name is checked separately against
61 | # the list of patterns. The effect is that if the first pattern in the
62 | # list to match a name is an 'a' pattern for any of the names, the device
63 | # is accepted; otherwise if the first pattern in the list to match a name
64 | # is an 'r' pattern for any of the names it is rejected; otherwise it is
65 | # accepted.
66 | # Don't have more than one filter line active at once: only one gets used.
67 | # Run vgscan after you change this parameter to ensure that
68 | # the cache file gets regenerated (see below).
69 | # If it doesn't do what you expect, check the output of 'vgscan -vvvv'.
70 | # If lvmetad is used, then see "A note about device filtering while
71 | # lvmetad is used" comment that is attached to global/use_lvmetad setting.
72 | # By default we accept every block device:
73 | # filter = [ "a/.*/" ]
74 | # Exclude the cdrom drive
75 | # filter = [ "r|/dev/cdrom|" ]
76 | # When testing I like to work with just loopback devices:
77 | # filter = [ "a/loop/", "r/.*/" ]
78 | # Or maybe all loops and ide drives except hdc:
79 | # filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
80 | # Use anchors if you want to be really specific
81 | # filter = [ "a|^/dev/hda8$|", "r/.*/" ]
82 | # Since "filter" is often overridden from command line, it is not suitable
83 | # for system-wide device filtering (udev rules, lvmetad). To hide devices
84 | # from LVM-specific udev processing and/or from lvmetad, you need to set
85 | # global_filter. The syntax is the same as for normal "filter"
86 | # above. Devices that fail the global_filter are not even opened by LVM.
87 | # global_filter = []
88 | # The results of the filtering are cached on disk to avoid
89 | # rescanning dud devices (which can take a very long time).
90 | # By default this cache is stored in the /etc/lvm/cache directory
91 | # in a file called '.cache'.
92 | # It is safe to delete the contents: the tools regenerate it.
93 | # (The old setting 'cache' is still respected if neither of
94 | # these new ones is present.)
95 | # N.B. If obtain_device_list_from_udev is set to 1 the list of
96 | # devices is instead obtained from udev and any existing .cache
97 | # file is removed.
98 | cache_dir = "/etc/lvm/cache"
99 | cache_file_prefix = ""
100 | # You can turn off writing this cache file by setting this to 0.
101 | write_cache_state = 1
102 | # Advanced settings.
103 | # List of pairs of additional acceptable block device types found
104 | # in /proc/devices with maximum (non-zero) number of partitions.
105 | # types = [ "fd", 16 ]
106 | # If sysfs is mounted (2.6 kernels) restrict device scanning to
107 | # the block devices it believes are valid.
108 | # 1 enables; 0 disables.
109 | sysfs_scan = 1
110 | # By default, LVM2 will ignore devices used as component paths
111 | # of device-mapper multipath devices.
112 | # 1 enables; 0 disables.
113 | multipath_component_detection = 1
114 | # By default, LVM2 will ignore devices used as components of
115 | # software RAID (md) devices by looking for md superblocks.
116 | # 1 enables; 0 disables.
117 | md_component_detection = 1
118 | # By default, if a PV is placed directly upon an md device, LVM2
119 | # will align its data blocks with the md device's stripe-width.
120 | # 1 enables; 0 disables.
121 | md_chunk_alignment = 1
122 | # Default alignment of the start of a data area in MB. If set to 0,
123 | # a value of 64KB will be used. Set to 1 for 1MiB, 2 for 2MiB, etc.
124 | # default_data_alignment = 1
125 | # By default, the start of a PV's data area will be a multiple of
126 | # the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
127 | # - minimum_io_size - the smallest request the device can perform
128 | # w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
129 | # - optimal_io_size - the device's preferred unit of receiving I/O
130 | # (e.g. MD's stripe width)
131 | # minimum_io_size is used if optimal_io_size is undefined (0).
132 | # If md_chunk_alignment is enabled, that detects the optimal_io_size.
133 | # This setting takes precedence over md_chunk_alignment.
134 | # 1 enables; 0 disables.
135 | data_alignment_detection = 1
136 | # Alignment (in KB) of start of data area when creating a new PV.
137 | # md_chunk_alignment and data_alignment_detection are disabled if set.
138 | # Set to 0 for the default alignment (see: data_alignment_default)
139 | # or page size, if larger.
140 | data_alignment = 0
141 | # By default, the start of the PV's aligned data area will be shifted by
142 | # the 'alignment_offset' exposed in sysfs. This offset is often 0 but
143 | # may be non-zero; e.g.: certain 4KB sector drives that compensate for
144 | # windows partitioning will have an alignment_offset of 3584 bytes
145 | # (sector 7 is the lowest aligned logical block, the 4KB sectors start
146 | # at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
147 | # But note that pvcreate --dataalignmentoffset will skip this detection.
148 | # 1 enables; 0 disables.
149 | data_alignment_offset_detection = 1
150 | # If, while scanning the system for PVs, LVM2 encounters a device-mapper
151 | # device that has its I/O suspended, it waits for it to become accessible.
152 | # Set this to 1 to skip such devices. This should only be needed
153 | # in recovery situations.
154 | ignore_suspended_devices = 0
155 | # ignore_lvm_mirrors: Introduced in version 2.02.104
156 | # This setting determines whether logical volumes of "mirror" segment
157 | # type are scanned for LVM labels. This affects the ability of
158 | # mirrors to be used as physical volumes. If 'ignore_lvm_mirrors'
159 | # is set to '1', it becomes impossible to create volume groups on top
160 | # of mirror logical volumes - i.e. to stack volume groups on mirrors.
161 | #
162 | # Allowing mirror logical volumes to be scanned (setting the value to '0')
163 | # can potentially cause LVM processes and I/O to the mirror to become
164 | # blocked. This is due to the way that the "mirror" segment type handles
165 | # failures. In order for the hang to manifest itself, an LVM command must
166 | # be run just after a failure and before the automatic LVM repair process
167 | # takes place OR there must be failures in multiple mirrors in the same
168 | # volume group at the same time with write failures occurring moments
169 | # before a scan of the mirror's labels.
170 | #
171 | # Note that these scanning limitations do not apply to the LVM RAID
172 | # types, like "raid1". The RAID segment types handle failures in a
173 | # different way and are not subject to possible process or I/O blocking.
174 | #
175 | # It is encouraged that users set 'ignore_lvm_mirrors' to 1 if they
176 | # are using the "mirror" segment type. Users that require volume group
177 | # stacking on mirrored logical volumes should consider using the "raid1"
178 | # segment type. The "raid1" segment type is not available for
179 | # active/active clustered volume groups.
180 | #
181 | # Set to 1 to disallow stacking and thereby avoid a possible deadlock.
182 | ignore_lvm_mirrors = 1
183 | # During each LVM operation errors received from each device are counted.
184 | # If the counter of a particular device exceeds the limit set here, no
185 | # further I/O is sent to that device for the remainder of the respective
186 | # operation. Setting the parameter to 0 disables the counters altogether.
187 | disable_after_error_count = 0
188 | # Allow use of pvcreate --uuid without requiring --restorefile.
189 | require_restorefile_with_uuid = 1
190 | # Minimum size (in KB) of block devices which can be used as PVs.
191 | # In a clustered environment all nodes must use the same value.
192 | # Any value smaller than 512KB is ignored.
193 | # Ignore devices smaller than 2MB such as floppy drives.
194 | pv_min_size = 2048
195 | # The original built-in setting was 512 up to and including version 2.02.84.
196 | # pv_min_size = 512
197 | # Issue discards to a logical volume's underlying physical volume(s) when
198 | # the logical volume is no longer using the physical volumes' space (e.g.
199 | # lvremove, lvreduce, etc). Discards inform the storage that a region is
200 | # no longer in use. Storage that supports discards advertise the protocol
201 | # specific way discards should be issued by the kernel (TRIM, UNMAP, or
202 | # WRITE SAME with UNMAP bit set). Not all storage will support or benefit
203 | # from discards but SSDs and thinly provisioned LUNs generally do. If set
204 | # to 1, discards will only be issued if both the storage and kernel provide
205 | # support.
206 | # 1 enables; 0 disables.
207 | issue_discards = 0
208 | }
209 | allocation {
210 | # When searching for free space to extend an LV, the "cling"
211 | # allocation policy will choose space on the same PVs as the last
212 | # segment of the existing LV. If there is insufficient space and a
213 | # list of tags is defined here, it will check whether any of them are
214 | # attached to the PVs concerned and then seek to match those PV tags
215 | # between existing extents and new extents.
216 | # Use the special tag "@*" as a wildcard to match any PV tag.
217 |
218 | # Example: LVs are mirrored between two sites within a single VG.
219 | # PVs are tagged with either @site1 or @site2 to indicate where
220 | # they are situated.
221 | # cling_tag_list = [ "@site1", "@site2" ]
222 | # cling_tag_list = [ "@*" ]
223 | # Changes made in version 2.02.85 extended the reach of the 'cling'
224 | # policies to detect more situations where data can be grouped
225 | # onto the same disks. Set this to 0 to revert to the previous
226 | # algorithm.
227 | maximise_cling = 1
228 | # Whether to use blkid library instead of native LVM2 code to detect
229 | # any existing signatures while creating new Physical Volumes and
230 | # Logical Volumes. LVM2 needs to be compiled with blkid wiping support
231 | # for this setting to take effect.
232 | #
233 | # LVM2 native detection code is currently able to recognize these signatures:
234 | # - MD device signature
235 | # - swap signature
236 | # - LUKS signature
237 | # To see the list of signatures recognized by blkid, check the output
238 | # of 'blkid -k' command. The blkid can recognize more signatures than
239 | # LVM2 native detection code, but due to this higher number of signatures
240 | # to be recognized, it can take more time to complete the signature scan.
241 | use_blkid_wiping = 1
242 | # Set to 1 to wipe any signatures found on newly-created Logical Volumes
243 | # automatically in addition to zeroing of the first KB on the LV
244 | # (controlled by the -Z/--zero y option).
245 | # The command line option -W/--wipesignatures takes precedence over this
246 | # setting.
247 | # The default is to wipe signatures when zeroing.
248 | #
249 | wipe_signatures_when_zeroing_new_lvs = 1
250 | # Set to 1 to guarantee that mirror logs will always be placed on
251 | # different PVs from the mirror images. This was the default
252 | # until version 2.02.85.
253 | mirror_logs_require_separate_pvs = 0
254 | # Set to 1 to guarantee that cache_pool metadata will always be
255 | # placed on different PVs from the cache_pool data.
256 | cache_pool_metadata_require_separate_pvs = 0
257 | # Specify the minimal chunk size (in kiB) for cache pool volumes.
258 | # Using a chunk_size that is too large can result in wasteful use of
259 | # the cache, where small reads and writes can cause large sections of
260 | # an LV to be mapped into the cache. However, choosing a chunk_size
261 | # that is too small can result in more overhead trying to manage the
262 | # numerous chunks that become mapped into the cache. The former is
263 | # more of a problem than the latter in most cases, so we default to
264 | # a value that is on the smaller end of the spectrum. Supported values
265 | # range from 32(kiB) to 1048576 in multiples of 32.
266 | # cache_pool_chunk_size = 64
267 | # Specify the default cache mode used for new cache pools.
268 | # Possible options are:
269 | # "writethrough" - Data blocks are immediately written from
270 | # the cache to disk.
271 | # "writeback" - Data blocks are written from the cache
272 | # back to disk after some delay to improve
273 | # performance.
274 | # cache_pool_cachemode = "writethrough"
275 | # Set to 1 to guarantee that thin pool metadata will always
276 | # be placed on different PVs from the pool data.
277 | thin_pool_metadata_require_separate_pvs = 0
278 | # Specify chunk size calculation policy for thin pool volumes.
279 | # Possible options are:
280 | # "generic" - if thin_pool_chunk_size is defined, use it.
281 | # Otherwise, calculate the chunk size based on
282 | # estimation and device hints exposed in sysfs:
283 | # the minimum_io_size. The chunk size is always
284 | # at least 64KiB.
285 | #
286 | # "performance" - if thin_pool_chunk_size is defined, use it.
287 | # Otherwise, calculate the chunk size for
288 | # performance based on device hints exposed in
289 | # sysfs: the optimal_io_size. The chunk size is
290 | # always at least 512KiB.
291 | # thin_pool_chunk_size_policy = "generic"
292 | # Specify the minimal chunk size (in KB) for thin pool volumes.
293 | # Use of the larger chunk size may improve performance for plain
294 | # thin volumes, however using them for snapshot volumes is less efficient,
295 | # as it consumes more space and takes extra time for copying.
296 | # When unset, lvm tries to estimate chunk size starting from 64KB
297 | # Supported values are in range from 64 to 1048576.
298 | # thin_pool_chunk_size = 64
299 | # Specify discards behaviour of the thin pool volume.
300 | # Select one of "ignore", "nopassdown", "passdown"
301 | # thin_pool_discards = "passdown"
302 | # Set to 0, to disable zeroing of thin pool data chunks before their
303 | # first use.
304 | # N.B. zeroing larger thin pool chunk size degrades performance.
305 | # thin_pool_zero = 1
306 | # Default physical extent size to use for newly created VGs (in KB).
307 | # physical_extent_size = 4096
308 | }
309 | log {
310 | # Controls the messages sent to stdout or stderr.
311 | # There are three levels of verbosity, 3 being the most verbose.
312 | verbose = 0
313 | # Set to 1 to suppress all non-essential messages from stdout.
314 | # This has the same effect as -qq.
315 | # When this is set, the following commands still produce output:
316 | # dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
317 | # pvs, version, vgcfgrestore -l, vgdisplay, vgs.
318 | # Non-essential messages are shifted from log level 4 to log level 5
319 | # for syslog and lvm2_log_fn purposes.
320 | # Any 'yes' or 'no' questions not overridden by other arguments
321 | # are suppressed and default to 'no'.
322 | silent = 0
323 | # Should we send log messages through syslog?
324 | # 1 is yes; 0 is no.
325 | syslog = 1
326 | # Should we log error and debug messages to a file?
327 | # By default there is no log file.
328 | #file = "/var/log/lvm2.log"
329 | # Should we overwrite the log file each time the program is run?
330 | # By default we append.
331 | overwrite = 0
332 | # What level of log messages should we send to the log file and/or syslog?
333 | # There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
334 | # 7 is the most verbose (LOG_DEBUG).
335 | level = 0
336 | # Format of output messages
337 | # Whether or not (1 or 0) to indent messages according to their severity
338 | indent = 1
339 | # Whether or not (1 or 0) to display the command name on each line output
340 | command_names = 0
341 | # A prefix to use before the message text (but after the command name,
342 | # if selected). Default is two spaces, so you can see/grep the severity
343 | # of each message.
344 | prefix = " "
345 | # To make the messages look similar to the original LVM tools use:
346 | # indent = 0
347 | # command_names = 1
348 | # prefix = " -- "
349 | # Set this if you want log messages during activation.
350 | # Don't use this in low memory situations (can deadlock).
351 | # activation = 0
352 | # Some debugging messages are assigned to a class and only appear
353 | # in debug output if the class is listed here.
354 | # Classes currently available:
355 | # memory, devices, activation, allocation, lvmetad, metadata, cache,
356 | # locking
357 | # Use "all" to see everything.
358 | debug_classes = [ "memory", "devices", "activation", "allocation",
359 | "lvmetad", "metadata", "cache", "locking" ]
360 | }
361 | backup {
362 | # Should we maintain a backup of the current metadata configuration ?
363 | # Use 1 for Yes; 0 for No.
364 | # Think very hard before turning this off!
365 | backup = 1
366 | # Where shall we keep it ?
367 | # Remember to back up this directory regularly!
368 | backup_dir = "/etc/lvm/backup"
369 | # Should we maintain an archive of old metadata configurations.
370 | # Use 1 for Yes; 0 for No.
371 | # On by default. Think very hard before turning this off.
372 | archive = 1
373 | # Where should archived files go ?
374 | # Remember to back up this directory regularly!
375 | archive_dir = "/etc/lvm/archive"
376 | # What is the minimum number of archive files you wish to keep ?
377 | retain_min = 10
378 | # What is the minimum time you wish to keep an archive file for ?
379 | retain_days = 30
380 | }
381 | shell {
382 | # Number of lines of history to store in ~/.lvm_history
383 | history_size = 100
384 | }
385 | global {
386 | # The file creation mask for any files and directories created.
387 | # Interpreted as octal if the first digit is zero.
388 | umask = 077
389 | # Allow other users to read the files
390 | #umask = 022
391 | # Enabling test mode means that no changes to the on disk metadata
392 | # will be made. Equivalent to having the -t option on every
393 | # command. Defaults to off.
394 | test = 0
395 | # Default value for --units argument
396 | units = "h"
397 | # Since version 2.02.54, the tools distinguish between powers of
398 | # 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
399 | # KB, MB, GB).
400 | # If you have scripts that depend on the old behaviour, set this to 0
401 | # temporarily until you update them.
402 | si_unit_consistency = 1
403 | # Whether or not to display unit suffix for sizes. This setting has
404 | # no effect if the units are in human-readable form (global/units="h")
405 | # in which case the suffix is always displayed.
406 | suffix = 1
407 | # Whether or not to communicate with the kernel device-mapper.
408 | # Set to 0 if you want to use the tools to manipulate LVM metadata
409 | # without activating any logical volumes.
410 | # If the device-mapper kernel driver is not present in your kernel
411 | # setting this to 0 should suppress the error messages.
412 | activation = 1
413 | # If we can't communicate with device-mapper, should we try running
414 | # the LVM1 tools?
415 | # This option only applies to 2.4 kernels and is provided to help you
416 | # switch between device-mapper kernels and LVM1 kernels.
417 | # The LVM1 tools need to be installed with .lvm1 suffices
418 | # e.g. vgscan.lvm1 and they will stop working after you start using
419 | # the new lvm2 on-disk metadata format.
420 | # The default value is set when the tools are built.
421 | # fallback_to_lvm1 = 0
422 | # The default metadata format that commands should use - "lvm1" or "lvm2".
423 | # The command line override is -M1 or -M2.
424 | # Defaults to "lvm2".
425 | # format = "lvm2"
426 | # Location of proc filesystem
427 | proc = "/proc"
428 | # Type of locking to use. Defaults to local file-based locking (1).
429 | # Turn locking off by setting to 0 (dangerous: risks metadata corruption
430 | # if LVM2 commands get run concurrently).
431 | # Type 2 uses the external shared library locking_library.
432 | # Type 3 uses built-in clustered locking.
433 | # Type 4 uses read-only locking which forbids any operations that might
434 | # change metadata.
435 | # Type 5 offers dummy locking for tools that do not need any locks.
436 | # You should not need to set this directly: the tools will select when
437 | # to use it instead of the configured locking_type. Do not use lvmetad or
438 | # the kernel device-mapper driver with this locking type.
439 | # It is used by the --readonly option that offers read-only access to
440 | # Volume Group metadata that cannot be locked safely because it belongs to
441 | # an inaccessible domain and might be in use, for example a virtual machine
442 | # image or a disk that is shared by a clustered machine.
443 | #
444 | # N.B. Don't use lvmetad with locking type 3 as lvmetad is not yet
445 | # supported in clustered environment. If use_lvmetad=1 and locking_type=3
446 | # is set at the same time, LVM always issues a warning message about this
447 | # and then it automatically disables lvmetad use.
448 | locking_type = 1
449 | # Set to 0 to fail when a lock request cannot be satisfied immediately.
450 | wait_for_locks = 1
451 | # If using external locking (type 2) and initialisation fails,
452 | # with this set to 1 an attempt will be made to use the built-in
453 | # clustered locking.
454 | # If you are using a customised locking_library you should set this to 0.
455 | fallback_to_clustered_locking = 1
456 | # If an attempt to initialise type 2 or type 3 locking failed, perhaps
457 | # because cluster components such as clvmd are not running, with this set
458 | # to 1 an attempt will be made to use local file-based locking (type 1).
459 | # If this succeeds, only commands against local volume groups will proceed.
460 | # Volume Groups marked as clustered will be ignored.
461 | fallback_to_local_locking = 1
462 | # Local non-LV directory that holds file-based locks while commands are
463 | # in progress. A directory like /tmp that may get wiped on reboot is OK.
464 | locking_dir = "/run/lock/lvm"
465 | # Whenever there are competing read-only and read-write access requests for
466 | # a volume group's metadata, instead of always granting the read-only
467 | # requests immediately, delay them to allow the read-write requests to be
468 | # serviced. Without this setting, write access may be stalled by a high
469 | # volume of read-only requests.
470 | # NB. This option only affects locking_type = 1 viz. local file-based
471 | # locking.
472 | prioritise_write_locks = 1
473 | # Other entries can go here to allow you to load shared libraries
474 | # e.g. if support for LVM1 metadata was compiled as a shared library use
475 | # format_libraries = "liblvm2format1.so"
476 | # Full pathnames can be given.
477 | # Search this directory first for shared libraries.
478 | # library_dir = "/lib"
479 | # The external locking library to load if locking_type is set to 2.
480 | # locking_library = "liblvm2clusterlock.so"
481 | # Treat any internal errors as fatal errors, aborting the process that
482 | # encountered the internal error. Please only enable for debugging.
483 | abort_on_internal_errors = 0
484 | # Check whether CRC is matching when parsed VG is used multiple times.
485 | # This is useful to catch unexpected internal cached volume group
486 | # structure modification. Please only enable for debugging.
487 | detect_internal_vg_cache_corruption = 0
488 | # If set to 1, no operations that change on-disk metadata will be permitted.
489 | # Additionally, read-only commands that encounter metadata in need of repair
490 | # will still be allowed to proceed exactly as if the repair had been
491 | # performed (except for the unchanged vg_seqno).
492 | # Inappropriate use could mess up your system, so seek advice first!
493 | metadata_read_only = 0
494 | # 'mirror_segtype_default' defines which segtype will be used when the
495 | # shorthand '-m' option is used for mirroring. The possible options are:
496 | #
497 | # "mirror" - The original RAID1 implementation provided by LVM2/DM. It is
498 | # characterized by a flexible log solution (core, disk, mirrored)
499 | # and by the necessity to block I/O while reconfiguring in the
500 | # event of a failure.
501 | #
502 | # There is an inherent race in the dmeventd failure handling
503 | # logic with snapshots of devices using this type of RAID1 that
504 | # in the worst case could cause a deadlock.
505 | # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=817130#c10
506 | #
507 | # "raid1" - This implementation leverages MD's RAID1 personality through
508 | # device-mapper. It is characterized by a lack of log options.
509 | # (A log is always allocated for every device and they are placed
510 | # on the same device as the image - no separate devices are
511 | # required.) This mirror implementation does not require I/O
512 | # to be blocked in the kernel in the event of a failure.
513 | # This mirror implementation is not cluster-aware and cannot be
514 | # used in a shared (active/active) fashion in a cluster.
515 | #
516 | # Specify the '--type <raid1|mirror>' option to override this default
517 | # setting.
518 | mirror_segtype_default = "raid1"
519 | # 'raid10_segtype_default' determines the segment types used by default
520 | # when the '--stripes/-i' and '--mirrors/-m' arguments are both specified
521 | # during the creation of a logical volume.
522 | # Possible settings include:
523 | #
524 | # "raid10" - This implementation leverages MD's RAID10 personality through
525 | # device-mapper.
526 | #
527 | # "mirror" - LVM will layer the 'mirror' and 'stripe' segment types. It
528 | # will do this by creating a mirror on top of striped sub-LVs;
529 | # effectively creating a RAID 0+1 array. This is suboptimal
530 | # in terms of providing redundancy and performance. Changing to
531 | # this setting is not advised.
532 | # Specify the '--type <raid10|mirror>' option to override this default
533 | # setting.
534 | raid10_segtype_default = "raid10"
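# For illustration only (the VG/LV names and size below are hypothetical
# placeholders, not part of this deployment): a striped and mirrored LV using
# the MD-backed raid10 segment type could be created with a command like:
#   lvcreate --type raid10 -i 2 -m 1 -L 10G -n lv_r10 vg0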
535 | # 'sparse_segtype_default' defines which segtype will be used when the
536 | # shorthand '-V and -L' option is used for sparse volume creation.
537 | #
538 | # "snapshot" - The original snapshot implementation provided by LVM2/DM.
539 | # It mixes data and metadata within a single COW storage
540 | # volume and performs poorly when the size of the stored
541 | # data grows beyond hundreds of MB.
542 | #
543 | # "thin" - Newer implementation leverages thin provisioning target.
544 | # It has bigger minimal chunk size (64KiB) and uses separate volume
545 | # for metadata. It has better performance especially in case of
546 | # bigger data uses. This device type has also full snapshot support.
547 | #
548 | # Specify the '--type <snapshot|thin>' option to override this default
549 | # setting.
550 | sparse_segtype_default = "thin"
551 | # The default format for displaying LV names in lvdisplay was changed
552 | # in version 2.02.89 to show the LV name and path separately.
553 | # Previously this was always shown as /dev/vgname/lvname even when that
554 | # was never a valid path in the /dev filesystem.
555 | # Set to 1 to reinstate the previous format.
556 | #
557 | # lvdisplay_shows_full_device_path = 0
558 | # Whether to use (trust) a running instance of lvmetad. If this is set to
559 | # 0, all commands fall back to the usual scanning mechanisms. When set to 1
560 | # *and* when lvmetad is running (automatically instantiated by making use of
561 | # systemd's socket-based service activation or run as an initscripts service
562 | # or run manually), the volume group metadata and PV state flags are obtained
563 | # from the lvmetad instance and no scanning is done by the individual
564 | # commands. In a setup with lvmetad, lvmetad udev rules *must* be set up for
565 | # LVM to work correctly. Without proper udev rules, all changes in block
566 | # device configuration will be *ignored* until a manual 'pvscan --cache'
567 | # is performed. These rules are installed by default.
568 | #
569 | # If lvmetad has been running while use_lvmetad was 0, it MUST be stopped
570 | # before changing use_lvmetad to 1 and started again afterwards.
571 | #
572 | # If using lvmetad, volume activation is also switched to automatic
573 | # event-based mode. In this mode, the volumes are activated based on
574 | # incoming udev events that automatically inform lvmetad about new PVs that
575 | # appear in the system. Once a VG is complete (all the PVs are present), it
576 | # is auto-activated. The activation/auto_activation_volume_list setting
577 | # controls which volumes are auto-activated (all by default).
578 | # A note about device filtering while lvmetad is used:
579 | # When lvmetad is updated (either automatically based on udev events or
580 | # directly by a pvscan --cache call), devices/filter is ignored and
581 | # all devices are scanned by default -- lvmetad always keeps unfiltered
582 | # information which is then provided to LVM commands and then each LVM
583 | # command does the filtering based on devices/filter setting itself. This
584 | # does not apply to non-regexp filters though: component filters such as
585 | # multipath and MD are checked at pvscan --cache time.
586 | # In order to completely prevent LVM from scanning a device, even when using
587 | # lvmetad, devices/global_filter must be used.
588 | # N.B. Don't use lvmetad with locking type 3 as lvmetad is not yet
589 | # supported in clustered environment. If use_lvmetad=1 and locking_type=3
590 | # is set at the same time, LVM always issues a warning message about this
591 | # and then it automatically disables use_lvmetad.
592 | use_lvmetad = 1
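# For illustration only (the device path is a hypothetical placeholder): to
# completely hide a device from LVM even while lvmetad is running, a reject
# rule can be added to devices/global_filter, for example:
#   global_filter = [ "r|^/dev/sdb$|", "a|.*|" ]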
593 | # Full path of the utility called to check that a thin metadata device
594 | # is in a state that allows it to be used.
595 | # Each time a thin pool needs to be activated or after it is deactivated
596 | # this utility is executed. The activation will only proceed if the utility
597 | # has an exit status of 0.
598 | # Set to "" to skip this check. (Not recommended.)
599 | # The thin tools are available as part of the device-mapper-persistent-data
600 | # package from https://github.com/jthornber/thin-provisioning-tools.
601 | #
602 | # thin_check_executable = "/usr/sbin/thin_check"
603 | # Array of string options passed with thin_check command. By default,
604 | # option "-q" is for quiet output.
605 | # With thin_check version 2.1 or newer you can add "--ignore-non-fatal-errors"
606 | # to let it pass through ignorable errors and fix them later.
607 | # With thin_check version 3.2 or newer you should add
608 | # "--clear-needs-check-flag".
609 | #
610 | # thin_check_options = [ "-q", "--clear-needs-check-flag" ]
611 | # Full path of the utility called to repair a thin metadata device
612 | # and bring it into a state that allows it to be used.
613 | # Each time a thin pool needs repair this utility is executed.
614 | # See thin_check_executable for how to obtain the binaries.
615 | #
616 | # thin_repair_executable = "/usr/sbin/thin_repair"
617 | # Array of extra string options passed with thin_repair command.
618 | # thin_repair_options = [ "" ]
619 | # Full path of the utility called to dump thin metadata content.
620 | # See thin_check_executable for how to obtain the binaries.
621 | #
622 | # thin_dump_executable = "/usr/sbin/thin_dump"
623 | # If set, given features are not used by thin driver.
624 | # This can be helpful not just for testing, but e.g. to avoid
625 | # using a problematic implementation of some thin feature.
626 | # Features:
627 | # block_size
628 | # discards
629 | # discards_non_power_2
630 | # external_origin
631 | # metadata_resize
632 | # external_origin_extend
633 | # error_if_no_space
634 | #
635 | # thin_disabled_features = [ "discards", "block_size" ]
636 | # Full path of the utility called to check that a cache metadata device
637 | # is in a state that allows it to be used.
638 | # Each time a cached LV needs to be used or after it is deactivated
639 | # this utility is executed. The activation will only proceed if the utility
640 | # has an exit status of 0.
641 | # Set to "" to skip this check. (Not recommended.)
642 | # The cache tools are available as part of the device-mapper-persistent-data
643 | # package from https://github.com/jthornber/thin-provisioning-tools.
644 | #
645 | # cache_check_executable = "/usr/sbin/cache_check"
646 | # Array of string options passed with cache_check command. By default,
647 | # option "-q" is for quiet output.
648 | #
649 | # cache_check_options = [ "-q" ]
650 | # Full path of the utility called to repair a cache metadata device.
651 | # Each time a cache metadata needs repair this utility is executed.
652 | # See cache_check_executable for how to obtain the binaries.
653 | #
654 | # cache_repair_executable = "/usr/sbin/cache_repair"
655 | # Array of extra string options passed with cache_repair command.
656 | # cache_repair_options = [ "" ]
657 | # Full path of the utility called to dump cache metadata content.
658 | # See cache_check_executable for how to obtain the binaries.
659 | #
660 | # cache_dump_executable = "/usr/sbin/cache_dump"
661 | }
662 | activation {
663 | # Set to 1 to perform internal checks on the operations issued to
664 | # libdevmapper. Useful for debugging problems with activation.
665 | # Some of the checks may be expensive, so it's best to use this
666 | # only when there seems to be a problem.
667 | checks = 0
668 | # Set to 0 to disable udev synchronisation (if compiled into the binaries).
669 | # Processes will not wait for notification from udev.
670 | # They will continue irrespective of any possible udev processing
671 | # in the background. You should only use this if udev is not running
672 | # or has rules that ignore the devices LVM2 creates.
673 | # The command line argument --nodevsync takes precedence over this setting.
674 | # If set to 1 when udev is not running, and there are LVM2 processes
675 | # waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
676 | udev_sync = 1
677 | # Set to 0 to disable the udev rules installed by LVM2 (if built with
678 | # --enable-udev_rules). LVM2 will then manage the /dev nodes and symlinks
679 | # for active logical volumes directly itself.
680 | # N.B. Manual intervention may be required if this setting is changed
681 | # while any logical volumes are active.
682 | udev_rules = 1
683 | # Set to 1 for LVM2 to verify operations performed by udev. This turns on
684 | # additional checks (and if necessary, repairs) on entries in the device
685 | # directory after udev has completed processing its events.
686 | # Useful for diagnosing problems with LVM2/udev interactions.
687 | verify_udev_operations = 0
688 | # If set to 1 and if deactivation of an LV fails, perhaps because
689 | # a process run from a quick udev rule temporarily opened the device,
690 | # retry the operation for a few seconds before failing.
691 | retry_deactivation = 1
692 | # How to fill in missing stripes if activating an incomplete volume.
693 | # Using "error" will make inaccessible parts of the device return
694 | # I/O errors on access. You can instead use a device path, in which
695 | # case, that device will be used in place of missing stripes.
696 | # But note that using anything other than "error" with mirrored
697 | # or snapshotted volumes is likely to result in data corruption.
698 | missing_stripe_filler = "error"
699 | # The linear target is an optimised version of the striped target
700 | # that only handles a single stripe. Set this to 0 to disable this
701 | # optimisation and always use the striped target.
702 | use_linear_target = 1
703 | # How much stack (in KB) to reserve for use while devices suspended
704 | # Prior to version 2.02.89 this used to be set to 256KB
705 | reserved_stack = 64
706 | # How much memory (in KB) to reserve for use while devices suspended
707 | reserved_memory = 8192
708 | # Nice value used while devices suspended
709 | process_priority = -18
710 | # If volume_list is defined, each LV is only activated if there is a
711 | # match against the list.
712 | #
713 | # "vgname" and "vgname/lvname" are matched exactly.
714 | # "@tag" matches any tag set in the LV or VG.
715 | # "@*" matches if any tag defined on the host is also set in the LV or VG
716 | #
717 | # If any host tags exist but volume_list is not defined, a default
718 | # single-entry list containing "@*" is assumed.
719 | #
720 | # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
721 | # If auto_activation_volume_list is defined, each LV that is to be
722 | # activated with the autoactivation option (--activate ay/-a ay) is
723 | # first checked against the list. There are two scenarios in which
724 | # the autoactivation option is used:
725 | #
726 | # - automatic activation of volumes based on incoming PVs. If all the
727 | # PVs making up a VG are present in the system, the autoactivation
728 | # is triggered. This requires lvmetad (global/use_lvmetad=1) and udev
729 | # to be running. In this case, "pvscan --cache -aay" is called
730 | # automatically without any user intervention while processing
731 | # udev events. Please, make sure you define auto_activation_volume_list
732 | # properly so only the volumes you want and expect are autoactivated.
733 | #
734 | # - direct activation on command line with the autoactivation option.
735 | # In this case, the user calls "vgchange --activate ay/-a ay" or
736 | # "lvchange --activate ay/-a ay" directly.
737 | #
738 | # By default, the auto_activation_volume_list is not defined and all
739 | # volumes will be activated either automatically or by using --activate ay/-a ay.
740 | #
741 | # N.B. The "activation/volume_list" is still honoured in all cases so even
742 | # if the VG/LV passes the auto_activation_volume_list, it still needs to
743 | # pass the volume_list for it to be activated in the end.
744 | # If auto_activation_volume_list is defined but empty, no volumes will be
745 | # activated automatically and --activate ay/-a ay will do nothing.
746 | #
747 | # auto_activation_volume_list = []
748 | # If auto_activation_volume_list is defined and it's not empty, only matching
749 | # volumes will be activated either automatically or by using --activate ay/-a ay.
750 | #
751 | # "vgname" and "vgname/lvname" are matched exactly.
752 | # "@tag" matches any tag set in the LV or VG.
753 | # "@*" matches if any tag defined on the host is also set in the LV or VG
754 | #
755 | # auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
756 | # If read_only_volume_list is defined, each LV that is to be activated
757 | # is checked against the list, and if it matches, it is activated
758 | # in read-only mode. (This overrides '--permission rw' stored in the
759 | # metadata.)
760 | #
761 | # "vgname" and "vgname/lvname" are matched exactly.
762 | # "@tag" matches any tag set in the LV or VG.
763 | # "@*" matches if any tag defined on the host is also set in the LV or VG
764 | #
765 | # read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
766 | # Each LV can have an 'activation skip' flag stored persistently against it.
767 | # During activation, this flag is used to decide whether such an LV is skipped.
768 | # The 'activation skip' flag can be set during LV creation and by default it
769 | # is automatically set for thin snapshot LVs. The 'auto_set_activation_skip'
770 | # enables or disables this automatic setting of the flag while LVs are created.
771 | # auto_set_activation_skip = 1
772 | # Control error behavior when provisioned device becomes full.
773 | # Set to 1 to return an error immediately when the device runs out of free space.
774 | # error_when_full = 0
775 | # For RAID or 'mirror' segment types, 'raid_region_size' is the
776 | # size (in KiB) of each:
777 | # - synchronization operation when initializing
778 | # - each copy operation when performing a 'pvmove' (using 'mirror' segtype)
779 | # This setting has replaced 'mirror_region_size' since version 2.02.99
780 | raid_region_size = 512
781 | # Setting to use when there is no readahead value stored in the metadata.
782 | #
783 | # "none" - Disable readahead.
784 | # "auto" - Use default value chosen by kernel.
785 | readahead = "auto"
786 | # 'raid_fault_policy' defines how a device failure in a RAID logical
787 | # volume is handled. This includes logical volumes that have the following
788 | # segment types: raid1, raid4, raid5*, and raid6*.
789 | #
790 | # In the event of a failure, the following policies will determine what
791 | # actions are performed during the automated response to failures (when
792 | # dmeventd is monitoring the RAID logical volume) and when 'lvconvert' is
793 | # called manually with the options '--repair' and '--use-policies'.
794 | #
795 | # "warn" - Use the system log to warn the user that a device in the RAID
796 | # logical volume has failed. It is left to the user to run
797 | # 'lvconvert --repair' manually to remove or replace the failed
798 | # device. As long as the number of failed devices does not
799 | # exceed the redundancy of the logical volume (1 device for
800 | # raid4/5, 2 for raid6, etc) the logical volume will remain
801 | # usable.
802 | #
803 | # "allocate" - Attempt to use any extra physical volumes in the volume
804 | # group as spares and replace faulty devices.
805 | #
806 | raid_fault_policy = "warn"
807 | # 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
808 | # how a device failure affecting a mirror (of "mirror" segment type) is
809 | # handled. A mirror is composed of mirror images (copies) and a log.
810 | # A disk log ensures that a mirror does not need to be re-synced
811 | # (all copies made the same) every time a machine reboots or crashes.
812 | #
813 | # In the event of a failure, the specified policy will be used to determine
814 | # what happens. This applies to automatic repairs (when the mirror is being
815 | # monitored by dmeventd) and to manual lvconvert --repair when
816 | # --use-policies is given.
817 | #
818 | # "remove" - Simply remove the faulty device and run without it. If
819 | # the log device fails, the mirror would convert to using
820 | # an in-memory log. This means the mirror will not
821 | # remember its sync status across crashes/reboots and
822 | # the entire mirror will be re-synced. If a
823 | # mirror image fails, the mirror will convert to a
824 | # non-mirrored device if there is only one remaining good
825 | # copy.
826 | #
827 | # "allocate" - Remove the faulty device and try to allocate space on
828 | # a new device to be a replacement for the failed device.
829 | # Using this policy for the log is fast and maintains the
830 | # ability to remember sync state through crashes/reboots.
831 | # Using this policy for a mirror device is slow, as it
832 | # requires the mirror to resynchronize the devices, but it
833 | # will preserve the mirror characteristic of the device.
834 | # This policy acts like "remove" if no suitable device and
835 | # space can be allocated for the replacement.
836 | #
837 | # "allocate_anywhere" - Not yet implemented. Useful to place the log device
838 | # temporarily on the same physical volume as one of the mirror
839 | # images. This policy is not recommended for mirror devices
840 | # since it would break the redundant nature of the mirror. This
841 | # policy acts like "remove" if no suitable device and space can
842 | # be allocated for the replacement.
843 | mirror_log_fault_policy = "allocate"
844 | mirror_image_fault_policy = "remove"
845 | # 'snapshot_autoextend_threshold' and 'snapshot_autoextend_percent' define
846 | # how to handle automatic snapshot extension. The former defines when the
847 | # snapshot should be extended: when its space usage exceeds this many
848 | # percent. The latter defines how much extra space should be allocated for
849 | # the snapshot, in percent of its current size.
850 | #
851 | # For example, if you set snapshot_autoextend_threshold to 70 and
852 | # snapshot_autoextend_percent to 20, whenever a snapshot exceeds 70% usage,
853 | # it will be extended by another 20%. For a 1G snapshot, using up 700M will
854 | # trigger a resize to 1.2G. When the usage exceeds 840M, the snapshot will
855 | # be extended to 1.44G, and so on.
856 | #
857 | # Setting snapshot_autoextend_threshold to 100 disables automatic
858 | # extensions. The minimum value is 50 (A setting below 50 will be treated
859 | # as 50).
860 | snapshot_autoextend_threshold = 100
861 | snapshot_autoextend_percent = 20
862 | # 'thin_pool_autoextend_threshold' and 'thin_pool_autoextend_percent' define
863 | # how to handle automatic pool extension. The former defines when the
864 | # pool should be extended: when its space usage exceeds this many
865 | # percent. The latter defines how much extra space should be allocated for
866 | # the pool, in percent of its current size.
867 | #
868 | # For example, if you set thin_pool_autoextend_threshold to 70 and
869 | # thin_pool_autoextend_percent to 20, whenever a pool exceeds 70% usage,
870 | # it will be extended by another 20%. For a 1G pool, using up 700M will
871 | # trigger a resize to 1.2G. When the usage exceeds 840M, the pool will
872 | # be extended to 1.44G, and so on.
873 | #
874 | # Setting thin_pool_autoextend_threshold to 100 disables automatic
875 | # extensions. The minimum value is 50 (A setting below 50 will be treated
876 | # as 50).
877 | thin_pool_autoextend_threshold = 100
878 | thin_pool_autoextend_percent = 20
879 | # While activating devices, I/O to devices being (re)configured is
880 | # suspended, and as a precaution against deadlocks, LVM2 needs to pin
881 | # any memory it is using so it is not paged out. Groups of pages that
882 | # are known not to be accessed during activation need not be pinned
883 | # into memory. Each string listed in this setting is compared against
884 | # each line in /proc/self/maps, and the pages corresponding to any
885 | # lines that match are not pinned. On some systems locale-archive was
886 | # found to make up over 80% of the memory used by the process.
887 | # mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]
888 | # Set to 1 to revert to the default behaviour prior to version 2.02.62
889 | # which used mlockall() to pin the whole process's memory while activating
890 | # devices.
891 | use_mlockall = 0
892 | # Monitoring is enabled by default when activating logical volumes.
893 | # Set to 0 to disable monitoring or use the --ignoremonitoring option.
894 | monitoring = 1
895 | # When pvmove or lvconvert must wait for the kernel to finish
896 | # synchronising or merging data, they check and report progress
897 | # at intervals of this number of seconds. The default is 15 seconds.
898 | # If this is set to 0 and there is only one thing to wait for, there
899 | # are no progress reports, but the process is awoken immediately the
900 | # operation is complete.
901 | polling_interval = 15
902 | # 'activation_mode' determines how Logical Volumes are activated if
903 | # any devices are missing. Possible settings are:
904 | #
905 | # "complete" - Only allow activation of an LV if all of the Physical
906 | # Volumes it uses are present. Other PVs in the Volume
907 | # Group may be missing.
908 | #
909 | # "degraded" - Like "complete", but additionally RAID Logical Volumes of
910 | # segment type raid1, raid4, raid5, raid6 and raid10 will
911 | # be activated if there is no data loss, i.e. they have
912 | # sufficient redundancy to present the entire addressable
913 | # range of the Logical Volume.
914 | #
915 | # "partial" - Allows the activation of any Logical Volume even if
916 | # a missing or failed PV could cause data loss with a
917 | # portion of the Logical Volume inaccessible.
918 | # This setting should not normally be used, but may
919 | # sometimes assist with data recovery.
920 | #
921 | # This setting was introduced in LVM version 2.02.108. It corresponds
922 | # with the '--activationmode' option for lvchange and vgchange.
923 | activation_mode = "degraded"
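# For illustration only (the VG name is a hypothetical placeholder), the
# per-command override mentioned above could look like:
#   vgchange -ay --activationmode complete vg0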
924 | }
925 | # If compact output is enabled, fields which don't have value
926 | # set for any of the rows reported are skipped on output. Compact
927 | # output is applicable only if report is buffered (report/buffered=1).
928 | # compact_output=0
929 | # Align columns on report output.
930 | # aligned=1
931 | # When buffered reporting is used, the report's content is appended
932 | # incrementally to include each object being reported until the report
933 | # is flushed to output which normally happens at the end of command
934 | # execution. Otherwise, if buffering is not used, each object is
935 | # reported as soon as its processing is finished.
936 | # buffered=1
937 | # Show headings for columns on report.
938 | # headings=1
939 | # A separator to use on report after each field.
940 | # separator=" "
941 | # A separator to use for list items when reported.
942 | # list_item_separator=","
943 | # Use a field name prefix for each field reported.
944 | # prefixes=0
945 | # Quote field values when using field name prefixes.
946 | # quoted=1
947 | # Output each column as a row. If set, this also implies report/prefixes=1.
948 | # columns_as_rows=0
949 | # Use binary values "0" or "1" instead of descriptive literal values for
950 | # columns that have exactly two valid values to report (not counting the
951 | # "unknown" value which denotes that the value could not be determined).
952 | #
953 | # binary_values_as_numeric = 0
954 | # Comma separated list of columns to sort by when reporting 'lvm devtypes' command.
955 | # See 'lvm devtypes -o help' for the list of possible fields.
956 | # devtypes_sort="devtype_name"
957 | # Comma separated list of columns to report for 'lvm devtypes' command.
958 | # See 'lvm devtypes -o help' for the list of possible fields.
959 | # devtypes_cols="devtype_name,devtype_max_partitions,devtype_description"
960 | # Comma separated list of columns to report for 'lvm devtypes' command in verbose mode.
961 | # See 'lvm devtypes -o help' for the list of possible fields.
962 | # devtypes_cols_verbose="devtype_name,devtype_max_partitions,devtype_description"
963 | # Comma separated list of columns to sort by when reporting 'lvs' command.
964 | # See 'lvs -o help' for the list of possible fields.
965 | # lvs_sort="vg_name,lv_name"
966 | # Comma separated list of columns to report for 'lvs' command.
967 | # See 'lvs -o help' for the list of possible fields.
968 | # lvs_cols="lv_name,vg_name,lv_attr,lv_size,pool_lv,origin,data_percent,metadata_percent,move_pv,mirror_log,copy_percent,convert_lv"
969 | # Comma separated list of columns to report for 'lvs' command in verbose mode.
970 | # See 'lvs -o help' for the list of possible fields.
971 | # lvs_cols_verbose="lv_name,vg_name,seg_count,lv_attr,lv_size,lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,pool_lv,origin,data_percent,metadata_percent,move_pv,copy_percent,mirror_log,convert_lv"
972 | # Comma separated list of columns to sort by when reporting 'vgs' command.
973 | # See 'vgs -o help' for the list of possible fields.
974 | # vgs_sort="vg_name"
975 | # Comma separated list of columns to report for 'vgs' command.
976 | # See 'vgs -o help' for the list of possible fields.
977 | # vgs_cols="vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free"
978 | # Comma separated list of columns to report for 'vgs' command in verbose mode.
979 | # See 'vgs -o help' for the list of possible fields.
980 | # vgs_cols_verbose="vg_name,vg_attr,vg_extent_size,pv_count,lv_count,snap_count,vg_size,vg_free,vg_uuid,vg_profile"
981 | # Comma separated list of columns to sort by when reporting 'pvs' command.
982 | # See 'pvs -o help' for the list of possible fields.
983 | # pvs_sort="pv_name"
984 | # Comma separated list of columns to report for 'pvs' command.
985 | # See 'pvs -o help' for the list of possible fields.
986 | # pvs_cols="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free"
987 | # Comma separated list of columns to report for 'pvs' command in verbose mode.
988 | # See 'pvs -o help' for the list of possible fields.
989 | # pvs_cols_verbose="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,dev_size,pv_uuid"
990 | # Comma separated list of columns to sort by when reporting 'lvs --segments' command.
991 | # See 'lvs --segments -o help' for the list of possible fields.
992 | # segs_sort="vg_name,lv_name,seg_start"
993 | # Comma separated list of columns to report for 'lvs --segments' command.
994 | # See 'lvs --segments -o help' for the list of possible fields.
995 | # segs_cols="lv_name,vg_name,lv_attr,stripes,segtype,seg_size"
996 | # Comma separated list of columns to report for 'lvs --segments' command in verbose mode.
997 | # See 'lvs --segments -o help' for the list of possible fields.
998 | # segs_cols_verbose="lv_name,vg_name,lv_attr,seg_start,seg_size,stripes,segtype,stripesize,chunksize"
999 | # Comma separated list of columns to sort by when reporting 'pvs --segments' command.
1000 | # See 'pvs --segments -o help' for the list of possible fields.
1001 | # pvsegs_sort="pv_name,pvseg_start"
1002 | # Comma separated list of columns to report for 'pvs --segments' command.
1003 | # See 'pvs --segments -o help' for the list of possible fields.
1004 | # pvsegs_cols="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size"
1005 | # Comma separated list of columns to report for 'pvs --segments' command in verbose mode.
1006 | # See 'pvs --segments -o help' for the list of possible fields.
1007 | # pvsegs_cols_verbose="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size,lv_name,seg_start_pe,segtype,seg_pe_ranges"
1008 | # Default number of copies of metadata to hold on each PV. 0, 1 or 2.
1009 | # You might want to override it from the command line with 0
1010 | # when running pvcreate on new PVs which are to be added to large VGs.
1011 | # pvmetadatacopies = 1
1012 | # Default number of copies of metadata to maintain for each VG.
1013 | # If set to a non-zero value, LVM automatically chooses which of
1014 | # the available metadata areas to use to achieve the requested
1015 | # number of copies of the VG metadata. If you set a value larger
1016 | # than the total number of metadata areas available then
1017 | # metadata is stored in them all.
1018 | # The default value of 0 ("unmanaged") disables this automatic
1019 | # management and allows you to control which metadata areas
1020 | # are used at the individual PV level using 'pvchange
1021 | # --metadataignore y/n'.
1022 | # vgmetadatacopies = 0
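# For illustration only (the device path is a hypothetical placeholder), the
# per-PV override mentioned above could look like:
#   pvchange --metadataignore y /dev/sdb1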
1023 | # Approximate default size of on-disk metadata areas in sectors.
1024 | # You should increase this if you have large volume groups or
1025 | # you want to retain a large on-disk history of your metadata changes.
1026 | # pvmetadatasize = 255
1027 | # List of directories holding live copies of text format metadata.
1028 | # These directories must not be on logical volumes!
1029 | # It's possible to use LVM2 with a couple of directories here,
1030 | # preferably on different (non-LV) filesystems, and with no other
1031 | # on-disk metadata (pvmetadatacopies = 0). Or this can be in
1032 | # addition to on-disk metadata areas.
1033 | # The feature was originally added to simplify testing and is not
1034 | # supported under low memory situations - the machine could lock up.
1035 | #
1036 | # Never edit any files in these directories by hand unless you
1037 | # are absolutely sure you know what you are doing! Use
1038 | # the supplied toolset to make changes (e.g. vgcfgrestore).
1039 | # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
1040 | dmeventd {
1041 | # mirror_library is the library used when monitoring a mirror device.
1042 | #
1043 | # "libdevmapper-event-lvm2mirror.so" attempts to recover from
1044 | # failures. It removes failed devices from a volume group and
1045 | # reconfigures a mirror as necessary. If no mirror library is
1046 | # provided, mirrors are not monitored through dmeventd.
1047 | mirror_library = "libdevmapper-event-lvm2mirror.so"
1048 | # snapshot_library is the library used when monitoring a snapshot device.
1049 | #
1050 | # "libdevmapper-event-lvm2snapshot.so" monitors the filling of
1051 | # snapshots and emits a warning through syslog when the use of
1052 | # the snapshot exceeds 80%. The warning is repeated when 85%, 90% and
1053 | # 95% of the snapshot is filled.
1054 | snapshot_library = "libdevmapper-event-lvm2snapshot.so"
1055 | # thin_library is the library used when monitoring a thin device.
1056 | #
1057 | # "libdevmapper-event-lvm2thin.so" monitors the filling of
1058 | # pool and emits a warning through syslog when the use of
1059 | # the pool exceeds 80%. The warning is repeated when 85%, 90% and
1060 | # 95% of the pool is filled.
1061 | thin_library = "libdevmapper-event-lvm2thin.so"
1062 | # Full path of the dmeventd binary.
1063 | #
1064 | # executable = "/usr/sbin/dmeventd"
1065 | }
1066 |
--------------------------------------------------------------------------------