├── ansible.cfg
├── vars
│   └── main.yml
├── test.yml
├── tasks
│   ├── security.yml
│   ├── ceph.yml
│   ├── horizon.yml
│   ├── service_create.yml
│   ├── create_service_user.yml
│   ├── firewall.yml
│   ├── main.yml
│   ├── endpoint_create.yml
│   ├── database.yml
│   ├── glance.yml
│   ├── nova.yml
│   ├── cinder.yml
│   ├── neutron.yml
│   ├── ha.yml
│   └── keystone.yml
├── templates
│   ├── nofile-limits.conf.j2
│   ├── wsgi-keystone.conf.j2
│   ├── corosync.conf.j2
│   ├── linuxbridge_agent.ini.j2
│   ├── metadata_agent.ini.j2
│   ├── dhcp_agent.ini.j2
│   ├── ml2_conf.ini.j2
│   ├── haproxy.cfg.j2
│   ├── horizon_local_settings.j2
│   ├── neutron.conf.j2
│   └── glance-registry.conf.j2
├── meta
│   └── main.yml
├── README.md
├── handlers
│   └── main.yml
└── defaults
    └── main.yml
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | roles_path = ../
3 |
--------------------------------------------------------------------------------
/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for ansible-role-openstack-controller
3 |
--------------------------------------------------------------------------------
/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # test file
3 |
4 | - name: Install OpenStack Controller.
5 | hosts: controller
6 | roles:
7 | - role: ansible-role-openstack-controller
8 |
9 |
--------------------------------------------------------------------------------
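
The HA templates further down (haproxy.cfg.j2, corosync.conf.j2) and tasks/ceph.yml assume a three-node `controller` group whose hosts are named controller01-controller03 and define a `mgmt_ip` host variable, plus a `ceph-mon` group when Ceph is enabled. A minimal inventory sketch for running test.yml, with purely illustrative names and addresses (the YAML inventory format needs Ansible >= 2.4; an INI inventory expresses the same thing):

all:
  children:
    controller:
      hosts:
        controller01: { mgmt_ip: 192.0.2.11 }
        controller02: { mgmt_ip: 192.0.2.12 }
        controller03: { mgmt_ip: 192.0.2.13 }
    ceph-mon:
      hosts:
        cephmon01: { mgmt_ip: 192.0.2.21 }
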
/tasks/security.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for security
3 |
4 | - name: Disable the temporary authentication token mechanism.
5 | shell: |
6 | sed -i 's/token_auth admin_token_auth/token_auth/g' /usr/share/keystone/keystone-dist-paste.ini
--------------------------------------------------------------------------------
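
The sed call above is a one-shot shell edit; a roughly equivalent, idempotent sketch using Ansible's replace module (an alternative, not what the role ships) against the same file and pattern:

- name: Disable the temporary authentication token mechanism.
  replace:
    dest: /usr/share/keystone/keystone-dist-paste.ini
    regexp: 'token_auth admin_token_auth'
    replace: 'token_auth'
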
/templates/nofile-limits.conf.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | # Raising open file limit for OpenStack services
3 | * soft nofile {{ openstack_soft_nofile_limits }}
4 | * hard nofile {{ openstack_hard_nofile_limits }}
5 |
--------------------------------------------------------------------------------
/tasks/ceph.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Copy openstack ceph key.
3 | copy: src=fetch/{{ ceph_fsid }}/{{ groups['ceph-mon'][0] }}/etc/ceph/ceph.client.{{ item.name }}.keyring dest=/etc/ceph/ceph.client.{{ item.name }}.keyring owner={{ item.owner }} group={{ item.owner }} mode=0600
4 | with_items: "{{ openstack_keys }}"
--------------------------------------------------------------------------------
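
The copy task above loops over `openstack_keys`, so each entry needs at least `name` and `owner` fields, and a matching keyring must already have been fetched from the first `ceph-mon` host into the local fetch/ directory. A sketch of the expected variable (the client names are illustrative, not defined by this role):

openstack_keys:
  - { name: glance, owner: glance }
  - { name: cinder, owner: cinder }
  - { name: nova, owner: nova }
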
/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: z@zstack.net
4 | description: Configure an OpenStack controller node
5 | company:
6 | license: "MIT / BSD"
7 | min_ansible_version: 1.8
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 7
12 | categories:
13 | - cloud
14 | - system
15 | dependencies: []
16 |
--------------------------------------------------------------------------------
/tasks/horizon.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for horizon
3 |
4 | - name: Install OpenStack horizon packages.
5 | yum: name={{ item }} state=installed
6 | with_items:
7 | - openstack-dashboard
8 |
9 | - name: Copy the configuration files for horizon.
10 | template: src=horizon_local_settings.j2 dest=/etc/openstack-dashboard/local_settings
11 | notify: restart horizon
12 |
--------------------------------------------------------------------------------
/tasks/service_create.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Usage (see tasks/glance.yml for a concrete call):
3 | # - name: Create the service entity for glance.
4 | #   include: service_create.yml
5 | #   vars:
6 | #     name: glance
7 | #     type: image
8 | #     description: OpenStack Image service
8 |
9 | - name: 'Create the keystone service "{{ name }}"'
10 | shell: |
11 | openstack service create --name {{ name }} --description "{{ description }}" {{ type }}
12 | environment: "{{ token_auth_env }}"
13 | run_once: True
14 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Role Name: openstack-controller
2 |
3 | An Ansible role that installs and configures the OpenStack controller services (keystone, glance, nova, neutron, cinder, horizon), with optional pacemaker/corosync and HAProxy HA.
4 |
5 | ## Requirements
6 |
7 | None.
8 |
9 | ## Role Variables
10 |
11 | All tunable variables are defined in `defaults/main.yml`; override them from your inventory or playbook as needed.
14 |
15 | ## Dependencies
16 |
17 | None.
18 |
19 | ## Example Playbook
20 |
21 | ## License
22 |
23 | MIT / BSD
24 |
25 | ## Author Information
26 |
27 | z@zstack.net
28 |
--------------------------------------------------------------------------------
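
A minimal example playbook for the role, in the same vein as test.yml (the `controller` group name comes from that file; privilege escalation is an assumption, adjust to your environment):

---
- name: Install OpenStack Controller.
  hosts: controller
  become: yes   # assumes Ansible >= 1.9 style privilege escalation
  roles:
    - role: ansible-role-openstack-controller
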
/tasks/create_service_user.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # - include: create-service-user.yml
3 | # vars:
4 | # username: glance
5 | # password: "{{ openstack_glance_keystone_password }}"
6 |
7 | - name: 'Ensure the "{{ username }}" user exists'
8 | keystone_user: >
9 | user={{ username }}
10 | password={{ password }}
11 | tenant=service
12 | token={{ openstack_admin_token }}
13 | endpoint={{ openstack_endpoint_admin_url }}
14 | run_once: True
15 |
16 | - name: 'Ensure the "{{ username }}" user has the admin role'
17 | keystone_user: >
18 | role=admin
19 | user={{ username }}
20 | tenant=service
21 | token={{ openstack_admin_token }}
22 | endpoint={{ openstack_endpoint_admin_url }}
23 | run_once: True
24 |
25 |
--------------------------------------------------------------------------------
/tasks/firewall.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for firewall
3 |
4 | - name: Configure firewall.
5 | shell: |
6 | iptables -I INPUT 5 -p tcp -m multiport --ports 5000 -m comment --comment "openstack keystone" -j ACCEPT
7 | iptables -I INPUT 5 -p tcp -m multiport --ports 9292,9191 -m comment --comment "openstack glance" -j ACCEPT
8 | iptables -I INPUT 5 -p tcp -m multiport --ports 8773,8774,8775,6080 -m comment --comment "openstack nova " -j ACCEPT
9 | iptables -I INPUT 5 -p tcp -m multiport --ports 8776 -m comment --comment "openstack cinder " -j ACCEPT
10 | iptables -I INPUT 5 -p tcp -m multiport --ports 9696 -m comment --comment "openstack neutron " -j ACCEPT
11 | iptables-save > /etc/sysconfig/iptables
12 | tags: firewall
--------------------------------------------------------------------------------
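
For reference, each rule above could also be expressed with Ansible's iptables module (available from Ansible 2.0, so this is an alternative rather than what the role uses); a sketch for the keystone port:

- name: Allow OpenStack keystone API traffic.
  iptables:
    chain: INPUT
    protocol: tcp
    destination_port: "5000"
    comment: openstack keystone
    jump: ACCEPT
  tags: firewall
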
/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for ansible-role-openstack-controller
3 |
4 | - name: Configure open file limit for OpenStack services.
5 | template: src=nofile-limits.conf.j2 dest=/etc/security/limits.d/30-nofile.conf
6 | tags: limits
7 |
8 | # ha
9 | - include: ha.yml
10 | when: openstack_ha
11 |
12 | # Prepare Database
13 | #- include: database.yml
14 |
15 | # keystone
16 | - include: keystone.yml
17 |
18 | # glance
19 | - include: glance.yml
20 |
21 | # nova
22 | - include: nova.yml
23 |
24 | # neutron
25 | - include: neutron.yml
26 |
27 | # horizon
28 | - include: horizon.yml
29 |
30 | # cinder
31 | - include: cinder.yml
32 |
33 | # ceph
34 | - include: ceph.yml
35 | when: openstack_ceph_enable
36 |
37 | # firewall
38 | - include: firewall.yml
39 |
--------------------------------------------------------------------------------
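
Because the ha.yml and ceph.yml includes are gated on `openstack_ha` and `openstack_ceph_enable`, a single-node deployment can switch both off from group variables; a sketch (the file path is illustrative):

# group_vars/controller.yml
openstack_ha: False
openstack_ceph_enable: False
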
/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for ansible-role-openstack-controller
3 | - name: restart pacemaker
4 | service: name=pacemaker state=restarted
5 |
6 | - name: restart httpd
7 | service: name=httpd state=restarted
8 |
9 | - name: restart haproxy
10 | service: name=haproxy state=restarted
11 | tags: haproxy
12 |
13 | - name: restart glance-api
14 | service: name=openstack-glance-api state=restarted
15 |
16 | - name: restart glance-registry
17 | service: name=openstack-glance-registry state=restarted
18 |
19 | - name: restart nova
20 | service: name={{ item }} state=restarted
21 | with_items:
22 | - openstack-nova-api
23 |
24 | - name: restart horizon
25 | service: name={{ item }} state=restarted
26 | with_items:
27 | - httpd
28 |
29 | - name: restart cinder
30 | service: name={{ item }} state=restarted
31 | with_items:
32 | - openstack-cinder-api
33 | - openstack-cinder-scheduler
34 |
35 | - name: restart cinder volume
36 | service: name={{ item }} state=restarted
37 | with_items:
38 | - openstack-cinder-volume
39 |
--------------------------------------------------------------------------------
/tasks/endpoint_create.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # - name: Create API endpoint for network service
3 | # include: endpoint_create.yml
4 | # vars:
5 | # service: identity
6 | # region: RegionOne
7 | # endpoints:
8 | # adminurl: {{ openstack_endpoint_admin_url }}
9 | # internalurl: {{ openstack_endpoint_internal_url }}
10 | # publicurl: {{ openstack_endpoint_public_url }}
11 |
12 |
13 | - name: "Check if API endpoint exists for service {{ service }}"
14 | shell: "openstack endpoint list --service {{ service }}"
15 | register: check_endpoint_result
16 | environment: token_auth_env
17 | ignore_errors: True
18 | run_once: True
19 |
20 | - name: "Specify API endpoints for {{ service }} service"
21 | when: check_endpoint_result.rc == 0
22 | shell: |
23 | openstack endpoint create --region {{ region }} {{ service }} public {{ endpoints.publicurl }}
24 | openstack endpoint create --region {{ region }} {{ service }} internal {{ endpoints.internalurl }}
25 | openstack endpoint create --region {{ region }} {{ service }} admin {{ endpoints.adminurl }}
26 | environment: token_auth_env
27 | run_once: True
28 |
--------------------------------------------------------------------------------
/templates/wsgi-keystone.conf.j2:
--------------------------------------------------------------------------------
1 | Listen {{ mgmt_ip }}:5000
2 | Listen {{ mgmt_ip }}:35357
3 |
4 | <VirtualHost {{ mgmt_ip }}:5000>
5 |     WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
6 |     WSGIProcessGroup keystone-public
7 |     WSGIScriptAlias / /usr/bin/keystone-wsgi-public
8 |     WSGIApplicationGroup %{GLOBAL}
9 |     WSGIPassAuthorization On
10 |     <IfVersion >= 2.4>
11 |       ErrorLogFormat "%{cu}t %M"
12 |     </IfVersion>
13 |     ErrorLog /var/log/httpd/keystone-error.log
14 |     CustomLog /var/log/httpd/keystone-access.log combined
15 |
16 |     <Directory /usr/bin>
17 |         <IfVersion >= 2.4>
18 |             Require all granted
19 |         </IfVersion>
20 |         <IfVersion < 2.4>
21 |             Order allow,deny
22 |             Allow from all
23 |         </IfVersion>
24 |     </Directory>
25 | </VirtualHost>
26 |
27 | <VirtualHost {{ mgmt_ip }}:35357>
28 |     WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
29 |     WSGIProcessGroup keystone-admin
30 |     WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
31 |     WSGIApplicationGroup %{GLOBAL}
32 |     WSGIPassAuthorization On
33 |     <IfVersion >= 2.4>
34 |       ErrorLogFormat "%{cu}t %M"
35 |     </IfVersion>
36 |     ErrorLog /var/log/httpd/keystone-error.log
37 |     CustomLog /var/log/httpd/keystone-access.log combined
38 |
39 |     <Directory /usr/bin>
40 |         <IfVersion >= 2.4>
41 |             Require all granted
42 |         </IfVersion>
43 |         <IfVersion < 2.4>
44 |             Order allow,deny
45 |             Allow from all
46 |         </IfVersion>
47 |     </Directory>
48 | </VirtualHost>
--------------------------------------------------------------------------------
/tasks/database.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for database
3 |
4 | - name: 'Create Database for openstack "{{ db_name }}"'
5 | mysql_db:
6 | name: "{{ db_name }}"
7 | collation: "{{ db_collation | default('utf8_general_ci') }}"
8 | encoding: "{{ db_encoding | default('utf8') }}"
9 | login_host: "{{ openstack_database_host }}"
10 | login_user: "{{ openstack_database_root | default('root') }}"
11 | login_password: "{{ openstack_database_root_password }}"
12 | state: present
13 | run_once: True
14 |
15 | - name: 'Setup Database User for openstack "{{ db_name }}"'
16 | mysql_user:
17 | name: "{{ db_user }}"
18 | password: "{{ db_password }}"
19 | host: "{{ item }}"
20 | priv: "{{ db_priv | default('*.*:ALL') }}"
21 | login_host: "{{ openstack_database_host }}"
22 | login_user: "{{ openstack_database_root | default('root') }}"
23 | login_password: "{{ openstack_database_root_password }}"
24 | state: present
25 | with_items:
26 | - localhost
27 | - "%"
28 | run_once: True
29 |
30 | # - name: Setup DB for openstack.
31 | # mysql_db:
32 | # login_host: "{{ openstack_database_host }}"
33 | # login_user: "{{ openstack_database_root}}"
34 | # login_password: "{{ openstack_database_root_password }}"
35 | # name: "{{ item.name }}"
36 | # collation: "{{ item.collation | default('utf8_general_ci') }}"
37 | # encoding: "{{ item.encoding | default('utf8') }}"
38 | # state: present
39 | # with_items:
40 | # - "{{ openstack_databases }}"
41 | #
42 | # - name: Setup DB User for openstack.
43 | # mysql_user:
44 | # login_host: "{{ openstack_database_host }}"
45 | # login_user: "{{ openstack_database_root}}"
46 | # login_password: "{{ openstack_database_root_password }}"
47 | # name: "{{ item.name }}"
48 | # host: "{{ item.host | default('localhost') }}"
49 | # password: "{{ item.password }}"
50 | # priv: "{{ item.priv | default('*.*:ALL') }}"
51 | # state: present
52 | # with_items:
53 | # - "{{ openstack_databases_users }}"
54 |
--------------------------------------------------------------------------------
/tasks/glance.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for glance
3 |
4 | # Prerequisites
5 | - name: Prepare Database for glance.
6 | include: database.yml
7 | vars:
8 | db_name: "{{ openstack_glance_db_name }}"
9 | db_user: "{{ openstack_glance_db_user }}"
10 | db_password: "{{ openstack_glance_db_password }}"
11 | db_priv: "{{ openstack_glance_db_name }}.*:ALL"
12 | tags: database
13 |
14 | - name: Create the service user for glance.
15 | include: create_service_user.yml
16 | vars:
17 | username: glance
18 | password: "{{ openstack_glance_keystone_password }}"
19 |
20 | - name: Create the service entity for glance.
21 | include: service_create.yml
22 | vars:
23 | name: glance
24 | type: image
25 | description: OpenStack Image service
26 |
27 | - name: Create the API endpoint for glance.
28 | include: endpoint_create.yml
29 | vars:
30 | service: image
31 | region: RegionOne
32 | endpoints:
33 | adminurl: "{{ openstack_glance_adminurl }}"
34 | internalurl: "{{ openstack_glance_internalurl }}"
35 | publicurl: "{{ openstack_glance_publicurl }}"
36 |
37 | # Install and configure
38 | - name: Install OpenStack glance packages.
39 | yum: name={{ item }} state=installed
40 | with_items:
41 | - openstack-glance
42 | - python-glance
43 | - python-glanceclient
44 |
45 | - name: Copy the configuration files for glance-api.
46 | template: src=glance-api.conf.j2 dest=/etc/glance/glance-api.conf
47 | notify: restart glance-api
48 |
49 | - name: Copy the configuration files for glance-registry.
50 | template: src=glance-registry.conf.j2 dest=/etc/glance/glance-registry.conf
51 | notify: restart glance-registry
52 |
53 | - name: DB sync for glance.
54 | shell: su -s /bin/sh -c "glance-manage db_sync" glance
55 | ignore_errors: True
56 | run_once: True
57 |
58 | - name: Ensure glance services are started and enabled on boot.
59 | service: name={{ item }} enabled=yes state=started
60 | with_items:
61 | - openstack-glance-api
62 | - openstack-glance-registry
63 |
--------------------------------------------------------------------------------
/tasks/nova.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for nova
3 |
4 | # Prerequisites
5 | - name: Prepare Database for nova.
6 | include: database.yml
7 | vars:
8 | db_name: "{{ openstack_nova_db_name }}"
9 | db_user: "{{ openstack_nova_db_user }}"
10 | db_password: "{{ openstack_nova_db_password }}"
11 | tags: database
12 |
13 | - name: Create the service user for nova.
14 | include: create_service_user.yml
15 | vars:
16 | username: nova
17 | password: "{{ openstack_nova_keystone_password }}"
18 |
19 | - name: Create the service entity for nova.
20 | include: service_create.yml
21 | vars:
22 | name: nova
23 | type: compute
24 | description: OpenStack Compute
25 |
26 | - name: Create the API endpoint for nova.
27 | include: endpoint_create.yml
28 | vars:
29 | service: compute
30 | region: RegionOne
31 | endpoints:
32 | adminurl: "{{ openstack_nova_adminurl }}"
33 | internalurl: "{{ openstack_nova_internalurl }}"
34 | publicurl: "{{ openstack_nova_publicurl }}"
35 |
36 | # Install and configure
37 | - name: Install OpenStack nova packages.
38 | yum: name={{ item }} state=installed
39 | with_items:
40 | - openstack-nova-api
41 | - openstack-nova-cert
42 | - openstack-nova-conductor
43 | - openstack-nova-console
44 | - openstack-nova-novncproxy
45 | - openstack-nova-scheduler
46 | - python-novaclient
47 |
48 | - name: Copy the configuration files for nova.
49 | template: src=nova.conf.j2 dest=/etc/nova/nova.conf
50 | notify: restart nova
51 |
52 | - name: DB sync for nova.
53 | shell: su -s /bin/sh -c "nova-manage db sync" nova
54 | ignore_errors: True
55 | run_once: True
56 |
57 | - name: Ensure nova services are started and enabled on boot.
58 | service: name={{ item }} enabled=yes state=started
59 | ignore_errors: True
60 | with_items:
61 | - openstack-nova-api
62 | - openstack-nova-cert
63 | - openstack-nova-consoleauth
64 | - openstack-nova-scheduler
65 | - openstack-nova-conductor
66 | - openstack-nova-novncproxy
67 |
--------------------------------------------------------------------------------
/templates/corosync.conf.j2:
--------------------------------------------------------------------------------
1 | quorum {
2 | provider: corosync_votequorum
3 | two_node: 0
4 | }
5 |
6 | nodelist {
7 | node {
8 | ring0_addr: {{ hostvars[groups['controller'][0]]['ansible_hostname'] }}
9 | nodeid: 1
10 | }
11 |
12 | node {
13 | ring0_addr: {{ hostvars[groups['controller'][1]]['ansible_hostname'] }}
14 | nodeid: 2
15 | }
16 |
17 | node {
18 | ring0_addr: {{ hostvars[groups['controller'][2]]['ansible_hostname'] }}
19 | nodeid: 3
20 | }
21 | }
22 |
23 | totem {
24 | version: 2
25 | token: 3000
26 | token_retransmits_before_loss_const: 10
27 | join: 60
28 | consensus: 3600
29 | vsftype: none
30 | max_messages: 20
31 | clear_node_high_bit: yes
32 | rrp_mode: none
33 | secauth: off
34 | threads: 4
35 | transport: udpu
36 | interface {
37 | ringnumber: 0
38 | bindnetaddr: {{ openstack_corosync_bindnetaddr }}
39 | broadcast: yes
40 | mcastport: {{ openstack_corosync_mcastport }}
41 | }
42 | }
43 |
44 | logging {
45 | fileline: off
46 | to_stderr: no
47 | to_logfile: no
48 | logfile: /var/log/cluster/corosync.log
49 | to_syslog: yes
50 | syslog_facility: daemon
51 | syslog_priority: info
52 | debug: off
53 | function_name: on
54 | timestamp: on
55 | logger_subsys {
56 | subsys: AMF
57 | debug: off
58 | tags: enter|leave|trace1|trace2|trace3|trace4|trace6
59 | }
60 | }
61 |
62 | amf {
63 | mode: disabled
64 | }
65 |
66 | aisexec {
67 | user: root
68 | group: root
69 | }
70 |
71 | service {
72 | ver: 1
73 | name: pacemaker
74 | }
75 |
--------------------------------------------------------------------------------
/tasks/cinder.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for cinder
3 |
4 | # Prerequisites
5 | - name: Prepare Database for cinder.
6 | include: database.yml
7 | vars:
8 | db_name: "{{ openstack_cinder_db_name }}"
9 | db_user: "{{ openstack_cinder_db_user }}"
10 | db_password: "{{ openstack_cinder_db_password }}"
11 | tags: database
12 |
13 | - name: Create the service user for cinder.
14 | include: create_service_user.yml
15 | vars:
16 | username: cinder
17 | password: "{{ openstack_cinder_user_password }}"
18 |
19 | - name: Create the service entity for cinder.
20 | include: service_create.yml
21 | vars:
22 | name: cinder
23 | type: volume
24 | description: OpenStack Block Storage
25 |
26 | - name: Create the service entity for cinderv2.
27 | include: service_create.yml
28 | vars:
29 | name: cinderv2
30 | type: volumev2
31 | description: OpenStack Block Storage
32 |
33 | - name: Create the API endpoint for volume.
34 | include: endpoint_create.yml
35 | vars:
36 | service: volume
37 | region: RegionOne
38 | endpoints:
39 | adminurl: "{{ openstack_cinder_adminurl_v1 }}"
40 | internalurl: "{{ openstack_cinder_internalurl_v1 }}"
41 | publicurl: "{{ openstack_cinder_publicurl_v1 }}"
42 |
43 | - name: Create the API endpoint for volumev2.
44 | include: endpoint_create.yml
45 | vars:
46 | service: volumev2
47 | region: RegionOne
48 | endpoints:
49 | adminurl: "{{ openstack_cinder_adminurl_v2 }}"
50 | internalurl: "{{ openstack_cinder_internalurl_v2 }}"
51 | publicurl: "{{ openstack_cinder_publicurl_v2 }}"
52 |
53 | # Install and configure
54 | - name: Install OpenStack cinder packages.
55 | yum: name={{ item }} state=installed
56 | with_items:
57 | - openstack-cinder
58 | - python-cinderclient
59 |
60 | - name: Copy the configuration files for cinder.
61 | template: src=cinder.conf.j2 dest=/etc/cinder/cinder.conf
62 | notify: restart cinder
63 |
64 | - name: DB sync for cinder.
65 | shell: su -s /bin/sh -c "cinder-manage db sync" cinder
66 | ignore_errors: True
67 | run_once: True
68 |
69 | - name: Ensure cinder services are started and enabled on boot.
70 | service: name={{ item }} enabled=yes state=started
71 | with_items:
72 | - openstack-cinder-api
73 | - openstack-cinder-scheduler
74 | - openstack-cinder-volume
75 |
--------------------------------------------------------------------------------
/tasks/neutron.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for neutron
3 |
4 | # Prerequisites
5 | - name: Prepare Database for neutron.
6 | include: database.yml
7 | vars:
8 | db_name: "{{ openstack_neutron_db_name }}"
9 | db_user: "{{ openstack_neutron_db_user }}"
10 | db_password: "{{ openstack_neutron_db_password }}"
11 | tags: database
12 |
13 | - name: Create the service user for neutron.
14 | include: create_service_user.yml
15 | vars:
16 | username: neutron
17 | password: "{{ openstack_neutron_keystone_password }}"
18 |
19 | - name: Create the service entity for neutron.
20 | include: service_create.yml
21 | vars:
22 | name: neutron
23 | type: network
24 | description: OpenStack Networking
25 |
26 | - name: Create the API endpoint for neutron.
27 | include: endpoint_create.yml
28 | vars:
29 | service: network
30 | region: RegionOne
31 | endpoints:
32 | adminurl: "{{ openstack_neutron_adminurl }}"
33 | internalurl: "{{ openstack_neutron_internalurl }}"
34 | publicurl: "{{ openstack_neutron_publicurl }}"
35 |
36 | # Install and configure
37 | - name: Install OpenStack neutron packages.
38 | yum: name={{ item }} state=installed
39 | with_items:
40 | - openstack-neutron
41 | - openstack-neutron-ml2
42 | - openstack-neutron-linuxbridge
43 | - python-neutronclient
44 | - ebtables
45 | - ipset
46 |
47 | - name: Copy the neutron configuration files for neutron.
48 | template: src=neutron.conf.j2 dest=/etc/neutron/neutron.conf
49 |
50 | - name: Copy the neutron ML2 configuration files for neutron.
51 | template: src=ml2_conf.ini.j2 dest=/etc/neutron/plugins/ml2/ml2_conf.ini
52 |
53 | - name: Copy the neutron linux bridge configuration files for neutron.
54 | template: src=linuxbridge_agent.ini.j2 dest=/etc/neutron/plugins/ml2/linuxbridge_agent.ini
55 |
56 | - name: Copy the neutron dhcp agent configuration files for neutron.
57 | template: src=dhcp_agent.ini.j2 dest=/etc/neutron/dhcp_agent.ini
58 |
59 | - name: Copy the metadata agent configuration files for neutron.
60 | template: src=metadata_agent.ini.j2 dest=/etc/neutron/metadata_agent.ini
61 |
62 | - name: Ensure the ML2 config symlink exists.
63 | file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
64 |
65 | - name: DB sync for neutron.
66 | shell: |
67 | su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
68 | --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
69 | ignore_errors: True
70 | run_once: True
71 | notify: restart nova
72 |
73 | - name: Ensure neutron services are started and enabled on boot.
74 | service: name={{ item }} enabled=yes state=started
75 | ignore_errors: True
76 | with_items:
77 | - neutron-server
78 | - neutron-linuxbridge-agent
79 | - neutron-dhcp-agent
80 | - neutron-metadata-agent
81 |
--------------------------------------------------------------------------------
/templates/linuxbridge_agent.ini.j2:
--------------------------------------------------------------------------------
1 | [linux_bridge]
2 | physical_interface_mappings = public:{{ openstack_neutron_public_interface }}
3 |
4 | # (ListOpt) Comma-separated list of
5 | # <physical_network>:<physical_interface> tuples mapping physical
6 | # network names to the agent's node-specific physical network
7 | # interfaces to be used for flat and VLAN networks. All physical
8 | # networks listed in network_vlan_ranges on the server should have
9 | # mappings to appropriate interfaces on each agent.
10 | #
11 | # physical_interface_mappings =
12 | # Example: physical_interface_mappings = physnet1:eth1
13 |
14 | [vxlan]
15 | enable_vxlan = False
16 |
17 | # (BoolOpt) enable VXLAN on the agent
18 | # VXLAN support can be enabled when agent is managed by ml2 plugin using
19 | # linuxbridge mechanism driver.
20 | # enable_vxlan = True
21 | #
22 | # (IntOpt) use specific TTL for vxlan interface protocol packets
23 | # ttl =
24 | #
25 | # (IntOpt) use specific TOS for vxlan interface protocol packets
26 | # tos =
27 | #
28 | # (StrOpt) multicast group or group range to use for broadcast emulation.
29 | # Specifying a range allows different VNIs to use different group addresses,
30 | # reducing or eliminating spurious broadcast traffic to the tunnel endpoints.
31 | # Ranges are specified by using CIDR notation. To reserve a unique group for
32 | # each possible (24-bit) VNI, use a /8 such as 239.0.0.0/8.
33 | # This setting must be the same on all the agents.
34 | # vxlan_group = 224.0.0.1
35 | #
36 | # (StrOpt) Local IP address to use for VXLAN endpoints (required)
37 | # local_ip =
38 | #
39 | # (BoolOpt) Flag to enable l2population extension. This option should be used
40 | # in conjunction with ml2 plugin l2population mechanism driver (in that case,
41 | # both linuxbridge and l2population mechanism drivers should be loaded).
42 | # It enables plugin to populate VXLAN forwarding table, in order to limit
43 | # the use of broadcast emulation (multicast will be turned off if kernel and
44 | # iproute2 supports unicast flooding - requires 3.11 kernel and iproute2 3.10)
45 | # l2_population = False
46 |
47 | [agent]
48 | prevent_arp_spoofing = True
49 |
50 | # Agent's polling interval in seconds
51 | # polling_interval = 2
52 |
53 | # (IntOpt) Set new timeout in seconds for new rpc calls after agent receives
54 | # SIGTERM. If value is set to 0, rpc timeout won't be changed.
55 | #
56 | # quitting_rpc_timeout = 10
57 |
58 | [securitygroup]
59 | enable_security_group = True
60 | firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
61 |
62 | # Firewall driver for realizing neutron security group function
63 | # firewall_driver = neutron.agent.firewall.NoopFirewallDriver
64 | # Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
65 |
66 | # Controls if neutron security group is enabled or not.
67 | # It should be false when you use nova security group.
68 | # enable_security_group = True
69 |
--------------------------------------------------------------------------------
/tasks/ha.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for openstack ha
3 |
4 | - name: Install pacemaker & corosync packages.
5 | yum: name={{ item }} state=installed
6 | with_items:
7 | - pacemaker
8 | - corosync
9 | - pcs
10 | tags: ha
11 |
12 | - name: Copy the configuration files for corosync.
13 | template: src=corosync.conf.j2 dest=/etc/corosync/corosync.conf
14 | tags: ha
15 |
16 | - name: Ensure pcsd service is started.
17 | service: name={{ item }} state=started
18 | with_items:
19 | - pcsd
20 | tags: ha
21 |
22 | - name: Set the hacluster user password and authenticate the cluster nodes.
23 | shell: |
24 | echo {{ openstack_hacluster_password }} | passwd --stdin hacluster
25 | pcs cluster auth {{ hostvars[groups['controller'][0]]['ansible_hostname'] }} {{ hostvars[groups['controller'][1]]['ansible_hostname'] }} {{ hostvars[groups['controller'][2]]['ansible_hostname'] }} -u hacluster -p {{ openstack_hacluster_password }} --force
26 | tags: ha
27 |
28 | - name: Set up and start the pacemaker cluster.
29 | shell: |
30 | #pcs cluster setup --name openstack {{ hostvars[groups['controller'][0]]['ansible_hostname'] }} {{ hostvars[groups['controller'][1]]['ansible_hostname'] }} {{ hostvars[groups['controller'][2]]['ansible_hostname'] }} --force
31 | pcs cluster start --all
32 | run_once: True
33 | tags: ha
34 |
35 | - name: Configure pacemaker cluster.
36 | shell: |
37 | pcs property set stonith-enabled=false
38 | when: ansible_hostname == hostvars[groups['controller'][0]]['ansible_hostname']
39 | tags: ha
40 |
41 | - name: Ensure corosync, pacemaker, and pcsd services are enabled on boot.
42 | service: name={{ item }} enabled=yes
43 | with_items:
44 | - corosync
45 | - pacemaker
46 | - pcsd
47 | tags: ha
48 |
49 | # HAproxy
50 | - name: Install HAProxy packages.
51 | yum: name={{ item }} state=installed
52 | with_items:
53 | - haproxy
54 | tags: haproxy
55 |
56 | - name: Configure sysctl for HAProxy.
57 | sysctl: name=net.ipv4.ip_nonlocal_bind value=1 state=present
58 | tags: haproxy
59 |
60 | - name: Copy the configuration files for HAProxy.
61 | template: src=haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
62 | tags: haproxy
63 |
64 | # - name: Ensure HAProxy is started and enabled on boot
65 | # service: name={{ item }} enabled=yes state=started
66 | # ignore_errors: True
67 | # with_items:
68 | # - haproxy
69 | # tags: haproxy
70 |
71 | # Pacemake cluster resource
72 | - name: Configure pacemaker resource.
73 | shell: |
74 | pcs resource create VIP_Internal ocf:heartbeat:IPaddr2 ip={{ openstack_internal_vip }} cidr_netmask={{ openstack_internal_netmask }} op monitor interval=30s
75 | pcs resource create VIP_Public ocf:heartbeat:IPaddr2 ip={{ openstack_public_vip }} cidr_netmask={{ openstack_public_netmask }} op monitor interval=30s
76 | pcs resource create p_haproxy systemd:haproxy --clone
77 | pcs constraint colocation add VIP_Internal with p_haproxy-clone
78 | pcs constraint colocation add VIP_Public with p_haproxy-clone
79 | pcs constraint colocation add VIP_Public with VIP_Internal
80 | pcs constraint order start VIP_Internal then p_haproxy-clone kind=Optional
81 | sleep 15
82 | run_once: True
83 | tags: ha
84 |
--------------------------------------------------------------------------------
/templates/metadata_agent.ini.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | auth_uri = {{ openstack_keystone_auth_uri }}
3 | auth_url = {{ openstack_keystone_auth_url }}
4 | auth_region = {{ openstack_keystone_auth_region }}
5 | auth_plugin = password
6 | project_domain_id = default
7 | user_domain_id = default
8 | project_name = service
9 | username = neutron
10 | password = {{ openstack_neutron_keystone_password }}
11 |
12 | nova_metadata_ip = {{ openstack_controller_host }}
13 | metadata_proxy_shared_secret = {{ openstack_neutron_metadata_secret }}
14 | verbose = True
15 |
16 | # Show debugging output in log (sets DEBUG log level output)
17 | # debug = True
18 |
19 | # The Neutron user information for accessing the Neutron API.
20 | # auth_url = http://localhost:5000/v2.0
21 | # auth_region = RegionOne
22 | # Turn off verification of the certificate for ssl
23 | # auth_insecure = False
24 | # Certificate Authority public key (CA cert) file for ssl
25 | # auth_ca_cert =
26 | admin_tenant_name = %SERVICE_TENANT_NAME%
27 | admin_user = %SERVICE_USER%
28 | admin_password = %SERVICE_PASSWORD%
29 |
30 | # Network service endpoint type to pull from the keystone catalog
31 | # endpoint_type = adminURL
32 |
33 | # IP address used by Nova metadata server
34 | # nova_metadata_ip = 127.0.0.1
35 |
36 | # TCP Port used by Nova metadata server
37 | # nova_metadata_port = 8775
38 |
39 | # Which protocol to use for requests to Nova metadata server, http or https
40 | # nova_metadata_protocol = http
41 |
42 | # Whether insecure SSL connection should be accepted for Nova metadata server
43 | # requests
44 | # nova_metadata_insecure = False
45 |
46 | # Client certificate for nova api, needed when nova api requires client
47 | # certificates
48 | # nova_client_cert =
49 |
50 | # Private key for nova client certificate
51 | # nova_client_priv_key =
52 |
53 | # When proxying metadata requests, Neutron signs the Instance-ID header with a
54 | # shared secret to prevent spoofing. You may select any string for a secret,
55 | # but it must match here and in the configuration used by the Nova Metadata
56 | # Server. NOTE: Nova uses the same config key, but in [neutron] section.
57 | # metadata_proxy_shared_secret =
58 |
59 | # Location of Metadata Proxy UNIX domain socket
60 | # metadata_proxy_socket = $state_path/metadata_proxy
61 |
62 | # Metadata Proxy UNIX domain socket mode, 4 values allowed:
63 | # 'deduce': deduce mode from metadata_proxy_user/group values,
64 | # 'user': set metadata proxy socket mode to 0o644, to use when
65 | # metadata_proxy_user is agent effective user or root,
66 | # 'group': set metadata proxy socket mode to 0o664, to use when
67 | # metadata_proxy_group is agent effective group,
68 | # 'all': set metadata proxy socket mode to 0o666, to use otherwise.
69 | # metadata_proxy_socket_mode = deduce
70 |
71 | # Number of separate worker processes for metadata server. Defaults to
72 | # half the number of CPU cores
73 | # metadata_workers =
74 |
75 | # Number of backlog requests to configure the metadata server socket with
76 | # metadata_backlog = 4096
77 |
78 | # URL to connect to the cache backend.
79 | # default_ttl=0 parameter will cause cache entries to never expire.
80 | # Otherwise default_ttl specifies time in seconds a cache entry is valid for.
81 | # No cache is used in case no value is passed.
82 | # cache_url = memory://?default_ttl=5
83 |
84 | [AGENT]
85 | # Log agent heartbeats from this Metadata agent
86 | # log_agent_heartbeats = False
87 |
--------------------------------------------------------------------------------
/tasks/keystone.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for keystone
3 |
4 | # Prerequisites
5 | - name: Prepare Database for keystone.
6 | include: database.yml
7 | vars:
8 | db_name: "{{ openstack_keystone_db_name }}"
9 | db_user: "{{ openstack_keystone_db_user }}"
10 | db_password: "{{ openstack_keystone_db_password }}"
11 | db_priv: "{{ openstack_keystone_db_name }}.*:ALL"
12 | tags: database
13 |
14 | - name: Install OpenStack keystone packages.
15 | yum: name={{ item }} state=installed
16 | with_items:
17 | - openstack-keystone
18 | - python-keystoneclient
19 | - httpd
20 | - mod_wsgi
21 | - python-memcached
22 |
23 | - name: Configure httpd Listen Port.
24 | lineinfile: >
25 | dest=/etc/httpd/conf/httpd.conf
26 | regexp="{{ item.regexp }}"
27 | line="{{ item.line }}"
28 | state=present
29 | with_items:
30 | - { regexp: "^#?Listen", line: "Listen {{ mgmt_ip }}:80" }
31 |
32 | - name: Copy the configuration files for keystone.
33 | template: src=keystone.conf.j2 dest=/etc/keystone/keystone.conf
34 | #notify: restart keystone
35 |
36 | - name: Copy the configuration files for keystone wsgi.
37 | template: src=wsgi-keystone.conf.j2 dest=/etc/httpd/conf.d/wsgi-keystone.conf
38 | notify: restart httpd
39 |
40 | - name: Ensure keystone is started and enabled on boot.
41 | service: "name={{ item }} state=started enabled=yes"
42 | with_items:
43 | - httpd
44 |
45 | - name: Wait for the keystone service to start.
46 | wait_for: "host={{ openstack_controller_host }} port=35357 delay=10 timeout=30"
47 | run_once: True
48 |
49 | - name: DB sync for keystone.
50 | shell: su -s /bin/sh -c "keystone-manage db_sync" keystone
51 | ignore_errors: True
52 | run_once: True
53 |
54 | - name: Create the service entity for keystone.
55 | include: service_create.yml
56 | vars:
57 | name: keystone
58 | type: identity
59 | description: OpenStack Identity
60 |
61 | - name: Create the API endpoint for keystone.
62 | include: endpoint_create.yml
63 | vars:
64 | service: identity
65 | region: RegionOne
66 | endpoints:
67 | adminurl: "{{ openstack_endpoint_admin_url }}"
68 | internalurl: "{{ openstack_endpoint_internal_url }}"
69 | publicurl: "{{ openstack_endpoint_public_url }}"
70 |
71 | # admin
72 | - name: "Ensure the 'admin' project exists"
73 | keystone_user: >
74 | tenant=admin
75 | tenant_description="Admin Project"
76 | token={{ openstack_admin_token }}
77 | endpoint={{ openstack_endpoint_admin_url }}
78 | run_once: True
79 |
80 | - name: "Ensure the 'admin' user exists"
81 | keystone_user: >
82 | user=admin
83 | password={{ openstack_admin_password }}
84 | tenant=admin
85 | token={{ openstack_admin_token }}
86 | endpoint={{ openstack_endpoint_admin_url }}
87 | run_once: True
88 |
89 | - name: "Ensure the 'admin' role exists"
90 | keystone_user: >
91 | role=admin
92 | user=admin
93 | tenant=admin
94 | token={{ openstack_admin_token }}
95 | endpoint={{ openstack_endpoint_admin_url }}
96 | run_once: True
97 |
98 | # service
99 | - name: "Ensure the 'service' project exists"
100 | keystone_user: >
101 | tenant=service
102 | tenant_description="Service Project"
103 | token={{ openstack_admin_token }}
104 | endpoint={{ openstack_endpoint_admin_url }}
105 | run_once: True
106 |
107 |
--------------------------------------------------------------------------------
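
defaults/main.yml also defines an `admin_auth_env` mapping that the tasks above never reference; a hedged verification task that could follow the admin user setup, confirming the credentials work:

- name: Verify the admin user can authenticate.
  shell: openstack token issue
  environment: "{{ admin_auth_env }}"
  changed_when: False
  run_once: True
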
/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for ansible-role-openstack-controller
3 |
4 | openstack_soft_nofile_limits: 102400
5 | openstack_hard_nofile_limits: 102400
6 |
7 | # ha
8 | openstack_ha: True
9 | ## corosync
10 | openstack_corosync_bindnetaddr: "{{ openstack_internal_net }}"
11 | openstack_corosync_mcastaddr: "239.255.42.1"
12 | openstack_corosync_mcastport: 5405
13 |
14 | openstack_hacluster_password: "openstack"
15 |
16 | # ceph
17 | openstack_ceph_enable: False
18 |
19 | #
20 | openstack_admin_vip: "{{ openstack_controller_host }}"
21 | openstack_internal_vip: "{{ openstack_controller_host }}"
22 | openstack_public_vip: "{{ openstack_controller_host }}"
23 | openstack_os_url: "http://{{ openstack_controller_host }}:35357/v3"
24 | openstack_os_identity_api_version: "3"
25 | openstack_endpoint_admin_url: "http://{{ openstack_admin_vip }}:35357/v2.0"
26 | openstack_endpoint_internal_url: "http://{{ openstack_internal_vip }}:5000/v2.0"
27 | openstack_endpoint_public_url: "http://{{ openstack_public_vip }}:5000/v2.0"
28 |
29 | # auth
30 | openstack_admin_token: "openstack"
31 | openstack_admin_user: "openstack"
32 | openstack_admin_password: "openstack"
33 |
34 | # keystone
35 | openstack_keystone_db_name: "keystone"
36 | openstack_keystone_db_user: "keystone"
37 | openstack_keystone_db_password: "keystone"
38 | openstack_keystone_config_default_verbose: "True"
39 | openstack_keystone_config_token_provider: "uuid"
40 | openstack_keystone_config_token_driver: "memcache"
41 | openstack_keystone_config_revoke_driver: "sql"
42 |
43 | # glance
44 | openstack_glance_keystone_password: "openstack"
45 | openstack_glance_db_name: "glance"
46 | openstack_glance_db_user: "glance"
47 | openstack_glance_db_password: "glance"
48 | openstack_glance_adminurl: "http://{{ openstack_admin_vip }}:9292"
49 | openstack_glance_internalurl: "http://{{ openstack_internal_vip }}:9292"
50 | openstack_glance_publicurl: "http://{{ openstack_public_vip }}:9292"
51 |
52 | # nova controller
53 | openstack_nova_keystone_password: "openstack"
54 | openstack_nova_db_name: "nova"
55 | openstack_nova_db_user: "nova"
56 | openstack_nova_db_password: "nova"
57 | openstack_nova_adminurl: "http://{{ openstack_admin_vip }}:8774/v2/%\\(tenant_id\\)s"
58 | openstack_nova_internalurl: "http://{{ openstack_internal_vip }}:8774/v2/%\\(tenant_id\\)s"
59 | openstack_nova_publicurl: "http://{{ openstack_public_vip }}:8774/v2/%\\(tenant_id\\)s"
60 |
61 | # neutron controller
62 | openstack_neutron_keystone_password: "openstack"
63 | openstack_neutron_db_name: "neutron"
64 | openstack_neutron_db_user: "neutron"
65 | openstack_neutron_db_password: "neutron"
66 | openstack_neutron_adminurl: "http://{{ openstack_admin_vip }}:9696"
67 | openstack_neutron_internalurl: "http://{{ openstack_internal_vip }}:9696"
68 | openstack_neutron_publicurl: "http://{{ openstack_public_vip }}:9696"
69 | openstack_neutron_public_interface: "br-eth2"
70 | openstack_neutron_metadata_secret: "openstack"
71 |
72 | # cinder
73 | openstack_cinder_user_password: "openstack"
74 | openstack_cinder_db_name: "cinder"
75 | openstack_cinder_db_user: "cinder"
76 | openstack_cinder_db_password: "cinder"
77 | openstack_cinder_adminurl_v1: "http://{{ openstack_admin_vip }}:8776/v1/%\\(tenant_id\\)s"
78 | openstack_cinder_internalurl_v1: "http://{{ openstack_internal_vip }}:8776/v1/%\\(tenant_id\\)s"
79 | openstack_cinder_publicurl_v1: "http://{{ openstack_public_vip }}:8776/v1/%\\(tenant_id\\)s"
80 | openstack_cinder_adminurl_v2: "http://{{ openstack_admin_vip }}:8776/v2/%\\(tenant_id\\)s"
81 | openstack_cinder_internalurl_v2: "http://{{ openstack_internal_vip }}:8776/v2/%\\(tenant_id\\)s"
82 | openstack_cinder_publicurl_v2: "http://{{ openstack_public_vip }}:8776/v2/%\\(tenant_id\\)s"
83 |
84 | ## auth env
85 | token_auth_env:
86 | OS_TOKEN: "{{ openstack_admin_token }}"
87 | OS_URL: "{{ openstack_os_url }}"
88 | OS_IDENTITY_API_VERSION: "{{ openstack_os_identity_api_version }}"
89 |
90 | admin_auth_env:
91 | OS_PROJECT_DOMAIN_ID: default
92 | OS_USER_DOMAIN_ID: default
93 | OS_PROJECT_NAME: admin
94 | OS_TENANT_NAME: admin
95 | OS_USERNAME: admin
96 | OS_PASSWORD: "{{ openstack_admin_password }}"
97 | OS_AUTH_URL: "{{ openstack_os_url }}"
98 | OS_IDENTITY_API_VERSION: "{{ openstack_os_identity_api_version }}"
99 |
100 |
--------------------------------------------------------------------------------
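
All of the passwords and tokens above default to throwaway values, and several variables the tasks rely on (`openstack_controller_host`, `mgmt_ip`, `openstack_database_host`, `openstack_database_root_password`, `openstack_internal_net`, the VIP netmasks) are not defined here and must come from inventory or group variables. A sketch of the overrides a real deployment would supply (addresses and the `vault_*` names are purely illustrative):

# group_vars/controller.yml
openstack_controller_host: 192.0.2.10
openstack_database_host: 192.0.2.10
openstack_database_root_password: "{{ vault_db_root_password }}"
openstack_admin_token: "{{ vault_admin_token }}"
openstack_admin_password: "{{ vault_admin_password }}"
openstack_internal_net: 192.0.2.0
openstack_internal_vip: 192.0.2.10
openstack_internal_netmask: 24
openstack_public_vip: 203.0.113.10
openstack_public_netmask: 24
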
/templates/dhcp_agent.ini.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
3 | dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
4 | enable_isolated_metadata = True
5 | verbose = True
6 |
7 | # Show debugging output in log (sets DEBUG log level output)
8 | # debug = False
9 |
10 | # The DHCP agent will resync its state with Neutron to recover from any
11 | # transient notification or rpc errors. The interval is number of
12 | # seconds between attempts.
13 | # resync_interval = 5
14 |
15 | # The DHCP agent requires an interface driver be set. Choose the one that best
16 | # matches your plugin.
17 | # interface_driver =
18 |
19 | # Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
20 | # BigSwitch/Floodlight)
21 | # interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
22 |
23 | # Name of Open vSwitch bridge to use
24 | # ovs_integration_bridge = br-int
25 |
26 | # Use veth for an OVS interface or not.
27 | # Support kernels with limited namespace support
28 | # (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
29 | # ovs_use_veth = False
30 |
31 | # Example of interface_driver option for LinuxBridge
32 | # interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
33 |
34 | # The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
35 | # no additional setup of the DHCP server.
36 | # dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
37 |
38 | # Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
39 | # iproute2 package that supports namespaces). This option is deprecated and
40 | # will be removed in a future release, at which point the old behavior of
41 | # use_namespaces = True will be enforced.
42 | # use_namespaces = True
43 |
44 | # In some cases the neutron router is not present to provide the metadata
45 | # IP but the DHCP server can be used to provide this info. Setting this
46 | # value will force the DHCP server to append specific host routes to the
47 | # DHCP request. If this option is set, then the metadata service will be
48 | # activated for all the networks.
49 | # force_metadata = False
50 |
51 | # The DHCP server can assist with providing metadata support on isolated
52 | # networks. Setting this value to True will cause the DHCP server to append
53 | # specific host routes to the DHCP request. The metadata service will only
54 | # be activated when the subnet does not contain any router port. The guest
55 | # instance must be configured to request host routes via DHCP (Option 121).
56 | # This option doesn't have any effect when force_metadata is set to True.
57 | # enable_isolated_metadata = False
58 |
59 | # Allows for serving metadata requests coming from a dedicated metadata
60 | # access network whose cidr is 169.254.169.254/16 (or larger prefix), and
61 | # is connected to a Neutron router from which the VMs send metadata
62 | # request. In this case DHCP Option 121 will not be injected in VMs, as
63 | # they will be able to reach 169.254.169.254 through a router.
64 | # This option requires enable_isolated_metadata = True
65 | # enable_metadata_network = False
66 |
67 | # Number of threads to use during sync process. Should not exceed connection
68 | # pool size configured on server.
69 | # num_sync_threads = 4
70 |
71 | # Location to store DHCP server config files
72 | # dhcp_confs = $state_path/dhcp
73 |
74 | # Domain to use for building the hostnames. This option will be deprecated in
75 | # a future release. It is being replaced by dns_domain in neutron.conf
76 | # dhcp_domain = openstacklocal
77 |
78 | # Override the default dnsmasq settings with this file
79 | # dnsmasq_config_file =
80 |
81 | # Comma-separated list of DNS servers which will be used by dnsmasq
82 | # as forwarders.
83 | # dnsmasq_dns_servers =
84 |
85 | # Base log dir for dnsmasq logging. The log contains DHCP and DNS log
86 | # information and is useful for debugging issues with either DHCP or DNS.
87 | # If this section is null, disable dnsmasq log.
88 | # dnsmasq_base_log_dir =
89 |
90 | # Limit number of leases to prevent a denial-of-service.
91 | # dnsmasq_lease_max = 16777216
92 |
93 | # Location to DHCP lease relay UNIX domain socket
94 | # dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
95 |
96 | # Use broadcast in DHCP replies
97 | # dhcp_broadcast_reply = False
98 |
99 | # dhcp_delete_namespaces, which is True by default, can be set to False if
100 | # namespaces can't be deleted cleanly on the host running the DHCP agent.
101 | # Disable this if you hit the issue in
102 | # https://bugs.launchpad.net/neutron/+bug/1052535 or if
103 | # you are sure that your version of iproute suffers from the problem.
104 | # This should not be a problem any more. Refer to bug:
105 | # https://bugs.launchpad.net/neutron/+bug/1418079
106 | # This option is deprecated and will be removed in the M release
107 | # dhcp_delete_namespaces = True
108 |
109 | # Timeout for ovs-vsctl commands.
110 | # If the timeout expires, ovs commands will fail with ALARMCLOCK error.
111 | # ovs_vsctl_timeout = 10
112 |
113 | [AGENT]
114 | # Log agent heartbeats from this DHCP agent
115 | # log_agent_heartbeats = False
116 |
--------------------------------------------------------------------------------
/templates/ml2_conf.ini.j2:
--------------------------------------------------------------------------------
1 | [ml2]
2 | type_drivers = flat,vlan
3 | tenant_network_types =
4 | mechanism_drivers = linuxbridge
5 | extension_drivers = port_security
6 |
7 | # (ListOpt) List of network type driver entrypoints to be loaded from
8 | # the neutron.ml2.type_drivers namespace.
9 | #
10 | # type_drivers = local,flat,vlan,gre,vxlan,geneve
11 | # Example: type_drivers = flat,vlan,gre,vxlan,geneve
12 |
13 | # (ListOpt) Ordered list of network_types to allocate as tenant
14 | # networks. The default value 'local' is useful for single-box testing
15 | # but provides no connectivity between hosts.
16 | #
17 | # tenant_network_types = local
18 | # Example: tenant_network_types = vlan,gre,vxlan,geneve
19 |
20 |
21 | # (ListOpt) Ordered list of networking mechanism driver entrypoints
22 | # to be loaded from the neutron.ml2.mechanism_drivers namespace.
23 | # mechanism_drivers =
24 | # Example: mechanism_drivers = openvswitch,mlnx
25 | # Example: mechanism_drivers = arista
26 | # Example: mechanism_drivers = openvswitch,cisco_nexus,logger
27 | # Example: mechanism_drivers = openvswitch,brocade
28 | # Example: mechanism_drivers = linuxbridge,brocade
29 |
30 | # (ListOpt) Ordered list of extension driver entrypoints
31 | # to be loaded from the neutron.ml2.extension_drivers namespace.
32 | # extension_drivers =
33 | # Example: extension_drivers = anewextensiondriver
34 |
35 | # =========== items for MTU selection and advertisement =============
36 | # (IntOpt) Path MTU. The maximum permissible size of an unfragmented
37 | # packet travelling from and to addresses where encapsulated Neutron
38 | # traffic is sent. Drivers calculate maximum viable MTU for
39 | # validating tenant requests based on this value (typically,
40 | # path_mtu - max encap header size). If <=0, the path MTU is
41 | # indeterminate and no calculation takes place.
42 | # path_mtu = 0
43 |
44 | # (IntOpt) Segment MTU. The maximum permissible size of an
45 | # unfragmented packet travelling a L2 network segment. If <=0,
46 | # the segment MTU is indeterminate and no calculation takes place.
47 | # segment_mtu = 0
48 |
49 | # (ListOpt) Physical network MTUs. List of mappings of physical
50 | # network to MTU value. The format of the mapping is
51 | # <physnet>:<mtu val>. This mapping allows specifying a
52 | # physical network MTU value that differs from the default
53 | # segment_mtu value.
54 | # physical_network_mtus =
55 | # Example: physical_network_mtus = physnet1:1550, physnet2:1500
56 | # ======== end of items for MTU selection and advertisement =========
57 |
58 | # (StrOpt) Default network type for external networks when no provider
59 | # attributes are specified. By default it is None, which means that if
60 | # provider attributes are not specified while creating external networks
61 | # then they will have the same type as tenant networks.
62 | # Allowed values for external_network_type config option depend on the
63 | # network type values configured in type_drivers config option.
64 | # external_network_type =
65 | # Example: external_network_type = local
66 |
67 | [ml2_type_flat]
68 | flat_networks = public
69 |
70 | # (ListOpt) List of physical_network names with which flat networks
71 | # can be created. Use * to allow flat networks with arbitrary
72 | # physical_network names.
73 | #
74 | # flat_networks =
75 | # Example:flat_networks = physnet1,physnet2
76 | # Example:flat_networks = *
77 |
78 | [ml2_type_vlan]
79 | network_vlan_ranges = public
80 |
81 | # (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
82 | # specifying physical_network names usable for VLAN provider and
83 | # tenant networks, as well as ranges of VLAN tags on each
84 | # physical_network available for allocation as tenant networks.
85 | #
86 | # network_vlan_ranges =
87 | # Example: network_vlan_ranges = physnet1:1000:2999,physnet2
88 |
89 | [ml2_type_gre]
90 | # (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
91 | # tunnel_id_ranges =
92 |
93 | [ml2_type_vxlan]
94 | # (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
95 | # ranges of VXLAN VNI IDs that are available for tenant network allocation.
96 | #
97 | # vni_ranges =
98 |
99 | # (StrOpt) Multicast group for the VXLAN interface. When configured, will
100 | # enable sending all broadcast traffic to this multicast group. When left
101 | # unconfigured, will disable multicast VXLAN mode.
102 | #
103 | # vxlan_group =
104 | # Example: vxlan_group = 239.1.1.1
105 |
106 | [ml2_type_geneve]
107 | # (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
108 | # ranges of Geneve VNI IDs that are available for tenant network allocation.
109 | #
110 | # vni_ranges =
111 |
112 | # (IntOpt) Geneve encapsulation header size is dynamic, this
113 | # value is used to calculate the maximum MTU for the driver.
114 | # this is the sum of the sizes of the outer ETH+IP+UDP+GENEVE
115 | # header sizes.
116 | # The default size for this field is 50, which is the size of the
117 | # Geneve header without any additional option headers
118 | #
119 | # max_header_size =
120 | # Example: max_header_size = 50 (Geneve headers with no additional options)
121 |
122 | [securitygroup]
123 | enable_ipset = True
124 |
125 | # Controls if neutron security group is enabled or not.
126 | # It should be false when you use nova security group.
127 | # enable_security_group = True
128 |
129 | # Use ipset to speed-up the iptables security groups. Enabling ipset support
130 | # requires that ipset is installed on L2 agent node.
131 | # enable_ipset = True
132 |
--------------------------------------------------------------------------------
/templates/haproxy.cfg.j2:
--------------------------------------------------------------------------------
1 | global
2 | chroot /var/lib/haproxy
3 | daemon
4 | group haproxy
5 | maxconn 4000
6 | pidfile /var/run/haproxy.pid
7 | user haproxy
8 |
9 | defaults
10 | log global
11 | maxconn 4000
12 | option redispatch
13 | retries 3
14 | timeout http-request 10s
15 | timeout queue 1m
16 | timeout connect 10s
17 | timeout client 1m
18 | timeout server 1m
19 | timeout check 10s
20 |
21 | listen Stats
22 | bind {{ openstack_controller_host }}:10000
23 | mode http
24 | stats enable
25 | stats uri /stats
26 | stats refresh 5s
27 | stats show-node
28 | stats show-legends
29 | stats hide-version
30 | stats auth admin:admin
31 |
32 | listen dashboard_cluster_80
33 | bind {{ openstack_controller_host }}:80
34 | balance source
35 | option tcpka
36 | option httpchk
37 | option tcplog
38 | server controller1 {{ hostvars.controller01.mgmt_ip }}:80 check inter 2000 rise 2 fall 5
39 | server controller2 {{ hostvars.controller02.mgmt_ip }}:80 check inter 2000 rise 2 fall 5
40 | server controller3 {{ hostvars.controller03.mgmt_ip }}:80 check inter 2000 rise 2 fall 5
41 |
42 | listen galera_cluster
43 | bind {{ openstack_controller_host }}:3306
44 | balance source
45 | option httpchk
46 | server controller1 {{ hostvars.controller01.mgmt_ip }}:3306 check port 9200 inter 2000 rise 2 fall 5
47 | server controller2 {{ hostvars.controller02.mgmt_ip }}:3306 backup check port 9200 inter 2000 rise 2 fall 5
48 | server controller3 {{ hostvars.controller03.mgmt_ip }}:3306 backup check port 9200 inter 2000 rise 2 fall 5
49 |
50 | listen glance_api_cluster
51 | bind {{ openstack_controller_host }}:9292
52 | bind {{ openstack_public_vip }}:9292
53 | balance source
54 | option tcpka
55 | option httpchk
56 | option tcplog
57 | server controller1 {{ hostvars.controller01.mgmt_ip }}:9292 check inter 2000 rise 2 fall 5
58 | server controller2 {{ hostvars.controller02.mgmt_ip }}:9292 check inter 2000 rise 2 fall 5
59 | server controller3 {{ hostvars.controller03.mgmt_ip }}:9292 check inter 2000 rise 2 fall 5
60 |
61 | listen glance_registry_cluster
62 | bind {{ openstack_controller_host }}:9191
63 | balance source
64 | option tcpka
65 | option tcplog
66 | server controller1 {{ hostvars.controller01.mgmt_ip }}:9191 check inter 2000 rise 2 fall 5
67 | server controller2 {{ hostvars.controller02.mgmt_ip }}:9191 check inter 2000 rise 2 fall 5
68 | server controller3 {{ hostvars.controller03.mgmt_ip }}:9191 check inter 2000 rise 2 fall 5
69 |
70 | listen keystone_admin_cluster
71 | bind {{ openstack_controller_host }}:35357
72 | balance source
73 | option tcpka
74 | option httpchk
75 | option tcplog
76 | server controller1 {{ hostvars.controller01.mgmt_ip }}:35357 check inter 2000 rise 2 fall 5
77 | server controller2 {{ hostvars.controller02.mgmt_ip }}:35357 check inter 2000 rise 2 fall 5
78 | server controller3 {{ hostvars.controller03.mgmt_ip }}:35357 check inter 2000 rise 2 fall 5
79 |
80 | listen keystone_public_internal_cluster
81 | bind {{ openstack_controller_host }}:5000
82 | bind {{ openstack_public_vip }}:5000
83 | balance source
84 | option tcpka
85 | option httpchk
86 | option tcplog
87 | server controller1 {{ hostvars.controller01.mgmt_ip }}:5000 check inter 2000 rise 2 fall 5
88 | server controller2 {{ hostvars.controller02.mgmt_ip }}:5000 check inter 2000 rise 2 fall 5
89 | server controller3 {{ hostvars.controller03.mgmt_ip }}:5000 check inter 2000 rise 2 fall 5
90 |
91 | listen nova_ec2_api_cluster
92 | bind {{ openstack_controller_host }}:8773
93 | bind {{ openstack_public_vip }}:8773
94 | balance source
95 | option tcpka
96 | option tcplog
97 | server controller1 {{ hostvars.controller01.mgmt_ip }}:8773 check inter 2000 rise 2 fall 5
98 | server controller2 {{ hostvars.controller02.mgmt_ip }}:8773 check inter 2000 rise 2 fall 5
99 | server controller3 {{ hostvars.controller03.mgmt_ip }}:8773 check inter 2000 rise 2 fall 5
100 |
101 | listen nova_compute_api_cluster
102 | bind {{ openstack_controller_host }}:8774
103 | bind {{ openstack_public_vip }}:8774
104 | balance source
105 | option tcpka
106 | option httpchk
107 | option tcplog
108 | server controller1 {{ hostvars.controller01.mgmt_ip }}:8774 check inter 2000 rise 2 fall 5
109 | server controller2 {{ hostvars.controller02.mgmt_ip }}:8774 check inter 2000 rise 2 fall 5
110 | server controller3 {{ hostvars.controller03.mgmt_ip }}:8774 check inter 2000 rise 2 fall 5
111 |
112 | listen nova_metadata_api_cluster
113 | bind {{ openstack_controller_host }}:8775
114 | balance source
115 | option tcpka
116 | option tcplog
117 | server controller1 {{ hostvars.controller01.mgmt_ip }}:8775 check inter 2000 rise 2 fall 5
118 | server controller2 {{ hostvars.controller02.mgmt_ip }}:8775 check inter 2000 rise 2 fall 5
119 | server controller3 {{ hostvars.controller03.mgmt_ip }}:8775 check inter 2000 rise 2 fall 5
120 |
121 | listen cinder_api_cluster
122 | bind {{ openstack_controller_host }}:8776
123 | bind {{ openstack_public_vip }}:8776
124 | balance source
125 | option tcpka
126 | option httpchk
127 | option tcplog
128 | server controller1 {{ hostvars.controller01.mgmt_ip }}:8776 check inter 2000 rise 2 fall 5
129 | server controller2 {{ hostvars.controller02.mgmt_ip }}:8776 check inter 2000 rise 2 fall 5
130 | server controller3 {{ hostvars.controller03.mgmt_ip }}:8776 check inter 2000 rise 2 fall 5
131 |
132 | listen spice_cluster
133 | bind {{ openstack_controller_host }}:6080
134 | bind {{ openstack_public_vip }}:6080
135 | balance source
136 | option tcpka
137 | option tcplog
138 | server controller1 {{ hostvars.controller01.mgmt_ip }}:6080 check inter 2000 rise 2 fall 5
139 | server controller2 {{ hostvars.controller02.mgmt_ip }}:6080 check inter 2000 rise 2 fall 5
140 | server controller3 {{ hostvars.controller03.mgmt_ip }}:6080 check inter 2000 rise 2 fall 5
141 |
142 | listen neutron_api_cluster
143 | bind {{ openstack_controller_host }}:9696
144 | bind {{ openstack_public_vip }}:9696
145 | balance source
146 | option tcpka
147 | option httpchk
148 | option tcplog
149 | server controller1 {{ hostvars.controller01.mgmt_ip }}:9696 check inter 2000 rise 2 fall 5
150 | server controller2 {{ hostvars.controller02.mgmt_ip }}:9696 check inter 2000 rise 2 fall 5
151 | server controller3 {{ hostvars.controller03.mgmt_ip }}:9696 check inter 2000 rise 2 fall 5
152 |
--------------------------------------------------------------------------------
/templates/horizon_local_settings.j2:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from django.utils.translation import ugettext_lazy as _
4 |
5 |
6 | from openstack_dashboard import exceptions
7 | from openstack_dashboard.settings import HORIZON_CONFIG
8 |
9 | DEBUG = False
10 | TEMPLATE_DEBUG = DEBUG
11 |
12 |
13 | # WEBROOT is the location relative to the web server root and
14 | # should end with a slash.
15 | WEBROOT = '/dashboard/'
16 | # LOGIN_URL = WEBROOT + 'auth/login/'
17 | # LOGOUT_URL = WEBROOT + 'auth/logout/'
18 | #
19 | # LOGIN_REDIRECT_URL can be used as an alternative for
20 | # HORIZON_CONFIG.user_home, if user_home is not set.
21 | # Do not set it to '/home/', as this will cause circular redirect loop
22 | #LOGIN_REDIRECT_URL = WEBROOT
23 |
24 | # Required for Django 1.5.
25 | # If horizon is running in production (DEBUG is False), set this
26 | # with the list of host/domain names that the application can serve.
27 | # For more information see:
28 | # https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
29 | #ALLOWED_HOSTS = ['horizon.example.com', 'localhost']
30 | ALLOWED_HOSTS = ['*', ]
31 |
32 | # Set SSL proxy settings:
33 | # For Django 1.4+ pass this header from the proxy after terminating the SSL,
34 | # and don't forget to strip it from the client's request.
35 | # For more information see:
36 | # https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
37 | #SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
38 | # https://docs.djangoproject.com/en/1.5/ref/settings/#secure-proxy-ssl-header
39 | #SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
40 |
41 | # If Horizon is being served through SSL, then uncomment the following two
42 | # settings to better secure the cookies from security exploits
43 | #CSRF_COOKIE_SECURE = True
44 | #SESSION_COOKIE_SECURE = True
45 |
46 | # Overrides for OpenStack API versions. Use this setting to force the
47 | # OpenStack dashboard to use a specific API version for a given service API.
48 | # Versions specified here should be integers or floats, not strings.
49 | # NOTE: The version should be formatted as it appears in the URL for the
50 | # service API. For example, the identity service APIs have inconsistent
51 | # use of the decimal point, so valid options would be 2.0 or 3.
52 | #OPENSTACK_API_VERSIONS = {
53 | # "data-processing": 1.1,
54 | # "identity": 3,
55 | # "volume": 2,
56 | #}
57 |
58 | # Set this to True if running on multi-domain model. When this is enabled, it
59 | # will require user to enter the Domain name in addition to username for login.
60 | #OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
61 |
62 | # Overrides the default domain used when running on single-domain model
63 | # with Keystone V3. All entities will be created in the default domain.
64 | #OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
65 |
66 | # Set Console type:
67 | # valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None
68 | # Set to None explicitly if you want to deactivate the console.
69 | #CONSOLE_TYPE = "AUTO"
70 |
71 | # Show backdrop element outside the modal, do not close the modal
72 | # after clicking on backdrop.
73 | #HORIZON_CONFIG["modal_backdrop"] = "static"
74 |
75 | # Specify a regular expression to validate user passwords.
76 | #HORIZON_CONFIG["password_validator"] = {
77 | # "regex": '.*',
78 | # "help_text": _("Your password does not meet the requirements."),
79 | #}
80 |
81 | # Disable simplified floating IP address management for deployments with
82 | # multiple floating IP pools or complex network requirements.
83 | #HORIZON_CONFIG["simple_ip_management"] = False
84 |
85 | # Turn off browser autocompletion for forms including the login form and
86 | # the database creation workflow if so desired.
87 | #HORIZON_CONFIG["password_autocomplete"] = "off"
88 |
89 | # Setting this to True will disable the reveal button for password fields,
90 | # including on the login form.
91 | #HORIZON_CONFIG["disable_password_reveal"] = False
92 |
93 | LOCAL_PATH = '/tmp'
94 |
95 | # Set custom secret key:
96 | # You can either set it to a specific value or you can let horizon generate a
97 | # default secret key that is unique on this machine, i.e. regardless of the
98 | # number of Python WSGI workers (if used behind Apache+mod_wsgi). However,
99 | # there may be situations where you would want to set this explicitly, e.g.
100 | # when multiple dashboard instances are distributed on different machines
101 | # (usually behind a load-balancer). Either you have to make sure that a session
102 | # gets all requests routed to the same dashboard instance or you set the same
103 | # SECRET_KEY for all of them.
104 | SECRET_KEY = '80929a14885231295490'
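{# NOTE: hard-coding SECRET_KEY does satisfy the multi-node requirement
   described above (every dashboard instance shares the same key), but it
   could equally be templated from a role variable, e.g. a hypothetical
     SECRET_KEY = '{{ openstack_horizon_secret_key }}'
   (that variable is not defined by this role; shown only as a sketch). #}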
105 |
106 | # We recommend you use memcached for development; otherwise after every reload
107 | # of the django development server, you will have to login again. To use
108 | # memcached set CACHES to something like
109 | #CACHES = {
110 | # 'default': {
111 | # 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
112 | # 'LOCATION': '127.0.0.1:11211',
113 | # }
114 | #}
115 |
116 | #CACHES = {
117 | # 'default': {
118 | # 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
119 | # }
120 | #}
121 | CACHES = {
122 | 'default': {
123 | 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
124 | 'LOCATION': '{{ openstack_memcached_servers }}',
125 | }
126 | }
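# NOTE: openstack_memcached_servers above is assumed to render to a value
# Django's MemcachedCache accepts for LOCATION, e.g. a single 'host:11211'
# string; because the template quotes it, pointing at several memcached
# instances would need either a delimited string or a template change.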
127 |
128 | # Send email to the console by default
129 | EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
130 | # Or send them to /dev/null
131 | #EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
132 |
133 | # Configure these for your outgoing email host
134 | #EMAIL_HOST = 'smtp.my-company.com'
135 | #EMAIL_PORT = 25
136 | #EMAIL_HOST_USER = 'djangomail'
137 | #EMAIL_HOST_PASSWORD = 'top-secret!'
138 |
139 | # For multiple regions uncomment this configuration, and add (endpoint, title).
140 | #AVAILABLE_REGIONS = [
141 | # ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
142 | # ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
143 | #]
144 |
145 | OPENSTACK_HOST = "{{ openstack_controller_host }}"
146 | OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
147 | #OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
148 | OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
149 |
150 | # Enables keystone web single-sign-on if set to True.
151 | #WEBSSO_ENABLED = False
152 |
153 | # Determines which authentication choice to show as default.
154 | #WEBSSO_INITIAL_CHOICE = "credentials"
155 |
156 | # The list of authentication mechanisms
157 | # which include keystone federation protocols.
158 | # Current supported protocol IDs are 'saml2' and 'oidc'
159 | # which represent SAML 2.0, OpenID Connect respectively.
160 | # Do not remove the mandatory credentials mechanism.
161 | #WEBSSO_CHOICES = (
162 | # ("credentials", _("Keystone Credentials")),
163 | # ("oidc", _("OpenID Connect")),
164 | # ("saml2", _("Security Assertion Markup Language")))
165 |
166 | # Disable SSL certificate checks (useful for self-signed certificates):
167 | #OPENSTACK_SSL_NO_VERIFY = True
168 |
169 | # The CA certificate to use to verify SSL connections
170 | #OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
171 |
172 | # The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
173 | # capabilities of the auth backend for Keystone.
174 | # If Keystone has been configured to use LDAP as the auth backend then set
175 | # can_edit_user to False and name to 'ldap'.
176 | #
177 | # TODO(tres): Remove these once Keystone has an API to identify auth backend.
178 | OPENSTACK_KEYSTONE_BACKEND = {
179 | 'name': 'native',
180 | 'can_edit_user': True,
181 | 'can_edit_group': True,
182 | 'can_edit_project': True,
183 | 'can_edit_domain': True,
184 | 'can_edit_role': True,
185 | }
186 |
187 | # Setting this to True will add a new "Retrieve Password" action on an instance,
188 | # allowing Admin session password retrieval/decryption.
189 | #OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
190 |
191 | # The Launch Instance user experience has been significantly enhanced.
192 | # You can choose whether to enable the new launch instance experience,
193 | # the legacy experience, or both. The legacy experience will be removed
194 | # in a future release, but is available as a temporary backup setting to ensure
195 | # compatibility with existing deployments. Further development will not be
196 | # done on the legacy experience. Please report any problems with the new
197 | # experience via the Launchpad tracking system.
198 | #
199 | # Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
200 | # determine the experience to enable. Set them both to true to enable
201 | # both.
202 | #LAUNCH_INSTANCE_LEGACY_ENABLED = True
203 | #LAUNCH_INSTANCE_NG_ENABLED = False
204 |
205 | # The Xen Hypervisor has the ability to set the mount point for volumes
206 | # attached to instances (other Hypervisors currently do not). Setting
207 | # can_set_mount_point to True will add the option to set the mount point
208 | # from the UI.
209 | OPENSTACK_HYPERVISOR_FEATURES = {
210 | 'can_set_mount_point': False,
211 | 'can_set_password': False,
212 | 'requires_keypair': False,
213 | }
214 |
215 | # The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
216 | # services provided by cinder that are not exposed by its extension API.
217 | OPENSTACK_CINDER_FEATURES = {
218 | 'enable_backup': False,
219 | }
220 |
221 | # The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
222 | # services provided by neutron. Options currently available are load
223 | # balancer service, security groups, quotas, VPN service.
224 | OPENSTACK_NEUTRON_NETWORK = {
225 | 'enable_router': False,
226 | 'enable_quotas': False,
227 | 'enable_ipv6': False,
228 | 'enable_distributed_router': False,
229 | 'enable_ha_router': False,
230 | 'enable_lb': False,
231 | 'enable_firewall': False,
232 | 'enable_vpn': False,
233 | 'enable_fip_topology_check': True,
234 |
235 | # Neutron can be configured with a default Subnet Pool to be used for IPv4
236 | # subnet-allocation. Specify the label you wish to display in the Address
237 | # pool selector on the create subnet step if you want to use this feature.
238 | 'default_ipv4_subnet_pool_label': None,
239 |
240 | # Neutron can be configured with a default Subnet Pool to be used for IPv6
241 | # subnet-allocation. Specify the label you wish to display in the Address
242 | # pool selector on the create subnet step if you want to use this feature.
243 | # You must set this to enable IPv6 Prefix Delegation in a PD-capable
244 | # environment.
245 | 'default_ipv6_subnet_pool_label': None,
246 |
247 | # The profile_support option is used to detect if an external router can be
248 | # configured via the dashboard. When using specific plugins the
249 | # profile_support can be turned on if needed.
250 | 'profile_support': None,
251 | #'profile_support': 'cisco',
252 |
253 | # Set which provider network types are supported. Only the network types
254 | # in this list will be available to choose from when creating a network.
255 | # Network types include local, flat, vlan, gre, and vxlan.
256 | 'supported_provider_types': ['*'],
257 |
258 | # Set which VNIC types are supported for port binding. Only the VNIC
259 | # types in this list will be available to choose from when creating a
260 | # port.
261 | # VNIC types include 'normal', 'macvtap' and 'direct'.
262 | # Set to empty list or None to disable VNIC type selection.
263 | 'supported_vnic_types': ['*']
264 | }
265 |
266 | # The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
267 | # in the OpenStack Dashboard related to the Image service, such as the list
268 | # of supported image formats.
269 | #OPENSTACK_IMAGE_BACKEND = {
270 | # 'image_formats': [
271 | # ('', _('Select format')),
272 | # ('aki', _('AKI - Amazon Kernel Image')),
273 | # ('ami', _('AMI - Amazon Machine Image')),
274 | # ('ari', _('ARI - Amazon Ramdisk Image')),
275 | # ('docker', _('Docker')),
276 | # ('iso', _('ISO - Optical Disk Image')),
277 | # ('ova', _('OVA - Open Virtual Appliance')),
278 | # ('qcow2', _('QCOW2 - QEMU Emulator')),
279 | # ('raw', _('Raw')),
280 | # ('vdi', _('VDI - Virtual Disk Image')),
281 | #    ('vhd', _('VHD - Virtual Hard Disk')),
282 | # ('vmdk', _('VMDK - Virtual Machine Disk')),
283 | # ]
284 | #}
285 |
286 | # The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
287 | # image custom property attributes that appear on image detail pages.
288 | IMAGE_CUSTOM_PROPERTY_TITLES = {
289 | "architecture": _("Architecture"),
290 | "kernel_id": _("Kernel ID"),
291 | "ramdisk_id": _("Ramdisk ID"),
292 | "image_state": _("Euca2ools state"),
293 | "project_id": _("Project ID"),
294 | "image_type": _("Image Type"),
295 | }
296 |
297 | # The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
298 | # custom properties should not be displayed in the Image Custom Properties
299 | # table.
300 | IMAGE_RESERVED_CUSTOM_PROPERTIES = []
301 |
302 | # OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
303 | # in the Keystone service catalog. Use this setting when Horizon is running
304 | # external to the OpenStack environment. The default is 'publicURL'.
305 | #OPENSTACK_ENDPOINT_TYPE = "publicURL"
306 |
307 | # SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
308 | # case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
309 | # in the Keystone service catalog. Use this setting when Horizon is running
310 | # external to the OpenStack environment. The default is None. This
311 | # value should differ from OPENSTACK_ENDPOINT_TYPE if used.
312 | #SECONDARY_ENDPOINT_TYPE = "publicURL"
313 |
314 | # The number of objects (Swift containers/objects or images) to display
315 | # on a single page before providing a paging element (a "more" link)
316 | # to paginate results.
317 | API_RESULT_LIMIT = 1000
318 | API_RESULT_PAGE_SIZE = 20
319 |
320 | # The size of chunk in bytes for downloading objects from Swift
321 | SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
322 |
323 | # Specify a maximum number of items to display in a dropdown.
324 | DROPDOWN_MAX_ITEMS = 30
325 |
326 | # The timezone of the server. This should correspond with the timezone
327 | # of your entire OpenStack installation, and hopefully be in UTC.
328 | #TIME_ZONE = "UTC"
329 | TIME_ZONE = "Asia/Shanghai"
330 |
331 | # When launching an instance, the menu of available flavors is
332 | # sorted by RAM usage, ascending. If you would like a different sort order,
333 | # you can provide another flavor attribute as sorting key. Alternatively, you
334 | # can provide a custom callback method to use for sorting. You can also provide
335 | # a flag for reverse sort. For more info, see
336 | # http://docs.python.org/2/library/functions.html#sorted
337 | #CREATE_INSTANCE_FLAVOR_SORT = {
338 | # 'key': 'name',
339 | # # or
340 | # 'key': my_awesome_callback_method,
341 | # 'reverse': False,
342 | #}
343 |
344 | # Set this to True to display an 'Admin Password' field on the Change Password
345 | # form to verify that it is indeed the admin logged-in who wants to change
346 | # the password.
347 | #ENFORCE_PASSWORD_CHECK = False
348 |
349 | # Modules that provide /auth routes that can be used to handle different types
350 | # of user authentication. Add auth plugins that require extra route handling to
351 | # this list.
352 | #AUTHENTICATION_URLS = [
353 | # 'openstack_auth.urls',
354 | #]
355 |
356 | # The Horizon Policy Enforcement engine uses these values to load per service
357 | # policy rule files. The content of these files should match the files the
358 | # OpenStack services are using to determine role based access control in the
359 | # target installation.
360 |
361 |
362 | # Map of local copy of service policy files.
363 | # Please insure that your identity policy file matches the one being used on
364 | # your keystone servers. There is an alternate policy file that may be used
365 | # in the Keystone v3 multi-domain case, policy.v3cloudsample.json.
366 | # This file is not included in the Horizon repository by default but can be
367 | # found at
368 | # http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \
369 | # policy.v3cloudsample.json
370 | # Having matching policy files on the Horizon and Keystone servers is essential
371 | # for normal operation. This holds true for all services and their policy files.
372 | POLICY_FILES_PATH = '/etc/openstack-dashboard'
374 | # Map of local copy of service policy files
375 | #POLICY_FILES = {
376 | # 'identity': 'keystone_policy.json',
377 | # 'compute': 'nova_policy.json',
378 | # 'volume': 'cinder_policy.json',
379 | # 'image': 'glance_policy.json',
380 | # 'orchestration': 'heat_policy.json',
381 | # 'network': 'neutron_policy.json',
382 | # 'telemetry': 'ceilometer_policy.json',
383 | #}
384 |
385 | # Trove user and database extension support. By default support for
386 | # creating users and databases on database instances is turned on.
387 | # To disable these extensions set the permission here to something
388 | # unusable such as ["!"].
389 | #TROVE_ADD_USER_PERMS = []
390 | #TROVE_ADD_DATABASE_PERMS = []
391 |
392 | # Change this path to the appropriate static directory containing
393 | # two files: _variables.scss and _styles.scss
394 | #CUSTOM_THEME_PATH = 'themes/default'
395 |
396 | LOGGING = {
397 | 'version': 1,
398 | # When set to True this will disable all logging except
399 | # for loggers specified in this configuration dictionary. Note that
400 | # if nothing is specified here and disable_existing_loggers is True,
401 | # django.db.backends will still log unless it is disabled explicitly.
402 | 'disable_existing_loggers': False,
403 | 'handlers': {
404 | 'null': {
405 | 'level': 'DEBUG',
406 | 'class': 'django.utils.log.NullHandler',
407 | },
408 | 'console': {
409 | # Set the level to "DEBUG" for verbose output logging.
410 | 'level': 'INFO',
411 | 'class': 'logging.StreamHandler',
412 | },
413 | },
414 | 'loggers': {
415 | # Logging from django.db.backends is VERY verbose, send to null
416 | # by default.
417 | 'django.db.backends': {
418 | 'handlers': ['null'],
419 | 'propagate': False,
420 | },
421 | 'requests': {
422 | 'handlers': ['null'],
423 | 'propagate': False,
424 | },
425 | 'horizon': {
426 | 'handlers': ['console'],
427 | 'level': 'DEBUG',
428 | 'propagate': False,
429 | },
430 | 'openstack_dashboard': {
431 | 'handlers': ['console'],
432 | 'level': 'DEBUG',
433 | 'propagate': False,
434 | },
435 | 'novaclient': {
436 | 'handlers': ['console'],
437 | 'level': 'DEBUG',
438 | 'propagate': False,
439 | },
440 | 'cinderclient': {
441 | 'handlers': ['console'],
442 | 'level': 'DEBUG',
443 | 'propagate': False,
444 | },
445 | 'keystoneclient': {
446 | 'handlers': ['console'],
447 | 'level': 'DEBUG',
448 | 'propagate': False,
449 | },
450 | 'glanceclient': {
451 | 'handlers': ['console'],
452 | 'level': 'DEBUG',
453 | 'propagate': False,
454 | },
455 | 'neutronclient': {
456 | 'handlers': ['console'],
457 | 'level': 'DEBUG',
458 | 'propagate': False,
459 | },
460 | 'heatclient': {
461 | 'handlers': ['console'],
462 | 'level': 'DEBUG',
463 | 'propagate': False,
464 | },
465 | 'ceilometerclient': {
466 | 'handlers': ['console'],
467 | 'level': 'DEBUG',
468 | 'propagate': False,
469 | },
470 | 'troveclient': {
471 | 'handlers': ['console'],
472 | 'level': 'DEBUG',
473 | 'propagate': False,
474 | },
475 | 'swiftclient': {
476 | 'handlers': ['console'],
477 | 'level': 'DEBUG',
478 | 'propagate': False,
479 | },
480 | 'openstack_auth': {
481 | 'handlers': ['console'],
482 | 'level': 'DEBUG',
483 | 'propagate': False,
484 | },
485 | 'nose.plugins.manager': {
486 | 'handlers': ['console'],
487 | 'level': 'DEBUG',
488 | 'propagate': False,
489 | },
490 | 'django': {
491 | 'handlers': ['console'],
492 | 'level': 'DEBUG',
493 | 'propagate': False,
494 | },
495 | 'iso8601': {
496 | 'handlers': ['null'],
497 | 'propagate': False,
498 | },
499 | 'scss': {
500 | 'handlers': ['null'],
501 | 'propagate': False,
502 | },
503 | }
504 | }
505 |
506 | # 'direction' should not be specified for all_tcp/udp/icmp.
507 | # It is specified in the form.
508 | SECURITY_GROUP_RULES = {
509 | 'all_tcp': {
510 | 'name': _('All TCP'),
511 | 'ip_protocol': 'tcp',
512 | 'from_port': '1',
513 | 'to_port': '65535',
514 | },
515 | 'all_udp': {
516 | 'name': _('All UDP'),
517 | 'ip_protocol': 'udp',
518 | 'from_port': '1',
519 | 'to_port': '65535',
520 | },
521 | 'all_icmp': {
522 | 'name': _('All ICMP'),
523 | 'ip_protocol': 'icmp',
524 | 'from_port': '-1',
525 | 'to_port': '-1',
526 | },
527 | 'ssh': {
528 | 'name': 'SSH',
529 | 'ip_protocol': 'tcp',
530 | 'from_port': '22',
531 | 'to_port': '22',
532 | },
533 | 'smtp': {
534 | 'name': 'SMTP',
535 | 'ip_protocol': 'tcp',
536 | 'from_port': '25',
537 | 'to_port': '25',
538 | },
539 | 'dns': {
540 | 'name': 'DNS',
541 | 'ip_protocol': 'tcp',
542 | 'from_port': '53',
543 | 'to_port': '53',
544 | },
545 | 'http': {
546 | 'name': 'HTTP',
547 | 'ip_protocol': 'tcp',
548 | 'from_port': '80',
549 | 'to_port': '80',
550 | },
551 | 'pop3': {
552 | 'name': 'POP3',
553 | 'ip_protocol': 'tcp',
554 | 'from_port': '110',
555 | 'to_port': '110',
556 | },
557 | 'imap': {
558 | 'name': 'IMAP',
559 | 'ip_protocol': 'tcp',
560 | 'from_port': '143',
561 | 'to_port': '143',
562 | },
563 | 'ldap': {
564 | 'name': 'LDAP',
565 | 'ip_protocol': 'tcp',
566 | 'from_port': '389',
567 | 'to_port': '389',
568 | },
569 | 'https': {
570 | 'name': 'HTTPS',
571 | 'ip_protocol': 'tcp',
572 | 'from_port': '443',
573 | 'to_port': '443',
574 | },
575 | 'smtps': {
576 | 'name': 'SMTPS',
577 | 'ip_protocol': 'tcp',
578 | 'from_port': '465',
579 | 'to_port': '465',
580 | },
581 | 'imaps': {
582 | 'name': 'IMAPS',
583 | 'ip_protocol': 'tcp',
584 | 'from_port': '993',
585 | 'to_port': '993',
586 | },
587 | 'pop3s': {
588 | 'name': 'POP3S',
589 | 'ip_protocol': 'tcp',
590 | 'from_port': '995',
591 | 'to_port': '995',
592 | },
593 | 'ms_sql': {
594 | 'name': 'MS SQL',
595 | 'ip_protocol': 'tcp',
596 | 'from_port': '1433',
597 | 'to_port': '1433',
598 | },
599 | 'mysql': {
600 | 'name': 'MYSQL',
601 | 'ip_protocol': 'tcp',
602 | 'from_port': '3306',
603 | 'to_port': '3306',
604 | },
605 | 'rdp': {
606 | 'name': 'RDP',
607 | 'ip_protocol': 'tcp',
608 | 'from_port': '3389',
609 | 'to_port': '3389',
610 | },
611 | }
612 |
613 | # Deprecation Notice:
614 | #
615 | # The setting FLAVOR_EXTRA_KEYS has been deprecated.
616 | # Please load extra spec metadata into the Glance Metadata Definition Catalog.
617 | #
618 | # The sample quota definitions can be found in:
619 | # /etc/metadefs/compute-quota.json
620 | #
621 | # The metadata definition catalog supports CLI and API:
622 | # $glance --os-image-api-version 2 help md-namespace-import
623 | # $glance-manage db_load_metadefs
624 | #
625 | # See Metadata Definitions on: http://docs.openstack.org/developer/glance/
626 |
627 | # Indicate to the Sahara data processing service whether or not
628 | # automatic floating IP allocation is in effect. If it is not
629 | # in effect, the user will be prompted to choose a floating IP
630 | # pool for use in their cluster. False by default. You would want
631 | # to set this to True if you were running Nova Networking with
632 | # auto_assign_floating_ip = True.
633 | #SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
634 |
635 | # The hash algorithm to use for authentication tokens. This must
636 | # match the hash algorithm that the identity server and the
637 | # auth_token middleware are using. Allowed values are the
638 | # algorithms supported by Python's hashlib library.
639 | #OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
640 |
641 | # Hashing tokens from Keystone keeps the Horizon session data smaller, but it
642 | # doesn't work in some cases when using PKI tokens. Uncomment this value and
643 | # set it to False if using PKI tokens and there are 401 errors due to token
644 | # hashing.
645 | #OPENSTACK_TOKEN_HASH_ENABLED = True
646 |
647 | # AngularJS requires some settings to be made available to
648 | # the client side. Some settings are required by in-tree / built-in horizon
649 | # features. These settings must be added to REST_API_REQUIRED_SETTINGS in the
650 | # form of ['SETTING_1','SETTING_2'], etc.
651 | #
652 | # You may remove settings from this list for security purposes, but do so at
653 | # the risk of breaking a built-in horizon feature. These settings are required
654 | # for horizon to function properly. Only remove them if you know what you
655 | # are doing. These settings may in the future be moved to be defined within
656 | # the enabled panel configuration.
657 | # You should not add settings to this list for out of tree extensions.
658 | # See: https://wiki.openstack.org/wiki/Horizon/RESTAPI
659 | REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES']
660 |
661 | # Additional settings can be made available to the client side for
662 | # extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS
663 | # !! Please use extreme caution as the settings are transferred via HTTP/S
664 | # and are not encrypted on the browser. This is an experimental API and
665 | # may be deprecated in the future without notice.
666 | #REST_API_ADDITIONAL_SETTINGS = []
667 |
668 | # DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded
669 | # within an iframe. Legacy browsers are still vulnerable to a Cross-Frame
670 | # Scripting (XFS) vulnerability, so this option allows extra security hardening
671 | # where iframes are not used in deployment. Default setting is True.
672 | # For more information see:
673 | # http://tinyurl.com/anticlickjack
674 | #DISALLOW_IFRAME_EMBED = True
675 |
--------------------------------------------------------------------------------
/templates/neutron.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | verbose = True
3 |
4 | core_plugin = ml2
5 | service_plugins =
6 | rpc_backend = rabbit
7 | auth_strategy = keystone
8 |
9 | notify_nova_on_port_status_changes = True
10 | notify_nova_on_port_data_changes = True
11 | nova_url = http://{{ openstack_controller_host }}:8774/v2
12 |
13 | {% if openstack_ha %}
14 | dhcp_agents_per_network = 3
15 | l3_ha = True
16 | allow_automatic_l3agent_failover = True
17 | {% endif %}
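# NOTE: when openstack_ha is truthy, the three settings above schedule every
# tenant network onto all three DHCP agents, enable VRRP (keepalived) backed
# HA routers, and let routers be rescheduled away from dead L3 agents,
# matching the three-controller layout used in the haproxy template.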
18 |
19 | dhcp_lease_duration = -1
20 |
21 | # Print more verbose output (set logging level to INFO instead of default WARNING level).
22 | # verbose = True
23 |
24 | # =========Start Global Config Option for Distributed L3 Router===============
25 | # Setting the "router_distributed" flag to "True" will default to the creation
26 | # of distributed tenant routers. The admin can override this flag by specifying
27 | # the type of the router on the create request (admin-only attribute). Default
28 | # value is "False" to support legacy mode (centralized) routers.
29 | #
30 | # router_distributed = False
31 | #
32 | # ===========End Global Config Option for Distributed L3 Router===============
33 |
34 | # Print debugging output (set logging level to DEBUG instead of default WARNING level).
35 | # debug = False
36 |
37 | # Where to store Neutron state files. This directory must be writable by the
38 | # user executing the agent.
39 | # state_path = /var/lib/neutron
40 |
41 | # log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
42 | # log_date_format = %Y-%m-%d %H:%M:%S
43 |
44 | # use_syslog -> syslog
45 | # log_file and log_dir -> log_dir/log_file
46 | # (not log_file) and log_dir -> log_dir/{binary_name}.log
47 | # use_stderr -> stderr
48 | # (not use_stderr) and (not log_file) -> stdout
49 | # publish_errors -> notification system
50 |
51 | # use_syslog = False
52 | # syslog_log_facility = LOG_USER
53 |
54 | # use_stderr = False
55 | # log_file =
56 | # log_dir =
57 |
58 | # publish_errors = False
59 |
60 | # Address to bind the API server to
61 | # bind_host = 0.0.0.0
62 | bind_host = {{ mgmt_ip }}
63 |
64 | # Port to bind the API server to
65 | # bind_port = 9696
66 |
67 | # Path to the extensions. Note that this can be a colon-separated list of
68 | # paths. For example:
69 | # api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
70 | # The __path__ of neutron.extensions is appended to this, so if your
71 | # extensions are in there you don't need to specify them here
72 | # api_extensions_path =
73 |
74 | # (StrOpt) Neutron core plugin entrypoint to be loaded from the
75 | # neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
76 | # plugins included in the neutron source distribution. For compatibility with
77 | # previous versions, the class name of a plugin can be specified instead of its
78 | # entrypoint name.
79 | #
80 | # core_plugin =
81 | # Example: core_plugin = ml2
82 |
83 | # (StrOpt) Neutron IPAM (IP address management) driver to be loaded from the
84 | # neutron.ipam_drivers namespace. See setup.cfg for the entry point names.
85 | # If ipam_driver is not set (default behavior), no ipam driver is used.
86 | # Example: ipam_driver =
87 | # In order to use the reference implementation of neutron ipam driver, use
88 | # 'internal'.
89 | # Example: ipam_driver = internal
90 |
91 | # (ListOpt) List of service plugin entrypoints to be loaded from the
92 | # neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
93 | # the plugins included in the neutron source distribution. For compatibility
94 | # with previous versions, the class name of a plugin can be specified instead
95 | # of its entrypoint name.
96 | #
97 | # service_plugins =
98 | # Example: service_plugins = router,firewall,lbaas,vpnaas,metering,qos
99 |
100 | # Paste configuration file
101 | # api_paste_config = /usr/share/neutron/api-paste.ini
102 |
103 | # (StrOpt) Hostname to be used by the neutron server, agents and services
104 | # running on this machine. All the agents and services running on this machine
105 | # must use the same host value.
106 | # The default value is hostname of the machine.
107 | #
108 | # host =
109 |
110 | # The strategy to be used for auth.
111 | # Supported values are 'keystone'(default), 'noauth'.
112 | # auth_strategy = noauth
113 |
114 | # Base MAC address. The first 3 octets will remain unchanged. If the
115 | # 4th octet is not 00, it will also be used. The others will be
116 | # randomly generated.
117 | # 3 octet
118 | # base_mac = fa:16:3e:00:00:00
119 | # 4 octet
120 | # base_mac = fa:16:3e:4f:00:00
121 |
122 | # DVR Base MAC address. The first 3 octets will remain unchanged. If the
123 | # 4th octet is not 00, it will also be used. The others will be randomly
124 | # generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
125 | # avoid mixing them up with MACs allocated for tenant ports.
126 | # A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
127 | # The default is 3 octet
128 | # dvr_base_mac = fa:16:3f:00:00:00
129 |
130 | # Maximum amount of retries to generate a unique MAC address
131 | # mac_generation_retries = 16
132 |
133 | # DHCP Lease duration (in seconds). Use -1 to
134 | # tell dnsmasq to use infinite lease times.
135 | # dhcp_lease_duration = 86400
136 |
137 | # Domain to use for building the hostnames
138 | # dns_domain = openstacklocal
139 |
140 | # Allow sending resource operation notification to DHCP agent
141 | # dhcp_agent_notification = True
142 |
143 | # Enable or disable bulk create/update/delete operations
144 | # allow_bulk = True
145 | # Enable or disable pagination
146 | # allow_pagination = False
147 | # Enable or disable sorting
148 | # allow_sorting = False
149 | # Enable or disable overlapping IPs for subnets
150 | # Attention: the following parameter MUST be set to False if Neutron is
151 | # being used in conjunction with nova security groups
152 | # allow_overlapping_ips = True
153 | # Ensure that configured gateway is on subnet. For IPv6, validate only if
154 | # gateway is not a link local address. Deprecated, to be removed during the
155 | # K release, at which point the check will be mandatory.
156 | # force_gateway_on_subnet = True
157 |
158 | # Default maximum number of items returned in a single response. Setting the
159 | # value to 'infinite' or to a negative number means no maximum; otherwise the
160 | # value must be greater than 0. If the number of items requested is greater
161 | # than pagination_max_limit, the server will return at most
162 | # pagination_max_limit items.
163 | # pagination_max_limit = -1
164 |
165 | # Maximum number of DNS nameservers per subnet
166 | # max_dns_nameservers = 5
167 |
168 | # Maximum number of host routes per subnet
169 | # max_subnet_host_routes = 20
170 |
171 | # Maximum number of fixed ips per port
172 | # max_fixed_ips_per_port = 5
173 |
174 | # Maximum number of routes per router
175 | # max_routes = 30
176 |
177 | # Default Subnet Pool to be used for IPv4 subnet-allocation.
178 | # Specifies by UUID the pool to be used in case of subnet-create being called
179 | # without a subnet-pool ID. The default of None means that no pool will be
180 | # used unless passed explicitly to subnet create. If no pool is used, then a
181 | # CIDR must be passed to create a subnet and that subnet will not be allocated
182 | # from any pool; it will be considered part of the tenant's private address
183 | # space.
184 | # default_ipv4_subnet_pool =
185 |
186 | # Default Subnet Pool to be used for IPv6 subnet-allocation.
187 | # Specifies by UUID the pool to be used in case of subnet-create being
188 | # called without a subnet-pool ID. Set to "prefix_delegation"
189 | # to enable IPv6 Prefix Delegation in a PD-capable environment.
190 | # See the description for default_ipv4_subnet_pool for more information.
191 | # default_ipv6_subnet_pool =
192 |
193 | # =========== items for MTU selection and advertisement =============
194 | # Advertise MTU. If True, effort is made to advertise MTU
195 | # settings to VMs via network methods (i.e. DHCP and RA MTU options)
196 | # when the network's preferred MTU is known.
197 | # advertise_mtu = False
198 | # ======== end of items for MTU selection and advertisement =========
199 |
200 | # =========== items for agent management extension =============
201 | # Seconds to regard the agent as down; should be at least twice
202 | # report_interval, to be sure the agent is down for good
203 | # agent_down_time = 75
204 |
205 | # Agent starts with admin_state_up=False when enable_new_agents=False.
206 | # In that case, the user's resources will not be scheduled automatically to the
207 | # agent until admin changes admin_state_up to True.
208 | # enable_new_agents = True
209 | # =========== end of items for agent management extension =====
210 |
211 | # =========== items for agent scheduler extension =============
212 | # Driver to use for scheduling network to DHCP agent
213 | # network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
214 | # Driver to use for scheduling router to a default L3 agent
215 | # router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
216 | # Driver to use for scheduling a loadbalancer pool to an lbaas agent
217 | # loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
218 |
219 | # (StrOpt) Representing the resource type whose load is being reported by
220 | # the agent.
221 | # This can be 'networks','subnets' or 'ports'. When specified (Default is networks),
222 | # the server will extract particular load sent as part of its agent configuration object
223 | # from the agent report state, which is the number of resources being consumed, at
224 | # every report_interval.
225 | # dhcp_load_type can be used in combination with network_scheduler_driver =
226 | # neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
227 | # When the network_scheduler_driver is WeightScheduler, dhcp_load_type can
228 | # be configured to represent the choice for the resource being balanced.
229 | # Example: dhcp_load_type = networks
230 | # Values:
231 | # networks - number of networks hosted on the agent
232 | # subnets - number of subnets associated with the networks hosted on the agent
233 | # ports - number of ports associated with the networks hosted on the agent
234 | # dhcp_load_type = networks
235 |
236 | # Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
237 | # networks to first DHCP agent which sends get_active_networks message to
238 | # neutron server
239 | # network_auto_schedule = True
240 |
241 | # Allow auto scheduling routers to L3 agent. It will schedule non-hosted
242 | # routers to first L3 agent which sends sync_routers message to neutron server
243 | # router_auto_schedule = True
244 |
245 | # Allow automatic rescheduling of routers from dead L3 agents with
246 | # admin_state_up set to True to alive agents.
247 | # allow_automatic_l3agent_failover = False
248 |
249 | # Allow automatic removal of networks from dead DHCP agents with
250 | # admin_state_up set to True.
251 | # Networks could then be rescheduled if network_auto_schedule is True
252 | # allow_automatic_dhcp_failover = True
253 |
254 | # Number of DHCP agents scheduled to host a tenant network.
255 | # If this number is greater than 1, the scheduler automatically
256 | # assigns multiple DHCP agents for a given tenant network,
257 | # providing high availability for DHCP service.
258 | # dhcp_agents_per_network = 1
259 |
260 | # Enable services on agents with admin_state_up False.
261 | # If this option is False, when admin_state_up of an agent is turned to
262 | # False, services on it will be disabled. If this option is True, services
263 | # on agents with admin_state_up False keep available and manual scheduling
264 | # to such agents is available. Agents with admin_state_up False are not
265 | # selected for automatic scheduling regardless of this option.
266 | # enable_services_on_agents_with_admin_state_down = False
267 |
268 | # =========== end of items for agent scheduler extension =====
269 |
270 | # =========== items for l3 extension ==============
271 | # Enable high availability for virtual routers.
272 | # l3_ha = False
273 | #
274 | # Maximum number of l3 agents which a HA router will be scheduled on. If it
275 | # is set to 0 the router will be scheduled on every agent.
276 | # max_l3_agents_per_router = 3
277 | #
278 | # Minimum number of l3 agents which a HA router will be scheduled on. The
279 | # default value is 2.
280 | # min_l3_agents_per_router = 2
281 | #
282 | # CIDR of the administrative network if HA mode is enabled
283 | # l3_ha_net_cidr = 169.254.192.0/18
284 | #
285 | # Enable snat by default on external gateway when available
286 | # enable_snat_by_default = True
287 | #
288 | # The network type to use when creating the HA network for an HA router.
289 | # By default or if empty, the first 'tenant_network_types'
290 | # is used. This is helpful when the VRRP traffic should use a specific
291 | # network which is not the default one.
292 | # ha_network_type =
293 | # Example: ha_network_type = flat
294 | #
295 | # The physical network name with which the HA network can be created.
296 | # ha_network_physical_name =
297 | # Example: ha_network_physical_name = physnet1
298 | # =========== end of items for l3 extension =======
299 |
300 | # =========== items for metadata proxy configuration ==============
301 | # User (uid or name) running metadata proxy after its initialization
302 | # (if empty: agent effective user)
303 | # metadata_proxy_user =
304 |
305 | # Group (gid or name) running metadata proxy after its initialization
306 | # (if empty: agent effective group)
307 | # metadata_proxy_group =
308 |
309 | # Enable/Disable log watch by metadata proxy, it should be disabled when
310 | # metadata_proxy_user/group is not allowed to read/write its log file and
311 | # 'copytruncate' logrotate option must be used if logrotate is enabled on
312 | # metadata proxy log files. Option default value is deduced from
313 | # metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent
314 | # effective user id/name.
315 | # metadata_proxy_watch_log =
316 |
317 | # Location of Metadata Proxy UNIX domain socket
318 | # metadata_proxy_socket = $state_path/metadata_proxy
319 | # =========== end of items for metadata proxy configuration ==============
320 |
321 | # ========== items for VLAN trunking networks ==========
322 | # Setting this flag to True will allow plugins that support it to
323 | # create VLAN transparent networks. This flag has no effect for
324 | # plugins that do not support VLAN transparent networks.
325 | # vlan_transparent = False
326 | # ========== end of items for VLAN trunking networks ==========
327 |
328 | # =========== WSGI parameters related to the API server ==============
329 | # Number of separate API worker processes to spawn. If not specified or < 1,
330 | # the default value is equal to the number of CPUs available.
331 | # api_workers =
332 |
333 | # Number of separate RPC worker processes to spawn. If not specified or < 1,
334 | # a single RPC worker process is spawned by the parent process.
335 | # rpc_workers = 1
336 |
337 | # Timeout for client connections socket operations. If an
338 | # incoming connection is idle for this number of seconds it
339 | # will be closed. A value of '0' means wait forever. (integer
340 | # value)
341 | # client_socket_timeout = 900
342 |
343 | # wsgi keepalive option. Determines if connections are allowed to be held open
344 | # by clients after a request is fulfilled. A value of False will ensure that
345 | # the socket connection will be explicitly closed once a response has been
346 | # sent to the client.
347 | # wsgi_keep_alive = True
348 |
349 | # Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
350 | # starting API server. Not supported on OS X.
351 | # tcp_keepidle = 600
352 |
353 | # Number of seconds to keep retrying to listen
354 | # retry_until_window = 30
355 |
356 | # Number of backlog requests to configure the socket with.
357 | # backlog = 4096
358 |
359 | # Max header line to accommodate large tokens
360 | # max_header_line = 16384
361 |
362 | # Enable SSL on the API server
363 | # use_ssl = False
364 |
365 | # Certificate file to use when starting API server securely
366 | # ssl_cert_file = /path/to/certfile
367 |
368 | # Private key file to use when starting API server securely
369 | # ssl_key_file = /path/to/keyfile
370 |
371 | # CA certificate file to use when starting API server securely to
372 | # verify connecting clients. This is an optional parameter only required if
373 | # API clients need to authenticate to the API server using SSL certificates
374 | # signed by a trusted CA
375 | # ssl_ca_file = /path/to/cafile
376 | # ======== end of WSGI parameters related to the API server ==========
377 |
378 | # ======== neutron nova interactions ==========
379 | # Send notification to nova when port status is active.
380 | # notify_nova_on_port_status_changes = False
381 |
382 | # Send notifications to nova when port data (fixed_ips/floatingips) change
383 | # so nova can update its cache.
384 | # notify_nova_on_port_data_changes = False
385 |
386 | # URL for connection to nova (Only supports one nova region currently).
387 | # nova_url = http://127.0.0.1:8774/v2
388 |
389 | # Name of nova region to use. Useful if keystone manages more than one region
390 | # nova_region_name =
391 |
392 | # Username for connection to nova in admin context
393 | # nova_admin_username =
394 |
395 | # The uuid of the admin nova tenant
396 | # nova_admin_tenant_id =
397 |
398 | # The name of the admin nova tenant. If the uuid of the admin nova tenant
399 | # is set, this is optional. Useful for cases where the uuid of the admin
400 | # nova tenant is not available when configuration is being done.
401 | # nova_admin_tenant_name =
402 |
403 | # Password for connection to nova in admin context.
404 | # nova_admin_password =
405 |
406 | # Authorization URL for connection to nova in admin context.
407 | # nova_admin_auth_url =
408 |
409 | # CA file for novaclient to verify server certificates
410 | # nova_ca_certificates_file =
411 |
412 | # Boolean to control ignoring SSL errors on the nova url
413 | # nova_api_insecure = False
414 |
415 | # Number of seconds between sending events to nova if there are any events to send
416 | # send_events_interval = 2
417 |
418 | # ======== end of neutron nova interactions ==========
419 |
420 | #
421 | # Options defined in oslo.messaging
422 | #
423 |
424 | # Use durable queues in amqp. (boolean value)
425 | # Deprecated group/name - [DEFAULT]/rabbit_durable_queues
426 | # amqp_durable_queues=false
427 |
428 | # Auto-delete queues in amqp. (boolean value)
429 | # amqp_auto_delete=false
430 |
431 | # Size of RPC connection pool. (integer value)
432 | # rpc_conn_pool_size=30
433 |
434 | # Qpid broker hostname. (string value)
435 | # qpid_hostname=localhost
436 |
437 | # Qpid broker port. (integer value)
438 | # qpid_port=5672
439 |
440 | # Qpid HA cluster host:port pairs. (list value)
441 | # qpid_hosts=$qpid_hostname:$qpid_port
442 |
443 | # Username for Qpid connection. (string value)
444 | # qpid_username=
445 |
446 | # Password for Qpid connection. (string value)
447 | # qpid_password=
448 |
449 | # Space separated list of SASL mechanisms to use for auth.
450 | # (string value)
451 | # qpid_sasl_mechanisms=
452 |
453 | # Seconds between connection keepalive heartbeats. (integer
454 | # value)
455 | # qpid_heartbeat=60
456 |
457 | # Transport to use, either 'tcp' or 'ssl'. (string value)
458 | # qpid_protocol=tcp
459 |
460 | # Whether to disable the Nagle algorithm. (boolean value)
461 | # qpid_tcp_nodelay=true
462 |
463 | # The qpid topology version to use. Version 1 is what was
464 | # originally used by impl_qpid. Version 2 includes some
465 | # backwards-incompatible changes that allow broker federation
466 | # to work. Users should update to version 2 when they are
467 | # able to take everything down, as it requires a clean break.
468 | # (integer value)
469 | # qpid_topology_version=1
470 |
471 | # SSL version to use (valid only if SSL enabled). valid values
472 | # are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
473 | # distributions. (string value)
474 | # kombu_ssl_version=
475 |
476 | # SSL key file (valid only if SSL enabled). (string value)
477 | # kombu_ssl_keyfile=
478 |
479 | # SSL cert file (valid only if SSL enabled). (string value)
480 | # kombu_ssl_certfile=
481 |
482 | # SSL certification authority file (valid only if SSL
483 | # enabled). (string value)
484 | # kombu_ssl_ca_certs=
485 |
486 | # How long to wait before reconnecting in response to an AMQP
487 | # consumer cancel notification. (floating point value)
488 | # kombu_reconnect_delay=1.0
489 |
490 | # The RabbitMQ broker address where a single node is used.
491 | # (string value)
492 | # rabbit_host=localhost
493 |
494 | # The RabbitMQ broker port where a single node is used.
495 | # (integer value)
496 | # rabbit_port=5672
497 |
498 | # RabbitMQ HA cluster host:port pairs. (list value)
499 | # rabbit_hosts=$rabbit_host:$rabbit_port
500 |
501 | # Connect over SSL for RabbitMQ. (boolean value)
502 | # rabbit_use_ssl=false
503 |
504 | # The RabbitMQ userid. (string value)
505 | # rabbit_userid=guest
506 |
507 | # The RabbitMQ password. (string value)
508 | # rabbit_password=guest
509 |
510 | # the RabbitMQ login method (string value)
511 | # rabbit_login_method=AMQPLAIN
512 |
513 | # The RabbitMQ virtual host. (string value)
514 | # rabbit_virtual_host=/
515 |
516 | # How frequently to retry connecting with RabbitMQ. (integer
517 | # value)
518 | # rabbit_retry_interval=1
519 |
520 | # How long to backoff for between retries when connecting to
521 | # RabbitMQ. (integer value)
522 | # rabbit_retry_backoff=2
523 |
524 | # Maximum number of RabbitMQ connection retries. Default is 0
525 | # (infinite retry count). (integer value)
526 | # rabbit_max_retries=0
527 |
528 | # Use HA queues in RabbitMQ (x-ha-policy: all). If you change
529 | # this option, you must wipe the RabbitMQ database. (boolean
530 | # value)
531 | # rabbit_ha_queues=false
532 |
533 | # If passed, use a fake RabbitMQ provider. (boolean value)
534 | # fake_rabbit=false
535 |
536 | # ZeroMQ bind address. Should be a wildcard (*), an ethernet
537 | # interface, or IP. The "host" option should point or resolve
538 | # to this address. (string value)
539 | # rpc_zmq_bind_address=*
540 |
541 | # MatchMaker driver. (string value)
542 | # rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
543 |
544 | # ZeroMQ receiver listening port. (integer value)
545 | # rpc_zmq_port=9501
546 |
547 | # Number of ZeroMQ contexts, defaults to 1. (integer value)
548 | # rpc_zmq_contexts=1
549 |
550 | # Maximum number of ingress messages to locally buffer per
551 | # topic. Default is unlimited. (integer value)
552 | # rpc_zmq_topic_backlog=
553 |
554 | # Directory for holding IPC sockets. (string value)
555 | # rpc_zmq_ipc_dir=/var/run/openstack
556 |
557 | # Name of this node. Must be a valid hostname, FQDN, or IP
558 | # address. Must match "host" option, if running Nova. (string
559 | # value)
560 | # rpc_zmq_host=oslo
561 |
562 | # Seconds to wait before a cast expires (TTL). Only supported
563 | # by impl_zmq. (integer value)
564 | # rpc_cast_timeout=30
565 |
566 | # Heartbeat frequency. (integer value)
567 | # matchmaker_heartbeat_freq=300
568 |
569 | # Heartbeat time-to-live. (integer value)
570 | # matchmaker_heartbeat_ttl=600
571 |
572 | # Size of RPC greenthread pool. (integer value)
573 | # rpc_thread_pool_size=64
574 |
575 | # Driver or drivers to handle sending notifications. (multi
576 | # valued)
577 | # notification_driver=
578 |
579 | # AMQP topic used for OpenStack notifications. (list value)
580 | # Deprecated group/name - [rpc_notifier2]/topics
581 | # notification_topics=notifications
582 |
583 | # Seconds to wait for a response from a call. (integer value)
584 | # rpc_response_timeout=60
585 |
586 | # A URL representing the messaging driver to use and its full
587 | # configuration. If not set, we fall back to the rpc_backend
588 | # option and driver specific configuration. (string value)
589 | # transport_url=
590 |
591 | # The messaging driver to use, defaults to rabbit. Other
592 | # drivers include qpid and zmq. (string value)
593 | # rpc_backend=rabbit
594 |
595 | # The default exchange under which topics are scoped. May be
596 | # overridden by an exchange name specified in the
597 | # transport_url option. (string value)
598 | # control_exchange=openstack
599 |
600 |
601 | [matchmaker_redis]
602 |
603 | #
604 | # Options defined in oslo.messaging
605 | #
606 |
607 | # Host to locate redis. (string value)
608 | # host=127.0.0.1
609 |
610 | # Use this port to connect to redis host. (integer value)
611 | # port=6379
612 |
613 | # Password for Redis server (optional). (string value)
614 | # password=
615 |
616 |
617 | [matchmaker_ring]
618 |
619 | #
620 | # Options defined in oslo.messaging
621 | #
622 |
623 | # Matchmaker ring file (JSON). (string value)
624 | # Deprecated group/name - [DEFAULT]/matchmaker_ringfile
625 | # ringfile=/etc/oslo/matchmaker_ring.json
626 |
627 | [quotas]
628 | quota_port = -1
629 |
630 | # Default driver to use for quota checks
631 | # quota_driver = neutron.db.quota.driver.DbQuotaDriver
632 |
633 | # Resource name(s) that are supported in quota features
634 | # This option is deprecated for removal in the M release, please refrain from using it
635 | # quota_items = network,subnet,port
636 |
637 | # Default number of resource allowed per tenant. A negative value means
638 | # unlimited.
639 | # default_quota = -1
640 |
641 | # Number of networks allowed per tenant. A negative value means unlimited.
642 | # quota_network = 10
643 |
644 | # Number of subnets allowed per tenant. A negative value means unlimited.
645 | # quota_subnet = 10
646 |
647 | # Number of ports allowed per tenant. A negative value means unlimited.
648 | # quota_port = 50
649 |
650 | # Number of security groups allowed per tenant. A negative value means
651 | # unlimited.
652 | # quota_security_group = 10
653 |
654 | # Number of security group rules allowed per tenant. A negative value means
655 | # unlimited.
656 | # quota_security_group_rule = 100
657 |
658 | # Number of vips allowed per tenant. A negative value means unlimited.
659 | # quota_vip = 10
660 |
661 | # Number of pools allowed per tenant. A negative value means unlimited.
662 | # quota_pool = 10
663 |
664 | # Number of pool members allowed per tenant. A negative value means unlimited.
665 | # The default is unlimited because a member is not a real resource consumer
666 | # on OpenStack. However, on the back-end, a member is a resource consumer
667 | # and that is the reason why quota is possible.
668 | # quota_member = -1
669 |
670 | # Number of health monitors allowed per tenant. A negative value means
671 | # unlimited.
672 | # The default is unlimited because a health monitor is not a real resource
673 | # consumer on OpenStack. However, on the back-end, a health monitor is a resource consumer
674 | # and that is the reason why quota is possible.
675 | # quota_health_monitor = -1
676 |
677 | # Number of loadbalancers allowed per tenant. A negative value means unlimited.
678 | # quota_loadbalancer = 10
679 |
680 | # Number of listeners allowed per tenant. A negative value means unlimited.
681 | # quota_listener = -1
682 |
683 | # Number of v2 health monitors allowed per tenant. A negative value means
684 | # unlimited. These health monitors exist under the lbaas v2 API
685 | # quota_healthmonitor = -1
686 |
687 | # Number of routers allowed per tenant. A negative value means unlimited.
688 | # quota_router = 10
689 |
690 | # Number of floating IPs allowed per tenant. A negative value means unlimited.
691 | # quota_floatingip = 50
692 |
693 | # Number of firewalls allowed per tenant. A negative value means unlimited.
694 | # quota_firewall = 1
695 |
696 | # Number of firewall policies allowed per tenant. A negative value means
697 | # unlimited.
698 | # quota_firewall_policy = 1
699 |
700 | # Number of firewall rules allowed per tenant. A negative value means
701 | # unlimited.
702 | # quota_firewall_rule = 100
703 |
704 | [agent]
705 | # Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
706 | # root filter facility.
707 | # Change to "sudo" to skip the filtering and just run the command directly
708 | # root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
709 |
710 | # Set to true to add comments to generated iptables rules that describe
711 | # each rule's purpose. (System must support the iptables comments module.)
712 | # comment_iptables_rules = True
713 |
714 | # Root helper daemon application to use when possible.
715 | # root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
716 |
717 | # Use the root helper when listing the namespaces on a system. This may not
718 | # be required depending on the security configuration. If the root helper is
719 | # not required, set this to False for a performance improvement.
720 | # use_helper_for_ns_read = True
721 |
722 | # The interval to check external processes for failure in seconds (0=disabled)
723 | # check_child_processes_interval = 60
724 |
725 | # Action to take when an external process spawned by an agent dies
726 | # Values:
727 | # respawn - Respawns the external process
728 | # exit - Exits the agent
729 | # check_child_processes_action = respawn
730 |
731 | # =========== items for agent management extension =============
732 | # seconds between nodes reporting state to server; should be less than
733 | # agent_down_time, best if it is half or less than agent_down_time
734 | # report_interval = 30
735 |
736 | # =========== end of items for agent management extension =====
737 |
738 | [keystone_authtoken]
739 | auth_uri = {{ openstack_keystone_auth_uri }}
740 | auth_url = {{ openstack_keystone_auth_url }}
741 | auth_plugin = password
742 | project_domain_id = default
743 | user_domain_id = default
744 | project_name = service
745 | username = neutron
746 | password = {{ openstack_neutron_keystone_password }}
747 |
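# Illustration only (hypothetical values, not role defaults): with
# openstack_keystone_auth_uri=http://controller:5000 and
# openstack_keystone_auth_url=http://controller:35357, the block above would
# render roughly as:
#   auth_uri = http://controller:5000
#   auth_url = http://controller:35357
#   username = neutron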
748 | #auth_uri = http://127.0.0.1:35357/v2.0/
749 | #identity_uri = http://127.0.0.1:5000
750 | #admin_tenant_name = %SERVICE_TENANT_NAME%
751 | #admin_user = %SERVICE_USER%
752 | #admin_password = %SERVICE_PASSWORD%
753 |
754 | [database]
755 | connection = mysql://neutron:{{ openstack_neutron_db_password }}@{{ openstack_database_host }}/neutron
756 |
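# Illustration only: with hypothetical values openstack_neutron_db_password=secret
# and openstack_database_host=controller, the connection above renders as
#   connection = mysql://neutron:secret@controller/neutron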
757 | # This line MUST be changed to actually run the plugin.
758 | # Example:
759 | # connection = mysql+pymysql://root:pass@127.0.0.1:3306/neutron
760 | # Replace 127.0.0.1 above with the IP address of the database used by the
761 | # main neutron server. (Leave it as is if the database runs on this host.)
762 | # connection = sqlite://
763 | # NOTE: In deployment the [database] section and its connection attribute may
764 | # be set in the corresponding core plugin '.ini' file. However, it is suggested
765 | # to put the [database] section and its connection attribute in this
766 | # configuration file.
767 |
768 | # Database engine for which script will be generated when using offline
769 | # migration
770 | # engine =
771 |
772 | # The SQLAlchemy connection string used to connect to the slave database
773 | # slave_connection =
774 |
775 | # Database reconnection retry times, in the event connectivity is lost;
776 | # setting this to -1 implies an infinite retry count
777 | # max_retries = 10
778 |
779 | # Database reconnection interval in seconds - if the initial connection to the
780 | # database fails
781 | # retry_interval = 10
782 |
783 | # Minimum number of SQL connections to keep open in a pool
784 | # min_pool_size = 1
785 |
786 | # Maximum number of SQL connections to keep open in a pool
787 | # max_pool_size = 10
788 |
789 | # Timeout in seconds before idle sql connections are reaped
790 | # idle_timeout = 3600
791 |
792 | # If set, use this value for max_overflow with sqlalchemy
793 | # max_overflow = 20
794 |
795 | # Verbosity of SQL debugging information. 0=None, 100=Everything
796 | # connection_debug = 0
797 |
798 | # Add python stack traces to SQL as comment strings
799 | # connection_trace = False
800 |
801 | # If set, use this value for pool_timeout with sqlalchemy
802 | # pool_timeout = 10
803 |
804 | [nova]
805 | auth_url = {{ openstack_keystone_auth_url }}
806 | auth_plugin = password
807 | project_domain_id = default
808 | user_domain_id = default
809 | region_name = RegionOne
810 | project_name = service
811 | username = nova
812 | password = {{ openstack_nova_keystone_password }}
813 |
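# The credentials above are what neutron uses when calling back into the Nova
# API (e.g. port status notifications). Illustration only, with a hypothetical
# openstack_keystone_auth_url=http://controller:35357, this renders roughly as:
#   auth_url = http://controller:35357
#   username = nova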
814 | # Name of the plugin to load
815 | # auth_plugin =
816 |
817 | # Config Section from which to load plugin specific options
818 | # auth_section =
819 |
820 | # PEM encoded Certificate Authority to use when verifying HTTPs connections.
821 | # cafile =
822 |
823 | # PEM encoded client certificate cert file
824 | # certfile =
825 |
826 | # Verify HTTPS connections.
827 | # insecure = False
828 |
829 | # PEM encoded client certificate key file
830 | # keyfile =
831 |
832 | # Name of nova region to use. Useful if keystone manages more than one region.
833 | # region_name =
834 |
835 | # Timeout value for http requests
836 | # timeout =
837 |
838 | [oslo_concurrency]
839 | lock_path = /var/lib/neutron/tmp
840 |
841 | # Directory to use for lock files. For security, the specified directory should
842 | # only be writable by the user running the processes that need locking.
843 | # Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
844 | # a lock path must be set.
845 | # lock_path = $state_path/lock
846 |
847 | # Enables or disables inter-process locks.
848 | # disable_process_locking = False
849 |
850 | [oslo_policy]
851 |
852 | # The JSON file that defines policies.
853 | # policy_file = policy.json
854 |
855 | # Default rule. Enforced when a requested rule is not found.
856 | # policy_default_rule = default
857 |
858 | # Directories where policy configuration files are stored.
859 | # They can be relative to any directory in the search path defined by the
860 | # config_dir option, or absolute paths. The file defined by policy_file
861 | # must exist for these directories to be searched. Missing or empty
862 | # directories are ignored.
863 | # policy_dirs = policy.d
864 |
865 | [oslo_messaging_amqp]
866 |
867 | #
868 | # From oslo.messaging
869 | #
870 |
871 | # Address prefix used when sending to a specific server (string value)
872 | # Deprecated group/name - [amqp1]/server_request_prefix
873 | # server_request_prefix = exclusive
874 |
875 | # Address prefix used when broadcasting to all servers (string value)
876 | # Deprecated group/name - [amqp1]/broadcast_prefix
877 | # broadcast_prefix = broadcast
878 |
879 | # Address prefix when sending to any server in group (string value)
880 | # Deprecated group/name - [amqp1]/group_request_prefix
881 | # group_request_prefix = unicast
882 |
883 | # Name for the AMQP container (string value)
884 | # Deprecated group/name - [amqp1]/container_name
885 | # container_name =
886 |
887 | # Timeout for inactive connections (in seconds) (integer value)
888 | # Deprecated group/name - [amqp1]/idle_timeout
889 | # idle_timeout = 0
890 |
891 | # Debug: dump AMQP frames to stdout (boolean value)
892 | # Deprecated group/name - [amqp1]/trace
893 | # trace = false
894 |
895 | # CA certificate PEM file for verifying server certificate (string value)
896 | # Deprecated group/name - [amqp1]/ssl_ca_file
897 | # ssl_ca_file =
898 |
899 | # Identifying certificate PEM file to present to clients (string value)
900 | # Deprecated group/name - [amqp1]/ssl_cert_file
901 | # ssl_cert_file =
902 |
903 | # Private key PEM file used to sign cert_file certificate (string value)
904 | # Deprecated group/name - [amqp1]/ssl_key_file
905 | # ssl_key_file =
906 |
907 | # Password for decrypting ssl_key_file (if encrypted) (string value)
908 | # Deprecated group/name - [amqp1]/ssl_key_password
909 | # ssl_key_password =
910 |
911 | # Accept clients using either SSL or plain TCP (boolean value)
912 | # Deprecated group/name - [amqp1]/allow_insecure_clients
913 | # allow_insecure_clients = false
914 |
915 |
916 | [oslo_messaging_qpid]
917 |
918 | #
919 | # From oslo.messaging
920 | #
921 |
922 | # Use durable queues in AMQP. (boolean value)
923 | # Deprecated group/name - [DEFAULT]/rabbit_durable_queues
924 | # amqp_durable_queues = false
925 |
926 | # Auto-delete queues in AMQP. (boolean value)
927 | # Deprecated group/name - [DEFAULT]/amqp_auto_delete
928 | # amqp_auto_delete = false
929 |
930 | # Size of RPC connection pool. (integer value)
931 | # Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
932 | # rpc_conn_pool_size = 30
933 |
934 | # Qpid broker hostname. (string value)
935 | # Deprecated group/name - [DEFAULT]/qpid_hostname
936 | # qpid_hostname = localhost
937 |
938 | # Qpid broker port. (integer value)
939 | # Deprecated group/name - [DEFAULT]/qpid_port
940 | # qpid_port = 5672
941 |
942 | # Qpid HA cluster host:port pairs. (list value)
943 | # Deprecated group/name - [DEFAULT]/qpid_hosts
944 | # qpid_hosts = $qpid_hostname:$qpid_port
945 |
946 | # Username for Qpid connection. (string value)
947 | # Deprecated group/name - [DEFAULT]/qpid_username
948 | # qpid_username =
949 |
950 | # Password for Qpid connection. (string value)
951 | # Deprecated group/name - [DEFAULT]/qpid_password
952 | # qpid_password =
953 |
954 | # Space separated list of SASL mechanisms to use for auth. (string value)
955 | # Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
956 | # qpid_sasl_mechanisms =
957 |
958 | # Seconds between connection keepalive heartbeats. (integer value)
959 | # Deprecated group/name - [DEFAULT]/qpid_heartbeat
960 | # qpid_heartbeat = 60
961 |
962 | # Transport to use, either 'tcp' or 'ssl'. (string value)
963 | # Deprecated group/name - [DEFAULT]/qpid_protocol
964 | # qpid_protocol = tcp
965 |
966 | # Whether to disable the Nagle algorithm. (boolean value)
967 | # Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
968 | # qpid_tcp_nodelay = true
969 |
970 | # The number of prefetched messages held by receiver. (integer value)
971 | # Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
972 | # qpid_receiver_capacity = 1
973 |
974 | # The qpid topology version to use. Version 1 is what was originally used by
975 | # impl_qpid. Version 2 includes some backwards-incompatible changes that allow
976 | # broker federation to work. Users should update to version 2 when they are
977 | # able to take everything down, as it requires a clean break. (integer value)
978 | # Deprecated group/name - [DEFAULT]/qpid_topology_version
979 | # qpid_topology_version = 1
980 |
981 |
982 | [oslo_messaging_rabbit]
983 | {% if openstack_rabbit_hosts %}
984 | rabbit_hosts = {{ openstack_rabbit_hosts }}
985 | rabbit_retry_interval=1
986 | rabbit_retry_backoff=2
987 | rabbit_max_retries=0
988 | rabbit_durable_queues=true
989 | rabbit_ha_queues=true
990 | {% elif openstack_rabbit_host %}
991 | rabbit_host = {{ openstack_rabbit_host }}
992 | {% endif %}
993 | rabbit_userid = {{ openstack_rabbit_userid }}
994 | rabbit_password = {{ openstack_rabbit_password }}
995 |
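# Illustration only: if the role variable openstack_rabbit_hosts were set to a
# hypothetical "ctrl1:5672,ctrl2:5672", the clustered branch above renders as
#   rabbit_hosts = ctrl1:5672,ctrl2:5672
#   rabbit_ha_queues=true
# whereas with only openstack_rabbit_host=controller defined, it renders as
#   rabbit_host = controller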
996 | #
997 | # From oslo.messaging
998 | #
999 |
1000 | # Use durable queues in AMQP. (boolean value)
1001 | # Deprecated group/name - [DEFAULT]/rabbit_durable_queues
1002 | # amqp_durable_queues = false
1003 |
1004 | # Auto-delete queues in AMQP. (boolean value)
1005 | # Deprecated group/name - [DEFAULT]/amqp_auto_delete
1006 | # amqp_auto_delete = false
1007 |
1008 | # Size of RPC connection pool. (integer value)
1009 | # Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
1010 | # rpc_conn_pool_size = 30
1011 |
1012 | # SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
1013 | # SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
1014 | # distributions. (string value)
1015 | # Deprecated group/name - [DEFAULT]/kombu_ssl_version
1016 | # kombu_ssl_version =
1017 |
1018 | # SSL key file (valid only if SSL enabled). (string value)
1019 | # Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
1020 | # kombu_ssl_keyfile =
1021 |
1022 | # SSL cert file (valid only if SSL enabled). (string value)
1023 | # Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
1024 | # kombu_ssl_certfile =
1025 |
1026 | # SSL certification authority file (valid only if SSL enabled). (string value)
1027 | # Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
1028 | # kombu_ssl_ca_certs =
1029 |
1030 | # How long to wait before reconnecting in response to an AMQP consumer cancel
1031 | # notification. (floating point value)
1032 | # Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
1033 | # kombu_reconnect_delay = 1.0
1034 |
1035 | # The RabbitMQ broker address where a single node is used. (string value)
1036 | # Deprecated group/name - [DEFAULT]/rabbit_host
1037 | # rabbit_host = localhost
1038 |
1039 | # The RabbitMQ broker port where a single node is used. (integer value)
1040 | # Deprecated group/name - [DEFAULT]/rabbit_port
1041 | # rabbit_port = 5672
1042 |
1043 | # RabbitMQ HA cluster host:port pairs. (list value)
1044 | # Deprecated group/name - [DEFAULT]/rabbit_hosts
1045 | # rabbit_hosts = $rabbit_host:$rabbit_port
1046 |
1047 | # Connect over SSL for RabbitMQ. (boolean value)
1048 | # Deprecated group/name - [DEFAULT]/rabbit_use_ssl
1049 | # rabbit_use_ssl = false
1050 |
1051 | # The RabbitMQ userid. (string value)
1052 | # Deprecated group/name - [DEFAULT]/rabbit_userid
1053 | # rabbit_userid = guest
1054 |
1055 | # The RabbitMQ password. (string value)
1056 | # Deprecated group/name - [DEFAULT]/rabbit_password
1057 | # rabbit_password = guest
1058 |
1059 | # The RabbitMQ login method. (string value)
1060 | # Deprecated group/name - [DEFAULT]/rabbit_login_method
1061 | # rabbit_login_method = AMQPLAIN
1062 |
1063 | # The RabbitMQ virtual host. (string value)
1064 | # Deprecated group/name - [DEFAULT]/rabbit_virtual_host
1065 | # rabbit_virtual_host = /
1066 |
1067 | # How frequently to retry connecting with RabbitMQ. (integer value)
1068 | # rabbit_retry_interval = 1
1069 |
1070 | # How long to backoff for between retries when connecting to RabbitMQ. (integer
1071 | # value)
1072 | # Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
1073 | # rabbit_retry_backoff = 2
1074 |
1075 | # Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
1076 | # count). (integer value)
1077 | # Deprecated group/name - [DEFAULT]/rabbit_max_retries
1078 | # rabbit_max_retries = 0
1079 |
1080 | # Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
1081 | # must wipe the RabbitMQ database. (boolean value)
1082 | # Deprecated group/name - [DEFAULT]/rabbit_ha_queues
1083 | # rabbit_ha_queues = false
1084 |
1085 | # Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
1086 | # Deprecated group/name - [DEFAULT]/fake_rabbit
1087 | # fake_rabbit = false
1088 |
1089 | [qos]
1090 | # Drivers list to use to send the update notification
1091 | # notification_drivers = message_queue
1092 |
--------------------------------------------------------------------------------
/templates/glance-registry.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | notification_driver = noop
3 | verbose = True
4 |
5 | #
6 | # From glance.registry
7 | #
8 |
9 | # When true, this option sets the owner of an image to be the tenant.
10 | # Otherwise, the owner of the image will be the authenticated user
11 | # issuing the request. (boolean value)
12 | #owner_is_tenant=true
13 |
14 | # Role used to identify an authenticated user as administrator.
15 | # (string value)
16 | #admin_role=admin
17 |
18 | # Allow unauthenticated users to access the API with read-only
19 | # privileges. This only applies when using ContextMiddleware. (boolean
20 | # value)
21 | #allow_anonymous_access=false
22 |
23 | # Limits request ID length. (integer value)
24 | #max_request_id_length=64
25 |
26 | # Whether to allow users to specify image properties beyond what the
27 | # image schema provides (boolean value)
28 | #allow_additional_image_properties=true
29 |
30 | # Maximum number of image members per image. Negative values evaluate
31 | # to unlimited. (integer value)
32 | #image_member_quota=128
33 |
34 | # Maximum number of properties allowed on an image. Negative values
35 | # evaluate to unlimited. (integer value)
36 | #image_property_quota=128
37 |
38 | # Maximum number of tags allowed on an image. Negative values evaluate
39 | # to unlimited. (integer value)
40 | #image_tag_quota=128
41 |
42 | # Maximum number of locations allowed on an image. Negative values
43 | # evaluate to unlimited. (integer value)
44 | #image_location_quota=10
45 |
46 | # Python module path of data access API (string value)
47 | #data_api=glance.db.sqlalchemy.api
48 |
49 | # Default value for the number of items returned by a request if not
50 | # specified explicitly in the request (integer value)
51 | #limit_param_default=25
52 |
53 | # Maximum permissible number of items that could be returned by a
54 | # request (integer value)
55 | #api_limit_max=1000
56 |
57 | # Whether to include the backend image storage location in image
58 | # properties. Revealing storage location can be a security risk, so
59 | # use this setting with caution! (boolean value)
60 | #show_image_direct_url=false
61 |
62 | # Whether to include the backend image locations in image properties.
63 | # For example, if using the file system store a URL of
64 | # "file:///path/to/image" will be returned to the user in the
65 | # 'direct_url' meta-data field. Revealing storage location can be a
66 | # security risk, so use this setting with caution! This overrides
67 | # show_image_direct_url. (boolean value)
68 | #show_multiple_locations=false
69 |
70 | # Maximum size of image a user can upload in bytes. Defaults to
71 | # 1099511627776 bytes (1 TB). WARNING: this value should only be
72 | # increased after careful consideration and must be set to a value
73 | # under 8 EB (9223372036854775808). (integer value)
74 | # Maximum value: 9223372036854775808
75 | #image_size_cap=1099511627776
76 |
77 | # Set a system wide quota for every user. This value is the total
78 | # capacity that a user can use across all storage systems. A value of
79 | # 0 means unlimited. Optional unit can be specified for the value.
80 | # Accepted units are B, KB, MB, GB and TB representing Bytes,
81 | # KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no
82 | # unit is specified then Bytes is assumed. Note that there should not
83 | # be any space between value and unit and units are case sensitive.
84 | # (string value)
85 | #user_storage_quota=0
86 |
87 | # Deploy the v1 OpenStack Images API. (boolean value)
88 | #enable_v1_api=true
89 |
90 | # Deploy the v2 OpenStack Images API. (boolean value)
91 | #enable_v2_api=true
92 |
93 | # Deploy the v3 OpenStack Objects API. (boolean value)
94 | #enable_v3_api=false
95 |
96 | # Deploy the v1 OpenStack Registry API. (boolean value)
97 | #enable_v1_registry=true
98 |
99 | # Deploy the v2 OpenStack Registry API. (boolean value)
100 | #enable_v2_registry=true
101 |
102 | # The hostname/IP of the pydev process listening for debug connections
103 | # (string value)
104 | #pydev_worker_debug_host=
105 |
106 | # The port on which a pydev process is listening for connections.
107 | # (integer value)
108 | # Minimum value: 1
109 | # Maximum value: 65535
110 | #pydev_worker_debug_port=5678
111 |
112 | # AES key for encrypting store 'location' metadata. This includes, if
113 | # used, Swift or S3 credentials. Should be set to a random string of
114 | # length 16, 24 or 32 bytes (string value)
115 | #metadata_encryption_key=
116 |
117 | # Digest algorithm which will be used for digital signature. Use the
118 | # command "openssl list-message-digest-algorithms" to get the
119 | # available algorithms supported by the version of OpenSSL on the
120 | # platform. Examples are "sha1", "sha256", "sha512", etc. (string
121 | # value)
122 | #digest_algorithm=sha256
123 |
124 | # Address to bind the server. Useful when selecting a particular
125 | # network interface. (string value)
126 | #bind_host=0.0.0.0
127 | bind_host={{ mgmt_ip }}
128 |
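# Illustration only: mgmt_ip appears to hold this controller's management
# address, so with a hypothetical mgmt_ip=10.0.0.11 the line above renders as
#   bind_host=10.0.0.11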
129 | # The port on which the server will listen. (integer value)
130 | # Minimum value: 1
131 | # Maximum value: 65535
132 | #bind_port=
133 |
134 | # The backlog value that will be used when creating the TCP listener
135 | # socket. (integer value)
136 | #backlog=4096
137 |
138 | # The value for the socket option TCP_KEEPIDLE. This is the time in
139 | # seconds that the connection must be idle before TCP starts sending
140 | # keepalive probes. (integer value)
141 | #tcp_keepidle=600
142 |
143 | # CA certificate file to use to verify connecting clients. (string
144 | # value)
145 | #ca_file=
146 |
147 | # Certificate file to use when starting API server securely. (string
148 | # value)
149 | #cert_file=
150 |
151 | # Private key file to use when starting API server securely. (string
152 | # value)
153 | #key_file=
154 |
155 | # The number of child process workers that will be created to service
156 | # requests. The default will be equal to the number of CPUs available.
157 | # (integer value)
158 | #workers=4
159 |
160 | # Maximum line size of message headers to be accepted. max_header_line
161 | # may need to be increased when using large tokens (typically those
162 | # generated by the Keystone v3 API with big service catalogs (integer
163 | # value)
164 | #max_header_line=16384
165 |
166 | # If False, server will return the header "Connection: close", If
167 | # True, server will return "Connection: Keep-Alive" in its responses.
168 | # In order to close the client socket connection explicitly after the
169 | # response is sent and read successfully by the client, you simply
170 | # have to set this option to False when you create a wsgi server.
171 | # (boolean value)
172 | #http_keepalive=true
173 |
174 | # Timeout for client connections' socket operations. If an incoming
175 | # connection is idle for this number of seconds it will be closed. A
176 | # value of '0' means wait forever. (integer value)
177 | #client_socket_timeout=900
178 |
179 | #
180 | # From oslo.log
181 | #
182 |
183 | # Print debugging output (set logging level to DEBUG instead of
184 | # default INFO level). (boolean value)
185 | #debug=False
186 |
187 | # If set to false, will disable INFO logging level, making WARNING the
188 | # default. (boolean value)
189 | # This option is deprecated for removal.
190 | # Its value may be silently ignored in the future.
191 | #verbose=True
192 |
193 | # The name of a logging configuration file. This file is appended to
194 | # any existing logging configuration files. For details about logging
195 | # configuration files, see the Python logging module documentation.
196 | # (string value)
197 | # Deprecated group/name - [DEFAULT]/log_config
198 | #log_config_append=
199 |
200 | # DEPRECATED. A logging.Formatter log message format string which may
201 | # use any of the available logging.LogRecord attributes. This option
202 | # is deprecated. Please use logging_context_format_string and
203 | # logging_default_format_string instead. (string value)
204 | #log_format=
205 |
206 | # Format string for %%(asctime)s in log records. Default: %(default)s
207 | # . (string value)
208 | #log_date_format=%Y-%m-%d %H:%M:%S
209 |
210 | # (Optional) Name of log file to output to. If no default is set,
211 | # logging will go to stdout. (string value)
212 | # Deprecated group/name - [DEFAULT]/logfile
213 | #log_file=/var/log/glance/registry.log
214 |
215 | # (Optional) The base directory used for relative --log-file paths.
216 | # (string value)
217 | # Deprecated group/name - [DEFAULT]/logdir
218 | #log_dir=
219 |
220 | # Use syslog for logging. Existing syslog format is DEPRECATED and
221 | # will be changed later to honor RFC5424. (boolean value)
222 | #use_syslog=false
223 |
224 | # (Optional) Enables or disables syslog rfc5424 format for logging. If
225 | # enabled, prefixes the MSG part of the syslog message with APP-NAME
226 | # (RFC5424). The format without the APP-NAME is deprecated in Kilo,
227 | # and will be removed in Mitaka, along with this option. (boolean
228 | # value)
229 | # This option is deprecated for removal.
230 | # Its value may be silently ignored in the future.
231 | #use_syslog_rfc_format=true
232 |
233 | # Syslog facility to receive log lines. (string value)
234 | #syslog_log_facility=LOG_USER
235 |
236 | # Log output to standard error. (boolean value)
237 | #use_stderr=False
238 |
239 | # Format string to use for log messages with context. (string value)
240 | #logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
241 |
242 | # Format string to use for log messages without context. (string
243 | # value)
244 | #logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
245 |
246 | # Data to append to log format when level is DEBUG. (string value)
247 | #logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
248 |
249 | # Prefix each line of exception output with this format. (string
250 | # value)
251 | #logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
252 |
253 | # List of logger=LEVEL pairs. (list value)
254 | #default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN
255 |
256 | # Enables or disables publication of error events. (boolean value)
257 | #publish_errors=false
258 |
259 | # The format for an instance that is passed with the log message.
260 | # (string value)
261 | #instance_format="[instance: %(uuid)s] "
262 |
263 | # The format for an instance UUID that is passed with the log message.
264 | # (string value)
265 | #instance_uuid_format="[instance: %(uuid)s] "
266 |
267 | # Enables or disables fatal status of deprecations. (boolean value)
268 | #fatal_deprecations=false
269 |
270 | #
271 | # From oslo.messaging
272 | #
273 |
274 | # Size of RPC connection pool. (integer value)
275 | # Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
276 | #rpc_conn_pool_size=30
277 |
278 | # ZeroMQ bind address. Should be a wildcard (*), an ethernet
279 | # interface, or IP. The "host" option should point or resolve to this
280 | # address. (string value)
281 | #rpc_zmq_bind_address=*
282 |
283 | # MatchMaker driver. (string value)
284 | #rpc_zmq_matchmaker=local
285 |
286 | # ZeroMQ receiver listening port. (integer value)
287 | #rpc_zmq_port=9501
288 |
289 | # Number of ZeroMQ contexts, defaults to 1. (integer value)
290 | #rpc_zmq_contexts=1
291 |
292 | # Maximum number of ingress messages to locally buffer per topic.
293 | # Default is unlimited. (integer value)
294 | #rpc_zmq_topic_backlog=
295 |
296 | # Directory for holding IPC sockets. (string value)
297 | #rpc_zmq_ipc_dir=/var/run/openstack
298 |
299 | # Name of this node. Must be a valid hostname, FQDN, or IP address.
300 | # Must match "host" option, if running Nova. (string value)
301 | #rpc_zmq_host=localhost
302 |
303 | # Seconds to wait before a cast expires (TTL). Only supported by
304 | # impl_zmq. (integer value)
305 | #rpc_cast_timeout=30
306 |
307 | # Heartbeat frequency. (integer value)
308 | #matchmaker_heartbeat_freq=300
309 |
310 | # Heartbeat time-to-live. (integer value)
311 | #matchmaker_heartbeat_ttl=600
312 |
313 | # Size of executor thread pool. (integer value)
314 | # Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
315 | #executor_thread_pool_size=64
316 |
317 | # The driver(s) to handle sending notifications. Possible values are
318 | # messaging, messagingv2, routing, log, test, noop (multi valued)
319 | #notification_driver =
320 |
321 | # AMQP topic used for OpenStack notifications. (list value)
322 | # Deprecated group/name - [rpc_notifier2]/topics
323 | #notification_topics=notifications
324 |
325 | # Seconds to wait for a response from a call. (integer value)
326 | #rpc_response_timeout=60
327 |
328 | # A URL representing the messaging driver to use and its full
329 | # configuration. If not set, we fall back to the rpc_backend option
330 | # and driver specific configuration. (string value)
331 | #transport_url=
332 |
333 | # The messaging driver to use, defaults to rabbit. Other drivers
334 | # include qpid and zmq. (string value)
335 | #rpc_backend=rabbit
336 |
337 | # The default exchange under which topics are scoped. May be
338 | # overridden by an exchange name specified in the transport_url
339 | # option. (string value)
340 | #control_exchange=openstack
341 |
342 |
343 | [database]
344 | connection = mysql://glance:{{ openstack_glance_db_password }}@{{ openstack_database_host }}/glance
345 |
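# Illustration only: with hypothetical values openstack_glance_db_password=secret
# and openstack_database_host=controller, the connection above renders as
#   connection = mysql://glance:secret@controller/glance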
346 | #
347 | # From oslo.db
348 | #
349 |
350 | # The file name to use with SQLite. (string value)
351 | # Deprecated group/name - [DEFAULT]/sqlite_db
352 | #sqlite_db=oslo.sqlite
353 |
354 | # If True, SQLite uses synchronous mode. (boolean value)
355 | # Deprecated group/name - [DEFAULT]/sqlite_synchronous
356 | #sqlite_synchronous=true
357 |
358 | # The back end to use for the database. (string value)
359 | # Deprecated group/name - [DEFAULT]/db_backend
360 | #backend=sqlalchemy
361 |
362 | # The SQLAlchemy connection string to use to connect to the database.
363 | # (string value)
364 | # Deprecated group/name - [DEFAULT]/sql_connection
365 | # Deprecated group/name - [DATABASE]/sql_connection
366 | # Deprecated group/name - [sql]/connection
367 | #connection=mysql://glance:glance@localhost/glance
368 |
369 | # The SQLAlchemy connection string to use to connect to the slave
370 | # database. (string value)
371 | #slave_connection=
372 |
373 | # The SQL mode to be used for MySQL sessions. This option, including
374 | # the default, overrides any server-set SQL mode. To use whatever SQL
375 | # mode is set by the server configuration, set this to no value.
376 | # Example: mysql_sql_mode= (string value)
377 | #mysql_sql_mode=TRADITIONAL
378 |
379 | # Timeout before idle SQL connections are reaped. (integer value)
380 | # Deprecated group/name - [DEFAULT]/sql_idle_timeout
381 | # Deprecated group/name - [DATABASE]/sql_idle_timeout
382 | # Deprecated group/name - [sql]/idle_timeout
383 | #idle_timeout=3600
384 |
385 | # Minimum number of SQL connections to keep open in a pool. (integer
386 | # value)
387 | # Deprecated group/name - [DEFAULT]/sql_min_pool_size
388 | # Deprecated group/name - [DATABASE]/sql_min_pool_size
389 | #min_pool_size=1
390 |
391 | # Maximum number of SQL connections to keep open in a pool. (integer
392 | # value)
393 | # Deprecated group/name - [DEFAULT]/sql_max_pool_size
394 | # Deprecated group/name - [DATABASE]/sql_max_pool_size
395 | #max_pool_size=
396 |
397 | # Maximum number of database connection retries during startup. Set to
398 | # -1 to specify an infinite retry count. (integer value)
399 | # Deprecated group/name - [DEFAULT]/sql_max_retries
400 | # Deprecated group/name - [DATABASE]/sql_max_retries
401 | #max_retries=10
402 |
403 | # Interval between retries of opening a SQL connection. (integer
404 | # value)
405 | # Deprecated group/name - [DEFAULT]/sql_retry_interval
406 | # Deprecated group/name - [DATABASE]/reconnect_interval
407 | #retry_interval=10
408 |
409 | # If set, use this value for max_overflow with SQLAlchemy. (integer
410 | # value)
411 | # Deprecated group/name - [DEFAULT]/sql_max_overflow
412 | # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
413 | #max_overflow=
414 |
415 | # Verbosity of SQL debugging information: 0=None, 100=Everything.
416 | # (integer value)
417 | # Deprecated group/name - [DEFAULT]/sql_connection_debug
418 | #connection_debug=0
419 |
420 | # Add Python stack traces to SQL as comment strings. (boolean value)
421 | # Deprecated group/name - [DEFAULT]/sql_connection_trace
422 | #connection_trace=false
423 |
424 | # If set, use this value for pool_timeout with SQLAlchemy. (integer
425 | # value)
426 | # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
427 | #pool_timeout=
428 |
429 | # Enable the experimental use of database reconnect on connection
430 | # lost. (boolean value)
431 | #use_db_reconnect=false
432 |
433 | # Seconds between retries of a database transaction. (integer value)
434 | #db_retry_interval=1
435 |
436 | # If True, increases the interval between retries of a database
437 | # operation up to db_max_retry_interval. (boolean value)
438 | #db_inc_retry_interval=true
439 |
440 | # If db_inc_retry_interval is set, the maximum seconds between retries
441 | # of a database operation. (integer value)
442 | #db_max_retry_interval=10
443 |
444 | # Maximum retries in case of connection error or deadlock error before
445 | # error is raised. Set to -1 to specify an infinite retry count.
446 | # (integer value)
447 | #db_max_retries=20
448 |
449 | #
450 | # From oslo.db.concurrency
451 | #
452 |
453 | # Enable the experimental use of thread pooling for all DB API calls
454 | # (boolean value)
455 | # Deprecated group/name - [DEFAULT]/dbapi_use_tpool
456 | #use_tpool=false
457 |
458 |
459 | [glance_store]
460 |
461 | #
462 | # From glance.store
463 | #
464 |
465 | # List of stores enabled (list value)
466 | #stores=file,http
467 |
468 | # Default scheme to use to store image data. The scheme must be
469 | # registered by one of the stores defined by the 'stores' config
470 | # option. (string value)
471 | #default_store=file
472 |
473 | # Minimum interval, in seconds, between updates of dynamic storage
474 | # capabilities based on backend status. This is not a periodic
475 | # routine; the update logic runs only after the interval has elapsed
476 | # and a store operation has been triggered. The feature is enabled
477 | # only when the option value is greater than zero.
478 | # (integer value)
479 | #store_capabilities_update_min_interval=0
480 |
481 | #
482 | # From glance.store
483 | #
484 |
485 | # If True, swiftclient won't check for a valid SSL certificate when
486 | # authenticating. (boolean value)
487 | #swift_store_auth_insecure=false
488 |
489 | # A string giving the CA certificate file to use in SSL connections
490 | # for verifying certs. (string value)
491 | #swift_store_cacert=
492 |
493 | # The region of the swift endpoint to be used for single tenant. This
494 | # setting is only necessary if the tenant has multiple swift
495 | # endpoints. (string value)
496 | #swift_store_region=
497 |
498 | # If set, the configured endpoint will be used. If None, the storage
499 | # url from the auth response will be used. (string value)
500 | #swift_store_endpoint=
501 |
502 | # A string giving the endpoint type of the swift service to use
503 | # (publicURL, adminURL or internalURL). This setting is only used if
504 | # swift_store_auth_version is 2. (string value)
505 | #swift_store_endpoint_type=publicURL
506 |
507 | # A string giving the service type of the swift service to use. This
508 | # setting is only used if swift_store_auth_version is 2. (string
509 | # value)
510 | #swift_store_service_type=object-store
511 |
512 | # Container within the account that the account should use for storing
513 | # images in Swift when using single container mode. In multiple
514 | # container mode, this will be the prefix for all containers. (string
515 | # value)
516 | #swift_store_container=glance
517 |
518 | # The size, in MB, that Glance will start chunking image files and do
519 | # a large object manifest in Swift. (integer value)
520 | #swift_store_large_object_size=5120
521 |
522 | # The amount of data written to a temporary disk buffer during the
523 | # process of chunking the image file. (integer value)
524 | #swift_store_large_object_chunk_size=200
525 |
526 | # A boolean value that determines if we create the container if it
527 | # does not exist. (boolean value)
528 | #swift_store_create_container_on_put=false
529 |
530 | # If set to True, enables multi-tenant storage mode which causes
531 | # Glance images to be stored in tenant specific Swift accounts.
532 | # (boolean value)
533 | #swift_store_multi_tenant=false
534 |
535 | # When set to 0, a single-tenant store will only use one container to
536 | # store all images. When set to an integer value between 1 and 32, a
537 | # single-tenant store will use multiple containers to store images,
539 | # and this value will determine how many containers are created. Used
539 | # only when swift_store_multi_tenant is disabled. The total number of
540 | # containers that will be used is equal to 16^N, so if this config
541 | # option is set to 2, then 16^2=256 containers will be used to store
542 | # images. (integer value)
543 | #swift_store_multiple_containers_seed=0
544 |
545 | # A list of tenants that will be granted read/write access on all
546 | # Swift containers created by Glance in multi-tenant mode. (list
547 | # value)
548 | #swift_store_admin_tenants =
549 |
550 | # If set to False, disables SSL layer compression of https swift
551 | # requests. Setting to False may improve performance for images which
552 | # are already in a compressed format, e.g. qcow2. (boolean value)
553 | #swift_store_ssl_compression=true
554 |
555 | # The number of times a Swift download will be retried before the
556 | # request fails. (integer value)
557 | #swift_store_retry_get_count=0
558 |
559 | # The reference to the default swift account/backing store parameters
560 | # to use for adding new images. (string value)
561 | #default_swift_reference=ref1
562 |
563 | # Version of the authentication service to use. Valid versions are 2
564 | # and 3 for keystone and 1 (deprecated) for swauth and rackspace.
565 | # (deprecated - use "auth_version" in swift_store_config_file) (string
566 | # value)
567 | #swift_store_auth_version=2
568 |
569 | # The address where the Swift authentication service is listening.
570 | # (deprecated - use "auth_address" in swift_store_config_file) (string
571 | # value)
572 | #swift_store_auth_address=
573 |
574 | # The user to authenticate against the Swift authentication service
575 | # (deprecated - use "user" in swift_store_config_file) (string value)
576 | #swift_store_user=
577 |
578 | # Auth key for the user authenticating against the Swift
579 | # authentication service. (deprecated - use "key" in
580 | # swift_store_config_file) (string value)
581 | #swift_store_key=
582 |
583 | # The config file that has the swift account(s) configs. (string value)
584 | #swift_store_config_file=
585 |
586 | # The host where the S3 server is listening. (string value)
587 | #s3_store_host=
588 |
589 | # The S3 query token access key. (string value)
590 | #s3_store_access_key=
591 |
592 | # The S3 query token secret key. (string value)
593 | #s3_store_secret_key=
594 |
595 | # The S3 bucket to be used to store the Glance data. (string value)
596 | #s3_store_bucket=
597 |
598 | # The local directory where uploads will be staged before they are
599 | # transferred into S3. (string value)
600 | #s3_store_object_buffer_dir=
601 |
602 | # A boolean to determine if the S3 bucket should be created on upload
603 | # if it does not exist or if an error should be returned to the user.
604 | # (boolean value)
605 | #s3_store_create_bucket_on_put=false
606 |
607 | # The S3 calling format used to determine the bucket. Either subdomain
608 | # or path can be used. (string value)
609 | #s3_store_bucket_url_format=subdomain
610 |
611 | # What size, in MB, should S3 start chunking image files and do a
612 | # multipart upload in S3. (integer value)
613 | #s3_store_large_object_size=100
614 |
615 | # What multipart upload part size, in MB, should S3 use when uploading
616 | # parts. The size must be greater than or equal to 5M. (integer value)
617 | #s3_store_large_object_chunk_size=10
618 |
619 | # The number of thread pools to perform a multipart upload in S3.
620 | # (integer value)
621 | #s3_store_thread_pools=10
622 |
623 | # Directory to which the Filesystem backend store writes images.
624 | # (string value)
625 | #filesystem_store_datadir=
626 |
627 | # List of directories and its priorities to which the Filesystem
628 | # backend store writes images. (multi valued)
629 | #filesystem_store_datadirs =
630 |
631 | # The path to a file which contains the metadata to be returned with
632 | # any location associated with this store. The file must contain a
633 | # valid JSON object. The object should contain the keys 'id' and
634 | # 'mountpoint'. The value for both keys should be 'string'. (string
635 | # value)
636 | #filesystem_store_metadata_file=
637 |
638 | # The required permission for created image files. This lets the user of
639 | # another service that consumes the image, e.g. Nova, be the exclusive
640 | # member of the group that owns the created files. Assigning a value less
641 | # than or equal to zero means the default file permission is not changed.
642 | # This value will be decoded as an octal
643 | # digit. (integer value)
644 | #filesystem_store_file_perm=0
645 |
646 | # ESX/ESXi or vCenter Server target system. The server value can be an
647 | # IP address or a DNS name. (string value)
648 | #vmware_server_host=
649 |
650 | # Username for authenticating with VMware ESX/VC server. (string
651 | # value)
652 | #vmware_server_username=
653 |
654 | # Password for authenticating with VMware ESX/VC server. (string
655 | # value)
656 | #vmware_server_password=
657 |
658 | # DEPRECATED. Inventory path to a datacenter. If the
659 | # vmware_server_host specified is an ESX/ESXi, the
660 | # vmware_datacenter_path is optional. If specified, it should be "ha-
661 | # datacenter". This option is deprecated in favor of vmware_datastores
662 | # and will be removed in the Liberty release. (string value)
663 | # This option is deprecated for removal.
664 | # Its value may be silently ignored in the future.
665 | #vmware_datacenter_path=ha-datacenter
666 |
667 | # DEPRECATED. Datastore associated with the datacenter. This option is
668 | # deprecated in favor of vmware_datastores and will be removed in the
669 | # Liberty release. (string value)
670 | # This option is deprecated for removal.
671 | # Its value may be silently ignored in the future.
672 | #vmware_datastore_name=
673 |
674 | # Number of times VMware ESX/VC server API must be retried upon
675 | # connection related issues. (integer value)
676 | #vmware_api_retry_count=10
677 |
678 | # The interval used for polling remote tasks invoked on VMware ESX/VC
679 | # server. (integer value)
680 | #vmware_task_poll_interval=5
681 |
682 | # The name of the directory where the glance images will be stored in
683 | # the VMware datastore. (string value)
684 | #vmware_store_image_dir=/openstack_glance
685 |
686 | # Allow to perform insecure SSL requests to ESX/VC. (boolean value)
687 | #vmware_api_insecure=false
688 |
689 | # A list of datastores where the image can be stored. This option may
690 | # be specified multiple times for specifying multiple datastores.
691 | # Either one of vmware_datastore_name or vmware_datastores is
692 | # required. The datastore name should be specified after its
693 | # datacenter path, separated by ":". An optional weight may be given
694 | # after the datastore name, separated again by ":". Thus, the required
695 | # format becomes <datacenter_path>:<datastore_name>:<optional_weight>.
696 | # When adding an image, the datastore with highest weight will be
697 | # selected, unless there is not enough free space available in cases
698 | # where the image size is already known. If no weight is given, it is
699 | # assumed to be zero and the directory will be considered for
700 | # selection last. If multiple datastores have the same weight, then
701 | # the one with the most free space available is selected. (multi
702 | # valued)
703 | #vmware_datastores =
704 |
705 | # Images will be chunked into objects of this size (in megabytes). For
706 | # best performance, this should be a power of two. (integer value)
707 | #sheepdog_store_chunk_size=64
708 |
709 | # Port of sheep daemon. (integer value)
710 | #sheepdog_store_port=7000
711 |
712 | # IP address of sheep daemon. (string value)
713 | #sheepdog_store_address=localhost
714 |
715 | # RADOS images will be chunked into objects of this size (in
716 | # megabytes). For best performance, this should be a power of two.
717 | # (integer value)
718 | #rbd_store_chunk_size=8
719 |
720 | # RADOS pool in which images are stored. (string value)
721 | #rbd_store_pool=images
722 |
723 | # RADOS user to authenticate as (only applicable if using Cephx. If
724 | # <None>, a default will be chosen based on the client. section in
725 | # rbd_store_ceph_conf) (string value)
726 | #rbd_store_user=
727 |
728 | # Ceph configuration file path. If <None>, librados will locate the
729 | # default config. If using cephx authentication, this file should
730 | # include a reference to the right keyring in a client. section
731 | # (string value)
732 | #rbd_store_ceph_conf=/etc/ceph/ceph.conf
733 |
734 | # Info to match when looking for cinder in the service catalog. Format
735 | # is : separated values of the form:
736 | # <service_type>:<service_name>:<endpoint_type> (string value)
737 | #cinder_catalog_info=volume:cinder:publicURL
738 |
739 | # Override service catalog lookup with template for cinder endpoint
740 | # e.g. http://localhost:8776/v1/%(project_id)s (string value)
741 | #cinder_endpoint_template=
742 |
743 | # Region name of this node (string value)
744 | #os_region_name=
745 |
746 | # Location of ca certicates file to use for cinder client requests.
747 | # (string value)
748 | #cinder_ca_certificates_file=
749 |
750 | # Number of cinderclient retries on failed http calls (integer value)
751 | #cinder_http_retries=3
752 |
753 | # Allow to perform insecure SSL requests to cinder (boolean value)
754 | #cinder_api_insecure=false
755 |
756 | # Hostname or IP address of the instance to connect to, or a mongodb
757 | # URI, or a list of hostnames / mongodb URIs. If host is an IPv6
758 | # literal it must be enclosed in '[' and ']' characters following the
759 | # RFC2732 URL syntax (e.g. '[::1]' for localhost) (string value)
760 | #mongodb_store_uri=
761 |
762 | # Database to use (string value)
763 | #mongodb_store_db=
764 |
765 |
766 | [keystone_authtoken]
767 | auth_uri = {{ openstack_keystone_auth_uri }}
768 | auth_url = {{ openstack_keystone_auth_url }}
769 | auth_plugin = password
770 | project_domain_id = default
771 | user_domain_id = default
772 | project_name = service
773 | username = glance
774 | password = {{ openstack_glance_keystone_password }}
775 |
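# Illustration only (hypothetical endpoints, not role defaults): analogous to
# the neutron template, the block above renders roughly as
#   auth_uri = http://controller:5000
#   auth_url = http://controller:35357
#   username = glance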
776 | #
777 | # From keystonemiddleware.auth_token
778 | #
779 |
780 | # Complete public Identity API endpoint. (string value)
781 | #auth_uri=
782 |
783 | # API version of the admin Identity API endpoint. (string value)
784 | #auth_version=
785 |
786 | # Do not handle authorization requests within the middleware, but
787 | # delegate the authorization decision to downstream WSGI components.
788 | # (boolean value)
789 | #delay_auth_decision=false
790 |
791 | # Request timeout value for communicating with Identity API server.
792 | # (integer value)
793 | #http_connect_timeout=
794 |
795 | # How many times are we trying to reconnect when communicating with
796 | # Identity API Server. (integer value)
797 | #http_request_max_retries=3
798 |
799 | # Env key for the swift cache. (string value)
800 | #cache=
801 |
802 | # Required if identity server requires client certificate (string
803 | # value)
804 | #certfile=
805 |
806 | # Required if identity server requires client certificate (string
807 | # value)
808 | #keyfile=
809 |
810 | # A PEM encoded Certificate Authority to use when verifying HTTPs
811 | # connections. Defaults to system CAs. (string value)
812 | #cafile=
813 |
814 | # Verify HTTPS connections. (boolean value)
815 | #insecure=false
816 |
817 | # The region in which the identity server can be found. (string value)
818 | #region_name=
819 |
820 | # Directory used to cache files related to PKI tokens. (string value)
821 | #signing_dir=
822 |
823 | # Optionally specify a list of memcached server(s) to use for caching.
824 | # If left undefined, tokens will instead be cached in-process. (list
825 | # value)
826 | # Deprecated group/name - [DEFAULT]/memcache_servers
827 | #memcached_servers=
828 |
829 | # In order to prevent excessive effort spent validating tokens, the
830 | # middleware caches previously-seen tokens for a configurable duration
831 | # (in seconds). Set to -1 to disable caching completely. (integer
832 | # value)
833 | #token_cache_time=300
834 |
835 | # Determines the frequency at which the list of revoked tokens is
836 | # retrieved from the Identity service (in seconds). A high number of
837 | # revocation events combined with a low cache duration may
838 | # significantly reduce performance. (integer value)
839 | #revocation_cache_time=10
840 |
841 | # (Optional) If defined, indicate whether token data should be
842 | # authenticated or authenticated and encrypted. Acceptable values are
843 | # MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in
844 | # the cache. If ENCRYPT, token data is encrypted and authenticated in
845 | # the cache. If the value is not one of these options or empty,
846 | # auth_token will raise an exception on initialization. (string value)
847 | #memcache_security_strategy=
848 |
849 | # (Optional, mandatory if memcache_security_strategy is defined) This
850 | # string is used for key derivation. (string value)
851 | #memcache_secret_key=
852 |
853 | # (Optional) Number of seconds memcached server is considered dead
854 | # before it is tried again. (integer value)
855 | #memcache_pool_dead_retry=300
856 |
857 | # (Optional) Maximum total number of open connections to every
858 | # memcached server. (integer value)
859 | #memcache_pool_maxsize=10
860 |
861 | # (Optional) Socket timeout in seconds for communicating with a
862 | # memcached server. (integer value)
863 | #memcache_pool_socket_timeout=3
864 |
865 | # (Optional) Number of seconds a connection to memcached is held
866 | # unused in the pool before it is closed. (integer value)
867 | #memcache_pool_unused_timeout=60
868 |
869 | # (Optional) Number of seconds that an operation will wait to get a
870 | # memcached client connection from the pool. (integer value)
871 | #memcache_pool_conn_get_timeout=10
872 |
873 | # (Optional) Use the advanced (eventlet safe) memcached client pool.
874 | # The advanced pool will only work under python 2.x. (boolean value)
875 | #memcache_use_advanced_pool=false
876 |
877 | # (Optional) Indicate whether to set the X-Service-Catalog header. If
878 | # False, middleware will not ask for service catalog on token
879 | # validation and will not set the X-Service-Catalog header. (boolean
880 | # value)
881 | #include_service_catalog=true
882 |
883 | # Used to control the use and type of token binding. Can be set to:
884 | # "disabled" to not check token binding. "permissive" (default) to
885 | # validate binding information if the bind type is of a form known to
886 | # the server and ignore it if not. "strict" like "permissive" but if
887 | # the bind type is unknown the token will be rejected. "required" to
888 | # require any form of token binding. Finally, the name of a
889 | # binding method that must be present in tokens. (string value)
890 | #enforce_token_bind=permissive
891 |
892 | # If true, the revocation list will be checked for cached tokens. This
893 | # requires that PKI tokens are configured on the identity server.
894 | # (boolean value)
895 | #check_revocations_for_cached=false
896 |
897 | # Hash algorithms to use for hashing PKI tokens. This may be a single
898 | # algorithm or multiple. The algorithms are those supported by Python
899 | # standard hashlib.new(). The hashes will be tried in the order given,
900 | # so put the preferred one first for performance. The result of the
901 | # first hash will be stored in the cache. This will typically be set
902 | # to multiple values only while migrating from a less secure algorithm
903 | # to a more secure one. Once all the old tokens are expired this
904 | # option should be set to a single value for better performance. (list
905 | # value)
906 | #hash_algorithms=md5
907 |
908 | # Prefix to prepend at the beginning of the path. Deprecated, use
909 | # identity_uri. (string value)
910 | #auth_admin_prefix =
911 |
912 | # Host providing the admin Identity API endpoint. Deprecated, use
913 | # identity_uri. (string value)
914 | #auth_host=127.0.0.1
915 |
916 | # Port of the admin Identity API endpoint. Deprecated, use
917 | # identity_uri. (integer value)
918 | #auth_port=35357
919 |
920 | # Protocol of the admin Identity API endpoint (http or https).
921 | # Deprecated, use identity_uri. (string value)
922 | #auth_protocol=http
923 |
924 | # Complete admin Identity API endpoint. This should specify the
925 | # unversioned root endpoint e.g. https://localhost:35357/ (string
926 | # value)
927 | #identity_uri=
928 |
929 | # This option is deprecated and may be removed in a future release.
930 | # Single shared secret with the Keystone configuration used for
931 | # bootstrapping a Keystone installation, or otherwise bypassing the
932 | # normal authentication process. This option should not be used, use
933 | # `admin_user` and `admin_password` instead. (string value)
934 | #admin_token=
935 |
936 | # Service username. (string value)
937 | #admin_user=%SERVICE_USER%
938 |
939 | # Service user password. (string value)
940 | #admin_password=%SERVICE_PASSWORD%
941 |
942 | # Service tenant name. (string value)
943 | #admin_tenant_name=%SERVICE_TENANT_NAME%
944 |
945 |
946 | [matchmaker_redis]
947 |
948 | #
949 | # From oslo.messaging
950 | #
951 |
952 | # Host to locate redis. (string value)
953 | #host=127.0.0.1
954 |
955 | # Use this port to connect to redis host. (integer value)
956 | #port=6379
957 |
958 | # Password for Redis server (optional). (string value)
959 | #password=
960 |
961 |
962 | [matchmaker_ring]
963 |
964 | #
965 | # From oslo.messaging
966 | #
967 |
968 | # Matchmaker ring file (JSON). (string value)
969 | # Deprecated group/name - [DEFAULT]/matchmaker_ringfile
970 | #ringfile=/etc/oslo/matchmaker_ring.json
971 |
972 |
973 | [oslo_messaging_amqp]
974 |
975 | #
976 | # From oslo.messaging
977 | #
978 |
979 | # address prefix used when sending to a specific server (string value)
980 | # Deprecated group/name - [amqp1]/server_request_prefix
981 | #server_request_prefix=exclusive
982 |
983 | # address prefix used when broadcasting to all servers (string value)
984 | # Deprecated group/name - [amqp1]/broadcast_prefix
985 | #broadcast_prefix=broadcast
986 |
987 | # address prefix when sending to any server in group (string value)
988 | # Deprecated group/name - [amqp1]/group_request_prefix
989 | #group_request_prefix=unicast
990 |
991 | # Name for the AMQP container (string value)
992 | # Deprecated group/name - [amqp1]/container_name
993 | #container_name=
994 |
995 | # Timeout for inactive connections (in seconds) (integer value)
996 | # Deprecated group/name - [amqp1]/idle_timeout
997 | #idle_timeout=0
998 |
999 | # Debug: dump AMQP frames to stdout (boolean value)
1000 | # Deprecated group/name - [amqp1]/trace
1001 | #trace=false
1002 |
1003 | # CA certificate PEM file to verify server certificate (string value)
1004 | # Deprecated group/name - [amqp1]/ssl_ca_file
1005 | #ssl_ca_file =
1006 |
1007 | # Identifying certificate PEM file to present to clients (string
1008 | # value)
1009 | # Deprecated group/name - [amqp1]/ssl_cert_file
1010 | #ssl_cert_file =
1011 |
1012 | # Private key PEM file used to sign cert_file certificate (string
1013 | # value)
1014 | # Deprecated group/name - [amqp1]/ssl_key_file
1015 | #ssl_key_file =
1016 |
1017 | # Password for decrypting ssl_key_file (if encrypted) (string value)
1018 | # Deprecated group/name - [amqp1]/ssl_key_password
1019 | #ssl_key_password=
1020 |
1021 | # Accept clients using either SSL or plain TCP (boolean value)
1022 | # Deprecated group/name - [amqp1]/allow_insecure_clients
1023 | #allow_insecure_clients=false
1024 |
1025 |
1026 | [oslo_messaging_qpid]
1027 |
1028 | #
1029 | # From oslo.messaging
1030 | #
1031 |
1032 | # Use durable queues in AMQP. (boolean value)
1033 | # Deprecated group/name - [DEFAULT]/amqp_durable_queues
1034 | # Deprecated group/name - [DEFAULT]/rabbit_durable_queues
1035 | #amqp_durable_queues=false
1036 |
1037 | # Auto-delete queues in AMQP. (boolean value)
1038 | # Deprecated group/name - [DEFAULT]/amqp_auto_delete
1039 | #amqp_auto_delete=false
1040 |
1041 | # Send a single AMQP reply to call message. The current behaviour
1042 | # since oslo-incubator is to send two AMQP replies - the first with
1043 | # the payload, and a second to confirm that the other side has
1044 | # finished sending the payload. The second reply is slated for
1045 | # removal in the N release, but backward compatibility must be kept
1046 | # in the meantime. This option provides that compatibility - it
1047 | # defaults to False in Liberty and can be turned on by early adopters
1048 | # with new installations or for testing. Note that this option will
1049 | # be removed in the Mitaka release. (boolean value)
1050 | #send_single_reply=false
1051 |
1052 | # Qpid broker hostname. (string value)
1053 | # Deprecated group/name - [DEFAULT]/qpid_hostname
1054 | #qpid_hostname=localhost
1055 |
1056 | # Qpid broker port. (integer value)
1057 | # Deprecated group/name - [DEFAULT]/qpid_port
1058 | #qpid_port=5672
1059 |
1060 | # Qpid HA cluster host:port pairs. (list value)
1061 | # Deprecated group/name - [DEFAULT]/qpid_hosts
1062 | #qpid_hosts=$qpid_hostname:$qpid_port
1063 |
1064 | # Username for Qpid connection. (string value)
1065 | # Deprecated group/name - [DEFAULT]/qpid_username
1066 | #qpid_username =
1067 |
1068 | # Password for Qpid connection. (string value)
1069 | # Deprecated group/name - [DEFAULT]/qpid_password
1070 | #qpid_password =
1071 |
1072 | # Space separated list of SASL mechanisms to use for auth. (string
1073 | # value)
1074 | # Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
1075 | #qpid_sasl_mechanisms =
1076 |
1077 | # Seconds between connection keepalive heartbeats. (integer value)
1078 | # Deprecated group/name - [DEFAULT]/qpid_heartbeat
1079 | #qpid_heartbeat=60
1080 |
1081 | # Transport to use, either 'tcp' or 'ssl'. (string value)
1082 | # Deprecated group/name - [DEFAULT]/qpid_protocol
1083 | #qpid_protocol=tcp
1084 |
1085 | # Whether to disable the Nagle algorithm. (boolean value)
1086 | # Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
1087 | #qpid_tcp_nodelay=true
1088 |
1089 | # The number of prefetched messages held by receiver. (integer value)
1090 | # Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
1091 | #qpid_receiver_capacity=1
1092 |
1093 | # The qpid topology version to use. Version 1 is what was originally
1094 | # used by impl_qpid. Version 2 includes some backwards-incompatible
1095 | # changes that allow broker federation to work. Users should update
1096 | # to version 2 when they are able to take everything down, as it
1097 | # requires a clean break. (integer value)
1098 | # Deprecated group/name - [DEFAULT]/qpid_topology_version
1099 | #qpid_topology_version=1
1100 |
1101 |
1102 | [oslo_messaging_rabbit]
1103 |
1104 | #
1105 | # From oslo.messaging
1106 | #
1107 |
1108 | # Use durable queues in AMQP. (boolean value)
1109 | # Deprecated group/name - [DEFAULT]/amqp_durable_queues
1110 | # Deprecated group/name - [DEFAULT]/rabbit_durable_queues
1111 | #amqp_durable_queues=false
1112 |
1113 | # Auto-delete queues in AMQP. (boolean value)
1114 | # Deprecated group/name - [DEFAULT]/amqp_auto_delete
1115 | #amqp_auto_delete=false
1116 |
1117 | # Send a single AMQP reply to call message. The current behaviour
1118 | # since oslo-incubator is to send two AMQP replies - the first with
1119 | # the payload, and a second to confirm that the other side has
1120 | # finished sending the payload. The second reply is slated for
1121 | # removal in the N release, but backward compatibility must be kept
1122 | # in the meantime. This option provides that compatibility - it
1123 | # defaults to False in Liberty and can be turned on by early adopters
1124 | # with new installations or for testing. Note that this option will
1125 | # be removed in the Mitaka release. (boolean value)
1126 | #send_single_reply=false
1127 |
1128 | # SSL version to use (valid only if SSL enabled). Valid values are
1129 | # TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
1130 | # available on some distributions. (string value)
1131 | # Deprecated group/name - [DEFAULT]/kombu_ssl_version
1132 | #kombu_ssl_version =
1133 |
1134 | # SSL key file (valid only if SSL enabled). (string value)
1135 | # Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
1136 | #kombu_ssl_keyfile =
1137 |
1138 | # SSL cert file (valid only if SSL enabled). (string value)
1139 | # Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
1140 | #kombu_ssl_certfile =
1141 |
1142 | # SSL certification authority file (valid only if SSL enabled).
1143 | # (string value)
1144 | # Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
1145 | #kombu_ssl_ca_certs =
1146 |
1147 | # How long to wait before reconnecting in response to an AMQP consumer
1148 | # cancel notification. (floating point value)
1149 | # Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
1150 | #kombu_reconnect_delay=1.0
1151 |
1152 | # How long to wait before considering a reconnect attempt to have
1153 | # failed. This value should not be longer than rpc_response_timeout.
1154 | # (integer value)
1155 | #kombu_reconnect_timeout=60
1156 |
1157 | # The RabbitMQ broker address where a single node is used. (string
1158 | # value)
1159 | # Deprecated group/name - [DEFAULT]/rabbit_host
1160 | #rabbit_host=localhost
1161 |
1162 | # The RabbitMQ broker port where a single node is used. (integer
1163 | # value)
1164 | # Deprecated group/name - [DEFAULT]/rabbit_port
1165 | #rabbit_port=5672
1166 |
1167 | # RabbitMQ HA cluster host:port pairs. (list value)
1168 | # Deprecated group/name - [DEFAULT]/rabbit_hosts
1169 | #rabbit_hosts=$rabbit_host:$rabbit_port
1170 |
1171 | # Connect over SSL for RabbitMQ. (boolean value)
1172 | # Deprecated group/name - [DEFAULT]/rabbit_use_ssl
1173 | #rabbit_use_ssl=false
1174 |
1175 | # The RabbitMQ userid. (string value)
1176 | # Deprecated group/name - [DEFAULT]/rabbit_userid
1177 | #rabbit_userid=guest
1178 |
1179 | # The RabbitMQ password. (string value)
1180 | # Deprecated group/name - [DEFAULT]/rabbit_password
1181 | #rabbit_password=guest
1182 |
1183 | # The RabbitMQ login method. (string value)
1184 | # Deprecated group/name - [DEFAULT]/rabbit_login_method
1185 | #rabbit_login_method=AMQPLAIN
1186 |
1187 | # The RabbitMQ virtual host. (string value)
1188 | # Deprecated group/name - [DEFAULT]/rabbit_virtual_host
1189 | #rabbit_virtual_host=/
1190 |
1191 | # How frequently to retry connecting with RabbitMQ. (integer value)
1192 | #rabbit_retry_interval=1
1193 |
1194 | # How long to backoff for between retries when connecting to RabbitMQ.
1195 | # (integer value)
1196 | # Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
1197 | #rabbit_retry_backoff=2
1198 |
1199 | # Maximum number of RabbitMQ connection retries. Default is 0
1200 | # (infinite retry count). (integer value)
1201 | # Deprecated group/name - [DEFAULT]/rabbit_max_retries
1202 | #rabbit_max_retries=0
1203 |
1204 | # Use HA queues in RabbitMQ (x-ha-policy: all). If you change this
1205 | # option, you must wipe the RabbitMQ database. (boolean value)
1206 | # Deprecated group/name - [DEFAULT]/rabbit_ha_queues
1207 | #rabbit_ha_queues=false
1208 |
1209 | # Number of seconds after which the Rabbit broker is considered down
1210 | # if the heartbeat's keep-alive fails (0 disables the heartbeat).
1211 | # EXPERIMENTAL (integer value)
1212 | #heartbeat_timeout_threshold=60
1213 |
1214 | # How many times during the heartbeat_timeout_threshold the heartbeat
1215 | # is checked. (integer value)
1216 | #heartbeat_rate=2
1217 |
1218 | # Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
1219 | # (boolean value)
1220 | # Deprecated group/name - [DEFAULT]/fake_rabbit
1221 | #fake_rabbit=false
1222 |
1223 |
1224 | [oslo_policy]
1225 |
1226 | #
1227 | # From oslo.policy
1228 | #
1229 |
1230 | # The JSON file that defines policies. (string value)
1231 | # Deprecated group/name - [DEFAULT]/policy_file
1232 | #policy_file=policy.json
1233 |
1234 | # Default rule. Enforced when a requested rule is not found. (string
1235 | # value)
1236 | # Deprecated group/name - [DEFAULT]/policy_default_rule
1237 | #policy_default_rule=default
1238 |
1239 | # Directories where policy configuration files are stored. They can be
1240 | # relative to any directory in the search path defined by the
1241 | # config_dir option, or absolute paths. The file defined by
1242 | # policy_file must exist for these directories to be searched.
1243 | # Missing or empty directories are ignored. (multi valued)
1244 | # Deprecated group/name - [DEFAULT]/policy_dirs
1245 | # This option is deprecated for removal.
1246 | # Its value may be silently ignored in the future.
1247 | #policy_dirs=policy.d
1248 |
1249 |
1250 | [paste_deploy]
1251 | flavor = keystone
1252 |
1253 | #
1254 | # From glance.registry
1255 | #
1256 |
1257 | # Partial name of a pipeline in your paste configuration file with the
1258 | # service name removed. For example, if your paste section name is
1259 | # [pipeline:glance-api-keystone] use the value "keystone" (string
1260 | # value)
1261 | #flavor=
1262 |
1263 | # Name of the paste configuration file. (string value)
1264 | #config_file=/usr/share/glance/glance-registry-dist-paste.ini
1265 |
--------------------------------------------------------------------------------
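Note: the template above consists almost entirely of the upstream sample's commented-out defaults; the only option it sets outright is flavor = keystone under [paste_deploy]. For orientation, a minimal sketch of the overrides a deployment would typically template in follows. It assumes the service credentials documented near the top of this excerpt live under [keystone_authtoken], as is standard for glance-registry.conf, and every Jinja2 variable name in it (openstack_rabbit_host, openstack_rabbit_userid, openstack_rabbit_password, glance_keystone_password) is a hypothetical placeholder, not a variable confirmed to exist in this role's defaults.

    # Minimal sketch of typical overrides for glance-registry.conf.j2.
    # All Jinja2 variables below are illustrative placeholders only.
    [keystone_authtoken]
    admin_user = glance
    admin_password = {{ glance_keystone_password }}
    admin_tenant_name = service

    [oslo_messaging_rabbit]
    rabbit_host = {{ openstack_rabbit_host }}
    rabbit_userid = {{ openstack_rabbit_userid }}
    rabbit_password = {{ openstack_rabbit_password }}

    [paste_deploy]
    flavor = keystone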